```
%reload_ext autoreload
%autoreload 2
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import os
import re
import pickle
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib import rc
rc('text', usetex=True)
def bold_text(string):
    return r'\textbf{{{}}}'.format(string)
from IPython.display import Markdown
def printmd(string):
    """Embed the input string into Markdown."""
    display(Markdown(string))
def list_files(startpath):
    level_colours = {0: '#339fff', 1: '#ff5b33'}
    for root, dirs, files in os.walk(startpath):
        if os.path.basename(root) == startpath:
            continue
        level = root.replace(startpath, '').count(os.sep) - 1
        indent = ' ' * 4 * (level)
        printmd('<pre>{}<b><font color={}>{}</font></b></pre>'.format(indent, level_colours[level], os.path.basename(root)))
        if len(files) > 0:
            print('{}{}'.format(indent, files))
```
# Importing data
Explore the contents of the folder with all data files
```
data_folder = 'session_210302'
printmd('**Data contents**')
list_files(data_folder)
```
Store all data in the form ```{(market, treatment): {'deals': df_deals, 'games': df_games, 'offers': df_offers, 'players': df_players}}```
```
all_data = {}
data_types = []
for path, folders, files in os.walk(data_folder):
    for file in files:
        # note: the raw session folder uses Windows-style '\\' path separators
        treatment = tuple(path.split('\\')[1:])
        dtype = re.match(r'^.*_(.*)\.csv.*$', file).group(1)
        data_types.append(dtype)
        if treatment not in all_data.keys():
            all_data[treatment] = {}
        all_data[treatment][dtype] = pd.read_csv('{}\\{}'.format(path, file))
data_types = set(data_types)
```
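The loader above assumes Windows-style ```'\\'``` path separators. A minimal platform-independent sketch of the same loop (a hypothetical variant, not part of the original session) using ```pathlib```:
```
from pathlib import Path
import re
import pandas as pd

all_data_alt = {}  # hypothetical name, to avoid clobbering all_data
for csv_path in Path(data_folder).rglob('*.csv'):
    # treatment key = the subfolders between data_folder and the file,
    # e.g. ('Externalities', 'bystanders_negative')
    treatment = csv_path.parent.relative_to(data_folder).parts
    dtype = re.match(r'^.*_(.*)\.csv.*$', csv_path.name).group(1)
    all_data_alt.setdefault(treatment, {})[dtype] = pd.read_csv(csv_path)
```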
Check whether all .csv files share the same structure and print out the names of their columns
```
for dtype in data_types:
    printmd('**{}**'.format(dtype))
    data = [d[dtype] for d in all_data.values()]
    # wrap in display(): bare expressions inside a loop body are not echoed by IPython
    display(all([(data[0].columns.intersection(df.columns) == data[0].columns).all() for df in data]))
    display(data[0].columns.to_list())
```
Note:\
```var_id```: global id\
```var_iid```: local id
## Game information
```
all_data[('Externalities', 'bystanders_negative')]['games'].columns.to_list()
```
Find all columns with non-constant values
```
for treatment, data in all_data.items():
    print(treatment, list(data['games'].columns[data['games'].nunique() > 1]))
for treatment, data in all_data.items():
    printmd('**{}**'.format(treatment))
    display(data['games'][['game_iid', 'title', 'elapsed_time']])
```
## Player information
```
all_data[('Externalities', 'bystanders_negative')]['players'].columns.to_list()
```
Find all columns with non-constant values
```
for treatment, data in all_data.items():
    print(treatment, list(data['players'].columns[data['players'].nunique() > 1]))
```
## Offer information
```
all_data[('Externalities', 'bystanders_negative')]['offers'].columns.to_list()
for treatment, data in all_data.items():
    printmd('**{}**'.format(treatment))
    data_offers = data['offers']
    print('status: {}'.format(set(data_offers['status'])))
    print('type: {}'.format(set(data_offers['type'])))
    printmd('status == ```Accepted``` if and only if the bid/ask resulted in a deal')
    print(set(data_offers[data_offers['status'] == 'Replaced']['matched_price'].dropna()))
    print(set(data_offers[data_offers['status'] == 'Expired']['matched_price'].dropna()))
    print(set(data_offers[data_offers['matched_price'].notna()]['status']))
    printmd('type == ```Auto``` corresponds to accepting a deal')
    display(data_offers[(data_offers['type'] == 'Auto') & (data_offers['matched_price'].isna())])
```
Add treatment information and remove redundant/unnecessary columns
```
all_data.keys()
treatment_names = {
('Externalities', 'bystanders_negative'): 'FullExtNeg',
('Externalities', 'bystanders_positive'): 'FullExtPos',
('Externalities', 'normal'): 'FullExtNorm',
('LimitedAsks', 'black_box'): 'BBLimS',
('LimitedAsks', 'open_book'): 'FullLimS'
}
for treatment, data in all_data.items():
    #data['offers'].drop(['game_id', 'round_id', 'status'], axis=1, inplace=True)
    # Keep the status column
    data['offers'].drop(['game_id', 'round_id'], axis=1, inplace=True)
    data['offers']['treatment'] = treatment_names[treatment]
    data['offers'].rename({'game_iid': 'game', 'round_iid': 'round', 'amount': 'bid',
                           'player_id': 'id', 'matched_price': 'price'}, axis=1, inplace=True)
```
Add ```match_id``` and ```match_time```
```
for treatment, data in all_data.items():
    for idx, row in data['deals'].iterrows():
        game, rnd, match_time, buyer, seller, askID, bidID, bprice, sprice = row[[
            'game_iid', 'round_iid', 'time', 'buyer_id', 'seller_id', 'ask_id', 'bid_id', 'bprice', 'sprice']]
        game_round = (data['offers']['game'] == game) & (data['offers']['round'] == rnd)
        ask_row = (data['offers']['offer_db_id'] == askID)
        bid_row = (data['offers']['offer_db_id'] == bidID)
        data['offers'].loc[game_round & ask_row, 'match_time'] = match_time
        data['offers'].loc[game_round & ask_row, 'match_id'] = buyer
        data['offers'].loc[game_round & ask_row, 'price_temp'] = sprice
        data['offers'].loc[game_round & bid_row, 'match_time'] = match_time
        data['offers'].loc[game_round & bid_row, 'match_id'] = seller
        data['offers'].loc[game_round & bid_row, 'price_temp'] = bprice
for treatment, data in all_data.items():
    # print explicitly: bare expressions inside a loop body are not echoed by IPython
    print(treatment, data['offers']['price'].equals(data['offers']['price_temp']))
for treatment, data in all_data.items():
    data['offers'].drop(['price_temp'], axis=1, inplace=True)
```
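The row-by-row loop above can also be expressed as a single merge. Below is a hypothetical vectorised equivalent (the helper name ```attach_matches``` is not part of the original notebook); it assumes the renamed ```offers``` columns (```game```, ```round```, ```offer_db_id```) and the raw ```deals``` columns used above, and it omits the ```price_temp``` consistency check:
```
import pandas as pd

def attach_matches(offers, deals):
    # One row per matched offer: an ask inherits the buyer as counterparty,
    # a bid inherits the seller; both inherit the deal time.
    sides = []
    for offer_col, partner_col in [('ask_id', 'buyer_id'), ('bid_id', 'seller_id')]:
        side = deals[['game_iid', 'round_iid', offer_col, partner_col, 'time']].rename(
            columns={'game_iid': 'game', 'round_iid': 'round',
                     offer_col: 'offer_db_id', partner_col: 'match_id',
                     'time': 'match_time'})
        sides.append(side)
    matches = pd.concat(sides, ignore_index=True)
    return offers.merge(matches, on=['game', 'round', 'offer_db_id'], how='left')
```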
Add ```valuation```
```
for treatment, data in all_data.items():
    for (game, idx), dfi in data['offers'].groupby(['game', 'id']):
        val = data['players'][data['players']['player_id'] == idx]['rprice'].values[0]
        data['offers'].loc[dfi.index, 'valuation'] = val
```
Rearrange to match the order in the rest of the data
```
for treatment, data in all_data.items():
    data['offers'] = data['offers'][['treatment', 'game', 'round', 'time', 'id', 'side', 'valuation',
                                     'bid', 'price', 'match_id', 'match_time', 'type', 'status']]
```
# Merging data
Store all datasets in a single dataframe
```
# DataFrame.append was removed in pandas 2.0; build the combined frame with pd.concat
df = pd.DataFrame()
for treatment, data in all_data.items():
    df = pd.concat([df, data['offers']], ignore_index=True)
```
Create globally unique subject IDs
```
# Create globally unique subject IDs
df['old_id'] = df['id']
df['id'] = df.groupby(['treatment', 'game', 'id']).ngroup()
# Update the column with match IDs accordingly
for (treatment, game), df_game in df.groupby(['treatment', 'game']):
    for idx, row in df_game[df_game['match_id'].notna()].iterrows():
        df.loc[idx, 'match_id'] = df_game[df_game['old_id'] == row['match_id']]['id'].iloc[0]
df.drop(columns=['old_id'], axis=1, inplace=True)
```
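The same re-keying pattern is repeated verbatim for the exported subsets in the last section. A hypothetical helper that factors it out (assuming integer subject IDs, as in the raw data) could look like this; ```df = relabel_ids(df)``` would then replace the block above, and likewise for ```df_ext``` and ```df_LimS``` below:
```
def relabel_ids(frame):
    # Re-key subject IDs to be globally unique and translate match_id
    # through the same old -> new mapping within each game.
    out = frame.copy()
    out['old_id'] = out['id']
    out['id'] = out.groupby(['treatment', 'game', 'id']).ngroup()
    for _, df_game in out.groupby(['treatment', 'game']):
        mapping = dict(zip(df_game['old_id'], df_game['id']))
        matched = df_game[df_game['match_id'].notna()]
        out.loc[matched.index, 'match_id'] = [mapping[int(m)] for m in matched['match_id']]
    return out.drop(columns='old_id')
```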
Cast the valuations to ```int```
```
(df['valuation'] % 1 == 0).all()
df['valuation'] = df['valuation'].astype(int)
```
When a buyer and a seller are automatically matched under the first-price mechanism, a new entry with the bid/ask equal to the resulting price is automatically generated for the buyer/seller who submitted their bid/ask last. Remove all such entries and copy the corresponding prices to the last bids/asks those subjects actually submitted.
```
df[['type', 'status']].drop_duplicates()
df.groupby(['type', 'status']).size()
```
The status of type ```Auto``` can only be ```Accepted```
```
set(df[df['type'] == 'Auto']['status'])
```
The status of a bid/ask is set to ```Accepted``` if and only if it results in a deal
```
set(df[df['price'].notna()]['status'])
df[df['status'] == 'Accepted']['price'].isna().any()
```
Each bid–ask pair striking a deal is stored as follows: the first of the two is recorded as ```Manual```, the second as ```Auto```.
```
df_prices = df[df['price'].notna()]
bid_ask_pairs = {'MM': 0, 'MA': 0, 'AA': 0}
for (treatment, game, rnd), dfr in df_prices.groupby(['treatment', 'game', 'round']):
    for row_id, row in dfr.iterrows():
        if row['id'] < row['match_id']:
            id1 = row['id']
            id2 = row['match_id']
            types = {dfr[dfr['id'] == id1]['type'].iloc[0], dfr[dfr['id'] == id2]['type'].iloc[0]}
            if len(types) == 2:
                bid_ask_pairs['MA'] += 1
            elif types == {'Manual'}:
                bid_ask_pairs['MM'] += 1
            else:
                bid_ask_pairs['AA'] += 1
bid_ask_pairs
```
```Auto``` always takes place after ```Manual``` (or, possibly, simultaneously).\
A match is made at most 1 second after a bid and an ask become compatible.
```
times = {'same': 0, 'M then A': 0, 'A then M': 0}
indices = {'M then A': 0, 'A then M': 0}
delays_to_match = []
for (treatment, game, rnd), dfr in df_prices.groupby(['treatment', 'game', 'round']):
    for row_id, row in dfr.iterrows():
        if row['id'] < row['match_id']:
            match = dfr[dfr['id'].isin([row['id'], row['match_id']])]
            types = set(match['type'])
            if len(types) == 2:
                M_time = match[match['type'] == 'Manual']['time'].iloc[0]
                A_time = match[match['type'] == 'Auto']['time'].iloc[0]
                M_id = match[match['type'] == 'Manual'].index
                A_id = match[match['type'] == 'Auto'].index
                if M_time == A_time:
                    times['same'] += 1
                elif M_time < A_time:
                    times['M then A'] += 1
                else:
                    times['A then M'] += 1
                if M_id < A_id:
                    indices['M then A'] += 1
                else:
                    indices['A then M'] += 1
            if int(match['match_time'].iloc[0]) != max(match['time']):
                delays_to_match.append(int(match['match_time'].iloc[0]) - max(match['time']))
times
indices
delays_to_match
```
<font color=blue>The redundant rows (automatic matching enforced by the computer) correspond to ```Auto``` bids/asks following ```Replaced``` bids/asks which were high/low enough to result in a deal</font>
```
df_new = df.copy()
df_new['redundant'] = False
status = {'Accepted': 0, 'Replaced': 0, 'Expired': 0}
for (treatment, game, rnd, idx), dfi in df_new.groupby(['treatment', 'game', 'round', 'id']):
    for row_id, row in dfi.iterrows():
        if row['type'] == 'Auto':
            if len(dfi) > 1:
                preceding = dfi.loc[:row.name].iloc[-2]
                status[preceding['status']] += 1
                if preceding['status'] == 'Replaced':
                    if row['side'] == 'Buyer':
                        if preceding['bid'] >= row['bid']:
                            df_new.loc[row.name, 'redundant'] = True
                            df_new.loc[preceding.name, 'price'] = row['price']
                            df_new.loc[preceding.name, 'match_id'] = row['match_id']
                            df_new.loc[preceding.name, 'match_time'] = row['match_time']
                    else:
                        if preceding['bid'] <= row['bid']:
                            df_new.loc[row.name, 'redundant'] = True
                            df_new.loc[preceding.name, 'price'] = row['price']
                            df_new.loc[preceding.name, 'match_id'] = row['match_id']
                            df_new.loc[preceding.name, 'match_time'] = row['match_time']
status
len(df_new)
len(df)
df_new.drop(['redundant', 'price', 'match_id', 'match_time'], axis=1).equals(df.drop(['price', 'match_id', 'match_time'], axis=1))
df_new = df_new[~df_new['redundant']]
df_new.drop('redundant', axis=1, inplace=True)
len(df_new)
df_new.groupby('type').size()
df_prices = df_new[df_new['price'].notna()]
delays_to_match = []
for (treatment, game, rnd), dfr in df_prices.groupby(['treatment', 'game', 'round']):
    for row_id, row in dfr.iterrows():
        if row['id'] < row['match_id']:
            match = dfr[dfr['id'].isin([row['id'], row['match_id']])]
            if (len(match) != 2) or (match['match_time'].count() != 2) or (match['match_id'].count() != 2) or (match['price'].count() != 2):
                print('Some data is missing')
            if int(match['match_time'].iloc[0]) != max(match['time']):
                delays_to_match.append(int(match['match_time'].iloc[0]) - max(match['time']))
delays_to_match
for treatment, df_treatment in df.groupby('treatment'):
    printmd(treatment)
diff = pd.merge(df, df_new, how='outer', suffixes=('', '_y'), indicator=True)
diff = diff[diff['_merge'] != 'both']
diff.sort_values(['treatment', 'game', 'round', 'time', 'id']).iloc[1:51]
df = df_new.copy()
```
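A hypothetical vectorised restatement of the redundancy rule above (it only flags the redundant ```Auto``` rows; the copying of ```price```, ```match_id``` and ```match_time``` onto the preceding rows is not reproduced here). It relies on the same assumption as the loop, namely that rows within each (treatment, game, round, id) group appear in submission order:
```
import numpy as np

def redundant_mask(offers):
    grp = offers.groupby(['treatment', 'game', 'round', 'id'])
    prev_status = grp['status'].shift(1)
    prev_bid = grp['bid'].shift(1)
    # A Replaced bid/ask preceding the Auto row already crossed the resulting price
    crossed = np.where(offers['side'] == 'Buyer',
                       prev_bid >= offers['bid'],
                       prev_bid <= offers['bid'])
    return (offers['type'] == 'Auto') & (prev_status == 'Replaced') & crossed
```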
# Overview of the data
```
index = pd.MultiIndex.from_tuples(df[['treatment', 'game']].drop_duplicates().itertuples(index=False, name=None),
names=['Treatment', 'Game'])
overview = pd.DataFrame(index=index, columns=['Buyers', 'Sellers', 'Bids', 'Asks'])
for (treatment, game, side), df_side in df.groupby(['treatment', 'game', 'side']):
    if side == 'Buyer':
        overview.loc[(treatment, game), 'Buyers'] = len(set(df_side['id']))
        overview.loc[(treatment, game), 'Bids'] = len(df_side)
    elif side == 'Seller':
        overview.loc[(treatment, game), 'Sellers'] = len(set(df_side['id']))
        overview.loc[(treatment, game), 'Asks'] = len(df_side)
    else:
        print('No side provided.')
overview
```
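For reference, a hypothetical long-format equivalent of the overview table, built with a single grouped aggregation (the column names ```subjects``` and ```offers``` are illustrative):
```
overview_alt = df.groupby(['treatment', 'game', 'side']).agg(
    subjects=('id', 'nunique'),
    offers=('id', 'size'),
)
overview_alt
```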
# Exporting data
## Externalities
```
df_ext = df[df['treatment'].str.contains('Ext')].copy()
```
Create globally unique subject IDs
```
# Create globally unique subject IDs
df_ext['old_id'] = df_ext['id']
df_ext['id'] = df_ext.groupby(['treatment', 'game', 'id']).ngroup()
# Update the column with match IDs accordingly
for (treatment, game), df_game in df_ext.groupby(['treatment', 'game']):
    for idx, row in df_game[df_game['match_id'].notna()].iterrows():
        df_ext.loc[idx, 'match_id'] = df_game[df_game['old_id'] == row['match_id']]['id'].iloc[0]
df_ext.drop(columns=['old_id'], axis=1, inplace=True)
df_ext
df_ext.to_csv('../Data/data_externalities.csv', index=False)
```
## Restricted asks
```
df_LimS = df[df['treatment'].str.contains('LimS')].copy()
```
Create globally unique subject IDs
```
# Create globally unique subject IDs
df_LimS['old_id'] = df_LimS['id']
df_LimS['id'] = df_LimS.groupby(['treatment', 'game', 'id']).ngroup()
# Update the column with match IDs accordingly
for (treatment, game), df_game in df_LimS.groupby(['treatment', 'game']):
    for idx, row in df_game[df_game['match_id'].notna()].iterrows():
        df_LimS.loc[idx, 'match_id'] = df_game[df_game['old_id'] == row['match_id']]['id'].iloc[0]
df_LimS.drop(columns=['old_id'], axis=1, inplace=True)
df_LimS
df_LimS.to_csv('../Data/data_restricted_asks.csv', index=False)
```
```
%load_ext autoreload
%autoreload 2
from aflow.entries import Entry
a = {
"compound": "Be2O2",
"auid":"aflow:ed51b7b3938f117f",
"aurl":"aflowlib.duke.edu:AFLOWDATA/ICSD_WEB/HEX/Be1O1_ICSD_15620",
"agl_thermal_conductivity_300K":"53.361",
"Egap":"7.4494"
}
A = Entry(**a)
A.kpoints
from aflow.caster import _kpoints
_kpoints(r"16,16,8;17,17,9;\Gamma-M,M-K,K-\Gamma,\Gamma-A,A-L,L-H,H-A,L-M,K-H;20")  # raw string: avoid treating \G as an escape sequence
from aflow.keywords import *
from aflow.keywords import reset
reset()
k = ((Egap > 6) | (Egap < 21)) & (PV_cell < 13)
reset()
k1 = ((Egap > 6) | (Egap < 21)) & ((PV_cell < 13) | (PV_cell > 2))
str(k1)
reset()
k3 = ((Egap > 0) & (Egap < 2) | (Egap == 5))
str(k3)
str(PV_cell)
str(~PV_cell)
str(PV_cell)
k = (data_source == 'aflowlib') | (species % 'Si')
str(k)
reset()
k2 = (data_source < 'aflow') & (species < 'Ag')
str(k2)
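# Descriptive summary of the snippets above (comments added for readability;
# semantics inferred from the calls as written, not from the aflow docs):
# - comparison operators on keyword objects (Egap > 6, PV_cell < 13, ...) build filter terms,
# - '|' and '&' combine terms, '~' negates a keyword, and '%' is used for
#   string matching (as in species % 'Si'),
# - str(...) renders the accumulated filter string, and reset() clears the
#   state stored on the keyword objects between experiments.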
%load_ext autoreload
%autoreload 2
import aflow
from aflow.keywords import *
Si = aflow.search(catalog="icsd").filter(species == 'Si').select(positions_cartesian)
for i, entry in enumerate(Si[90:110]):
    print(i, entry.aurl)
sorted(Si.responses[2].keys())
sisl = Si[0:10]
sisl._iter
sisl._iter, sisl._max_entry
len(Si.responses)
for entry in sisl:
print(entry.positions_cartesian)
ss = slice(0, 10)
ss.start, ss.stop, ss.step  # inspect the slice attributes
import json
keys = json.loads("""{"__schema^2__":{"__comment__":["The zeroth element of any object or array in this document is meta.","If last element is null, element parent considered optional.","If last element is '.', element value can be anything.","If last element is '', element value can be nothing.","This document is the AAPI schema, it is self validating and order sensitive.","."],"class":["intended for document organization, defines major section. Must be one of","API only","chemistry","crystal","electronics","thermodynamics","magnetics","scintillation","mechanical","optical properties","other","calculation"],"delimiter":["An ordered set of single character seperators for distinguishing plural type property values",null],"description":["intended for popup help boxes, describes the current property: freeform text","."],"example":["Actual result that may occur in API or search context, developmental: structured text","."],"expression":["intended for materials reports, developmental. Must be one of","declarative","directive","derivative"],"format":["intended for printf style formating of property value: corresponds to the type attribute","."],"inclusion":["intended for search filters and materials reports. Must be one of","mandatory","conditional","optional","forbidden"],"search":[["intended for search and stat, Must be one of","equals -> exact match input (select or freeform) to value","contains -> substring match (select or freeform) in value","range -> bounded match (select or freeform) in value"],"equals","contains","range",null],"status":["Development stage of property. Must be one of","production","development","deprecated","reserved"],"subclass":["intended for document organization, defines minor section","label","calculation parameters","computational resources","version","provenance","real space lattice","bravais lattice of the crystal","point group of the crystal","bravais lattice of the lattice","super lattice","reciprocal space lattice","space group","parameters",""],"syntax":["Actual setting that may be used in API or search context, developmental: structured text","."],"title":["intended for labeling property in document rendering: freeform text (HTML?)","."],"type":["intended for DB and document type handling: must be one of","string","strings","number","numbers"],"units":["units for search filter number in HTML: optional",null],"verification":["Optional list of property references designed to certify that the result is contextually relevant.",null]},"Bravais_lattice_orig":{"__comment__":[""],"description":"Returns the Bravais lattice of the original unrelaxed structure before the calculation.","title":"original bravais lattice","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"optional","expression":"declarative","example":"Bravais_lattice_orig=MCLC","status":"production","syntax":"$aurl/?Bravais_lattice_orig"},"Bravais_lattice_relax":{"__comment__":[""],"description":"Returns the Bravais lattice of the original relaxed structure after the calculation.","title":"relaxed bravais lattice","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"Bravais_lattice_relax=MCLC","status":"production","syntax":"$aurl/?Bravais_lattice_relax"},"Egap":{"__comment__":[""],"description":"Band gap calculated with the approximations and pseudopotentials described by other 
keywords.","title":"energy gap","format":"%s","class":"electronics","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"Egap=2.5","status":"production","syntax":"$aurl/?Egap"},"Egap_fit":{"__comment__":[""],"description":"Simple cross-validated correction (fit) of Egap.","title":"fitted band gap","format":"%s","class":"electronics","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"Egap_fit=3.5","status":"production","syntax":"$aurl/?Egap_fit"},"Egap_type":{"__comment__":[""],"description":"Given a band gap, this keyword describes if the system is a metal, a semi-metal, an insulator with direct or indirect band gap.","title":"band gap type","format":"%s","class":"electronics","subclass":"","type":"string","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"Egap_type=insulator_direct","status":"production","syntax":"$aurl/?Egap_type"},"PV_atom":{"__comment__":[""],"description":"Pressure multiplied by volume of the atom.","title":"atomic pressure*volume","format":"%s","class":"mechanical","subclass":"","type":"number","units":"eV/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"PV_atom=12.13","status":"production","syntax":"$aurl/?PV_atom"},"PV_cell":{"__comment__":[""],"description":"Pressure multiplied by volume of the unit cell.","title":"unit cell pressure*volume","format":"%s","class":"mechanical","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"PV_cell=12.13","status":"production","syntax":"$aurl/?PV_cell"},"Pearson_symbol_orig":{"__comment__":[""],"description":"Returns the Pearson symbol of the original-unrelaxed structure before the calculation.","title":"original pearson symbol","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"mandatory","expression":"declarative","example":"Pearson_symbol_orig=mS32","status":"production","syntax":"$aurl/?Pearson_symbol_orig"},"Pearson_symbol_relax":{"__comment__":[""],"description":"Returns the Pearson symbol of the relaxed structure after the calculation.","title":"relaxed pearson symbol","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"mandatory","expression":"derivative","verification":["stress_tensor"],"example":"Pearson_symbol_relax=mS32","status":"production","syntax":"$aurl/?Pearson_symbol_relax"},"Pulay_stress":{"__comment__":[""],"description":"Returns a metric of the basis set inconsistency for the calculation.","title":"Pulay Stress","format":"%s","class":"mechanical","subclass":"","type":"number","units":"kbar","inclusion":"mandatory","expression":"derivative","example":"pulay_stress=10.0","status":"development","syntax":"$aurl/?pulay_stress"},"Pullay_stress":{"__comment__":[""],"description":"Returns a metric of the basis set inconsistency for the calculation.","title":"Pulay Stress","format":"%s","class":"mechanical","subclass":"","type":"number","units":"kbar","inclusion":"mandatory","expression":"derivative","example":"Pullay_stress=10.0","status":"deprecated","syntax":"$aurl/?Pullay_stress"},"ael_bulk_modulus_reuss":{"__comment__":[""],"description":"Returns the bulk modulus as calculated using the Reuss method with 
AEL.","title":"AEL Reuss bulk modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_bulk_modulus_reuss=105.315","status":"production","syntax":"$aurl/?ael_bulk_modulus_reuss"},"ael_bulk_modulus_voigt":{"__comment__":[""],"description":"Returns the bulk modulus as calculated using the Voigt method with AEL.","title":"AEL Voigt bulk modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_bulk_modulus_voiht=105.315","status":"production","syntax":"$aurl/?ael_bulk_modulus_voigt"},"ael_bulk_modulus_vrh":{"__comment__":[""],"description":"Returns the bulk modulus as calculated using the Voigt-Reuss-Hill average with AEL.","title":"AEL VRH bulk modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_bulk_modulus_vrh=105.315","status":"production","syntax":"$aurl/?ael_bulk_modulus_vrh"},"ael_elastic_anisotropy":{"__comment__":[""],"description":"Returns the elastic anisotropy as calculated with AEL.","title":"AEL elastic anisotropy","format":"%s","class":"mechanical","subclass":"","type":"number","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_elastic_anisotropy=0.0008165","status":"production","syntax":"$aurl/?ael_elastic_anisotropy"},"ael_poisson_ratio":{"__comment__":[""],"description":"Returns the istropic Poisson ratio as calculated with AEL.","title":"AEL Poisson ratio","format":"%s","class":"mechanical","subclass":"","type":"number","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_poisson_ratio=0.216","status":"production","syntax":"$aurl/?ael_poisson_ratio"},"ael_shear_modulus_reuss":{"__comment__":[""],"description":"Returns the shear modulus as calculated using the Reuss method with AEL.","title":"AEL Reuss shear modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_shear_modulus_reuss=73.787","status":"production","syntax":"$aurl/?ael_shear_modulus_reuss"},"ael_shear_modulus_voigt":{"__comment__":[""],"description":"Returns the shear modulus as calculated using the Voigt method with AEL.","title":"AEL Voigt shear modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_shear_modulus_voigt=73.799","status":"production","syntax":"$aurl/?ael_shear_modulus_voigt"},"ael_shear_modulus_vrh":{"__comment__":[""],"description":"Returns the shear modulus as calculated using the Voigt-Reuss-Hill average with AEL.","title":"AEL VRH shear modulus","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"ael_shear_modulus_vrh=73.793","status":"production","syntax":"$aurl/?ael_shear_modulus_vrh"},"aflow_version":{"__comment__":[""],"description":"Returns the version number of AFLOW used to perform the calculation.","title":"aflow 
version","format":"%s","class":"calculation","subclass":"version","type":"string","inclusion":"optional","expression":"declarative","example":"aflow_version=aflow30641","status":"production","syntax":"$aurl/?aflow_version"},"aflowlib_date":{"__comment__":[""],"description":"Returns the date of the AFLOW post-processor which generated the entry for the library.","title":"material generation date","format":"%s","class":"calculation","subclass":"version","type":"string","inclusion":"optional","expression":"declarative","example":"aflowlib_date=20140204_13:10:39_GMT-5","status":"production","syntax":"$aurl/?aflowlib_date"},"aflowlib_entries":{"__comment__":[""],"description":"For projects and set-layer entries, aflowlib_entries lists the available sub-entries which are associated with the $aurl of the subdirectories. By parsing $aurl/?aflowlib_entries (containing $aurl/aflowlib_entries_number entries) the user finds further locations to interrogate.","title":"aflowlib entries","format":"%s","class":"API only","subclass":"","type":"strings","delimiter":",","inclusion":"conditional","expression":"directive","example":"aflowlib_entries=AgAl,AgAs,AgAu,AgB_h,AgBa_sv,AgBe_sv,AgBi_d,AgBr,AgCa_sv,...","status":"production","syntax":"$aurl/?aflowlib_entries"},"aflowlib_entries_number":{"__comment__":[""],"description":"For projects and set-layer entries, aflowlib_entrieslists the available sub-entries which are associated with the $aurl of the subdirectories. By parsing $aurl/?aflowlib_entries (containing $aurl/aflowlib_entries_number entries) the user finds further locations to interrogate.","title":"aflowlib entry count","format":"%s","class":"API only","subclass":"","type":"number","inclusion":"conditional","expression":"directive","example":"aflowlib_entries_number=654","status":"production","syntax":"$aurl/?aflowlib_entries_number"},"aflowlib_version":{"__comment__":[""],"description":"Returns the version of the AFLOW post-processor which generated the entry for the library.","title":"aflowlib version","format":"%s","class":"calculation","subclass":"version","type":"string","inclusion":"optional","expression":"declarative","example":"aflowlib_version=3.1.103","status":"production","syntax":"$aurl/?aflowlib_version"},"agl_acoustic_debye":{"__comment__":[""],"description":"Returns the acoustic Debye temperature as calculated with AGL.","title":"AGL acoustic Debye temperature","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"K","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_acoustic_debye=492","status":"production","syntax":"$aurl/?agl_acoustic_debye"},"agl_bulk_modulus_isothermal_300K":{"__comment__":[""],"description":"Returns the isothermal bulk modulus at 300K as calculated with AGL.","title":"AGL isothermal bulk modulus 300K","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_bulk_modulus_isothermal_300K=96.6","status":"production","syntax":"$aurl/?agl_bulk_modulus_isothermal_300K"},"agl_bulk_modulus_static_300K":{"__comment__":[""],"description":"Returns the static bulk modulus at 300K as calculated with AGL.","title":"AGL static bulk modulus 
300K","format":"%s","class":"mechanical","subclass":"","type":"number","units":"GPa","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_bulk_modulus_static_300K=99.6","status":"production","syntax":"$aurl/?agl_bulk_modulus_static_300K"},"agl_debye":{"__comment__":[""],"description":"Returns the Debye temperature as calculated with AGL.","title":"AGL Debye temperature","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"K","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_debye=620","status":"production","syntax":"$aurl/?agl_debye"},"agl_gruneisen":{"__comment__":[""],"description":"Returns the Gruneisen parameter as calculated with AGL.","title":"AGL Gruneisen parameter","format":"%s","class":"thermodynamics","subclass":"","type":"number","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_gruneisen=2.06","status":"production","syntax":"$aurl/?agl_gruneisen"},"agl_heat_capacity_Cp_300K":{"__comment__":[""],"description":"Returns the heat capacity at constant pressure as calculated with AGL at 300K.","title":"AGL heat capacity Cp","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"kB/cell","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_heat_capacity_Cp_300K=5.502","status":"production","syntax":"$aurl/?agl_heat_capacity_Cp_300K"},"agl_heat_capacity_Cv_300K":{"__comment__":[""],"description":"Returns the heat capacity at constant volume as calculated with AGL at 300K.","title":"AGL heat capacity Cv","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"kB/cell","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_heat_capacity_Cv_300K=4.901","status":"production","syntax":"$aurl/?agl_heat_capacity_Cv_300K"},"agl_thermal_conductivity_300K":{"__comment__":[""],"description":"Returns the thermal conductivity as calculated with AGL at 300K.","title":"AGL thermal conductivity","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"W/m*K","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_thermal_conductivity_300K=24.41","status":"production","syntax":"$aurl/?agl_thermal_conductivity_300K"},"agl_thermal_expansion_300K":{"__comment__":[""],"description":"Returns the thermal expansion as calculated with AGL at 300K.","title":"AGL thermal expansion","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"1/K","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"agl_thermal_expansion_300K=4.997e-05","status":"production","syntax":"$aurl/?agl_thermal_expansion_300K"},"auid":{"__comment__":[""],"description":"AFLOWLIB Unique Identifier for the entry, AUID, which can be used as a publishable object identifier.","title":"AFLOWLIB Unique Identifier","format":"%s","class":"calculation","subclass":"","type":"string","inclusion":"mandatory","expression":"declarative","example":"auid=aflow:e9c6d914c4b8d9ca","status":"production","syntax":"$aurl/?auid"},"aurl":{"__comment__":[""],"description":"AFLOWLIB Uniform Resource Locator returns the AURL of the entry.","title":"AFLOWLIB Uniform Resource 
Locator","format":"%s","class":"calculation","subclass":"","type":"string","inclusion":"mandatory","expression":"declarative","example":"aurl=aflowlib.duke.edu:AFLOWDATA/LIB3_RAW/Bi_dRh_pvTi_sv/T0003.ABC:LDAU2","status":"production","syntax":"$aurl/?aurl"},"author":{"__comment__":[""],"description":"Returns the name (not necessarily an individual) and affiliation associated with authorship of the data.","title":"author","format":"%s","class":"calculation","subclass":"provenance","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"author=Marco_Buongiorno_Nardelli,Ohad_Levy,Jesus_Carrete","status":"development","syntax":"$aurl/?author"},"bader_atomic_volumes":{"__comment__":[""],"description":"Returns the volume of each atom of the primitive cell as calculated by the Bader Atoms in Molecules Analysis. This volume encapsulates the electron density associated with each atom above a threshold of 0.0001 electrons.","title":"atomic volume per atom","format":"%s","class":"chemistry","subclass":"","type":"numbers","delimiter":",","units":"Å<sup>3</sup>","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"bader_atomic_volumes=15.235,12.581,13.009","status":"production","syntax":"$aurl/?bader_atomic_volumes"},"bader_net_charges":{"__comment__":[""],"description":"Returns a comma delimited set of partial charges per atom of the primitive cell as calculated by the Bader Atoms in Molecules Analysis.","title":"partial charge per atom","format":"%s","class":"chemistry","subclass":"","type":"numbers","delimiter":",","units":"electrons","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"bader_net_charges=0.125,0.125,-0.25","status":"production","syntax":"$aurl/?bader_net_charges"},"calculation_cores":{"__comment__":[""],"description":"Number of processors/cores used for the calculation.","title":"used CPU cores","format":"%s","class":"calculation","subclass":"computational resources","type":"number","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"calculation_cores=32","status":"production","syntax":"$aurl/?calculation_cores"},"calculation_memory":{"__comment__":[""],"description":"The maximum memory used for the calculation.","title":"used RAM","format":"%s","class":"calculation","subclass":"computational resources","type":"number","units":"Megabytes","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"calculation_memory=32","status":"production","syntax":"$aurl/?calculation_memory"},"calculation_time":{"__comment__":[""],"description":"Total time taken for the calculation.","title":"used time","format":"%s","class":"calculation","subclass":"computational resources","type":"number","units":"seconds","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"calculation_time=32","status":"production","syntax":"$aurl/?calculation_time"},"catalog":{"__comment__":[""],"description":"Returns the context set for the calculation.","title":"catalog","format":"%s","class":"calculation","subclass":"version","type":"string","inclusion":"optional","expression":"declarative","example":"catalog=icsd","status":"production","syntax":"$aurl/?catalog"},"code":{"__comment__":[""],"description":"Returns the software name and version used to perform the simulation.","title":"ab initio 
code","format":"%s","class":"calculation","subclass":"version","type":"string","inclusion":"optional","expression":"declarative","example":"code=vasp.4.6.35","status":"production","syntax":"$aurl/?code"},"composition":{"__comment__":[""],"description":"Returns a comma delimited composition description of the structure entry in the calculated cell.","title":"composition","format":"%s","class":"chemistry","subclass":"","type":"numbers","delimiter":",","inclusion":"optional","expression":"declarative","example":"composition=2,6,6","status":"production","syntax":"$aurl/?composition"},"compound":{"__comment__":[""],"description":"Returns the composition description of the compound in the calculated cell.","title":"chemical formula","format":"%s","class":"chemistry","subclass":"","type":"string","inclusion":"mandatory","expression":"declarative","example":"compound=Co2Er6Si6","status":"production","syntax":"$aurl/?compound"},"corresponding":{"__comment__":[""],"description":"Returns the name (not necessarily an individual) and affiliation associated with the data origin concerning correspondence about data.","title":"coresponding","format":"%s","class":"calculation","subclass":"provenance","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"[email protected]","status":"development","syntax":"$aurl/?corresponding"},"data_api":{"__comment__":[""],"description":"AFLOWLIB version of the entry, API.}","title":"REST API version","format":"%s","class":"API only","subclass":"","type":"string","inclusion":"mandatory","expression":"declarative","example":"data_api=aapi1.0","status":"production","syntax":"$aurl/?data_api"},"data_language":{"__comment__":[""],"description":"Gives the language of the data in AFLOWLIB.","title":"data language","format":"%s","class":"calculation","subclass":"version","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"data_language=aflowlib","status":"production","syntax":"$aurl/?data_language"},"data_source":{"__comment__":[""],"description":"Gives the source of the data in AFLOWLIB.","title":"data source","format":"%s","class":"calculation","subclass":"version","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"data_source=aflowlib","status":"production","syntax":"$aurl/?data_source"},"delta_electronic_energy_convergence":{"__comment__":[""],"description":"Returns the change in energy from the last step of the convergence iteration.","title":"Electronic Energy of Convergence Step","format":"%s","class":"calculation","subclass":"","type":"number","inclusion":"optional","expression":"derivative","example":"delta_electronic_energy_convergence=6.09588e-05","status":"development","syntax":"$aurl/?delta_electronic_energy_convergence"},"delta_electronic_energy_threshold":{"__comment__":[""],"description":"Returns the maximimum change in energy required for the convergence iteration.","title":"Electronic Energy of Convergence Threshold","format":"%s","class":"calculation","subclass":"","type":"number","inclusion":"optional","expression":"declarative","example":"delta_electronic_energy_threshold=0.0001","status":"development","syntax":"$aurl/?delta_electronic_energy_threshold"},"density":{"__comment__":[""],"description":"Returns the mass density in grams/cm3.","title":"mass density","format":"%s","class":"chemistry","subclass":"real space 
lattice","type":"number","units":"grams/cm<sup>3</sup>","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"example":"density=7.76665","status":"production","syntax":"$aurl/?density"},"dft_type":{"__comment__":[""],"description":"Returns information about the pseudopotential type, the exchange correlation functional used (normal or hybrid) and use of GW.","title":"DFT type","format":"%s","class":"chemistry","subclass":"parameters","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"dft_type=PAW_PBE,HSE06","status":"production","syntax":"$aurl/?dft_type"},"eentropy_atom":{"__comment__":[""],"description":"Returns the electronic entropy of the atom used to converge the ab initio calculation (smearing).","title":"atomistic electronic entropy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV/atom","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"eentropy_atom=0.0011","status":"production","syntax":"$aurl/?eentropy_atom"},"eentropy_cell":{"__comment__":[""],"description":"Returns the electronic entropy of the unit cell used to converge the ab initio calculation (smearing).","title":"unit cell electronic entropy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV/atom","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"eentropy_cell=0.0011","status":"production","syntax":"$aurl/?eentropy_cell"},"energy_atom":{"__comment__":[""],"description":"Returns the total ab initio energy per atom- the value of energy_cell/$N$).","title":"atomic energy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"example":"energy_atom=-82.1656","status":"production","syntax":"$aurl/?energy_atom"},"energy_cell":{"__comment__":[""],"description":"Returns the total ab initio energy of the unit cell, E. 
At T=0K and p=0, this is the internal energy of the system (per unit cell).","title":"unit cell energy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"example":"energy_cell=-82.1656","status":"production","syntax":"$aurl/?energy_cell"},"energy_cutoff":{"__comment__":[""],"description":"Set of energy cut-offs used during the various steps of the calculations.","title":"energy cutoff","format":"%s","class":"calculation","subclass":"parameters","type":"numbers","delimiter":",","units":"eV","inclusion":"optional","expression":"declarative","example":"energy_cutoff=384.1,384.1,384.1","status":"production","syntax":"$aurl/?energy_cutoff"},"enthalpy_atom":{"__comment__":[""],"description":"Returns the enthalpy per atom- the value of enthalpy_cell/N).","title":"atomic enthalpy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"example":"enthalpy_atom=-82.1656","status":"production","syntax":"$aurl/?enthalpy_atom"},"enthalpy_cell":{"__comment__":[""],"description":"Returns the enthalpy of the system of the unit cell, H = E + PV.","title":"unit cell enthalpy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"example":"enthalpy_cell=-82.1656","status":"production","syntax":"$aurl/?enthalpy_cell"},"enthalpy_formation_atom":{"__comment__":[""],"description":"Returns the formation enthalpy DeltaHFatomic per atom).","title":"atomic formation enthalpy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"enthalpy_formation_atom=-33.1587","status":"production","syntax":"$aurl/?enthalpy_formation_atom"},"enthalpy_formation_cell":{"__comment__":[""],"description":"Returns the formation enthalpy DeltaHF per unit cell.","title":"unit cell formation enthalpy","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"eV","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"enthalpy_formation_cell=-33.1587","status":"production","syntax":"$aurl/?enthalpy_formation_cell"},"entropic_temperature":{"__comment__":[""],"description":"Returns the entropic temperature for the structure.","title":"entropic temperature","format":"%s","class":"thermodynamics","subclass":"","type":"number","units":"Kelvin","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"entropic_temperature=1072.1","status":"production","syntax":"$aurl/?entropic_temperature"},"files":{"__comment__":[""],"description":"Provides access to the input and output files used in the simulation (provenance data).","title":"I/O files","format":"%s","class":"calculation","subclass":"","type":"strings","delimiter":",","inclusion":"conditional","expression":"directive","example":"files=Bi_dRh_pv.33.cif,Bi_dRh_pv.33.png,CONTCAR.relax,CONTCAR.relax1,","status":"production","syntax":"$aurl/?files"},"forces":{"__comment__":[""],"description":"Final quantum mechanical forces (Fi,Fj,Fk) in the notation of the code.","title":"Quantum 
Forces","format":"%s","class":"mechanical","subclass":"","type":"numbers","delimiter":";,","units":"eV/Å","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"forces=0,-0.023928,0.000197;0,0.023928,-0.000197;...","status":"development","syntax":"$aurl/?forces"},"geometry":{"__comment__":[""],"description":"Returns geometrical data describing the unit cell in the usual a,b,c,alpha,beta,gamma notation.","title":"unit cell basis","format":"%s","class":"chemistry","subclass":"real space lattice","type":"numbers","delimiter":",","units":"Å","inclusion":"mandatory","expression":"declarative","example":"geometry=18.82,18.82,18.82,32.41,32.41,32.41","status":"production","verification":["energy_cutoff","kpoints","pressure_residual","stress_tensor"],"syntax":"$aurl/?geometry"},"keywords":{"__comment__":[""],"description":"This includes the list of keywords available in the entry, separated by commas.","title":"Title","format":"%s","class":"API only","subclass":"","type":"strings","delimiter":",","inclusion":"mandatory","expression":"directive","example":"keywords=aurl,auid,loop,code,compound,prototype,nspecies,natoms,...","status":"production","syntax":"$aurl/?keywords"},"kpoints":{"__comment__":[""],"description":"Set of k-point meshes uniquely identifying the various steps of the calculations, e.g. relaxation, static and electronic band structure (specifying the k-space symmetry points of the structure).","title":"K-point mesh","format":"%s","class":"calculation","subclass":"parameters","type":"numbers","delimiter":":,","inclusion":"optional","expression":"declarative","example":"kpoints=10,10,10;16,16,16;G-X-W-K-G-L-U-W-L-K+U-X","status":"production","syntax":"$aurl/?kpoints"},"lattice_system_orig":{"__comment__":[""],"description":"Return the lattice system and lattice variation (Brillouin zone) of the original-unrelaxed structure before the calculation.","title":"original lattice system","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"mandatory","expression":"declarative","example":"lattice_system_orig=rhombohedral","status":"production","syntax":"$aurl/?lattice_system_orig"},"lattice_system_relax":{"__comment__":[""],"description":"Return the lattice system and lattice variation (Brillouin zone) of the relaxed structure after the calculation.","title":"relaxed lattice system","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"lattice_system_relax=rhombohedral","status":"production","syntax":"$aurl/?lattice_system_relax"},"lattice_variation_orig":{"__comment__":[""],"description":"Return the lattice system and lattice variation (Brillouin zone) of the original-unrelaxed structure before the calculation.","title":"original lattice variation","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"string","inclusion":"mandatory","expression":"declarative","example":"lattice_variation_orig=rhombohedral","status":"production","syntax":"$aurl/?lattice_variation_orig"},"lattice_variation_relax":{"__comment__":[""],"description":"Return the lattice system and lattice variation (Brillouin zone) of the relaxed structure after the calculation.","title":"relaxed lattice variation","format":"%s","class":"crystal","subclass":"bravais lattice of the 
crystal","type":"string","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"lattice_variation_relax=rhombohedral","status":"production","syntax":"$aurl/?lattice_variation_relax"},"ldau_TLUJ":{"__comment__":[""],"description":"This vector of numbers contains the parameters of the DFT+U calculations, based on a corrective functional inspired by the Hubbard model.","title":"on site coulomb interaction","format":"%s","class":"chemistry","subclass":"parameters","type":"numbers","delimiter":";,","inclusion":"mandatory","expression":"declarative","example":"ldau_TLUJ=2;2,0,0;5,0,0;0,0,0","status":"development","syntax":"$aurl/?ldau_TLUJ"},"loop":{"__comment__":[""],"description":"Informs the user of the type of post-processing that was performed.","title":"process category","format":"%s","class":"calculation","subclass":"parameters","type":"strings","delimiter":",","inclusion":"optional","expression":"directive","example":"loop=thermodynamics,bands,magnetic","status":"production","syntax":"$aurl/?loop"},"natoms":{"__comment__":[""],"description":"Returns the number of atoms in the unit cell of the structure entry. The number can be non integer if partial occupation is considered within appropriate approximations.","title":"unit cell atom count","format":"%s","class":"crystal","subclass":"real space lattice","type":"number","inclusion":"mandatory","expression":"declarative","example":"natoms=12","status":"production","syntax":"$aurl/?natoms"},"nbondxx":{"__comment__":[""],"description":"Nearest neighbors bond lengths of the relaxed structure per ordered set of species Ai,Aj greater than or equal to i.","title":"Nearest neighbors bond lengths","format":"%s","class":"crystal","subclass":"","type":"numbers","delimiter":",","units":"Å","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","forces","kpoints","pressure_residual","stress_tensor"],"example":"nbondxx=1.2599,1.0911,1.0911,1.7818,1.2599,1.7818","status":"production","syntax":"$aurl/?nbondxx"},"node_CPU_Cores":{"__comment__":[""],"description":"Information about the number of cores in the node/cluster where the calculation was performed.","title":"available CPU cores","format":"%s","class":"calculation","subclass":"computational resources","type":"number","inclusion":"optional","expression":"declarative","example":"node_CPU_Cores=12","status":"production","syntax":"$aurl/?node_CPU_Cores"},"node_CPU_MHz":{"__comment__":[""],"description":"Information about the CPU speed in the node/cluster where the calculation was performed.","title":"CPU rate","format":"%s","class":"calculation","subclass":"computational resources","type":"number","units":"Megahertz","inclusion":"optional","expression":"declarative","example":"node_CPU_MHz=12","status":"production","syntax":"$aurl/?node_CPU_MHz"},"node_CPU_Model":{"__comment__":[""],"description":"Information about the CPU model in the node/cluster where the calculation was performed.","title":"CPU model","format":"%s","class":"calculation","subclass":"computational resources","type":"string","inclusion":"optional","expression":"declarative","example":"node_CPU_Model=12","status":"production","syntax":"$aurl/?node_CPU_Model"},"node_RAM_GB":{"__comment__":[""],"description":"Information about the RAM in the node/cluster where the calculation was performed.","title":"available 
RAM","format":"%s","class":"calculation","subclass":"","type":"number","units":"Gigabytes","inclusion":"optional","expression":"declarative","example":"node_RAM_GB=12","status":"production","syntax":"$aurl/?node_RAM_GB"},"nspecies":{"__comment__":[""],"description":"Returns the number of species in the system (e.g., binary = 2, ternary = 3, etc.).","title":"species count","format":"%s","class":"chemistry","subclass":"","type":"number","inclusion":"mandatory","expression":"declarative","example":"nspecies=3","status":"production","syntax":"$aurl/?nspecies"},"positions_cartesian":{"__comment__":[""],"description":"Final Cartesian positions (xi,xj,xk) in the notation of the code.","title":"relaxed absolute positions","format":"%s","class":"other","subclass":"bravais lattice of the crystal","type":"numbers","delimiter":";,","units":"Å","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","pressure_residual","stress_tensor"],"example":"positions_cartesian=0,0,0;18.18438,0,2.85027;...","status":"development","syntax":"$aurl/?positions_cartesian"},"positions_fractional":{"__comment__":[""],"description":"Final fractional positions (xi,xj,xk) with respect to the unit cell as specified in $geometry.","title":"relaxed relative positions","format":"%s","class":"other","subclass":"","type":"numbers","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","pressure_residual","stress_tensor"],"example":"positions_fractional=0,0,0;0.25,0.25,0.25;...","status":"development","syntax":"$aurl/?positions_fractional"},"pressure":{"__comment__":[""],"description":"Returns the target pressure selected for the simulation.","title":"external pressure","format":"%s","class":"mechanical","subclass":"","type":"number","units":"kbar","inclusion":"mandatory","expression":"declarative","example":"pressure=10.0","status":"production","syntax":"$aurl/?pressure"},"pressure_final":{"__comment__":[""],"description":"Returns the external pressure achieved by the simulation.","title":"resulting pressure","format":"%s","class":"mechanical","subclass":"","type":"number","units":"kbar","inclusion":"mandatory","expression":"derivative","example":"pressure_final=10.0","status":"development","syntax":"$aurl/?pressure_final"},"pressure_residual":{"__comment__":[""],"description":"Returns the external pressure achieved by the simulation.","title":"residual pressure","format":"%s","class":"mechanical","subclass":"","type":"number","units":"kbar","inclusion":"mandatory","expression":"derivative","example":"pressure_residual=10.0","status":"development","syntax":"$aurl/?pressure_residual"},"prototype":{"__comment__":[""],"description":"Returns the AFLOW unrelaxed prototype which was used for the calculation.","title":"original prototype","format":"%s","class":"crystal","subclass":"label","type":"string","inclusion":"mandatory","expression":"declarative","example":"prototype=T0001.A2BC","status":"production","syntax":"$aurl/?prototype"},"scintillation_attenuation_length":{"__comment__":[""],"description":"Returns the scintillation attenuation length of the compound in cm.","title":"attenuation length","format":"%s","class":"scintillation","subclass":"","type":"number","units":"cm","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"scintillation_attenuation_length=2.21895","status":"production","syntax":"$aurl/?scintillation_attenuation_length"},"sg":{"__comment__":[""],"description":"Evolution 
of the space group of the compound. The first, second and third string represent space group name/number before the first, after the first, and after the last relaxation of the calculation.","title":"compound space group","format":"%s","class":"crystal","subclass":"space group","type":"strings","delimiter":",","inclusion":"mandatory","expression":"directive","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"sg=Fm-3m#225,Fm-3m#225,Fm-3m#225","status":"production","syntax":"$aurl/?sg"},"sg2":{"__comment__":[""],"description":"Evolution of the space group of the compound. The first, second and third string represent space group name/number before the first, after the first, and after the last relaxation of the calculation.","title":"refined compound space group","format":"%s","class":"crystal","subclass":"space group","type":"strings","delimiter":",","inclusion":"mandatory","expression":"directive","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"sg2=Fm-3m#225,Fm-3m#225,Fm-3m#225","status":"production","syntax":"$aurl/?sg2"},"spacegroup_orig":{"__comment__":[""],"description":"Returns the spacegroup number of the original-unrelaxed structure before the calculation.","title":"original space group number","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"number","inclusion":"mandatory","expression":"declarative","example":"spacegroup_orig=225","status":"production","syntax":"$aurl/?spacegroup_orig"},"spacegroup_relax":{"__comment__":[""],"description":"Returns the spacegroup number of the relaxed structure after the calculation.","title":"relaxed space group number","format":"%s","class":"crystal","subclass":"bravais lattice of the crystal","type":"number","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","stress_tensor"],"example":"spacegroup_relax=225","status":"production","syntax":"$aurl/?spacegroup_relax"},"species":{"__comment__":[""],"description":"Species of the atoms in this material.","title":"atomic species","format":"%s","class":"chemistry","subclass":"","type":"strings","delimiter":",","inclusion":"mandatory","expression":"declarative","example":"species=Y,Zn,Zr","status":"production","syntax":"$aurl/?species"},"species_pp":{"__comment__":[""],"description":"Pseudopotentials of the atomic species.","title":"species pseudopotential(s)","format":"%s","class":"chemistry","subclass":"","type":"strings","delimiter":",","inclusion":"mandatory","expression":"declarative","example":"species_pp=Y,Zn,Zr","status":"production","syntax":"$aurl/?species_pp"},"species_pp_ZVAL":{"__comment__":[""],"description":"Returns the number of valence electrons of the atomic species.","title":"valence atoms per species","format":"%s","class":"calculation","subclass":"","type":"numbers","delimiter":",","units":"electrons","inclusion":"optional","expression":"declarative","example":"species_pp_ZVAL=3","status":"production","syntax":"$aurl/?species_pp_ZVAL"},"species_pp_version":{"__comment__":[""],"description":"Species of the atoms, pseudopotentials species, and pseudopotential versions.","title":"pseudopotential species/version","format":"%s","class":"chemistry","subclass":"","type":"strings","delimiter":",","inclusion":"mandatory","expression":"declarative","example":"species_pp_version=Y,Zn,Zr","status":"production","syntax":"$aurl/?species_pp_version"},"spinD":{"__comment__":[""],"description":"For spin polarized calculations, the spin decomposition over the 
atoms of the cell.","title":"atomic spin decomposition","format":"%s","class":"magnetics","subclass":"","type":"numbers","delimiter":",","units":"μ<sub>B</sub>","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"spinD=0.236,0.236,-0.023,1.005","status":"production","syntax":"$aurl/?spinD"},"spinF":{"__comment__":[""],"description":"For spin polarized calculations, the magnetization of the cell at the Fermi level.","title":"fermi level spin decomposition","format":"%s","class":"magnetics","subclass":"","type":"number","units":"μ<sub>B</sub>","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"spinF=0.410879","status":"production","syntax":"$aurl/?spinF"},"spin_atom":{"__comment__":[""],"description":"For spin polarized calculations, the magnetization per atom.","title":"atomic spin polarization","format":"%s","class":"magnetics","subclass":"","type":"number","units":"μ<sub>B</sub>/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"spin_atom=2.16419","status":"production","syntax":"$aurl/?spin_atom"},"spin_cell":{"__comment__":[""],"description":"For spin polarized calculations, the total magnetization of the cell.","title":"unit cell spin polarization","format":"%s","class":"magnetics","subclass":"","type":"number","units":"μ<sub>B</sub>","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"spin_cell=2.16419","status":"production","syntax":"$aurl/?spin_cell"},"sponsor":{"__comment__":[""],"description":"Returns information about funding agencies and other sponsors for the data.","title":"sponsor","format":"%s","class":"calculation","subclass":"provenance","type":"strings","delimiter":",","inclusion":"optional","expression":"declarative","example":"sponsor=DOD_N000141310635,NIST_70NANB12H163","status":"development","syntax":"$aurl/?sponsor"},"stoich":{"__comment__":[""],"description":"Similar to composition, returns a comma delimited stoichiometry description of the structure entry in the calculated cell.","title":"unit cell stoichiometry","format":"%s","class":"chemistry","subclass":"","type":"numbers","delimiter":",","inclusion":"optional","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"stoichiometry=0.5,0.25,0.25","status":"deprecated","syntax":"$aurl/?stoichiometry"},"stoichiometry":{"__comment__":[""],"description":"Similar to composition, returns a comma delimited stoichiometry description of the structure entry in the calculated cell.","title":"unit cell stoichiometry","format":"%s","class":"chemistry","subclass":"","type":"numbers","delimiter":",","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"stoichiometry=0.5,0.25,0.25","status":"production","syntax":"$aurl/?stoichiometry"},"stress_tensor":{"__comment__":[""],"description":"Returns the stress tensor of the completed calculation.","title":"Stress Tensor","format":"%s","class":"mechanical","subclass":"","type":"numbers","inclusion":"mandatory","expression":"derivative","example":"stress_tensor=-0.96,-0,-0,-0,-0.96,-0,-0,-0,-0.96","status":"development","syntax":"$aurl/?stress_tensor"},"valence_cell_iupac":{"__comment__":[""],"description":"Returns IUPAC valence, the maximum number of univalent atoms that may combine with the atoms.","title":"unit cell IUPAC 
valence","format":"%s","class":"chemistry","subclass":"","type":"number","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"valence_cell_iupac=22","status":"production","syntax":"$aurl/?valence_cell_iupac"},"valence_cell_std":{"__comment__":[""],"description":"Returns standard valence, the maximum number of univalent atoms that may combine with the atoms.","title":"unit cell standard valence","format":"%s","class":"chemistry","subclass":"","type":"number","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","kpoints"],"example":"valence_cell_std=22","status":"production","syntax":"$aurl/?valence_cell_std"},"volume_atom":{"__comment__":[""],"description":"Returns the volume per atom in the unit cell.","title":"atomic volume","format":"%s","class":"crystal","subclass":"real space lattice","type":"number","units":"Å<sup>3</sup>/atom","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","pressure_residual","stress_tensor"],"example":"volume_atom=100.984","status":"production","syntax":"$aurl/?volume_atom"},"volume_cell":{"__comment__":[""],"description":"Returns the volume of the unit cell.","title":"unit cell volume","format":"%s","class":"crystal","subclass":"real space lattice","type":"number","units":"Å<sup>3</sup>","inclusion":"mandatory","expression":"derivative","verification":["energy_cutoff","forces","kpoints","pressure_residual","stress_tensor"],"example":"volume_cell=100.984","status":"production","syntax":"$aurl/?volume_cell"}}""")
keys["energy_cutoff"]
from aflow.entries import Entry
hasattr(Entry, "Egap")
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
from keras.datasets import mnist
# Digit recognition when data is in 'pixel form'
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# Shape of the pictures
X_test[4,:,:].shape
df = pd.DataFrame(X_train[0,:,:])
df
img = X_test[30,:,:]
plt.imshow(img, cmap = 'gray')
plt.title(y_test[30]) # label of the displayed test image
plt.axis('off')
from keras.utils import to_categorical
X_train_new = X_train[:10000,:,:]
y_train_new = y_train[:10000]
X_test_new = X_test[:2500,:,:]
y_test_new = y_test[:2500]
y_train_new = to_categorical(y_train_new, 10)
y_test_new = to_categorical(y_test_new, 10)
X_train_new = X_train_new.reshape(10000, 28, 28, 1)
X_test_new = X_test_new.reshape(2500, 28, 28, 1)
y_test_new.shape
# Convolutional Neural Network for identifying the digits
from keras.models import Sequential
from keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
model = Sequential()
model.add(Conv2D(32, kernel_size = (3, 3), input_shape = (28,28,1), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Conv2D(32, kernel_size = (3, 3), activation = 'relu'))
model.add(MaxPooling2D((2,2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(10, activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
model.fit(X_train_new, y_train_new, epochs = 15, batch_size = 128, validation_data = (X_test_new, y_test_new), verbose = 1)
score = model.evaluate(X_test_new, y_test_new, verbose = 0)
print('Test loss: ', score[0])
print('Accuracy: ', score[1])
# Plotting the training loss
accuracy = model.history.history
plt.figure(figsize = (9,7))
plt.plot(accuracy['loss'], lw = 2)
plt.title('Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
from sklearn.metrics import confusion_matrix, classification_report
prediction = model.predict(X_test_new)
prediction_classes = np.argmax(prediction, axis = 1)
y_true = np.argmax(y_test_new, axis = 1)
cm = confusion_matrix(y_true, prediction_classes)
print(classification_report(y_true, prediction_classes))
import seaborn as sns
plt.figure(figsize = (10,8))
sns.heatmap(cm, annot = True, cmap = 'viridis')
#b, t = plt.ylim()
#plt.ylim(b + 0.5, t - 0.5)
plt.title('Confusion Matrix')
# New prediction
new_sample = X_test_new[11:12,:,:,:]
new_sample.shape
new_pred = model.predict(new_sample)
new_pred = new_pred.ravel()
np.argmax(new_pred, axis = 0)
# Saving model for reproduction
# model.save('conv_model.h5')
from keras.models import load_model
reconstructed_model = load_model('conv_model.h5')
# Let's check:
# np.testing.assert_allclose(model.predict(new_sample), reconstructed_model.predict(new_sample))
# The reconstructed model is already compiled and has retained the optimizer
# state, so training can resume:
# reconstructed_model.fit(test_input, test_target)
# Creating my own digit picture using Paint
# Let's import them with the Pillow library
from PIL import Image
import matplotlib.image as mpimg
image = Image.open('numbers/number_eight.jpg')
image = image.resize((28, 28))
#image.save('numbers28/28X28number_eight.jpg')
image = mpimg.imread('numbers28/number00.jpg')
plt.imshow(image)
# Converting from RGB to grayscale and making prediction
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
gray = rgb2gray(image)
gray = gray.reshape(1, 28, 28, 1)
gray_pred = reconstructed_model.predict(gray)
print('Predicted value:', np.argmax(gray_pred))
import matplotlib.image as mpimg
def rgb2gray(rgb):
return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])
def image():
files = [f for f in os.listdir('numbers/') if f.endswith('.jpg')]
predictions = []
for i in range(len(files)):
image = Image.open('numbers/' + files[i])
image = image.resize((28, 28))
image.save('numbers28/number0' + str(i) + '.jpg')
image = mpimg.imread('numbers28/number0' + str(i) + '.jpg')
gray = rgb2gray(image)
gray = gray.reshape(1, 28, 28, 1)
gray_pred = reconstructed_model.predict(gray)
predictions.append(gray_pred.argmax())
return predictions, image
def plot_images(predictions, images):
truth = [8, 5, 4, 9, 1, 7, 6, 3, 2, 0]
plt.figure(figsize = (12, 6))
for i in range(len(truth)):
plt.subplot(2, 5, i+1)
plt.axis('off')
image = mpimg.imread('numbers28/number0' + str(i) + '.jpg')
color = 'green' if truth[i] == predictions[i] else 'red'
plt.imshow(image)
plt.title('Predicted value:\n' + str(predictions[i]), size = 12, color = color)
plt.subplots_adjust(wspace = 0.2)
return plt.show()
predictions, images = image()
plot_images(predictions, images)
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from pyod.utils.data import generate_data, get_outliers_inliers
import warnings
warnings.filterwarnings('ignore')
```
## Generating sample data
OCSVM is an unsupervised learning method and one of the approaches used for novelty detection.
Therefore, the model is trained under the assumption that all of the training data are normal.
The sample data are generated as follows:
- Generate sample data with the PyOD library, setting the true outlier ratio to 5% of the data.
- Split the full dataset into training and test sets.
```
X, y = generate_data(random_state = 42, train_only = True, contamination = 0.05)  # features and ground-truth labels
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
```
## Model fitting
As mentioned above, OCSVM does not require labels, so the model is fitted using only the feature data.
```
clf = svm.OneClassSVM(nu = 0.1, kernel = 'rbf', gamma = 0.1)
clf.fit(X_train) # Unsupervised Learning Method
```
## Label classification with the fitted model
```
class OCSVM:
def __init__(self, nu, kernel, gamma):
self.nu = nu
self.kernel = kernel
self.gamma = gamma
self.result_df = pd.DataFrame()
self.clf = svm.OneClassSVM(nu = self.nu, kernel = self.kernel, gamma = self.gamma)
def fit(self, X_train, ground_truth):
self.X_train = X_train
self.y_train = ground_truth
self.clf.fit(self.X_train)
return self.clf
def predict(self, X_test, is_return = False):
self.X_test = X_test
self.prediction = self.clf.predict(self.X_test)
if is_return:
return self.prediction
def visualization(self):
self.result_df['X1'] = self.X_train[:, 0]
self.result_df['X2'] = self.X_train[:, 1]
self.result_df['Prediction'] = pd.Series(self.prediction).apply(lambda x: 0 if x == 1 else 1)
self.result_df['Actual'] = self.y_train
xx, yy = np.meshgrid(np.linspace(self.result_df['X1'].min() - 1, self.result_df['X1'].max() + 1, 500),
np.linspace(self.result_df['X2'].min() - 1, self.result_df['X2'].max() + 1, 500))
z = self.clf.decision_function(np.c_[xx.ravel(), yy.ravel()])  # use the fitted estimator stored on the instance
z = z.reshape(xx.shape)
plt.title("Novelty Detection\nNu = {}, Kernel = {}, Gamma = {}".format(self.nu, self.kernel, self.gamma))
plt.contourf(xx, yy, z, levels = np.linspace(z.min(), 0, 7), cmap = plt.cm.PuBu)
a = plt.contour(xx, yy, z, levels = [0], linewidths = 2, colors = 'darkred')
plt.contourf(xx, yy, z, levels=[0, z.max()], colors='palevioletred')
s = 40
b1 = plt.scatter(self.X_train[:, 0], self.X_train[:, 1], c = 'white', s = s, edgecolors = 'k')
outlier = plt.scatter(self.result_df.loc[self.result_df['Prediction'] == 1]['X1'], self.result_df.loc[self.result_df['Prediction'] == 1]['X2'],
c = 'red', edgecolor = 'k')
actual = plt.scatter(self.result_df.loc[self.result_df['Actual'] == 1]['X1'], self.result_df.loc[self.result_df['Actual'] == 1]['X2'],
c = 'gold', edgecolor = 'k', alpha = 0.8)
plt.axis('tight')
plt.xlim((self.result_df['X1'].min() - 1, self.result_df['X1'].max() + 1))
plt.ylim((self.result_df['X2'].min() - 1, self.result_df['X2'].max() + 1))
plt.show()
nu = 0.1
kernel = 'rbf'
gamma = 0.007
model = OCSVM(nu = nu, kernel = kernel, gamma = gamma)
model.fit(X_train, y_train)
model.predict(X_train)
```
## Visualization
```
model.visualization()
```
As the plot shows, the `nu` hyperparameter of OCSVM plays a role similar to `C` in a standard SVM. Put differently, it is an upper bound on the fraction of misclassified training points. For example, setting `nu = 0.05` means that at most about 5% of the training data will be misclassified.
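To make this interpretation of `nu` concrete, here is a small self-contained sketch (on synthetic data, independent of the variables above; the exact fractions will vary): for each value of `nu`, the share of training points that a fitted `OneClassSVM` flags as outliers stays close to `nu`.
```
# Sanity check: fraction of training points flagged as outliers vs. nu
import numpy as np
from sklearn import svm

rng = np.random.RandomState(42)
X_demo = rng.randn(500, 2)  # synthetic "normal" training data

for nu_value in [0.01, 0.05, 0.1, 0.2]:
    clf_demo = svm.OneClassSVM(nu = nu_value, kernel = 'rbf', gamma = 0.1)
    clf_demo.fit(X_demo)
    outlier_frac = np.mean(clf_demo.predict(X_demo) == -1)
    print(f'nu = {nu_value:.2f} -> flagged as outliers: {outlier_frac:.3f}')
```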
|
github_jupyter
|
```
from regular_expression_visualization.visualize_reg import search_pattern
```
search_pattern is a helper function that cross-matches several regular expressions against several strings. It visualizes the result by surrounding the matched substring with a red border. Only the first matched substring is bordered.
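If the `regular_expression_visualization` package is not available, a rough text-only stand-in can be sketched with Python's built-in `re` module (a hypothetical helper, not the actual implementation: it only prints the first match per pattern/string pair instead of drawing a red border):
```
# Minimal text-only stand-in for search_pattern (prints first-match spans)
import re

def search_pattern(patterns, strings):
    for pattern in patterns:
        for string in strings:
            match = re.search(pattern, string)
            shown = f"'{match.group()}' at {match.span()}" if match else 'no match'
            print(f'pattern {pattern!r} on {string!r}: {shown}')
```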
## Simple pattern
```
patterns = [
'ee', # exactly ee
'ea', # exactly ea
'ai',
'aa'
]
strings = ['tee', 'tea', 'bail']
search_pattern(patterns, strings)
```
## One of the pattern
Use ```|``` to separate several patterns
```
patterns = [
'ee|ea|ai', # ee or ea or ai
]
strings = ['tee', 'tea', 'bail']
search_pattern(patterns, strings)
```
Pattern order matters
```
patterns = [
'oo|ooo', # oo is tried first
'ooo|oo', # ooo is tried first
]
strings = ['loong', 'looong', 'long']
search_pattern(patterns, strings)
```
When "one of pattern" is followed by or following other regular expressions, use () to seperate to seperate from them
```
patterns = [
'b(ea|ee)', # b + (ea or ee)
'bea|ee' # bea or ee
]
strings = ['bead', 'bee']
search_pattern(patterns, strings)
```
## Qualifiers
### appear m to n times
Use ```{m,n}```
```
patterns = [
'ooo', # o, three times
'o{3}', # o, three times
'o{2,3}', # o, 2 to 3 times
'o{2, 3}', # Not working! Don't put a space after the comma!
'o{2,}', # o, 2 or more times
'lo{,3}', # l + o, o appears 0 to 3 times
'o{,3}', # seems not working alone
]
strings = ['looong', 'long', 'loong']
search_pattern(patterns, strings)
```
### appear at least once
```
patterns = [
'o+n', # o, at least 1 time
'o{1,}n'# same as above
]
strings = ['looong', 'long', 'bug']
search_pattern(patterns, strings)
```
### appear zero or more times
```
patterns = [
'lo*ng', # long, o appears zero or more times
'lo{0,}ng' # same as above
]
strings = ['long', 'lng', 'loong', 'leong']
search_pattern(patterns, strings)
```
### appear zero or one time
```
patterns = [
'apples?', # apple, ending s may not appear
'apples{0,1}' # same as above
]
strings = ['apple', 'apples']
search_pattern(patterns, strings)
```
## Sub expression
use ```()```
```
patterns = [
'ba(na){2}', # ba + na, na appears two times
'banana', # same as above
'bana{2}', # ban + a, a appears 2 times
'banaa', # same as above
]
strings = ['banana', 'banaa']
search_pattern(patterns, strings)
patterns = [
'(a+_+){2}', # two consecutive substrings matching a+_+; they are not necessarily the same string
'a+_+a+_+', # same as above
'a+_+'
]
strings = ['aa_a__', 'a_', 'a__a_a_']
search_pattern(patterns, strings)
```
## Character Set
### Any character
```.``` stands for any character
```
patterns = [
'b.d', # b + any character + d
'be..' # b + e + any character + any character
]
strings = ['bed', 'bid','bee', 'benign', 'beed']
search_pattern(patterns, strings)
```
### Any character in a set
Use ```[...]```
```
patterns = [
'b[ei]d', # b + e or i + d
'bed|bid' # same as above
]
strings = ['bed', 'bid', 'bee', 'bud']
search_pattern(patterns, strings)
```
Use ```-``` for character range
```
patterns = [
'id_[0-5]', # id_ + any number in 0 to 5
'id_[012345]' # same as above
]
strings = ['id_1', 'id_6']
search_pattern(patterns, strings)
patterns = [
'type_[a-ex]', # type_ + any character in range a to e and x,
'type_[abcdex]', # same as above
'type_[a-zA-Z]' # any letter
]
strings = ['type_a', 'type_b', 'type_x', 'type_Z']
search_pattern(patterns, strings)
```
### Any character not in set
Use ```[^...]```
```
patterns = [
'type_[^a-z]' # type_ + any character not in a to z
]
strings = ['type_1', 'type_a', 'type_c']
search_pattern(patterns, strings)
```
### Any number
Use ```\d```
```
patterns = [
'id_\d\d', # id_ + any number character + any number character
'id_[0-9][0-9]' # same as above
]
strings = ['id_12', 'id_0', 'id']
search_pattern(patterns, strings)
```
### Any non-number character
Use ```\D```
```
patterns = [
'e\D', # e + any character which is not number character
'e[^0-9]' # same as above
]
strings = ['bee', 'tel', 'te1']
search_pattern(patterns, strings)
```
### Any word characters
Use ```\w```; a word character means a-z, A-Z, 0-9 and _
```
patterns = [
'\w+', # any word character, one or more times
'[a-zA-Z0-9_]+' # same as above
]
strings = [':id_1.']
search_pattern(patterns, strings)
```
### Any non-word characters
Use ```\W```
```
patterns = [
'\W+', # any non-word character, one or more times
'[^a-zA-Z0-9_]+'# same as above
]
strings = ['id_1 + id_2']
search_pattern(patterns, strings)
```
### Any space
Use ```\s```
```
patterns = [
'\s.*\s', # blank + any string + blank
'[\t\n\f\r ].*[\t\n\f\r ]' # same as above
]
strings = ['Monkey D Luffy']
search_pattern(patterns, strings)
```
### Any non-space character
```
patterns = [
'\S.+\S', # any character except space + any string + any character except space
'[^\t\n\f\r ].*[^\t\n\f\r ]' # same as above
]
strings = ['on the\ntree']
search_pattern(patterns, strings)
```
## Escaping
As you have seen, many characters like ```(```, ```.```, ```+``` have special meanings in regular expressions. If you want to disable these meanings and search for the literal characters, add ```\``` before them
```
patterns = [
'($\d+.\d+)', # ( $ . keep their special meanings, so the literal text is not matched
'\(\$\d+\.\d+\)' # escaped: ( $ . are matched as literal characters
]
strings = ['apple ($3.25)']
search_pattern(patterns, strings)
```
## Anchor
Anchors are searched for but won't be part of the matching result
### followed by
Use ```(?=...)```
```
patterns = [
'\w+(?=\.)', # word character string followed by a period; the period is not returned in the matching result
'\w+\.' # the period is returned in the matching result
]
strings = ['Apple juice.']
search_pattern(patterns, strings)
```
### Not followed by
Use ```(?!...)```
```
patterns = [
'\w+(?!\.)', # word character string, not followed by a period
'\w+[^\.]' # word character string, followed by any character which is not a period
]
strings = ['Apple juice.']
search_pattern(patterns, strings)
```
### Following
Use ```(?<=...)```
```
patterns = [
'(?<=:)\d+', # number character string, following :
':\d+' # : + number character string
]
strings = ['apple:10']
search_pattern(patterns, strings)
```
### not following
Use ```(?<!...)```
```
patterns = [
'(?<!A)\d+', # number character string, not preceded by A
'[^A]\d+' # any character except A + number character string
]
strings = ['A123 123']
search_pattern(patterns, strings)
```
### border of word
```
patterns = [
r'\beat\b', # eat surrounded by border of word, (whole word searching)
'eat' #
]
strings = ['I eat food', 'beat']
search_pattern(patterns, strings)
```
Why use ```r``` in ```r'\beat\b'```? ```\b``` in Python has a special meaning (just as ```+``` has a special meaning in regular expressions): it represents a backspace character [(see here)](https://stackoverflow.com/questions/25065608/what-does-backward-slash-b-do-in-python).
To disable this behaviour, add ```r``` in front of the string (just like we add ```\``` before ```+``` in a regular expression).
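A quick sketch of the difference (independent of ```search_pattern```, using ```re``` directly):
```
# '\b' in a normal string is the backspace character (\x08), so the regex
# engine never sees a word-boundary anchor; the raw string keeps the backslash.
import re

print(re.search('\beat\b', 'I eat food'))   # None: the pattern contains backspaces
print(re.search(r'\beat\b', 'I eat food'))  # matches 'eat' as a whole word
print(re.search(r'\beat\b', 'beats'))       # None: 'eat' sits inside a word
```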
### not border of word
```
patterns = [
r'\Beat\B', # eat, not preceded or followed by a word border (appears within a word)
'eat'
]
strings = ['I eat food', 'beats']
search_pattern(patterns, strings)
```
## Exercises
Try to match valid email addresses
```
patterns = [
'^[^\d@][\w+\.]+@\w+(\.com)?\.(cn|com|org)',
]
strings = [
'[email protected]',
'fredness7@@hotmail.com',
'frendess7@htcom',
'[email protected]',
'@ht.com.cn',
'[email protected]',
'@[email protected]',
]
search_pattern(patterns, strings)
```
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
from __future__ import division
import pickle
import os
from collections import defaultdict
import types
import numpy as np
import pandas as pd
from statsmodels.stats.anova import AnovaRM
import statsmodels.api as sm
from sensei.envs import GridWorldNavEnv, GuideEnv
from sensei import utils
from sensei import ase
from sensei.gw_user_study import HumanGridWorldUser
from sensei.guide_models import GridWorldGuide
from matplotlib import pyplot as plt
import matplotlib as mpl
%matplotlib inline
mpl.rcParams.update({'font.size': 18})
data_dir = utils.gw_human_data_dir
fig_dir = os.path.join(data_dir, 'figures')
if not os.path.exists(fig_dir):
os.makedirs(fig_dir)
user_ids = [str(i) for i in range(12) if str(i) in os.listdir(data_dir)]
baseline_guide_evals_of_user = {}
train_logs_of_user = {}
for user_id in user_ids:
user_data_dir = os.path.join(data_dir, user_id)
baselines_eval_path = os.path.join(user_data_dir, 'guide_evals.pkl')
with open(baselines_eval_path, 'rb') as f:
baseline_guide_evals = pickle.load(f)
train_logs_path = os.path.join(user_data_dir, 'train_logs.pkl')
with open(train_logs_path, 'rb') as f:
train_logs = pickle.load(f)
baseline_guide_evals_of_user[user_id] = baseline_guide_evals
train_logs_of_user[user_id] = train_logs
perf_of_guide = {}
rollouts_of_guide = defaultdict(list)
for user_id, baseline_guide_evals in baseline_guide_evals_of_user.items():
for guide_name, guide_eval in baseline_guide_evals.items():
rollouts = guide_eval['rollouts']
rollouts_of_guide[guide_name].extend(rollouts)
for guide_name, guide_eval_rollouts in rollouts_of_guide.items():
perf = utils.compute_perf_metrics(guide_eval_rollouts, None, max_ep_len=25)
perf_of_guide[guide_name] = perf
plt.xlabel('Time')
plt.ylabel('Distance to Goal')
plt.title('2D Navigation')
for guide_name in ['iden', 'naive', 'learned']:
perf = perf_of_guide[guide_name]
tilts = perf['dist_to_goal_t']
tilt_stderrs = perf['dist_to_goal_stderr_t']
label = utils.label_of_guide[guide_name]
color = utils.color_of_guide[guide_name]
xs = np.arange(0, len(tilts), 1)
ys = np.array(tilts)
yerrs = np.array(tilt_stderrs)
y_mins = ys - yerrs
y_maxs = ys + yerrs
plt.fill_between(
xs,
y_mins,
y_maxs,
where=y_maxs >= y_mins,
interpolate=False,
label=label,
color=color,
alpha=0.5)
plt.plot(xs, ys, color=color)
plt.legend(loc='upper right', prop={'size': 18})
plt.savefig(os.path.join(fig_dir, 'gw-user-study.pdf'), bbox_inches='tight')
plt.show()
n_users = len(baseline_guide_evals_of_user)
depvar = 'response'
subject = 'user_id'
within = 'condition'
metrics = ['rollout_len']
for metric in metrics:
rows = []
for user_id, baseline_guide_evals in baseline_guide_evals_of_user.items():
rows.append({subject: user_id, depvar: baseline_guide_evals['iden']['perf'][metric], within: 'unassisted'})
rows.append({subject: user_id, depvar: baseline_guide_evals['learned']['perf'][metric], within: 'assisted'})
data = pd.DataFrame(rows)
aovrm = AnovaRM(data=data, depvar=depvar, subject=subject, within=[within])
res = aovrm.fit()
print(res)
questions = [
'I was often able to infer my current position and orientation',
'I was often able to move toward the goal',
'I often found the guidance helpful',
'I relied primarily on the most recent guidance to infer my current position and orientation',
'I relied primarily on past guidance and recent movements to infer my current position and orientation',
'I often forgot which position and orientation I believed was in'
]
responses = [
[[6, 5, 6, 4, 7, 5], [6, 6, 6, 7, 4, 7], [7, 7, 7, 7, 3, 1]],
[[7, 6, 7, 7, 3, 2], [5, 5, 4, 3, 6, 5], [7, 7, 7, 7, 3, 1]],
[[5, 6, 6, 6, 4, 4], [6, 6, 6, 6, 5, 3], [7, 7, 7, 6, 5, 1]],
[[6, 6, 6, 6, 2, 4], [6, 6, 6, 6, 3, 4], [7, 7, 7, 7, 2, 1]],
[[2, 3, 6, 5, 6, 5], [6, 6, 6, 5, 6, 2], [7, 7, 7, 5, 7, 1]],
[[5, 5, 7, 6, 6, 3], [6, 6, 6, 6, 6, 1], [7, 7, 7, 7, 6, 1]],
[[6, 6, 6, 1, 6, 1], [6, 6, 6, 1, 6, 2], [7, 7, 7, 1, 6, 1]],
[[6, 6, 6, 2, 6, 2], [5, 6, 5, 4, 6, 3], [7, 7, 7, 7, 5, 2]],
[[5, 4, 4, 3, 6, 3], [4, 4, 3, 2, 6, 3], [6, 6, 7, 4, 6, 2]],
[[6, 7, 6, 5, 5, 5], [6, 7, 6, 5, 5, 4], [7, 7, 6, 6, 4, 4]],
[[7, 7, 7, 4, 4, 1], [7, 4, 7, 6, 6, 2], [7, 7, 7, 7, 2, 1]],
[[5, 5, 5, 4, 4, 3], [5, 5, 5, 4, 5, 3], [6, 6, 7, 6, 3, 1]],
]
n_users = len(responses)
n_phases = len(responses[0])
responses_of_q = [[[np.nan for _ in range(n_users)] for _ in questions] for _ in range(n_phases)]
for phase_idx in range(n_phases):
for user_idx, user_responses in enumerate(responses):
for q_idx, response in enumerate(responses[user_idx][phase_idx]):
responses_of_q[phase_idx][q_idx][user_idx] = response
# one-way repeated measures ANOVA with the presence of assistance as a factor influencing responses
n_users = len(responses)
depvar = 'response'
subject = 'user_id'
within = 'condition'
assistant_labels = [
'\\multirow{4}{*}{\\rotatebox[origin=c]{90}{Naive ASE}}',
'\\multirow{4}{*}{\\rotatebox[origin=c]{90}{ASE}}'
]
for assisted_phase in [1, 2]:
for i, q in enumerate(questions):
if i == 0:
assistant_label = assistant_labels[assisted_phase-1]
else:
assistant_label = ''
rows = []
for user_id in user_ids:
user_id = int(user_id)
rows.append({subject: user_id, depvar: responses_of_q[0][i][user_id], within: 'unassisted'})
rows.append({subject: user_id, depvar: responses_of_q[assisted_phase][i][user_id], within: 'assisted'})
data = pd.DataFrame(rows)
aovrm = AnovaRM(data=data, depvar=depvar, subject=subject, within=[within])
res = aovrm.fit()
p = res.anova_table['Pr > F'].values[0]
print('%s & %s & $%s%s%s$ & %0.2f & %s%0.2f%s \\\\' % (assistant_label, q, '\\mathbf{' if p < 0.05 else '', utils.discretize_p_value(p), '}' if p < 0.05 else '', np.nanmean(responses_of_q[0][i]), '\\textbf{' if p < 0.05 else '', np.nanmean(responses_of_q[assisted_phase][i]), '}' if p < 0.05 else ''))
if assisted_phase == 1:
print('\midrule')
guide_names = ['prac', 'iden', 'learned']
n_rollouts_of_guide = {
'prac': 3,
'iden': 5,
'learned': 5
}
perfs_of_guide = {guide_name: [[] for _ in range(n_rollouts)] for guide_name, n_rollouts in n_rollouts_of_guide.items()}
for guide_name, n_rollouts in n_rollouts_of_guide.items():
for i in range(n_rollouts):
for baseline_guide_evals in baseline_guide_evals_of_user.values():
rollouts = [baseline_guide_evals[guide_name]['rollouts'][i]]
if guide_name == 'iden':
rollouts.append(baseline_guide_evals['naive']['rollouts'][i])
for rollout in rollouts:
perf = utils.compute_perf_metrics(rollouts, None, max_ep_len=25)
perfs_of_guide[guide_name][i].append(perf)
metric = 'rollout_len'
plt.xlabel('Episode Number')
plt.ylabel(utils.label_of_perf_met[metric])
plt.title('2D Navigation')
guide_names = ['iden', 'learned']
for i, guide_name in enumerate(guide_names):
perfs = perfs_of_guide[guide_name]
all_perfs = [user_perf[metric] for perf in perfs for user_perf in perf]
if guide_name == 'learned':
label = 'ASE (Our Method)'
elif guide_name == 'iden':
label = 'Unassisted + Naive ASE (counterbalanced)'
else:
label = utils.label_of_guide[guide_name]
color = utils.color_of_guide[guide_name]
shift = sum(len(perfs_of_guide[guide_names[j]]) for j in range(i))
n_users = len(perfs[0])
xs = np.tile(np.arange(1 + shift, 1 + len(perfs) + shift, 1), n_users)
ys = np.array(all_perfs)
plt.scatter(xs, ys, color=color, alpha=0.25)
results = sm.OLS(ys,sm.add_constant(xs - shift - 1)).fit()
X_plot = np.linspace(1, len(perfs), 100)
plt.plot(X_plot + shift, X_plot*results.params[1] + results.params[0], label=label, color=color, linestyle='--', linewidth=2)
xs = np.arange(1 + shift, 1 + len(perfs) + shift, 1)
ys = np.array([np.mean([user_perf[metric] for user_perf in perf]) for perf in perfs])
stderr = lambda x: np.std(x) / np.sqrt(len(x))
yerrs = np.array([stderr([user_perf[metric] for user_perf in perf]) for perf in perfs])
plt.legend(loc='upper left', prop={'size': 12}, bbox_to_anchor=(0.025, -0.2))
plt.savefig(os.path.join(fig_dir, 'gw-user-study-learning-effect.pdf'), bbox_inches='tight')
plt.show()
gw_size = 5
n_goals = gw_size**2
n_states = 4*gw_size**2
n_objes_per_set = gw_size**2
n_obj_instances_of_set = [1, 2, 1]
n_obj_sets = len(n_obj_instances_of_set)
n_objes = n_objes_per_set*n_obj_sets
n_obses = n_objes + n_obj_sets
ground_truth = np.zeros((n_obses, n_states))
ticks = np.arange(0, gw_size, 1)
poses = utils.enumerate_gw_poses(ticks, ticks)
poses_of_obs = [[] for _ in range(n_obses)]
for obj_set in range(n_obj_sets):
for obj in range(n_objes_per_set):
obs = obj_set*(n_objes_per_set+1)+obj
obj_poses = [poses[obj*4]]
for i in range(1, n_obj_instances_of_set[obj_set]):
obj_poses.append(poses[np.random.choice(list(range(n_objes_per_set)))*4])
poses_of_obs[obs] = obj_poses
for obj_pos in obj_poses:
for state, user_pos in enumerate(poses):
conds = []
conds.append(obj_pos[0] == user_pos[0] and obj_pos[1] == user_pos[1] + 1 and user_pos[2] == 2)
conds.append(obj_pos[0] == user_pos[0] and obj_pos[1] == user_pos[1] - 1 and user_pos[2] == 0)
conds.append(obj_pos[1] == user_pos[1] and obj_pos[0] == user_pos[0] + 1 and user_pos[2] == 3)
conds.append(obj_pos[1] == user_pos[1] and obj_pos[0] == user_pos[0] - 1 and user_pos[2] == 1)
if any(conds):
ground_truth[obs, state] = 1
for obj_set in range(n_obj_sets):
obs = obj_set*(n_objes_per_set+1)+n_objes_per_set
for state, user_pos in enumerate(poses):
conds = []
conds.append(user_pos[0] == 0 and user_pos[2] == 1)
conds.append(user_pos[0] == gw_size - 1 and user_pos[2] == 3)
conds.append(user_pos[1] == 0 and user_pos[2] == 0)
conds.append(user_pos[1] == gw_size - 1 and user_pos[2] == 2)
if any(conds):
ground_truth[obs, state] = 1
ground_truth = utils.smooth_matrix(ground_truth, n_states, eps=1e-6)
ground_truth_obs_model = np.log(ground_truth)
max_ep_len = gw_size**2
env = GridWorldNavEnv(
gw_size=gw_size,
n_goals=n_goals,
max_ep_len=max_ep_len,
ground_truth_obs_model=ground_truth_obs_model
)
env.n_objes_per_set = n_objes_per_set
env.n_obj_sets = n_obj_sets
def is_obs_informative(self, obs):
n_uninf_obses = self.n_obses // self.n_obj_sets
return obs >= n_uninf_obses
env.is_obs_informative = types.MethodType(is_obs_informative, env)
env.practice = False
def set_practice_mode(self, mode):
self.practice = mode
env.set_practice_mode = types.MethodType(set_practice_mode, env)
sess = utils.make_tf_session(gpu_mode=False)
masked_obses = np.arange(0, env.n_obses // env.n_obj_sets, 1)
internal = np.exp(env.ground_truth_obs_model)
obs_weights = np.ones(env.n_obses)
for obs in masked_obses:
obs_weights[obs] = 1e-6
internal = utils.smooth_matrix(internal, env.n_obses, eps=(1-obs_weights[:, np.newaxis]))
internal = np.log(internal)
internal_obs_model = internal
user_init_belief_conf = 1e-9
user_model = HumanGridWorldUser(
env,
internal_obs_model,
env.make_dynamics_model(eps=1e-6),
q_func=env.Q,
init_belief_conf=user_init_belief_conf
)
guide_env = GuideEnv(env, user_model, n_obs_per_act=1)
def get_theta_of_user(user_id):
user_data_dir = os.path.join(utils.gw_human_data_dir, user_id)
init_belief_conf = 1-1e-9
dynamics_model = env.make_dynamics_model(eps=1e-9)
internal_dynamics_model = env.make_dynamics_model(eps=0.1)
tabular_obs_model_kwargs = {
'scope_file': os.path.join(user_data_dir, 'guide_scope.pkl'),
'tf_file': os.path.join(user_data_dir, 'guide.tf'),
'user_init_belief_conf': user_init_belief_conf,
'obs_params_only': True,
'prior_coeff': 0.,
'warm_start': False
}
guide_train_kwargs = {
'iterations': 1000,
'ftol': 1e-6,
'batch_size': 32,
'learning_rate': 1e-2,
'val_update_freq': 100,
'verbose': True,
'show_plots': False
}
guide_model = GridWorldGuide(
sess,
env,
env.ground_truth_obs_model,
dynamics_model,
env.Q,
n_obs_per_act=guide_env.n_obs_per_act,
prior_internal_obs_model=env.ground_truth_obs_model,
internal_dynamics_model=internal_dynamics_model,
tabular_obs_model_kwargs=tabular_obs_model_kwargs,
learn_internal_obs_model=True,
init_belief_conf=init_belief_conf,
user_init_belief_conf=user_init_belief_conf
)
guide_evals = baseline_guide_evals_of_user[user_id]
init_train_rollouts = guide_evals['iden']['rollouts']
guide_optimizer = ase.InteractiveGuideOptimizer(sess, env, guide_env)
guide_optimizer.run(
guide_model,
n_train_batches=0,
n_rollouts_per_batch=0,
guide_train_kwargs={'iterations': 0, 'verbose': False},
verbose=True,
init_train_rollouts=init_train_rollouts,
n_eval_rollouts=None
)
guide_model.load()
theta = sess.run(guide_model.internal_obs_model.obs_weights)[0, 0, 0]
return theta
thetas = [get_theta_of_user(user_id) for user_id in user_ids]
thetas
plt.title('2D Navigation')
plt.xlabel(r'Learned Model of User Bias $\hat{\theta}$')
plt.ylabel('Number of Users')
plt.hist(thetas, bins=20, color='orange', label='ASE (Our Method)', align='left')
plt.hist(np.ones(len(thetas)), bins=20, color='teal', label='Naive ASE (Baseline)', align='left')
plt.axvline(x=0, linestyle='--', color='black', label='Ground Truth')
plt.xlim([-0.1, 1.1])
plt.yticks(range(0, 14, 2))
plt.legend(loc='upper center')
plt.savefig(os.path.join(fig_dir, 'gw-learned-theta.pdf'), bbox_inches='tight', dpi=500)
plt.show()
```
|
github_jupyter
|
# Hugging Face Transformers with `Pytorch`
### Text Classification Example using vanilla `Pytorch`, `Transformers`, `Datasets`
# Introduction
Welcome to this end-to-end multilingual text-classification example using PyTorch. In this demo, we will use the Hugging Face `transformers` and `datasets` libraries together with `Pytorch` to fine-tune a multilingual transformer for text classification. This example is a derived version of the [text-classification.ipynb](https://github.com/philschmid/transformers-pytorch-text-classification/blob/main/text-classification.ipynb) notebook and uses Amazon SageMaker for distributed training. In the [text-classification.ipynb](https://github.com/philschmid/transformers-pytorch-text-classification/blob/main/text-classification.ipynb) notebook we showed how to fine-tune `distilbert-base-multilingual-cased` on the `amazon_reviews_multi` dataset for `sentiment-analysis`. This dataset has over 1.2 million data points, which is huge. Running the training on a single NVIDIA V100 takes around 6.5 hours at `batch_size` 16, which is quite long.
To scale and accelerate our training we will use [Amazon SageMaker](https://aws.amazon.com/de/sagemaker/), which provides two strategies for [distributed training](https://huggingface.co/docs/sagemaker/train#distributed-training): [data parallelism](https://huggingface.co/docs/sagemaker/train#data-parallelism) and model parallelism. Data parallelism splits a training set across several GPUs, while [model parallelism](https://huggingface.co/docs/sagemaker/train#model-parallelism) splits a model across several GPUs. We are going to use [SageMaker Data Parallelism](https://aws.amazon.com/blogs/aws/managed-data-parallelism-in-amazon-sagemaker-simplifies-training-on-large-datasets/), which has been built into the [Trainer](https://huggingface.co/transformers/main_classes/trainer.html) API. To be able to use data parallelism, we only have to define the `distribution` parameter in our `HuggingFace` estimator.
I moved the "training" part of the [text-classification.ipynb](https://github.com/philschmid/transformers-pytorch-text-classification/blob/main/text-classification.ipynb) notebook into a separate training script, [train.py](./scripts/train.py), which accepts the same hyperparameters and can be run on Amazon SageMaker using the `HuggingFace` estimator.
Our goal is to decrease the training duration by scaling our global/effective batch size from 16 up to 128, which is 8x bigger than before. For monitoring our training we will use the new Training Metrics support from the [Hugging Face Hub](hf.co/models).
### Installation
```
#!pip install sagemaker
!pip install transformers datasets tensorboard datasets[s3] --upgrade
```
This example will use the [Hugging Face Hub](https://huggingface.co/models) as a remote model versioning service. To be able to push our model to the Hub, you need to register on [Hugging Face](https://huggingface.co/join).
If you already have an account, you can skip this step.
Once you have an account, we will use the `notebook_login` util from the `huggingface_hub` package to log into our account and store our token (access key) on disk.
```
from huggingface_hub import notebook_login
notebook_login()
```
## Setup & Configuration
In this step we will define global configurations and parameters, which are used across the whole end-to-end fine-tuning process, e.g. the `tokenizer` and `model` we will use.
```
import sagemaker
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it not exists
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")
```
_Note: The execution role is only available when running a notebook within SageMaker (SageMaker Notebook Instances or Studio). If you run `get_execution_role` in a notebook not on SageMaker, expect a region error._
You can uncomment the cell below and provide an IAM role name with SageMaker permissions to set up your environment outside of SageMaker.
```
# import sagemaker
# import boto3
# import os
# os.environ["AWS_DEFAULT_REGION"]="your-region"
# # This ROLE needs to exists with your associated AWS Credentials and needs permission for SageMaker
# ROLE_NAME='role-name-of-your-iam-role-with-right-permissions'
# iam_client = boto3.client('iam')
# role = iam_client.get_role(RoleName=ROLE_NAME)['Role']['Arn']
# sess = sagemaker.Session()
# print(f"sagemaker role arn: {role}")
# print(f"sagemaker bucket: {sess.default_bucket()}")
# print(f"sagemaker session region: {sess.boto_region_name}")
```
In this example we are going to fine-tune [distilbert-base-multilingual-cased](https://huggingface.co/distilbert-base-multilingual-cased), a multilingual DistilBERT model.
```
model_id = "distilbert-base-multilingual-cased"
# name for our repository on the hub
model_name = model_id.split("/")[-1] if "/" in model_id else model_id
repo_name = f"{model_name}-sentiment"
```
## Dataset & Pre-processing
As our dataset we will use [amazon_reviews_multi](https://huggingface.co/datasets/amazon_reviews_multi), a multilingual text-classification dataset. The dataset contains reviews in English, Japanese, German, French, Chinese and Spanish, collected between November 1, 2015 and November 1, 2019. Each record in the dataset contains the review text, the review title, the star rating, an anonymized reviewer ID, an anonymized product ID and the coarse-grained product category (e.g. ‘books’, ‘appliances’, etc.). The corpus is balanced across stars, so each star rating constitutes 20% of the reviews in each language.
```
dataset_id="amazon_reviews_multi"
dataset_config="all_languages"
seed=33
```
To load the `amazon_reviews_multi` dataset, we use the `load_dataset()` method from the 🤗 Datasets library.
```
from datasets import load_dataset
dataset = load_dataset(dataset_id,dataset_config)
```
### Pre-processing & Tokenization
The [amazon_reviews_multi](https://huggingface.co/datasets/amazon_reviews_multi) dataset has 5 classes (`stars`). To turn this into a `sentiment-analysis` task, we map the star ratings to the following `labels`:
* `[1-2]`: `Negative`
* `[3]`: `Neutral`
* `[4-5]`: `Positive`
These `labels` can later be used to create a user-friendly output after we have fine-tuned our model.
```
from datasets import ClassLabel
def map_start_to_label(review):
if review["stars"] < 3:
review["stars"] = 0
elif review["stars"] == 3:
review["stars"] = 1
else:
review["stars"] = 2
return review
# convert 1-5 star reviews to 0,1,2
dataset = dataset.map(map_start_to_label)
# convert feature from Value to ClassLabel
class_feature = ClassLabel(names=['negative','neutral', 'positive'])
dataset = dataset.cast_column("stars", class_feature)
# rename our target column to labels
dataset = dataset.rename_column("stars","labels")
# drop columns that are not needed
dataset = dataset.remove_columns(['review_id', 'product_id', 'reviewer_id', 'review_title', 'language', 'product_category'])
dataset["train"].features
```
Before we prepare the dataset for training, let's take a quick look at its class distribution.
```
import pandas as pd
df = dataset["train"].to_pandas()
df.hist()
```
The distribution is not perfect, but let's give it a try and improve on it later.
To train our model we need to convert our "natural language" to token IDs. This is done by a 🤗 Transformers Tokenizer, which tokenizes the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary). If you are not sure what this means, check out [chapter 6](https://huggingface.co/course/chapter6/1?fw=tf) of the Hugging Face Course.
```
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_id)
```
Additionally, we pass `truncation=True` and `max_length=512` to truncate texts that are longer than the maximum length the model accepts.
```
def process(examples):
tokenized_inputs = tokenizer(
examples["review_body"], truncation=True, max_length=512
)
return tokenized_inputs
tokenized_datasets = dataset.map(process, batched=True)
tokenized_datasets["train"].features
```
Before we can start our distributed training, we need to upload the pre-processed dataset to Amazon S3. For this we will use the built-in S3 filesystem utilities of `datasets`.
```
import botocore
from datasets.filesystems import S3FileSystem
s3 = S3FileSystem()
# save train_dataset to s3
training_input_path = f's3://{sess.default_bucket()}/{dataset_id}/train'
tokenized_datasets["train"].save_to_disk(training_input_path, fs=s3)
# save validation_dataset to s3
eval_input_path = f's3://{sess.default_bucket()}/{dataset_id}/test'
tokenized_datasets["validation"].save_to_disk(eval_input_path, fs=s3)
```
## Creating an Estimator and starting a training job
The last step before we can start our managed training is to define our hyperparameters, create our SageMaker `HuggingFace` estimator and configure distributed training.
```
from sagemaker.huggingface import HuggingFace
from huggingface_hub import HfFolder
# hyperparameters, which are passed into the training job
hyperparameters={
'model_id':'distilbert-base-multilingual-cased',
'epochs': 3,
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'learning_rate': 3e-5*8,
'fp16': True,
# logging & evaluation strategies
'strategy':'steps',
'steps':5_000,
'save_total_limit':2,
'load_best_model_at_end':True,
'metric_for_best_model':"f1",
# push to hub config
'push_to_hub': True,
'hub_model_id': 'distilbert-base-multilingual-cased-sentiment-2',
'hub_token': HfFolder.get_token()
}
# configuration for running training on smdistributed Data Parallel
distribution = {'smdistributed':{'dataparallel':{ 'enabled': True }}}
# create the Estimator
huggingface_estimator = HuggingFace(
entry_point = 'train.py',
source_dir = './scripts',
instance_type = 'ml.p3.16xlarge',
instance_count = 1,
role = role,
transformers_version = '4.12',
pytorch_version = '1.9',
py_version = 'py38',
hyperparameters = hyperparameters,
distribution = distribution
)
```
Since we are using SageMaker Data Parallelism, our total (effective) batch size will be `per_device_train_batch_size * n_gpus`.
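As a quick sanity check (assuming the 8 GPUs of the `ml.p3.16xlarge` instance configured above), the effective batch size works out to the 8x scale-up mentioned in the introduction:
```
# effective batch size with SageMaker data parallelism
n_gpus = 8  # assumption: ml.p3.16xlarge provides 8 GPUs
total_batch_size = hyperparameters['per_device_train_batch_size'] * n_gpus
print(total_batch_size)  # 16 * 8 = 128
```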
```
# define a data input dictonary with our uploaded s3 uris
data = {
'train': training_input_path,
'eval': eval_input_path
}
# starting the train job with our uploaded datasets as input
# setting wait to False to not expose the HF Token
huggingface_estimator.fit(data,wait=False)
```
Since we are using the Hugging Face Hub integration with TensorBoard, we can inspect our progress directly on the Hub, as well as test checkpoints during training.
```
from huggingface_hub import HfApi
whoami = HfApi().whoami()
username = whoami['name']
print(f"https://huggingface.co/{username}/{hyperparameters['hub_model_id']}")
```

|
github_jupyter
|
# Quantum teleportation
By the end of this post, we will teleport the quantum state
$$\sqrt{0.70}\vert0\rangle + \sqrt{0.30}\vert1\rangle$$ from Alice's qubit to Bob's qubit.
Recall that the teleportation algorithm consists of four major components:
1. Initializing the state to be teleported. We will do this on Alice's qubit `q0`.
2. Creating entanglement between two qubits. We will use qubits `q1` and `q2` for this. Recall that Alice owns `q1`, and Bob owns `q2`.
3. Applying a Bell measurement on Alice's qubits `q0` and `q1`.
4. Applying classically controlled operations on Bob's qubit `q2` depending on the outcomes of the Bell measurement on Alice's qubits.
This exercise guides you through each of these steps.
### Initializing the state to be teleported
First, we create a quantum circuit that has the state $$\sqrt{0.70}\vert0\rangle + \sqrt{0.30}\vert1\rangle$$ We can do this by using `Qiskit`'s `initialize` function.
```
import numpy as np
import math
def initialize_qubit(given_circuit, qubit_index):
### WRITE YOUR CODE BETWEEN THESE LINES - START
desired_vector = [math.sqrt(0.70),math.sqrt(0.30)]
given_circuit.initialize(desired_vector, [all_qubits_Alice[qubit_index]])
### WRITE YOUR CODE BETWEEN THESE LINES - END
return given_circuit
```
Next, we need to create entanglement between Alice's and Bob's qubits.
```
def entangle_qubits(given_circuit, qubit_Alice, qubit_Bob):
### WRITE YOUR CODE BETWEEN THESE LINES - START
given_circuit.h(qubit_Alice)
given_circuit.cx(qubit_Alice,qubit_Bob)
given_circuit.barrier()
given_circuit.cx(0,1)
given_circuit.h(0)
### WRITE YOUR CODE BETWEEN THESE LINES - END
return given_circuit
```
Next, we need to do a Bell measurement of Alice's qubits.
```
def bell_meas_Alice_qubits(given_circuit, qubit1_Alice, qubit2_Alice, clbit1_Alice, clbit2_Alice):
### WRITE YOUR CODE BETWEEN THESE LINES - START
given_circuit.measure([0,1], [0,1])
### WRITE YOUR CODE BETWEEN THESE LINES - END
return given_circuit
```
Finally, we apply controlled operations on Bob's qubit. Recall that the controlled operations are applied in this order:
- an $X$ gate is applied on Bob's qubit if the measurement outcome of Alice's second qubit, `clbit2_Alice`, is `1`.
- a $Z$ gate is applied on Bob's qubit if the measurement outcome of Alice's first qubit, `clbit1_Alice`, is `1`.
```
def controlled_ops_Bob_qubit(given_circuit, qubit_Bob, clbit1_Alice, clbit2_Alice):
### WRITE YOUR CODE BETWEEN THESE LINES - START
given_circuit.x(qubit_Bob).c_if(clbit2_Alice, 1)
given_circuit.z(qubit_Bob).c_if(clbit1_Alice, 1)
### WRITE YOUR CODE BETWEEN THESE LINES - END
return given_circuit
```
The next lines of code put everything together.
```
### imports
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister, execute
### set up the qubits and classical bits
all_qubits_Alice = QuantumRegister(2)
all_qubits_Bob = QuantumRegister(1)
creg1_Alice = ClassicalRegister(1)
creg2_Alice = ClassicalRegister(1)
### quantum teleportation circuit here
# Initialize
mycircuit = QuantumCircuit(all_qubits_Alice, all_qubits_Bob, creg1_Alice, creg2_Alice)
initialize_qubit(mycircuit, 0)
mycircuit.barrier()
# Entangle
entangle_qubits(mycircuit, 1, 2)
mycircuit.barrier()
# Do a Bell measurement
bell_meas_Alice_qubits(mycircuit, all_qubits_Alice[0], all_qubits_Alice[1], creg1_Alice, creg2_Alice)
mycircuit.barrier()
# Apply classically controlled quantum gates
controlled_ops_Bob_qubit(mycircuit, all_qubits_Bob[0], creg1_Alice, creg2_Alice)
### Look at the complete circuit
mycircuit.draw()
from qiskit import BasicAer
from qiskit.visualization import plot_histogram, plot_bloch_multivector
backend = BasicAer.get_backend('statevector_simulator')
out_vector = execute(mycircuit, backend).result().get_statevector()
plot_bloch_multivector(out_vector)
```
As you can see, the state initially prepared on qubit 0 has been teleported to qubit 2. Note, however, that in the teleportation process the original qubit's state was destroyed when we measured Alice's qubits.
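As an optional sanity check, here is a small sketch (reusing `out_vector` from the cell above) that traces out Alice's qubits and confirms that Bob's qubit carries populations of roughly $0.70$ and $0.30$, matching the prepared state:
```
# Reduced state of Bob's qubit (index 2): expect diagonal ~[0.70, 0.30]
from qiskit.quantum_info import Statevector, partial_trace

rho_bob = partial_trace(Statevector(out_vector), [0, 1])  # trace out Alice's qubits
print(np.round(np.real(np.diag(rho_bob.data)), 3))
```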
## References:
The original lab can be found at: https://qiskit.org/learn/intro-qc-qh/
This is just my solution to the original lab file, with some modifications to fit the style of the blog.
|
github_jupyter
|
```
import time
import pandas as pd
import numpy as np
import nltk
nltk.download('gutenberg')
import tensorflow as tf
keras = tf.keras
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.style.use('ggplot')
```
# NLP Concepts #6 - RNNs in practice
## Define helpers
```
def get_slices(text, slice_len=100):
text_split = text.split(' ')
n_chunks = int(len(text_split) / slice_len)
current_start_id = 0
slices = []
for i in range(n_chunks + 1):
current_slice = text_split[current_start_id:current_start_id + slice_len]
if len(current_slice) > 0:
slices.append(' '.join(current_slice))
current_start_id += slice_len
return slices
```
## Get and prepare data
```
# Print corpora and their lengths
for i in nltk.corpus.gutenberg.fileids():
src = nltk.corpus.gutenberg.words(i)
print(i, len(src))
```
### Join and check lengths
```
# Shakespeare's "Macbeth"
shkspr = nltk.corpus.gutenberg.words('shakespeare-macbeth.txt')
shkspr_join = ' '.join(shkspr)
len(shkspr)
# Carroll's "Alice's adventures (...)"
carroll = nltk.corpus.gutenberg.words('carroll-alice.txt')[:23140]
carroll_join = ' '.join(carroll)
len(carroll)
```
### Get slices and generate labels
```
# Get slices
shkspr_slices = get_slices(shkspr_join, 250)
carroll_slices = get_slices(carroll_join, 250)
len(shkspr_slices), len(carroll_slices)
# Create X
X = shkspr_slices + carroll_slices
# Create y
y = np.array([0] * 93 + [1] * 93)
```
### Train test split
```
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
```
### Tokenize texts
```
# Initialize a tokenizer
VOCAB_SIZE = 20000
tokenizer = tf.keras.preprocessing.text.Tokenizer(
num_words=VOCAB_SIZE,
lower=True,
oov_token=1
)
# Fit the tokenizer
tokenizer.fit_on_texts(X_train)
# Tokenize
X_train_tok = tokenizer.texts_to_sequences(X_train)
X_test_tok = tokenizer.texts_to_sequences(X_test)
# Plot seq lens
seq_lens_train = [len(seq) for seq in X_train_tok]
seq_lens_test = [len(seq) for seq in X_test_tok]
plt.hist(seq_lens_train, density=True, alpha=.7, label='Train')
plt.hist(seq_lens_test, density=True, alpha=.7, label='Test')
plt.legend()
plt.show()
# Find maxlen
MAXLEN = max([len(x.split(' ')) for x in X_train])
# Pad sequences
X_train_tok_pad = pad_sequences(X_train_tok, maxlen=MAXLEN, padding='post')
X_test_tok_pad = pad_sequences(X_test_tok, maxlen=MAXLEN, padding='post')
```
## Classification example
```
def train_and_evaluate(model, X_train, y_train, X_val, y_val, epochs=30, lr=1e-4, verbose=2):
# Compile
model.compile(loss = 'binary_crossentropy',
optimizer = tf.keras.optimizers.Adam(lr),
metrics = ['accuracy'])
# Callbacks
early = keras.callbacks.EarlyStopping(patience=10, restore_best_weights=True)
# Time it
start = time.time()
# Fit
history = model.fit(X_train, y_train,
validation_data = (X_val, y_val),
callbacks = [early],
epochs = epochs,
verbose = verbose)
# Time it
training_time = time.time() - start
# Plot learning curve
plt.figure(figsize=(10, 4))
plt.subplot(121)
plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='val')
plt.legend()
plt.title('Loss')
plt.subplot(122)
plt.plot(history.history['accuracy'], label='train')
plt.plot(history.history['val_accuracy'], label='val')
plt.legend()
plt.title('Accuracy')
plt.show()
# Evaluate
loss, acc = model.evaluate(X_val, y_val, verbose=0)
print(f'Val. accuracy: {acc}')
print(f'Training time: {training_time:.02f} seconds')
```
### Build a simple model
```
model = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim = VOCAB_SIZE,
output_dim = 100,
mask_zero = True,
input_length = MAXLEN),
tf.keras.layers.LSTM(64),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
train_and_evaluate(model, X_train_tok_pad, y_train, X_test_tok_pad, y_test, verbose=0, epochs=30)
```
### Build a deeper model
```
model_2 = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim = VOCAB_SIZE,
output_dim = 100,
mask_zero = True,
input_length = MAXLEN),
tf.keras.layers.LSTM(64, return_sequences=True),
tf.keras.layers.LSTM(128),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model_2.summary()
train_and_evaluate(model_2, X_train_tok_pad, y_train, X_test_tok_pad, y_test, verbose=0, epochs=30)
```
## Build a bi-directional model
```
model_3 = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim = VOCAB_SIZE,
output_dim = 100,
mask_zero = True,
input_length = MAXLEN),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model_3.summary()
train_and_evaluate(model_3, X_train_tok_pad, y_train, X_test_tok_pad, y_test, verbose=0, epochs=30)
```
## Build a deep bi-directional model
```
model_4 = tf.keras.Sequential([
tf.keras.layers.Embedding(
input_dim = VOCAB_SIZE,
output_dim = 100,
mask_zero = True,
input_length = MAXLEN),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(128)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model_4.summary()
train_and_evaluate(model_4, X_train_tok_pad, y_train, X_test_tok_pad, y_test, verbose=0, epochs=30)
```
## Long distance dependencies - `SimpleRNN()` and `LSTM()`
#### Experiment:
We will put a negative number at the beginning of a random sequence of positive numbers. We'll manipulate the sequence length and check how it affects the performance of `SimpleRNN` and `LSTM` in a classification task.
<br>
<img src="https://www.hackingwithswift.com/uploads/matrix.jpg" alt="Numbers" style="width: 400px;"/>
```
LENGTHS = [10, 20, 50, 200]
def build_dataset(length, n_examples):
X = []
y = []
for i in range(n_examples):
class_ = np.random.choice([0, 1])
if class_ == 1:
row = np.array([-1] + list(np.random.choice(np.arange(0, 1, .01), length - 1)))
elif class_ == 0:
row = np.random.choice(np.arange(0, 1, .01), length)
X.append(row)
y.append(class_)
return np.array(X)[:, :, np.newaxis], np.array(y)
def build_model(rnn_type, len_):
if rnn_type == 'rnn':
rnn_layer = tf.keras.layers.SimpleRNN
elif rnn_type == 'lstm':
rnn_layer = tf.keras.layers.LSTM
model = tf.keras.Sequential([
rnn_layer(64, input_shape=(len_, 1), return_sequences=True),
rnn_layer(128),
tf.keras.layers.Dense(32, activation='tanh'),
tf.keras.layers.Dropout(.2),
tf.keras.layers.Dense(1, activation='sigmoid')
])
return model
for len_ in LENGTHS:
# Prep data
print(f'Building dataset of length {len_}')
X, y = build_dataset(len_, 200)
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.2)
# Build models
rnn_model = build_model('rnn', len_)
lstm_model = build_model('lstm', len_)
# Train and evaluate
print(f'\nRNN for {len_}')
train_and_evaluate(rnn_model, X_train, y_train, X_test, y_test, verbose=0, epochs=30)
print(f'\nLSTM for {len_}')
train_and_evaluate(lstm_model, X_train, y_train, X_test, y_test, verbose=0, epochs=30)
```
|
github_jupyter
|
# base
```
import vectorbt as vbt
from vectorbt.base import column_grouper, array_wrapper, combine_fns, index_fns, indexing, reshape_fns
import numpy as np
import pandas as pd
from datetime import datetime
from numba import njit
import itertools
v1 = 0
a1 = np.array([1])
a2 = np.array([1, 2, 3])
a3 = np.array([[1, 2, 3]])
a4 = np.array([[1], [2], [3]])
a5 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
sr_none = pd.Series([1])
print(sr_none)
sr1 = pd.Series([1], index=pd.Index(['x1'], name='i1'), name='a1')
print(sr1)
sr2 = pd.Series([1, 2, 3], index=pd.Index(['x2', 'y2', 'z2'], name='i2'), name='a2')
print(sr2)
df_none = pd.DataFrame([[1]])
print(df_none)
df1 = pd.DataFrame(
[[1]],
index=pd.Index(['x3'], name='i3'),
columns=pd.Index(['a3'], name='c3'))
print(df1)
df2 = pd.DataFrame(
[[1], [2], [3]],
index=pd.Index(['x4', 'y4', 'z4'], name='i4'),
columns=pd.Index(['a4'], name='c4'))
print(df2)
df3 = pd.DataFrame(
[[1, 2, 3]],
index=pd.Index(['x5'], name='i5'),
columns=pd.Index(['a5', 'b5', 'c5'], name='c5'))
print(df3)
df4 = pd.DataFrame(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
index=pd.Index(['x6', 'y6', 'z6'], name='i6'),
columns=pd.Index(['a6', 'b6', 'c6'], name='c6'))
print(df4)
multi_i = pd.MultiIndex.from_arrays([['x7', 'y7', 'z7'], ['x8', 'y8', 'z8']], names=['i7', 'i8'])
multi_c = pd.MultiIndex.from_arrays([['a7', 'b7', 'c7'], ['a8', 'b8', 'c8']], names=['c7', 'c8'])
df5 = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=multi_i, columns=multi_c)
print(df5)
```
## column_grouper
```
some_columns = pd.MultiIndex.from_arrays([
[1, 1, 1, 1, 0, 0, 0, 0],
[3, 3, 2, 2, 1, 1, 0, 0],
[7, 6, 5, 4, 3, 2, 1, 0]
], names=['first', 'second', 'third'])
print(column_grouper.group_by_to_index(some_columns, group_by=0))
print(column_grouper.group_by_to_index(some_columns, group_by='first'))
print(column_grouper.group_by_to_index(some_columns, group_by=[0, 1]))
print(column_grouper.group_by_to_index(some_columns, group_by=['first', 'second']))
print(column_grouper.group_by_to_index(some_columns, group_by=np.array([3, 2, 1, 1, 1, 0, 0, 0])))
print(column_grouper.group_by_to_index(some_columns, group_by=pd.Index([3, 2, 1, 1, 1, 0, 0, 0], name='fourth')))
# group_arr always runs from 0 to n and preserves order
print(column_grouper.get_groups_and_index(some_columns, 0))
print(column_grouper.get_groups_and_index(some_columns, [0, 1]))
print(column_grouper.get_groups_and_index(some_columns, np.array([3, 2, 1, 1, 1, 0, 0, 0])))
print(column_grouper.get_group_lens_nb(np.array([0, 0, 0, 0, 1, 1, 1, 1])))
print(column_grouper.get_group_lens_nb(np.array([0, 1])))
print(column_grouper.get_group_lens_nb(np.array([0, 0])))
print(column_grouper.get_group_lens_nb(np.array([0])))
print(column_grouper.get_group_lens_nb(np.array([])))
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).group_by)
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_groups_and_columns())
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_groups())
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_columns())
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_group_lens())
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_group_start_idxs())
print(column_grouper.ColumnGrouper(sr2.to_frame().columns, group_by=np.array([0])).get_group_end_idxs())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).group_by)
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_groups_and_columns())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_groups())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_columns())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_group_lens())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_group_start_idxs())
print(column_grouper.ColumnGrouper(df4.columns, group_by=np.array([0, 0, 1])).get_group_end_idxs())
```
## array_wrapper
```
sr2_wrapper = array_wrapper.ArrayWrapper.from_obj(sr2)
df4_wrapper = array_wrapper.ArrayWrapper.from_obj(df4)
sr2_wrapper_co = sr2_wrapper.copy(column_only_select=True)
df4_wrapper_co = df4_wrapper.copy(column_only_select=True)
sr2_grouped_wrapper = sr2_wrapper.copy(group_by=np.array([0]))
df4_grouped_wrapper = df4_wrapper.copy(group_by=np.array([0, 0, 1]))
sr2_grouped_wrapper_co = sr2_grouped_wrapper.copy(column_only_select=True)
df4_grouped_wrapper_co = df4_grouped_wrapper.copy(column_only_select=True)
# test indexing
print(sr2_wrapper._indexing_func_meta(lambda x: x.iloc[:2])[1:])
print(df4_wrapper._indexing_func_meta(lambda x: x.iloc[0, :2])[1:])
print(df4_wrapper._indexing_func_meta(lambda x: x.iloc[:2, 0])[1:])
print(df4_wrapper._indexing_func_meta(lambda x: x.iloc[:2, [0]])[1:])
print(df4_wrapper._indexing_func_meta(lambda x: x.iloc[:2, :2])[1:])
print(df4_wrapper_co._indexing_func_meta(lambda x: x.iloc[0])[1:])
print(df4_wrapper_co._indexing_func_meta(lambda x: x.iloc[[0]])[1:])
print(df4_wrapper_co._indexing_func_meta(lambda x: x.iloc[:2])[1:])
print(sr2_grouped_wrapper._indexing_func_meta(lambda x: x.iloc[:2])[1:])
print(df4_grouped_wrapper._indexing_func_meta(lambda x: x.iloc[:2, 0])[1:])
print(df4_grouped_wrapper._indexing_func_meta(lambda x: x.iloc[:2, 1])[1:])
print(df4_grouped_wrapper._indexing_func_meta(lambda x: x.iloc[:2, [1]])[1:])
print(df4_grouped_wrapper._indexing_func_meta(lambda x: x.iloc[:2, :2])[1:])
print(df4_grouped_wrapper_co._indexing_func_meta(lambda x: x.iloc[0])[1:])
print(df4_grouped_wrapper_co._indexing_func_meta(lambda x: x.iloc[1])[1:])
print(df4_grouped_wrapper_co._indexing_func_meta(lambda x: x.iloc[[1]])[1:])
print(df4_grouped_wrapper_co._indexing_func_meta(lambda x: x.iloc[:2])[1:])
print(sr2_wrapper.iloc[:2].index)
print(sr2_wrapper.iloc[:2].columns)
print(sr2_wrapper.iloc[:2].ndim)
print(df4_wrapper.iloc[0, :2].index)
print(df4_wrapper.iloc[0, :2].columns)
print(df4_wrapper.iloc[0, :2].ndim)
print(df4_wrapper.iloc[:2, 0].index)
print(df4_wrapper.iloc[:2, 0].columns)
print(df4_wrapper.iloc[:2, 0].ndim)
print(df4_wrapper.iloc[:2, [0]].index)
print(df4_wrapper.iloc[:2, [0]].columns)
print(df4_wrapper.iloc[:2, [0]].ndim)
print(df4_wrapper.iloc[:2, :2].index)
print(df4_wrapper.iloc[:2, :2].columns)
print(df4_wrapper.iloc[:2, :2].ndim)
print(df4_wrapper_co.iloc[0].index)
print(df4_wrapper_co.iloc[0].columns)
print(df4_wrapper_co.iloc[0].ndim)
print(df4_wrapper_co.iloc[[0]].index)
print(df4_wrapper_co.iloc[[0]].columns)
print(df4_wrapper_co.iloc[[0]].ndim)
print(df4_wrapper_co.iloc[:2].index)
print(df4_wrapper_co.iloc[:2].columns)
print(df4_wrapper_co.iloc[:2].ndim)
print(sr2_grouped_wrapper.iloc[:2].index)
print(sr2_grouped_wrapper.iloc[:2].columns)
print(sr2_grouped_wrapper.iloc[:2].ndim)
print(sr2_grouped_wrapper.iloc[:2].grouped_ndim)
print(sr2_grouped_wrapper.iloc[:2].grouper.group_by)
print(df4_grouped_wrapper.iloc[:2, 0].index)
print(df4_grouped_wrapper.iloc[:2, 0].columns)
print(df4_grouped_wrapper.iloc[:2, 0].ndim)
print(df4_grouped_wrapper.iloc[:2, 0].grouped_ndim)
print(df4_grouped_wrapper.iloc[:2, 0].grouper.group_by)
print(df4_grouped_wrapper.iloc[:2, 1].index)
print(df4_grouped_wrapper.iloc[:2, 1].columns)
print(df4_grouped_wrapper.iloc[:2, 1].ndim)
print(df4_grouped_wrapper.iloc[:2, 1].grouped_ndim)
print(df4_grouped_wrapper.iloc[:2, 1].grouper.group_by)
print(df4_grouped_wrapper.iloc[:2, [1]].index)
print(df4_grouped_wrapper.iloc[:2, [1]].columns)
print(df4_grouped_wrapper.iloc[:2, [1]].ndim)
print(df4_grouped_wrapper.iloc[:2, [1]].grouped_ndim)
print(df4_grouped_wrapper.iloc[:2, [1]].grouper.group_by)
print(df4_grouped_wrapper.iloc[:2, :2].index)
print(df4_grouped_wrapper.iloc[:2, :2].columns)
print(df4_grouped_wrapper.iloc[:2, :2].ndim)
print(df4_grouped_wrapper.iloc[:2, :2].grouped_ndim)
print(df4_grouped_wrapper.iloc[:2, :2].grouper.group_by)
print(df4_grouped_wrapper_co.iloc[0].index)
print(df4_grouped_wrapper_co.iloc[0].columns)
print(df4_grouped_wrapper_co.iloc[0].ndim)
print(df4_grouped_wrapper_co.iloc[0].grouped_ndim)
print(df4_grouped_wrapper_co.iloc[0].grouper.group_by)
print(df4_grouped_wrapper_co.iloc[1].index)
print(df4_grouped_wrapper_co.iloc[1].columns)
print(df4_grouped_wrapper_co.iloc[1].ndim)
print(df4_grouped_wrapper_co.iloc[1].grouped_ndim)
print(df4_grouped_wrapper_co.iloc[1].grouper.group_by)
print(df4_grouped_wrapper_co.iloc[[1]].index)
print(df4_grouped_wrapper_co.iloc[[1]].columns)
print(df4_grouped_wrapper_co.iloc[[1]].ndim)
print(df4_grouped_wrapper_co.iloc[[1]].grouped_ndim)
print(df4_grouped_wrapper_co.iloc[[1]].grouper.group_by)
print(df4_grouped_wrapper_co.iloc[:2].index)
print(df4_grouped_wrapper_co.iloc[:2].columns)
print(df4_grouped_wrapper_co.iloc[:2].ndim)
print(df4_grouped_wrapper_co.iloc[:2].grouped_ndim)
print(df4_grouped_wrapper_co.iloc[:2].grouper.group_by)
big_df = pd.DataFrame(np.empty((1000, 1000)))
big_df_wrapper = array_wrapper.ArrayWrapper.from_obj(big_df)
big_df_wrapper_co = big_df_wrapper.copy(column_only_select=True)
big_df_grouped_wrapper = df4_wrapper.copy(group_by=np.array([0, 0, 1]))
big_df_grouped_wrapper_co = big_df_grouped_wrapper.copy(column_only_select=True)
%timeit big_df_wrapper.iloc[:, 0]
%timeit big_df_wrapper.iloc[:, :]
%timeit big_df_wrapper_co.iloc[0]
%timeit big_df_wrapper_co.iloc[:]
%timeit big_df_grouped_wrapper.iloc[:, 0]
%timeit big_df_grouped_wrapper.iloc[:, :]
%timeit big_df_grouped_wrapper_co.iloc[0]
%timeit big_df_grouped_wrapper_co.iloc[:]
print(df4_grouped_wrapper_co.wrap(np.array([[1, 2], [3, 4], [5, 6]])))
print(df4_grouped_wrapper_co.wrap_reduced(np.array([1, 2])))
print(df4_grouped_wrapper_co.wrap(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), group_by=False))
print(df4_grouped_wrapper_co.wrap_reduced(np.array([1, 2, 3]), group_by=False))
print(df4_grouped_wrapper_co.iloc[0].wrap(np.array([1, 2, 3])))
print(df4_grouped_wrapper_co.iloc[0].wrap_reduced(np.array([1])))
print(df4_grouped_wrapper_co.iloc[0].wrap(np.array([[1, 2], [3, 4], [5, 6]]), group_by=False))
print(df4_grouped_wrapper_co.iloc[0].wrap_reduced(np.array([1, 2]), group_by=False))
print(df4_grouped_wrapper_co.iloc[[0]].wrap(np.array([1, 2, 3])))
print(df4_grouped_wrapper_co.iloc[[0]].wrap_reduced(np.array([1])))
print(df4_grouped_wrapper_co.iloc[[0]].wrap(np.array([[1, 2], [3, 4], [5, 6]]), group_by=False))
print(df4_grouped_wrapper_co.iloc[[0]].wrap_reduced(np.array([1, 2]), group_by=False))
print(df4_grouped_wrapper_co.iloc[1].wrap(np.array([1, 2, 3])))
print(df4_grouped_wrapper_co.iloc[1].wrap_reduced(np.array([1])))
print(df4_grouped_wrapper_co.iloc[1].wrap(np.array([1, 2, 3]), group_by=False))
print(df4_grouped_wrapper_co.iloc[1].wrap_reduced(np.array([1]), group_by=False))
print(df4_grouped_wrapper_co.iloc[[1]].wrap(np.array([1, 2, 3])))
print(df4_grouped_wrapper_co.iloc[[1]].wrap_reduced(np.array([1])))
print(df4_grouped_wrapper_co.iloc[[1]].wrap(np.array([1, 2, 3]), group_by=False))
print(df4_grouped_wrapper_co.iloc[[1]].wrap_reduced(np.array([1]), group_by=False))
```
## index_fns
```
i1 = index_fns.index_from_values([0.1, 0.2], name='a')
i2 = index_fns.index_from_values(np.tile(np.arange(1, 4)[:, None][:, None], (1, 3, 3)), name='b')
i3 = index_fns.index_from_values(np.random.uniform(size=(3, 3, 3)), name='c')
print(i1)
print(i2)
print(i3)
print(index_fns.repeat_index(i2, 3))
print(index_fns.repeat_index(multi_i, 3))
print(index_fns.tile_index(i2, 3))
print(index_fns.tile_index(multi_i, 3))
i23 = index_fns.stack_indexes(i2, i3)
i32 = index_fns.stack_indexes(i3, i2)
print(i23)
print(i32)
print(index_fns.stack_indexes(multi_i, multi_i, drop_duplicates=False))
print(index_fns.stack_indexes(multi_i, multi_i, drop_duplicates=True))
print(index_fns.stack_indexes([0, 1], ['a', 'b'], drop_redundant=False))
print(index_fns.stack_indexes([0, 1], ['a', 'b'], drop_redundant=True))
print(index_fns.stack_indexes(pd.Index([0, 1], name='test_name'), ['a', 'b'], drop_redundant=True))
print(index_fns.stack_indexes(['a', 'a'], ['a', 'b'], drop_redundant=True))
print(index_fns.stack_indexes(pd.Index(['a', 'a'], name='test_name'), ['a', 'b'], drop_redundant=True))
print(index_fns.combine_indexes(pd.Index([1]), pd.Index([2, 3]), drop_duplicates=False))
print(index_fns.combine_indexes(pd.Index([1]), pd.Index([2, 3]), drop_duplicates=True))
print(index_fns.combine_indexes(pd.Index([1, 2]), pd.Index([3]), drop_duplicates=False))
print(index_fns.combine_indexes(pd.Index([1, 2]), pd.Index([3]), drop_duplicates=True))
print(index_fns.combine_indexes(i1, i2)) # combine_fns uses stack
print(index_fns.combine_indexes(i2, i3))
print(index_fns.combine_indexes(i23, i23))
print(index_fns.drop_levels(multi_i, 'i10'))
print(index_fns.drop_levels(multi_i, ['i7', 'i8']))
print(index_fns.rename_levels(pd.Int64Index([1, 2, 3], name='i'), {'i': 'f'}))
print(index_fns.rename_levels(multi_i, {'i7': 'f7', 'i8': 'f8'}))
print(index_fns.select_levels(multi_i, 'i7'))
print(index_fns.select_levels(multi_i, ['i7']))
print(index_fns.select_levels(multi_i, ['i7', 'i8']))
print(index_fns.drop_redundant_levels(pd.Index(['a', 'a']))) # ignores levels with single element
print(index_fns.drop_redundant_levels(pd.Index(['a', 'a'], name='hi')))
print(index_fns.drop_redundant_levels(pd.MultiIndex.from_arrays([['a', 'a'], ['b', 'b']], names=['hi', 'hi2'])))
print(index_fns.drop_redundant_levels(pd.MultiIndex.from_arrays([['a', 'b'], ['a', 'b']], names=['hi', 'hi2'])))
print(index_fns.drop_redundant_levels(pd.MultiIndex.from_arrays([[0, 1], ['a', 'b']], names=[None, 'hi2']))) # ignores 0-to-n
print(index_fns.drop_redundant_levels(pd.MultiIndex.from_arrays([[0, 2], ['a', 'b']], names=[None, 'hi2']))) # legit
print(index_fns.drop_redundant_levels(pd.MultiIndex.from_arrays([[0, 1], ['a', 'b']], names=['hi', 'hi2']))) # legit (w/ name)
print(index_fns.drop_duplicate_levels(pd.MultiIndex.from_arrays(
[[1, 2, 3], [1, 2, 3]], names=['a', 'a'])))
print(index_fns.drop_duplicate_levels(pd.MultiIndex.from_tuples(
[(0, 1, 2, 1), ('a', 'b', 'c', 'b')], names=['x', 'y', 'z', 'y']), keep='last'))
print(index_fns.drop_duplicate_levels(pd.MultiIndex.from_tuples(
[(0, 1, 2, 1), ('a', 'b', 'c', 'b')], names=['x', 'y', 'z', 'y']), keep='first'))
multi_c1 = pd.MultiIndex.from_arrays([['a8', 'b8']], names=['c8'])
multi_c2 = pd.MultiIndex.from_arrays([['a7', 'a7', 'c7', 'c7'], ['a8', 'b8', 'a8', 'b8']], names=['c7', 'c8'])
index_fns.align_index_to(multi_c1, multi_c2)
print(index_fns.pick_levels(multi_c, required_levels=[], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=['c8'], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=['c8'], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=['c7', 'c8'], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=['c8', None], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=[None, None], optional_levels=[]))
print(index_fns.pick_levels(multi_c, required_levels=[None], optional_levels=['c8']))
print(index_fns.pick_levels(multi_c, required_levels=['c8'], optional_levels=[None]))
print(index_fns.pick_levels(multi_c, required_levels=[], optional_levels=['c7', 'c8']))
```
## reshape_fns
```
print(reshape_fns.soft_to_ndim(a2, 1))
print(reshape_fns.soft_to_ndim(sr2, 1))
print(reshape_fns.soft_to_ndim(df2, 1))
print(reshape_fns.soft_to_ndim(df4, 1)) # cannot -> do nothing
print(reshape_fns.soft_to_ndim(a2, 2))
print(reshape_fns.soft_to_ndim(sr2, 2))
print(reshape_fns.soft_to_ndim(df2, 2))
print(reshape_fns.to_1d(None))
print(reshape_fns.to_1d(v1))
print(reshape_fns.to_1d(a1))
print(reshape_fns.to_1d(a2))
print(reshape_fns.to_1d(sr1))
print(reshape_fns.to_1d(sr2))
print(reshape_fns.to_1d(df1))
print(reshape_fns.to_1d(df2))
print(reshape_fns.to_2d(None))
print(reshape_fns.to_2d(v1))
print(reshape_fns.to_2d(a1))
print(reshape_fns.to_2d(a2))
print(reshape_fns.to_2d(sr1))
print(reshape_fns.to_2d(sr2))
print(reshape_fns.to_2d(sr2, expand_axis=0))
print(reshape_fns.repeat(v1, 3, axis=0))
print(reshape_fns.repeat(a1, 3, axis=0))
print(reshape_fns.repeat(a2, 3, axis=0))
print(reshape_fns.repeat(a3, 3, axis=0))
print(reshape_fns.repeat(a4, 3, axis=0))
print(reshape_fns.repeat(a5, 3, axis=0))
print(reshape_fns.repeat(sr_none, 3, axis=0))
print(reshape_fns.repeat(sr1, 3, axis=0))
print(reshape_fns.repeat(sr2, 3, axis=0))
print(reshape_fns.repeat(df_none, 3, axis=0))
print(reshape_fns.repeat(df1, 3, axis=0))
print(reshape_fns.repeat(df2, 3, axis=0))
print(reshape_fns.repeat(df3, 3, axis=0))
print(reshape_fns.repeat(df4, 3, axis=0))
print(reshape_fns.repeat(v1, 3, axis=1))
print(reshape_fns.repeat(a1, 3, axis=1))
print(reshape_fns.repeat(a2, 3, axis=1))
print(reshape_fns.repeat(a3, 3, axis=1))
print(reshape_fns.repeat(a4, 3, axis=1))
print(reshape_fns.repeat(a5, 3, axis=1))
print(reshape_fns.repeat(sr_none, 3, axis=1))
print(reshape_fns.repeat(sr1, 3, axis=1))
print(reshape_fns.repeat(sr2, 3, axis=1))
print(reshape_fns.repeat(df_none, 3, axis=1))
print(reshape_fns.repeat(df1, 3, axis=1))
print(reshape_fns.repeat(df2, 3, axis=1))
print(reshape_fns.repeat(df3, 3, axis=1))
print(reshape_fns.repeat(df4, 3, axis=1))
print(reshape_fns.tile(v1, 3, axis=0))
print(reshape_fns.tile(a1, 3, axis=0))
print(reshape_fns.tile(a2, 3, axis=0))
print(reshape_fns.tile(a3, 3, axis=0))
print(reshape_fns.tile(a4, 3, axis=0))
print(reshape_fns.tile(a5, 3, axis=0))
print(reshape_fns.tile(sr_none, 3, axis=0))
print(reshape_fns.tile(sr1, 3, axis=0))
print(reshape_fns.tile(sr2, 3, axis=0))
print(reshape_fns.tile(df_none, 3, axis=0))
print(reshape_fns.tile(df1, 3, axis=0))
print(reshape_fns.tile(df2, 3, axis=0))
print(reshape_fns.tile(df3, 3, axis=0))
print(reshape_fns.tile(df4, 3, axis=0))
print(reshape_fns.tile(v1, 3, axis=1))
print(reshape_fns.tile(a1, 3, axis=1))
print(reshape_fns.tile(a2, 3, axis=1))
print(reshape_fns.tile(a3, 3, axis=1))
print(reshape_fns.tile(a4, 3, axis=1))
print(reshape_fns.tile(a5, 3, axis=1))
print(reshape_fns.tile(sr_none, 3, axis=1))
print(reshape_fns.tile(sr1, 3, axis=1))
print(reshape_fns.tile(sr2, 3, axis=1))
print(reshape_fns.tile(df_none, 3, axis=1))
print(reshape_fns.tile(df1, 3, axis=1))
print(reshape_fns.tile(df2, 3, axis=1))
print(reshape_fns.tile(df3, 3, axis=1))
print(reshape_fns.tile(df4, 3, axis=1))
# Change broadcasting rules globally
vbt.settings.broadcasting['index_from'] = 'stack' # default is 'strict'
vbt.settings.broadcasting['columns_from'] = 'stack'
print(vbt.settings.broadcasting)
# Broadcasting arrays
args = [
('v1', v1),
('a1', a1),
('a2', a2),
('a3', a3),
('a4', a4),
('a5', a5)
]
arg_combs = list(itertools.combinations_with_replacement(args, 2))
for (n1, arg1), (n2, arg2) in arg_combs:
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
# Broadcasting series
args = [
('sr_none', sr_none),
('sr1', sr1),
('sr2', sr2)
]
arg_combs = list(itertools.combinations_with_replacement(args, 2))
for (n1, arg1), (n2, arg2) in arg_combs:
print(n1 + '+' + n2)
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
# Broadcasting arrays and series
a_args = [
('v1', v1),
('a1', a1),
('a2', a2),
('a3', a3),
('a4', a4),
('a5', a5)
]
sr_args = [
('sr_none', sr_none),
('sr1', sr1),
('sr2', sr2)
]
arg_combs = list(itertools.product(a_args, sr_args))
for (n1, arg1), (n2, arg2) in arg_combs:
print(n1 + '+' + n2)
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
# Broadcasting dataframes
args = [
('df_none', df_none),
('df1', df1),
('df2', df2),
('df3', df3),
('df4', df4)
]
arg_combs = list(itertools.combinations_with_replacement(args, 2))
for (n1, arg1), (n2, arg2) in arg_combs:
print(n1 + '+' + n2)
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
# Broadcasting arrays and dataframes
a_args = [
('v1', v1),
('a1', a1),
('a2', a2),
('a3', a3),
('a4', a4),
('a5', a5)
]
sr_args = [
('df_none', df_none),
('df1', df1),
('df2', df2),
('df3', df3),
('df4', df4)
]
arg_combs = list(itertools.product(a_args, sr_args))
for (n1, arg1), (n2, arg2) in arg_combs:
print(n1 + '+' + n2)
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
# Broadcasting series and dataframes
a_args = [
('sr_none', sr_none),
('sr1', sr1),
('sr2', sr2)
]
sr_args = [
('df_none', df_none),
('df1', df1),
('df2', df2),
('df3', df3),
('df4', df4)
]
arg_combs = list(itertools.product(a_args, sr_args))
for (n1, arg1), (n2, arg2) in arg_combs:
print(n1 + '+' + n2)
print(arg1)
print(arg2)
print("================")
arg1, arg2 = reshape_fns.broadcast(arg1, arg2)
print(arg1)
print(arg2)
print()
[np.broadcast_to(x, (3, 3)) for x in (0, a1, a2, sr_none, sr1, sr2)]
# Broadcasting all at once
for i in reshape_fns.broadcast(
0, a1, a2, sr_none, sr1, sr2,
to_shape=(3, 3),
index_from='stack',
columns_from='stack'
):
print(i)
# Broadcasting all at once
for i in reshape_fns.broadcast(
v1, a1, a2, a3, a4, a5, sr_none, sr1, sr2, df_none, df1, df2, df3, df4,
index_from='stack',
columns_from='stack'
):
print(i)
for i in reshape_fns.broadcast(
v1, a1, a2, a3, a4, a5, sr_none, sr1, sr2, df_none, df1, df2, df3, df4,
index_from=None, # use as-is
columns_from=None
):
print(i)
for i in reshape_fns.broadcast(
v1, a1, a2, a3, a4, a5, sr_none, sr1, sr2, df_none, df1, df2, df3, df4,
index_from=-1, # take index from the last dataframe
columns_from=-1
):
print(i)
for i in reshape_fns.broadcast(
v1, a1, a2, a3, a4, a5, sr_none, sr1, sr2, df_none, df1, df2, df3, df4,
index_from=multi_i, # specify manually
columns_from=multi_c
):
print(i)
# Do not clean columns
vbt.settings.broadcasting['drop_duplicates'] = False
vbt.settings.broadcasting['drop_redundant'] = False
vbt.settings.broadcasting['ignore_sr_names'] = False
for i in reshape_fns.broadcast(
v1, a1, a2, a3, a4, a5, sr_none, sr1, sr2, df_none, df1, df2, df3, df4,
index_from='stack', # stack but do not clean
columns_from='stack'
):
print(i)
vbt.settings.broadcasting.reset()
big_a = np.empty((1000, 1000))
print(reshape_fns.broadcast(np.empty((1,)), big_a)[0].flags)
%timeit reshape_fns.broadcast(np.empty((1,)), big_a)
print(reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'W'})[0].flags)
%timeit reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'W'})
print(reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'C'})[0].flags)
%timeit reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'C'})
print(reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'F'})[0].flags)
%timeit reshape_fns.broadcast(np.empty((1,)), big_a, require_kwargs={'requirements': 'F'})
print(reshape_fns.broadcast(v1, df4, to_pd=False))
print(reshape_fns.broadcast(v1, df4, to_pd=True))
# One-side broadcasting, default behaviour is copying index/columns from the second argument
print(reshape_fns.broadcast_to(sr1, sr1))
print(reshape_fns.broadcast_to(sr1, sr2))
print(reshape_fns.broadcast_to(sr1, df1))
print(reshape_fns.broadcast_to(sr1, df2))
print(reshape_fns.broadcast_to(sr1, df3))
print(reshape_fns.broadcast_to(sr1, df4))
# Broadcasting first element to be an array out of the second argument
print(reshape_fns.broadcast_to_array_of(0.1, v1))
print(reshape_fns.broadcast_to_array_of([0.1], v1))
print(reshape_fns.broadcast_to_array_of([0.1, 0.2], v1))
print(reshape_fns.broadcast_to_array_of(0.1, sr2))
print(reshape_fns.broadcast_to_array_of([0.1], sr2))
print(reshape_fns.broadcast_to_array_of([0.1, 0.2], sr2))
print(reshape_fns.broadcast_to_array_of([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6]], sr2))
print(reshape_fns.broadcast_to_array_of(0.1, df2))
print(reshape_fns.broadcast_to_array_of([0.1], df2))
print(reshape_fns.broadcast_to_array_of([0.1, 0.2], df2))
print(reshape_fns.broadcast_to_array_of([[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]]], df2))
print(reshape_fns.broadcast_to_array_of(0.1, np.empty((2, 2, 2)))) # works even for ndim > 2
print(reshape_fns.broadcast_to_axis_of(10, np.empty((2,)), 0))
print(reshape_fns.broadcast_to_axis_of(10, np.empty((2,)), 1))
print(reshape_fns.broadcast_to_axis_of(10, np.empty((2, 3)), 0))
print(reshape_fns.broadcast_to_axis_of(10, np.empty((2, 3)), 1))
print(reshape_fns.broadcast_to_axis_of(10, np.empty((2, 3)), 2))
i = pd.MultiIndex.from_arrays([[1, 1, 2, 2], [3, 4, 3, 4], ['a', 'b', 'c', 'd']])
sr = pd.Series([1, 2, 3, 4], index=i)
print(reshape_fns.unstack_to_array(sr))
print(reshape_fns.make_symmetric(sr1))
print(reshape_fns.make_symmetric(sr2))
print(reshape_fns.make_symmetric(df1))
print(reshape_fns.make_symmetric(df2))
print(reshape_fns.make_symmetric(df3))
print(reshape_fns.make_symmetric(df4))
print(reshape_fns.make_symmetric(df5))
print(reshape_fns.make_symmetric(pd.Series([1, 2, 3], name='yo'), sort=False))
print(reshape_fns.unstack_to_df(df5.iloc[0]))
print(reshape_fns.unstack_to_df(sr, index_levels=0, column_levels=1))
print(reshape_fns.unstack_to_df(sr, index_levels=(0, 1), column_levels=2))
print(reshape_fns.unstack_to_df(sr, index_levels=0, column_levels=1, symmetric=True).columns)
```
## indexing
```
PandasIndexer = indexing.PandasIndexer
ParamIndexer = indexing.ParamIndexerFactory(['param1', 'param2', 'tuple'])
class H(PandasIndexer, ParamIndexer):
def __init__(self, a, param1_mapper, param2_mapper, tuple_mapper):
self.a = a
self._param1_mapper = param1_mapper
self._param2_mapper = param2_mapper
self._tuple_mapper = tuple_mapper
PandasIndexer.__init__(self, my_kw='PandasIndexer')
ParamIndexer.__init__(self, [param1_mapper, param2_mapper, tuple_mapper], my_kw='ParamIndexer')
def _indexing_func(self, pd_indexing_func, my_kw=None):
# As soon as you call iloc etc., performs it on each dataframe and mapper and returns a new class instance
print(my_kw)
param1_mapper = indexing.indexing_on_mapper(self._param1_mapper, self.a, pd_indexing_func)
param2_mapper = indexing.indexing_on_mapper(self._param2_mapper, self.a, pd_indexing_func)
tuple_mapper = indexing.indexing_on_mapper(self._tuple_mapper, self.a, pd_indexing_func)
return H(pd_indexing_func(self.a), param1_mapper, param2_mapper, tuple_mapper)
@classmethod
def run(cls, a, params1, params2, level_names=('p1', 'p2')):
a = reshape_fns.to_2d(a)
# Build column hierarchy
params1_idx = pd.Index(params1, name=level_names[0])
params2_idx = pd.Index(params2, name=level_names[1])
params_idx = index_fns.stack_indexes(params1_idx, params2_idx)
new_columns = index_fns.combine_indexes(params_idx, a.columns)
# Build mappers
param1_mapper = np.repeat(params1, len(a.columns))
param1_mapper = pd.Series(param1_mapper, index=new_columns, name=params1_idx.name)
param2_mapper = np.repeat(params2, len(a.columns))
param2_mapper = pd.Series(param2_mapper, index=new_columns, name=params2_idx.name)
tuple_mapper = list(zip(*list(map(lambda x: x.values, [param1_mapper, param2_mapper]))))
tuple_mapper = pd.Series(tuple_mapper, index=new_columns, name=(params1_idx.name, params2_idx.name))
# Tile a to match the length of new_columns
a = array_wrapper.ArrayWrapper(a.index, new_columns, 2).wrap(reshape_fns.tile(a.values, 4, axis=1))
return cls(a, param1_mapper, param2_mapper, tuple_mapper)
# Simulate an indicator with two params
h = H.run(df4, [0.1, 0.1, 0.2, 0.2], [0.3, 0.4, 0.5, 0.6])
print(df4)
print(h.a)
print(h._param1_mapper)
print(h._param2_mapper)
print(h._tuple_mapper)
# Indexing operations are delegated to the underlying dataframes
print(h[(0.1, 0.3, 'a6')].a)
print(h.loc[:, (0.1, 0.3, 'a6'):(0.1, 0.3, 'c6')].a)
print(h.iloc[-2:, -2:].a)
print(h.xs((0.1, 0.3), level=('p1', 'p2'), axis=1).a.columns)
print(h.param1_loc[0.1].a.columns)
print(h.param1_loc[0.1:0.1].a)
print(h.param1_loc[[0.1, 0.1]].a)
print(h.param2_loc[0.3].a)
print(h.param2_loc[0.3:0.3].a)
print(h.param2_loc[[0.3, 0.3]].a.columns)
print(h.tuple_loc[(0.1, 0.3)].a)
print(h.tuple_loc[(0.1, 0.3):(0.1, 0.3)].a.columns)
print(h.tuple_loc[[(0.1, 0.3), (0.1, 0.3)]].a.columns)
```
## combine_fns
```
vbt.settings.broadcasting['index_from'] = 'stack'
vbt.settings.broadcasting['columns_from'] = 'stack'
print(combine_fns.apply_and_concat_one(3, lambda i, x, a: x + a[i], sr2.values, [10, 20, 30]))
print(combine_fns.apply_and_concat_one_nb(3, njit(lambda i, x, a: x + a[i]), sr2.values, (10, 20, 30)))
print(combine_fns.apply_and_concat_one(3, lambda i, x, a: x + a[i], df4.values, [10, 20, 30]))
print(combine_fns.apply_and_concat_one_nb(3, njit(lambda i, x, a: x + a[i]), df4.values, (10, 20, 30)))
print(combine_fns.apply_and_concat_multiple(3, lambda i, x, a: (x, x + a[i]), sr2.values, [10, 20, 30]))
print(combine_fns.apply_and_concat_multiple_nb(3, njit(lambda i, x, a: (x, x + a[i])), sr2.values, (10, 20, 30)))
print(combine_fns.apply_and_concat_multiple(3, lambda i, x, a: (x, x + a[i]), df4.values, [10, 20, 30]))
print(combine_fns.apply_and_concat_multiple_nb(3, njit(lambda i, x, a: (x, x + a[i])), df4.values, (10, 20, 30)))
print(combine_fns.combine_and_concat(sr2.values, (sr2.values*2, sr2.values*3), lambda x, y, a: x + y + a, 100))
print(combine_fns.combine_and_concat_nb(sr2.values, (sr2.values*2, sr2.values*3), njit(lambda x, y, a: x + y + a), 100))
print(combine_fns.combine_and_concat(df4.values, (df4.values*2, df4.values*3), lambda x, y, a: x + y + a, 100))
print(combine_fns.combine_and_concat_nb(df4.values, (df4.values*2, df4.values*3), njit(lambda x, y, a: x + y + a), 100))
print(combine_fns.combine_multiple((sr2.values, sr2.values*2, sr2.values*3), lambda x, y, a: x + y + a, 100))
print(combine_fns.combine_multiple_nb((sr2.values, sr2.values*2, sr2.values*3), njit(lambda x, y, a: x + y + a), 100))
print(combine_fns.combine_multiple((df4.values, df4.values*2, df4.values*3), lambda x, y, a: x + y + a, 100))
print(combine_fns.combine_multiple_nb((df4.values, df4.values*2, df4.values*3), njit(lambda x, y, a: x + y + a), 100))
```
## accessors
```
print(pd.Series.vbt.empty(5, index=np.arange(10, 15), name='a', fill_value=5))
print(pd.DataFrame.vbt.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c'], fill_value=5))
print(pd.Series.vbt.empty_like(sr2, fill_value=5))
print(pd.DataFrame.vbt.empty_like(df4, fill_value=5))
print(sr1.vbt.is_series())
print(sr1.vbt.is_frame())
print(df1.vbt.is_series())
print(df2.vbt.is_frame())
print(sr2.vbt.wrapper.index)
print(sr2.vbt.wrapper.columns)
print(df4.vbt.wrapper.index)
print(df4.vbt.wrapper.columns)
print(df1.vbt.apply_on_index(lambda idx: idx + '_yo', axis=0))
print(df1.vbt.apply_on_index(lambda idx: idx + '_yo', axis=1))
df1_copy = df1.copy()
df1_copy.vbt.apply_on_index(lambda idx: idx + '_yo', axis=0, inplace=True)
print(df1_copy)
df1_copy.vbt.apply_on_index(lambda idx: idx + '_yo', axis=1, inplace=True)
print(df1_copy)
print(sr2.vbt.to_1d_array())
print(sr2.vbt.to_2d_array())
# It will try to return pd.Series
print(sr2.vbt.wrapper.wrap(a2)) # returns sr
print(sr2.vbt.wrapper.wrap(df2.values)) # returns sr
print(sr2.vbt.wrapper.wrap(df2.values, index=df2.index, columns=df2.columns)) # returns sr
print(sr2.vbt.wrapper.wrap(df4.values, columns=df4.columns)) # returns df
print(sr2.vbt.wrapper.wrap(df4.values, index=df4.index, columns=df4.columns)) # returns df
# It will try to return pd.DataFrame
print(df2.vbt.wrapper.wrap(a2)) # returns df
print(df2.vbt.wrapper.wrap(sr2.values)) # returns df
print(df2.vbt.wrapper.wrap(df4.values, columns=df4.columns)) # returns df
print(df2.vbt.wrapper.wrap(df4.values, index=df4.index, columns=df4.columns)) # returns df
print(df4.vbt.tile(2, keys=['a', 'b']))
print(df4.vbt.repeat(2, keys=['a', 'b']))
df10 = pd.DataFrame([[1, 2], [4, 5], [7, 8]], columns=multi_c1)
df20 = pd.DataFrame([[1, 2, 3, 4], [4, 5, 6, 7], [7, 8, 9, 10]], columns=multi_c2)
print(df10)
print(df20)
print(df10.vbt.align_to(df20))
print(pd.DataFrame.vbt.broadcast(
sr2,
10
))
print(sr2.vbt.broadcast(
10
))
print(sr2.vbt.broadcast_to(
df2
))
print(sr2.vbt.make_symmetric())
print(df2.vbt.make_symmetric())
print(df3.vbt.make_symmetric())
print(df4.vbt.make_symmetric())
print(df5.iloc[:, 0].vbt.unstack_to_array())
print(df5.iloc[:, 0].vbt.unstack_to_df())
print(sr2.vbt.apply(apply_func=lambda x: x ** 2))
print(sr2.vbt.apply(apply_func=lambda x: x ** 2, to_2d=True))
print(df2.vbt.apply(apply_func=lambda x: x ** 2))
print(pd.DataFrame.vbt.concat(sr2, 10, df4, keys=['a', 'b', 'c']))
print(sr2.vbt.concat(10, df4, keys=['a', 'b', 'c']))
print(sr2.vbt.apply_and_concat(3, sr2.values, 10, apply_func=lambda i, x, y, c, d=1: x + y[i] + c + d, d=100))
print(sr2.vbt.apply_and_concat(3, sr2.values, 10, apply_func=njit(lambda i, x, y, c: x + y[i] + c + 100)))
print(sr2.vbt.apply_and_concat(3, df4.values, 10, apply_func=lambda i, x, y, c, d=1: x + y[:, i] + c + d, d=100))
print(sr2.vbt.apply_and_concat(3, df4.values, 10, apply_func=njit(lambda i, x, y, c: x + y[:, i] + c + 100)))
print(df4.vbt.apply_and_concat(3, df4.values, 10, apply_func=lambda i, x, y, c, d=1: x + y[:, i] + c + d, d=100))
print(df4.vbt.apply_and_concat(
3,
df4.values,
10,
apply_func=njit(lambda i, x, y, c: x + y[:, i] + c + 100),
keys=pd.Index(['a', 'b', 'c'], name='hello')))
print(sr2.vbt.combine_with(10., combine_func=lambda x, y: x + y))
print(sr2.vbt.combine_with(10, 100, d=1000, combine_func=lambda x, y, c, d=1: x + y + c + d)) # test args and kwargs
print(sr2.vbt.combine_with([10, 20, 30], combine_func=lambda x, y: x + y))
print(sr2.vbt.combine_with([[10, 20, 30]], combine_func=lambda x, y: x + y))
print(sr2.vbt.combine_with(sr1, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with(sr2, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with(df2, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with(df3, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with(df4, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with(df5, combine_func=lambda x, y: x + y, broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
pd.Series([10, 20, 30])],
10, b=100,
combine_func=lambda x, y, a, b=1: x + y + a + b,
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10, b=100,
combine_func=lambda x, y, a, b=1: x + y + a + b,
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10,
combine_func=njit(lambda x, y, a, b=1: x + y + a + 100),
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10,
combine_func=njit(lambda x, y, a, b=1: x + y + a + 100),
broadcast_kwargs=dict(index_from='stack')))
# Test concat=True
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
pd.Series([10, 20, 30])],
10, b=100,
combine_func=lambda x, y, a, b=1: x + y + a + b,
concat=True,
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10, b=100,
combine_func=lambda x, y, a, b=1: x + y + a + b,
concat=True,
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10,
combine_func=njit(lambda x, y, a, b=1: x + y + a + 100),
concat=True,
broadcast_kwargs=dict(index_from='stack')))
print(sr2.vbt.combine_with_multiple(
[10,
[10, 20, 30],
[[10, 20, 30]],
pd.Series([10, 20, 30]),
df1,
df3],
10,
combine_func=njit(lambda x, y, a, b=1: x + y + a + 100),
concat=True,
keys=['a', 'b', 'c', 'd', 'e', 'f'],
broadcast_kwargs=dict(index_from='stack')))
# Use magic methods with .vbt to do operations with custom broadcasting
# Regular df3 + df4 will return nans
print(df3.vbt + df4.vbt)
```
|
github_jupyter
|
```
from PIL import Image
import numpy as np
```
First, download the MNIST data
```
import os
import urllib
from urllib.request import urlretrieve
dataset = 'mnist.pkl.gz'
def reporthook(a,b,c):
print("\rdownloading: %5.1f%%"%(a*b*100.0/c), end="")
if not os.path.isfile(dataset):
origin = "https://github.com/mnielsen/neural-networks-and-deep-learning/raw/master/data/mnist.pkl.gz"
print('Downloading data from %s' % origin)
urlretrieve(origin, dataset, reporthook=reporthook)
import gzip
import pickle
with gzip.open(dataset, 'rb') as f:
train_set, validation_set, test_set = pickle.load(f, encoding='latin1')
# Set up the training and test data
train_X, train_y = train_set
test_X, test_y = test_set
# Convert to our format
train_X = train_X[..., None]
test_X = test_X[..., None]
# There are 10 classes; the inputs are 784-dimensional
print(train_X.shape)
np.unique(train_y)
from IPython.display import display
def showX(X):
int_X = (X*255).clip(0,255).astype('uint8')
# N*784 -> N*28*28 -> 28*N*28 -> 28 * 28N
int_X_reshape = int_X.reshape(-1,28,28).swapaxes(0,1).reshape(28,-1)
display(Image.fromarray(int_X_reshape))
# Training data: the first 20 samples of X
print(train_y[:20])
showX(train_X[:20])
# Reference example: softmax regression
W = np.random.normal(size=(10, 784))
b = np.random.normal(size=(10, 1))
n_data = train_X.shape[0]
# Record the loss
loss_history = []
accuracy_history = []
for epoch in range(5000):
idx = np.random.choice(n_data, 300, replace=False)
X = train_X[idx]
y = train_y[idx]
one_y = np.eye(10)[y][..., None]
d = np.exp(W @ X + b)
q = d/d.sum(axis=(1,2), keepdims=True)
loss = -np.log(q[range(len(y)), y]).mean()
loss_history.append(loss)
accuracy = (q.argmax(axis=1).ravel() == y).mean()
accuracy_history.append(accuracy)
if epoch%100 == 0:
print(epoch, accuracy, loss)
grad_b_all = q - one_y
grad_b = grad_b_all.mean(axis=0)
grad_W_all = grad_b_all @ X.swapaxes(1,2)
grad_W = grad_W_all.mean(axis=0)
W -= grad_W
b -= grad_b
# Accuracy on the test data
((W @ test_X + b).argmax(axis=1).ravel() == test_y).mean()
%matplotlib inline
import matplotlib.pyplot as plt
# Plot the accuracy
plt.plot(accuracy_history);
# Plot the loss
plt.plot(loss_history);
def softmax(x):
t = np.exp(x)
return t/t.sum(axis=(-2,-1),keepdims=True)
def relu(x):
return np.maximum(x, 0)
def sigmoid(x):
return 1/(1+np.exp(-x))
# Derivatives
def Drelu(x):
return (x>0).astype('float32')
def Dsigmoid(x):
q = sigmoid(x)
return q * (1-q)
# or
#return np.exp(x)/(1+np.exp(-x))**2
# Reference example: feedforward network
from time import time
accuracy_history = []
γ = 0.02
A = np.random.normal(size=(50,784))
b = np.random.normal(size=(50,1))
C = np.random.normal(size=(10,50))
d = np.random.normal(size=(10,1))
t0 = time()
for epochs in range(20):
idx = np.random.choice(n_data, n_data, replace=False)
for i in idx:
x = train_X[i]
y = train_y[i]
U_ = A@x+b
U = relu(U_)
q = softmax(C@U+d)
L = - np.log(q[y])[0]
p = np.eye(10)[y][:, None]
grad_d = q - p
grad_C = grad_d @ U.T
grad_b = (C.T @ grad_d ) * Drelu(U_)
grad_A = grad_b @ x.T
A -= γ * grad_A
b -= γ * grad_b
C -= γ * grad_C
d -= γ * grad_d
score = ((C@relu(A@test_X+b)+d).argmax(axis=1).ravel()==test_y).mean()
print(epochs, score, "%.1f"%(time()-t0), L)
print(time()-t0)
```
|
github_jupyter
|
```
import pandas as pd
import scipy.sparse as sparse
from code.preprocessing import Dataset
from core.database.db import DB
from code.metrics import fuzzy, precision
from implicit.als import AlternatingLeastSquares
db = DB(db='recsys')
from code.preprocessing import filter_old_cards, filter_rare_cards, filter_rare_goods, filter_old_goods, filter_by_quantile
%load_ext autoreload
%autoreload 2
```
### Preprocessing the training set
```
train = pd.read_sql('select * from db.train', con = db.engine)
print('Shape: %s' % train.shape[0])
train = filter_rare_goods(train, rarity_num=5)
print('Shape without rare goods: %s' % train.shape[0])
train = filter_rare_cards(train, rarity_num=5)
print('Shape without rare cards: %s' % train.shape[0])
train = filter_old_cards(train, month_threshold=1)
print('Shape without old cards: %s' % train.shape[0])
train = filter_old_goods(train, month_threshold=1)
print('Shape without old goods: %s' % train.shape[0])
train = filter_by_quantile(train, plu_count_quantiles=(0.5, 0.99), cards_count_quantiles=(0.4, 0.99))
print('Shape without low and high quantiles: %s' % train.shape[0])
ds = Dataset(train)
matrix = ds.make_matrix()
matrix = ds.transform(matrix, method='clip', clip_upper_value=1000)
matrix = ds.transform(matrix, method='log')
matrix = ds.apply_weights(matrix, weight='bm25')
```
## Preparing and cleaning the test set
```
products = pd.read_sql('select * from db.products', con = db.engine)
test = pd.read_sql('select * from db.test', con = db.engine)
val = pd.read_sql('select * from db.val', con = db.engine)
test.columns = [x.lower() for x in test.columns]
products.columns = [x.lower() for x in products.columns]
val.columns = [x.lower() for x in val.columns]
crd_no_unique_train = matrix.index.unique()
plu_id_unique_train = matrix.columns.unique()
test = test[test['crd_no'].isin(crd_no_unique_train)]
test = test[test['plu_id'].isin(plu_id_unique_train)]
val = val[val['crd_no'].isin(crd_no_unique_train)]
val = val[val['plu_id'].isin(plu_id_unique_train)]
plu_category_dict = products.set_index('plu_id').to_dict()['level_2_name']
val_facts_dict = dict(val[['crd_no', 'plu_id']].groupby('crd_no').apply(lambda x: x['plu_id'].unique().tolist()))
test_facts_dict = dict(test[['crd_no', 'plu_id']].groupby('crd_no').apply(lambda x: x['plu_id'].unique().tolist()))
plu_price = pd.read_sql('select * from db.plu_price', con=db.engine)
plu_price['mean_price'] = plu_price['mean_price'].astype('float16')
plu_price = dict(plu_price[['plu_id', 'mean_price']].values.tolist())
```
### Building the model
```
model = AlternatingLeastSquares(factors=50, regularization=0.0001,
iterations=20, num_threads=16,
calculate_training_loss=True)
model.fit(sparse.csr_matrix(matrix).T.tocsr(), show_progress=True)
```
### Checking the metrics
```
%%time
fz = fuzzy(matrix, model, val_facts_dict, plu_category_dict, weight_by_price=False)
prc = precision(matrix, model, val_facts_dict, weight_by_price=False)
fz_w = fuzzy(matrix, model, val_facts_dict, plu_category_dict, plu_price=plu_price)
prc_w = precision(matrix, model, val_facts_dict, plu_price=plu_price)
print('Fuzzy: %s' % fz)
print('Fuzzy Weighted: %s' % fz_w)
print('Precision: %s' % prc)
print('Precision Weighted: %s' % prc_w)
```
|
github_jupyter
|
# Assumptions of Linear Regression
Previously, we learned to apply linear regression to a given dataset. It is important to note that linear regression makes some assumptions about the data it is applied to, and if they are not satisfied, its performance can suffer. These assumptions are:
1. There should be a linear relationship between the dependent variable and the independent features.
2. There should be no auto-correlation. This means that the error terms should not be correlated.
3. The variance of the error terms should be equal.
4. There should be no multi-collinearity. This means that no two independent features should be highly correlated.
5. The errors should be normally distributed.
Let's check these assumptions on the model we trained in the previous activity.
## Loading the previous model
```
#importing libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
#load the data
#seperating dependant and independant features
#splitting data into training and test sets
#instantiate a model
#fit the model to training data
```
<details>
<summary>Solution</summary>
<p>
```python
data = pd.read_csv('../../data/data_cleaned.csv')
X = data.drop('price', axis= 1)
y = data.price
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state= 42, shuffle= True)
model = LinearRegression()
model.fit(X_train, y_train)
```
</p>
</details>
Now that we have the model, let's calculate the residuals first.
## Calculate residuals
```
#create a dataframe to store residuals
result = pd.DataFrame({'Actual': y_test, 'Predicted': model.predict(X_test)})
result.reset_index(drop= True, inplace= True) #reset indexes
```
* Make a new column **residuals** by subtracting the *Predicted* values from the *Actual* values
* Display the top 5 rows of the **result** dataframe.
```
#Make a new column residuals
#display top 5 rows of the result dataframe.
```
<details>
<summary>Solution</summary>
<p>
```python
result['residuals'] = result.Actual - result.Predicted
result.head()
```
</p>
</details>
## Check the variance and correlation of error terms
```
import matplotlib.pyplot as plt #importing libraries for plotting graphs
#plotting the residuals
plt.scatter(range(len(y_test)), result.residuals)
plt.show()
```
We can clearly see that, apart from 3-4 points, the spread of the error terms is constant, so we can conclude that the variance is constant (homoscedasticity).
Also, there is no specific pattern in the error terms; they are randomly scattered, so there is no correlation among them.
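As a quick numeric check of the no-autocorrelation assumption, here is a minimal sketch using the Durbin-Watson statistic (statsmodels is already a dependency of this notebook); values close to 2 suggest no autocorrelation. It assumes the `result` dataframe defined above.
```
#quantify autocorrelation of the residuals: values near 2 indicate no autocorrelation
from statsmodels.stats.stattools import durbin_watson
print(durbin_watson(result.residuals))
```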
## Check Distribution of Residuals
* Draw a histogram of the residuals from the **result** dataframe using 300 bins.
```
#draw a histogram
```
<details>
<summary>Solution</summary>
<p>
```python
plt.hist(result.residuals, bins= 300)
plt.show()
```
</p>
</details>
From the above graph we can conclude that the error terms are approximately normally distributed. The unusually high peak of the curve is caused by the outliers that were pointed out in the first activity. To confirm the distribution, we can also draw a Q-Q plot.
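Below is a minimal sketch of such a Q-Q plot, assuming the `result` dataframe from above; it uses `scipy.stats.probplot` (not otherwise imported in this notebook) to compare the residuals against a normal distribution.
```
#Q-Q plot of the residuals against a normal distribution
from scipy import stats
stats.probplot(result.residuals, dist="norm", plot=plt)
plt.title("Q-Q plot of residuals")
plt.show()
```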
## Check for Multi-Collinearity
To check for multi-collinearity, we can compute the **Variance Inflation Factor (VIF)** of every column. If a feature's VIF is above 5, we can conclude that it is highly correlated with other features.
```
# Importing Variance_inflation_Factor funtion from the Statsmodels
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
# Calculating VIF for every column (only works for non-categorical columns)
VIF = pd.Series([variance_inflation_factor(data.values, i) for i in range(data.shape[1])], index =data.columns)
VIF
```
There are 4 features with a VIF greater than 5. We can remove the 2 features with the highest values; after that, none of the remaining features should be strongly correlated, so the multi-collinearity is removed.
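As a hedged sketch of that removal step (the column names depend on the dataset, so they are computed from the `VIF` series rather than hard-coded; the target column `price` from the earlier solution is excluded from the candidates):
```
#drop the two features with the highest VIF values
to_drop = VIF.drop('price', errors='ignore').sort_values(ascending=False).index[:2]
print('Dropping:', list(to_drop))
data_reduced = data.drop(columns=to_drop)
```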
## Conclusion
This is how we can check the assumptions of linear regression and verify whether the data satisfies them.
## Additional Resources
1. VIF : https://www.analyticsvidhya.com/blog/2020/03/what-is-multicollinearity/
2. Assumptions in detail: https://www.analyticsvidhya.com/blog/2016/07/deeper-regression-analysis-assumptions-plots-solutions/
|
github_jupyter
|
# How to search the IOOS CSW catalog with Python tools
This notebook demonstrates how to query a [Catalog Service for the Web (CSW)](https://en.wikipedia.org/wiki/Catalog_Service_for_the_Web), like the IOOS Catalog, and how to parse the results into endpoints that can be used to access the data.
```
import os
import sys
ioos_tools = os.path.join(os.path.pardir)
sys.path.append(ioos_tools)
```
Let's start by creating the search filters.
The filter used here constrains the search to a certain geographical region (bounding box), a time span, and free-text keywords matching the model names of interest.
```
from datetime import datetime, timedelta
import dateutil.parser
service_type = 'WMS'
min_lon, min_lat = -90.0, 30.0
max_lon, max_lat = -80.0, 40.0
bbox = [min_lon, min_lat, max_lon, max_lat]
crs = 'urn:ogc:def:crs:OGC:1.3:CRS84'
# Temporal range: last week by default, overridden below with a fixed range (March 2017).
now = datetime.utcnow()
start, stop = now - timedelta(days=(7)), now
start = dateutil.parser.parse('2017-03-01T00:00:00Z')
stop = dateutil.parser.parse('2017-04-01T00:00:00Z')
# Ocean Model Names
model_names = ['NAM', 'GFS']
```
With these 3 elements it is possible to assemble an [OGC Filter Encoding (FE)](http://www.opengeospatial.org/standards/filter) filter using the `owslib.fes`\* module.
\* OWSLib is a Python package for client programming with Open Geospatial Consortium (OGC) web service (hence OWS) interface standards, and their related content models.
```
from owslib import fes
from ioos_tools.ioos import fes_date_filter
kw = dict(wildCard='*', escapeChar='\\',
singleChar='?', propertyname='apiso:AnyText')
or_filt = fes.Or([fes.PropertyIsLike(literal=('*%s*' % val), **kw)
for val in model_names])
kw = dict(wildCard='*', escapeChar='\\',
singleChar='?', propertyname='apiso:ServiceType')
serviceType = fes.PropertyIsLike(literal=('*%s*' % service_type), **kw)
begin, end = fes_date_filter(start, stop)
bbox_crs = fes.BBox(bbox, crs=crs)
filter_list = [
fes.And(
[
bbox_crs, # bounding box
begin, end, # start and end date
            or_filt, # OR conditions (model name keywords)
serviceType # search only for datasets that have WMS services
]
)
]
from owslib.csw import CatalogueServiceWeb
endpoint = 'https://data.ioos.us/csw'
csw = CatalogueServiceWeb(endpoint, timeout=60)
```
The `csw` object created from `CatalogueServiceWeb` has not fetched anything yet.
It is the method `getrecords2` that applies the filter to the search. However, even though there is a `maxrecords` option, the number of records returned per call is limited by the server, so we need to iterate over multiple calls of `getrecords2` to retrieve all records.
The `get_csw_records` function below does exactly that.
```
def get_csw_records(csw, filter_list, pagesize=10, maxrecords=1000):
"""Iterate `maxrecords`/`pagesize` times until the requested value in
`maxrecords` is reached.
"""
from owslib.fes import SortBy, SortProperty
# Iterate over sorted results.
sortby = SortBy([SortProperty('dc:title', 'ASC')])
csw_records = {}
startposition = 0
nextrecord = getattr(csw, 'results', 1)
while nextrecord != 0:
csw.getrecords2(constraints=filter_list, startposition=startposition,
maxrecords=pagesize, sortby=sortby)
csw_records.update(csw.records)
if csw.results['nextrecord'] == 0:
break
startposition += pagesize + 1 # Last one is included.
if startposition >= maxrecords:
break
csw.records.update(csw_records)
get_csw_records(csw, filter_list, pagesize=10, maxrecords=1000)
records = '\n'.join(csw.records.keys())
print('Found {} records.\n'.format(len(csw.records.keys())))
for key, value in list(csw.records.items()):
print('[{}]\n{}\n'.format(value.title, key))
csw.request
#write to JSON for use in TerriaJS
csw_request = '"{}": {}"'.format('getRecordsTemplate',str(csw.request,'utf-8'))
import io
import json
with io.open('query.json', 'a', encoding='utf-8') as f:
f.write(json.dumps(csw_request, ensure_ascii=False))
f.write('\n')
```
|
github_jupyter
|
# Creating an agent
This notebook goes through how to create a new agent within the tomsup framework. In this tutorial we will be making a reversed win-stay, lose-switch agent, i.e. a win-switch, lose-stay agent.
This guide assumes a basic understanding of classes in Python; if you are unfamiliar with them or need a recap, we suggest examining this [chapter](http://hplgit.github.io/primer.html/doc/pub/class/._class-readable002.html) in the free ebook A Byte of Python
Let us first import the package:
```
#assuming you are in the github folder change the path - not relevant if tomsup is installed via. pip
import os
os.chdir("..") # go back one folder
import tomsup as ts
```
Now let's first take a look at the current win-stay, lose-switch (WSLS) agent:
```
sigmund = ts.WSLS() #create agent
# inspect sigmund
print(f"sigmund is an class of type: {type(sigmund)}") #f is for format
if isinstance(sigmund, ts.Agent):
print(f"but sigmund is also of has the parent class ts.Agent")
```
As we can see, sigmund is a WSLS agent with the parent class ts.Agent. This gives WSLS some benefits, as it inherits attributes of the parent class, such as the ability to save play history and to reset the agent. To see more of the inherited methods, see help(ts.WSLS).
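For instance, the inherited methods can be listed directly:
```
# inspect the methods WSLS inherits from ts.Agent
help(ts.WSLS)
```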
## Creating a new class
Now let's try to create our own agent one bit at a time (if you are comfortable with classes, simply jump to 'The final reversed WSLS'):
```
import numpy as np
class ReversedWSLS(ts.Agent): # make sure that the parent class is ts.Agent
"""
ReversedWSLS: Win-switch, lose-stay.
This agent is a reversed win-stay, lose-switch agent, which ...
"""
# add a docstring which explains the agent
pass # we will later replace this pass with something else
freud = ReversedWSLS()
print(f"is freud an Agent? {isinstance(freud, ts.Agent)}")
```
### Add initialization
Let's add an initialization to the agent. This sets up the things that should be created before the agent starts competing.
```
class ReversedWSLS(ts.Agent):
"""
ReversedWSLS: Win-switch, lose-stay.
This agent is a reversed win-stay, lose-switch agent, which ...
"""
    def __init__(self, first_move, **kwargs): # initialize the agent
self.strategy = "ReversedWSLS" # set the strategy name
# set internal parameters
self.first_move = first_move
super().__init__(**kwargs) # pass additional argument the ts.Agent class (could e.g. include 'save_history = True')
self._start_params = {'first_move': first_move, **kwargs} # save any starting parameters used when the agent is reset
freud = ReversedWSLS(first_move = 1)
print(f"what is freud's first move? {freud.first_move}")
print(f"what is freud's an starting parameters? {freud.get_start_params()}")
print(f"what is freud's strategy? {freud.get_strategy()}")
```
In the above you successfully created freud as an agent, and we see that his first move is 1. We also see that functions such as ```get_start_params()``` from ts.Agent are inherited by the new agent.
**Note** that we have set ```**kwargs```; this simply means that the function accepts additional arguments, e.g. ```save_history = True```.
These arguments are then passed to ```super().__init__()```, which initializes the parent class (in this case the ts.Agent class), and stored in ```_start_params```, the starting parameters. The starting parameters are used when resetting the agent, which is relevant e.g. in tournament settings.
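For example, extra keyword arguments are passed straight through to the parent class; the sketch below uses ```save_history```, which the code comments above give as an example (assumed here to be a valid ts.Agent argument).
```
# pass an extra keyword argument through **kwargs to ts.Agent
freud = ReversedWSLS(first_move = 1, save_history = True)
print(freud.get_start_params())
```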
#### Add a compete function
All agents naturally need a compete function. Let us add one to the agent:
```
class ReversedWSLS(ts.Agent):
"""
ReversedWSLS: Win-switch, lose-stay.
This agent is a reversed win-stay, lose-switch agent, which ...
"""
    def __init__(self, first_move, **kwargs): # initialize the agent
self.strategy = "ReversedWSLS" # set the strategy name
# set internal parameters
self.first_move = first_move
super().__init__(**kwargs) # pass additional argument the ts.Agent class (could e.g. include 'save_history = True')
self._start_params = {'first_move': first_move, **kwargs} # save any starting parameters used when the agent is reset
def compete(self, p_matrix, op_choice = None, agent = 0):
"""
        win-switch, lose-stay strategy, with the first move being set when the class is initialized (__init__())
p_matrix is a PayoffMatrix
op_choice is either 1 or 0
        agent is either 0 or 1 and indicates the perspective of the agent in the game (whether it is player 1 or 2)
"""
        if self.choice is None: # if a choice hasn't been made, choose the predefined first move
self.choice = self.first_move #fetch from self
else: # if a choice have been made:
payoff = p_matrix.payoff(self.choice, op_choice, agent) # calculate payoff of last round
if payoff == 1: # if the agent won then switch
self.choice = 1-self.choice # save the choice in self (for next round)
# also save any other internal states which you might
# want the agent to keep for next round in self
self._add_to_history(choice = self.choice) # save action and (if any) internal states in history
# note that _add_to_history() is not intented for
# later use within the agent
return self.choice # return choice which is either 1 or 0
freud = ReversedWSLS(first_move = 1) #create the agent
# fetch payoff matrix for the pennygame
penny = ts.PayoffMatrix(name = "penny_competitive")
print("This is the payoffmatrix for the game (seen from freud's perspective):", penny()[0,:,:], sep = "\n")
# have freud compete
choice = freud.compete(penny)
print(f"what is freud's choice the first round? {choice}")
choice = freud.compete(penny, op_choice = 1)
print(f"what is freud's choice the second round if his opponent chose 1? {choice}")
```
In the above script we add freud's compete function, which for the first round chooses his predefined first move and for subsequent rounds uses the win-switch, lose-stay strategy. It then returns either 0 or 1, depending on whether it chooses e.g. the right or left hand in the penny game. It is important that the agent only returns 0 or 1 from its compete function; otherwise it will not work within the package.
**Note** the ```self._add_to_history(choice = self.choice)```, which indicates which variables we would like to add to the agent's history, assuming save history is set to ```True```.
Finally, when you have ```__init__()``` and ```compete()``` working, you can add any additional functions you might want your agent to have. For example, you will see that we have added ```get_first_move()```, a helper function to extract the agent's first move.
## The final reversed WSLS
The following is the finalized version of the win-switch, lose-stay agent.
```
import numpy as np
class ReversedWSLS(ts.Agent):
"""
ReversedWSLS: Win-switch, lose-stay.
This agent is a reversed win-stay, lose-switch agent, which ...
Examples:
>>> waade = ReversedWSLS(first_move = 1)
>>> waade.compete(op_choice = None, p_matrix = penny)
1
"""
def __init__(self, first_move, **kwargs):
self.strategy = "ReversedWSLS"
# set internal parameters
self.first_move = first_move
super().__init__(**kwargs) # pass additional argument the ts.Agent class (could e.g. include 'save_history = True')
self._start_params = {'first_move': first_move, **kwargs} # save any starting parameters used when the agent is reset
def compete(self, p_matrix, op_choice = None):
if self.choice is None: # if a choice hasn't been made yet, choose the predefined first move
self.choice = self.first_move # fetch from self
else: # if a choice has been made:
payoff = p_matrix.payoff(self.choice, op_choice, 0) # calculate payoff of last round
if payoff == 1: # if the agent won then switch
self.choice = 1-self.choice # save the choice in self (for next round)
# also save any other internal states which you might
# want the agent to keep for next round in self
self._add_to_history(choice = self.choice) # save action and (if any) internal states in history
# note that _add_to_history() is not intended for
# later use within the agent
return self.choice # return choice
# define any additional function you wish the class should have
def get_first_move(self):
return self.first_move
```
## Test your knowledge
1) Create an agent called Random, which simply chooses randomly
2) Check that it is an agent and that its compete function works
3) Have the agent compete against another agent within the package using ```ts.compete()```. Which one wins? (A possible solution to exercise 1 is sketched below.)
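A minimal sketch of one possible answer to exercise 1, following the same pattern as the ReversedWSLS agent above; the class below and the quick check are illustrative only and assume the ```ts.Agent``` interface shown earlier.
```
import numpy as np
class Random(ts.Agent):
    """
    Random: choose 0 or 1 with equal probability, ignoring the opponent.
    """
    def __init__(self, **kwargs):
        self.strategy = "Random"
        super().__init__(**kwargs)  # e.g. 'save_history = True' is passed on to ts.Agent
        self._start_params = {**kwargs}
    def compete(self, p_matrix, op_choice=None, agent=0):
        self.choice = np.random.randint(2)  # either 0 or 1, regardless of payoffs and opponent
        self._add_to_history(choice=self.choice)
        return self.choice
skinner = Random(save_history=True)
print(skinner.compete(p_matrix=penny))  # exercise 2: should print 0 or 1
# exercise 3: pit it against e.g. freud via the package's ts.compete() helper
```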
# FAQ
- I have developed an agent which I would like to include in your package
Sounds lovely, we would love to include the agent. Feel free to make a pull request on Github or contact us at [email protected].
|
github_jupyter
|
```
import logging
import pandas as pd
import seaborn as sns
from scipy import stats
import divisivenormalization.utils as helpers
from divisivenormalization.data import Dataset, MonkeySubDataset
helpers.config_ipython()
logging.basicConfig(level=logging.INFO)
sns.set()
sns.set_style("ticks")
# adjust sns paper context rc parameters
font_size = 8
rc_dict = {
"font.size": font_size,
"axes.titlesize": font_size,
"axes.labelsize": font_size,
"xtick.labelsize": font_size,
"ytick.labelsize": font_size,
"legend.fontsize": font_size,
"figure.figsize": (helpers.cm2inch(8), helpers.cm2inch(8)),
"figure.dpi": 300,
"pdf.fonttype": 42,
"savefig.transparent": True,
"savefig.bbox_inches": "tight",
}
sns.set_context("paper", rc=rc_dict)
class args:
num_best = 10
fname_best_csv = "df_best.csv"
weights_path = "weights"
train_logs_path = "train_logs"
stim_full_size = 140 # full size of stimulus w/o subsampling and cropping
stim_subsample = 2
crop = 10
```
### Load data
```
results_df = pd.read_csv("results.csv")
# Save a simplified version of the csv file, sorted by validation set performance
df_plain = helpers.simplify_df(results_df)
df_plain.to_csv("results_plain.csv")
data_dict = Dataset.get_clean_data()
data = MonkeySubDataset(data_dict, seed=1000, train_frac=0.8, subsample=args.stim_subsample, crop=args.crop)
```
### Get and save FEV performance on test set
Use the 10 best models for analysis. As this operation requires model loading, we do it only if it
was not done before.
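For reference, FEV here stands for the fraction of explainable variance explained. The metric itself is computed inside `model.evaluate_fev_testset()`; the formula below is only the usual definition, stated as an assumption about what that method returns:
$$\mathrm{FEV} = 1 - \frac{\mathrm{MSE} - \sigma^2_{\mathrm{noise}}}{\mathrm{Var}[y] - \sigma^2_{\mathrm{noise}}}$$
where $\sigma^2_{\mathrm{noise}}$ is the trial-to-trial (observation) noise variance estimated from repeated stimulus presentations.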
```
try:
df_best = pd.read_csv(args.fname_best_csv)
logging.info("loaded data from " + args.fname_best_csv)
except FileNotFoundError:
df_best = df_plain[0 : args.num_best].copy()
fev_lst = []
for i in range(args.num_best):
run_no = df_best.iloc[i]["run_no"]
logging.info("load run no " + str(run_no))
model = helpers.load_dn_model(run_no, results_df, data, args.train_logs_path)
fev = model.evaluate_fev_testset()
fev_lst.append(fev)
feve = model.evaluate_fev_testset_per_neuron()
helpers.pkl_dump(feve, run_no, "feve.pkl", args.weights_path)
with model.session.as_default():
u = model.u.eval()
helpers.pkl_dump(u, run_no, "u.pkl", args.weights_path)
df_best["fev"] = fev_lst
df_best.to_csv(args.fname_best_csv)
fev = df_best.fev.values * 100
print("Mean FEV", fev.mean())
print("SEM", stats.sem(fev, ddof=1))
print("max FEV", fev.max())
print("FEV of model with max correlation on validation set", fev[0])
```
|
github_jupyter
|
# Vladislav Abramov and Sergei Garshin DSBA182
## The Task
### What do we expect from this tutorial?
1. Estimate a specific model of the chosen class. Don't just call .fit — also write out the resulting equation!
2. Select a model automatically (built-in model selection).
3. Plot the forecasts, with interval forecasts where available.
4. Compare a few (two or three) models of this class using a rolling window.
5. Creativity, any extras, memes :)
### Class to choose from: ETS, ARIMA, BATS + TBATS, PROPHET, random forest + feature engineering, GARCH, or propose your own
### Goal: when people ask a year from now "how do I estimate ETS/ARIMA in Python?", the answer should be "read the tutorials from our course!"
---
---
---
# Real Data Analysis with ARIMA models
Let's begin with collecting stock data
```
import pandas as pd
import yfinance as yf
from matplotlib import pyplot as plt
import numpy as np
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pmdarima.arima import auto_arima, ARIMA, ADFTest
from sklearn.metrics import mean_squared_error
from math import sqrt
from tqdm import tqdm
from sklearn.metrics import r2_score
import warnings
warnings.filterwarnings('ignore')
def should_diff(data):
adf_test = ADFTest(alpha = 0.05)
return adf_test.should_diff(data)
def get_stock_data(ticker, start, end):
tickerData = yf.Ticker(ticker)
tickerDf = tickerData.history(period='1d', start = start, end = end)
return tickerDf
def train_test_devision(n, data):
train = data[:-n]
test = data[-n:]
return train, test
def differentiate_data(dataset, interval=1):
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
return diff
def autocorrelation_plot(data):
data = np.array(data)**2
plot_acf(data)
plt.show()
def p_autocorrelation_plot(data):
data = np.array(data)**2
plot_pacf(data)
plt.show()
data = get_stock_data('AAPL', '2015-1-1', '2021-2-1')
data.head(10)
```
---
Here we may observe the graph of the stock price of Apple Inc. over the period 1st Jan 2015 to 1st Feb 2021
```
plt.plot(data['Close'])
plt.title('Close Stock Prices')
```
---
Looking at the graph, it is obvious that the data is not stationary and has a strong trend. Nevertheless, let's confirm the non-stationarity with an autocorrelation plot and the Augmented Dickey-Fuller test.
```
print('Should differentiate? :', should_diff(data['Close']))
print()
print('ACF of undifferentiated data')
autocorrelation_plot(data['Close'])
```
---
As we can see, we were right, the data is not stationary!
## Stationarity check & conversion to stationary data
Now, let's difference the initial stock data to obtain a stationary series of deltas
```
X = pd.DataFrame()
X['Diff_Close'] = differentiate_data(data['Close'])
plt.plot(X['Diff_Close'])
plt.title('Stationary stock data plot')
```
As we may notice, the trend has vanished and the data is much more stationary than before. As the next step, let's check stationarity again with the autocorrelation plot, the partial autocorrelation plot, and the Augmented Dickey-Fuller test.
```
print('Should differentiate? :', should_diff(X['Diff_Close']))
print()
print('ACF of differentiated data')
autocorrelation_plot(X['Diff_Close'])
print('PACF of differentiated data')
p_autocorrelation_plot(X['Diff_Close'])
```
Wow! The data has become stationary! We may go further!
---
## Train / Test division
At this step we divide our data into two parts, train and test. Our model will use the training set to make predictions, which we then compare with the test set.
```
n = 50
train, test = train_test_devision(n, data['Close'])
fig, ax = plt.subplots()
ax.plot(train, label = 'Train Set')
ax.plot(test, label = 'Test Set')
fig.set_figheight(6)
fig.set_figwidth(10)
ax.legend()
```
---
# Manual Model
In this part we train an ARIMA(3,1,2) model, where p = 3 AR terms, d = 1 since we need one differencing step, and q = 2 MA terms
```
X = data['Close'].values
size = len(train.values)
train, test = train.values, test.values
history = [x for x in train]
predictions, CI = [],[]
for t in tqdm(range(len(test))):
model = ARIMA((3,1,2))
model.fit(history)
y_hat, conf_int = model.predict(n_periods = 1, return_conf_int = True, alpha=0.05)
predictions.append(y_hat)
CI.append(conf_int)
obs = test[t]
history.append(obs)
# print('predicted=%f, expected=%f' % (yhat, obs))
rmse = sqrt(mean_squared_error(test, predictions))
r_squared = r2_score(test, predictions)
print('Test RMSE: %.3f' % rmse)
print('Test R^2: %.3f' % r_squared)
fig, ax = plt.subplots(figsize=(15,8))
ax.plot(test, label = 'Test Set')
ax.plot(predictions, label = 'Prediction Set')
ax.set_title('ARIMA (3,1,2)')
ax.set_xlabel('Price')
ax.set_ylabel('Day')
ax.legend()
model.summary()
```
## The ARIMA equation we got
$\Delta y_t = -0.0090\,\Delta y_{t-1} - 0.1220\,\Delta y_{t-2} - 0.0377\,\Delta y_{t-3} - 0.1042\,\varepsilon_{t-1} - 0.1690\,\varepsilon_{t-2} + \varepsilon_t$
where $\\ \Delta y_t = y_t - y_{t-1}$
As we may see, the model works pretty well
---
## Automatic choice of the model
In this section we would like to play with automatic parameter selection, which also accounts for seasonal dependency
```
n = 50
train, test = train_test_devision(n, data['Close'])
model = auto_arima(train, start_p=1, start_q=1,
max_p=3, max_q=3, m=12,
start_P=0, seasonal=True,
d=1, D=1, trace = True,
error_action='ignore',
suppress_warnings = True,
stepwise = True)
model.summary()
y_hat, conf_int = model.predict(n_periods = n, return_conf_int = True, alpha=0.05)
predictions = pd.DataFrame(y_hat, index = test.index, columns = ['Prediction'])
CI = pd.DataFrame({'CI lower': conf_int[:, 0], 'CI upper': conf_int[:, 1]}, index = test.index)
fig, (ax1, ax2) = plt.subplots(1, 2,figsize=(20,8))
ax1.plot(train[1400:], label = 'Train Set')
ax1.plot(test, label = 'Test Set')
ax1.plot(predictions, label = 'Prediction Set')
ax1.plot(CI['CI lower'], label = 'CI lower', c = 'r')
ax1.plot(CI['CI upper'], label = 'CI upper', c = 'r')
ax1.set_title('Close look at the predictions')
ax1.set_xlabel('Price')
ax1.set_ylabel('Date')
ax1.legend()
ax2.plot(train[900:], label = 'Train Set')
ax2.plot(test, label = 'Test Set')
ax2.plot(predictions, label = 'Prediction Set')
ax2.plot(CI['CI lower'], label = 'CI lower', c = 'r')
ax2.plot(CI['CI upper'], label = 'CI upper', c = 'r')
ax2.set_title('Global look at the predictions')
ax2.set_xlabel('Price')
ax2.set_ylabel('Date')
ax2.legend()
```
To inspect the results we have built two graphs: the left one zooms in more closely on the predictions than the right one.
---
---
---
Unfortunately, by Wednesday evening we did not manage to complete all the points and give a detailed description of our steps. We would very much appreciate comments on the completed stages, as well as advice and guidance :)
|
github_jupyter
|
[exercises](intro.ipynb)
```
import numpy as np
np.arange(6)
np.arange(0, 0.6, 0.1), np.arange(6) * 0.1 # two possibilities
np.arange(0.5, 1.1, 0.1), "<-- wrong result!"
np.arange(5, 11) * 0.1, "<-- that's right!"
np.linspace(0, 6, 7)
np.linspace(0, 6, 6, endpoint=False), np.linspace(0, 5, 6) # two possibilities
np.linspace(0, 0.6, 6, endpoint=False), np.linspace(0, 0.5, 6) # again two possibilities
np.linspace(0.5, 1.1, 6, endpoint=False), np.linspace(0.5, 1, 6) # and again ...
```
If the number of elements is known and the step size should be obtained automatically $\Rightarrow$ `np.linspace()`
If the step size is known and it is an integer, and the number of elements should be obtained automatically $\Rightarrow$ `np.arange()`
If the step size is not an integer:
* If the step size is a fraction of integers, you can use `np.arange()` with integers and divide the result accordingly.
* If that's not feasible, calculate the expected number of elements beforehand and use `np.linspace()` (both cases are sketched below).
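A small sketch of those last two rules, re-using the 0.5 to 1.0 range from above (the concrete numbers are just for illustration):
```
import numpy as np
start, stop, step = 0.5, 1.0, 0.1
# step is a fraction of integers: use an integer arange and divide
a = np.arange(5, 11) / 10
# otherwise: compute the expected number of elements first, then use linspace
num = int(round((stop - start) / step)) + 1
b = np.linspace(start, stop, num)
print(a)
print(b)
```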
```
dur, amp, freq, fs = 1, 0.3, 500, 44100
t = np.arange(np.ceil(dur * fs)) / fs
y = amp * np.sin(2 * np.pi * freq * t)
```
alternative (but inferior) methods to get $t$:
```
t1 = np.arange(0, dur, 1/fs) # implicit rounding of dur!
t2 = np.arange(0, np.round(dur), 1/fs) # still problematic: arange with floats
# wrong if dur isn't an integer multiple of 1/fs:
t3 = np.linspace(0, dur, np.round(dur * fs), endpoint=False)
```
Length of `y` must be *exactly* 44100 (using a half-open interval for $t$), not 44101 (which would be longer than 1 second).
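A quick check of this claim (a sketch, assuming `t`, `y`, `dur` and `fs` from the cells above):
```
assert len(t) == len(y) == int(dur * fs) == 44100
len(np.linspace(0, dur, int(dur * fs) + 1))  # a closed interval would give 44101 samples
```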
Plotting: 2 ways to zoom (there are probably more): draw a rectangle, drag with the right mouse button in pan/zoom mode.
Clicks? Because of discontinuities (also in the derivatives) $\Rightarrow$ Fade in/out! See [tools.fade()](tools.py).
```
import sounddevice as sd
import tools
def myplay(data):
"""Apply fade in/out and play with 44.1 kHz."""
data = tools.fade(data, 2000, 5000)
sd.play(data, 44100)
myplay(y)
def mysine(frequency, amplitude, duration):
"""Generate sine tone with the given parameters @ 44.1 kHz."""
samplerate = 44100
times = np.arange(np.ceil(duration * samplerate)) / samplerate
return amplitude * np.sin(2 * np.pi * frequency * times)
z = mysine(440, 0.4, 3)
myplay(z)
%matplotlib
import matplotlib.pyplot as plt
def myplot(data):
"""Create a simple plot @ 44.1 kHz."""
samplerate = 44100
times = np.arange(len(data)) / samplerate
plt.plot(times, data)
plt.xlabel("Time / Seconds")
myplot(mysine(440, 0.4, 3))
import soundfile as sf
dur, amp = 1, 0.3
frequencies = 400, 500, 600 # Hz
fadetime = 2000 # samples
for freq in frequencies:
sig = mysine(freq, amp, dur)
sig = tools.fade(sig, fadetime)
sf.write("sine_{}hz.wav".format(freq), sig, 44100)
from scipy import signal
f0, f1 = 100, 5000 # Hz
amp = 0.2
dur = 2 # seconds
fadetime = 2000 # samples
fs = 44100
t = np.arange(np.ceil(dur * fs)) / fs
for method in 'linear', 'log':
sweep = amp * signal.chirp(t, f0, dur, f1, method)
sweep = tools.fade(sweep, fadetime)
sf.write('sweep_{}.wav'.format(method), sweep, fs)
sinetone = mysine(frequency=500, amplitude=0.3, duration=1.5)
noise = np.random.normal(scale=0.1, size=len(sinetone))
sine_plus_noise = sinetone + noise
myplay(sine_plus_noise)
myplot(sine_plus_noise)
dur = 2
amp = 0.2
two_sines = mysine(500, amp, dur) + mysine(507, amp, dur)
myplay(two_sines)
myplot(two_sines)
```
Two sine tones with similar frequencies create "beats", see <http://en.wikipedia.org/wiki/Beat_(acoustics)>.
The sum of these two tones is equivalent to an amplitude modulation with a carrier frequency of $\frac{f_1+f_2}{2}$ and a modulation frequency of $\frac{f_1-f_2}{2}$.
$$\cos(2\pi f_1t)+\cos(2\pi f_2t) = 2\cos\left(2\pi\frac{f_1+f_2}{2}t\right)\cos\left(2\pi\frac{f_1-f_2}{2}t\right)$$
We don't really *hear* the modulation frequency itself, we only hear the envelope of the modulation, therefore the *perceived* beat frequency is $f_{\text{beat}} = f_1-f_2$.
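As a short illustration of this identity (a sketch assuming `two_sines`, `myplot`, `amp` and `dur` from the cells above, with $f_1 = 500$ Hz and $f_2 = 507$ Hz):
```
f1, f2 = 500, 507
fs = 44100
t = np.arange(np.ceil(dur * fs)) / fs
envelope = 2 * amp * np.cos(2 * np.pi * (f1 - f2) / 2 * t)
myplot(two_sines)
plt.plot(t, envelope, 'r')   # upper envelope
plt.plot(t, -envelope, 'r')  # lower envelope
```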
```
stereo_sines = np.column_stack([mysine(400, amp, dur), mysine(600, amp, dur)])
myplay(stereo_sines)
```
The first column should be the left channel!
```
dur, amp = 1, 0.3
freq = 500 # Hz
delay = 0.5 # ms
fs = 44100
t = np.arange(np.ceil(dur * fs)) / fs
times = np.column_stack((t, t - delay/1000))
sig = amp * np.sin(2 * np.pi * freq * times)
myplay(sig)
dur, amp = 0.5, 0.3
frequencies = 500, 1000, 2000 # Hz
delays = 0.6, 0.4, 0.2, 0, -0.2, -0.4, -0.6 # ms
fs = 44100
t = np.arange(np.ceil(dur * fs)) / fs
for f in frequencies:
for delay in delays:
times = np.column_stack((t, t - delay/1000))
sig = amp * np.sin(2 * np.pi * f * times)
myplay(sig)
sd.wait()
```
This is supposed to illustrate [Lord Rayleigh's Duplex Theory](http://en.wikipedia.org/wiki/Interaural_time_difference#Duplex_theory) (at least the part about time differences).
```
dur, amp = 2, 0.3
frequencies = np.array([200, 400, 600, 800, 1000])
fs = 44100
t = np.arange(np.ceil(dur * fs)) / fs
t.shape = -1, 1
t
amplitudes = amp * 1 / np.arange(1, len(frequencies)+1)
amplitudes
five_sines = amplitudes * np.sin(2 * np.pi * frequencies * t)
five_sines.shape
sum_of_sines = five_sines.sum(axis=1)
myplot(sum_of_sines)
myplay(five_sines[:, [0, 1, 2, 3, 4]].sum(axis=1))
myplay(five_sines[:, [0, 1, 2, 3]].sum(axis=1))
myplay(five_sines[:, [0, 1, 2, 4]].sum(axis=1))
myplay(five_sines[:, [0, 1, 3, 4]].sum(axis=1))
myplay(five_sines[:, [0, 2, 3, 4]].sum(axis=1))
myplay(five_sines[:, [1, 2, 3, 4]].sum(axis=1))
```
<https://en.wikipedia.org/wiki/Harmonic_series_(music)>
```
f0 = 200 # Hz
partials = 20
frequencies = f0 * np.arange(1, partials + 1)
frequencies
amplitudes = amp * 1 / np.arange(1, len(frequencies)+1)
amplitudes
many_sines = amplitudes * np.sin(2 * np.pi * frequencies * t)
many_sines.shape
sawtooth = many_sines.sum(axis=1)
myplot(sawtooth)
myplay(sawtooth)
```
https://en.wikipedia.org/wiki/Sawtooth_wave
```
square = many_sines[:, ::2].sum(axis=1)
myplot(square)
myplay(square)
```
https://en.wikipedia.org/wiki/Square_wave
```
c = 343
samplerate = 44100
dur = 0.01
phat = 0.2
freq = 500
omega = 2 * np.pi * freq
kx = omega / c
x = 0
time = np.arange(np.ceil(dur * samplerate)) / samplerate
p = phat * np.exp(1j*(kx*x - omega*time))
plt.plot(time*1000, np.real(p))
plt.xlabel('$t$ / ms')
plt.ylabel('$\mathcal{R}\{p(x,t)\}$ / Pa')
plt.grid()
plt.title('$f = {}$ Hz, $T = {}$ ms'.format(freq, 1000/freq));
xrange = 3
dx = 0.001
time = 0
x = np.arange(np.ceil(xrange/dx)) * dx
p = phat * np.exp(1j*(kx*x - omega*time))
plt.plot(x*100, np.real(p))
plt.xlabel('$x$ / cm')
plt.ylabel('$\mathcal{R}\{p(x,t)\}$ / Pa')
plt.grid()
plt.title('$f = {}$ Hz, $\lambda = {}$ cm'.format(freq, c*100/freq));
```
<p xmlns:dct="http://purl.org/dc/terms/">
<a rel="license"
href="http://creativecommons.org/publicdomain/zero/1.0/">
<img src="http://i.creativecommons.org/p/zero/1.0/88x31.png" style="border-style: none;" alt="CC0" />
</a>
<br />
To the extent possible under law,
<span rel="dct:publisher" resource="[_:publisher]">the person who associated CC0</span>
with this work has waived all copyright and related or neighboring
rights to this work.
</p>
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
```
## Load data
We know a person's age and experience, and we want to be able to infer whether that person is a badass in their field or not.
```
df = pd.DataFrame({
'Age': [20,16.2,20.2,18.8,18.9,16.7,13.6,20.0,18.0,21.2,
25,31.2,25.2,23.8,23.9,21.7,18.6,25.0,23.0,26.2],
'Experience': [2.3,2.2,1.8,1.4,3.2,3.9,1.4,1.4,3.6,4.3,
4.3,4.2,3.8,3.4,5.2,5.9,3.4,3.4,5.6,6.3],
'Badass': [0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1]
})
df
colors = np.full_like(df['Badass'], 'red', dtype='object')
colors[df['Badass'] == 1] = 'blue'
plt.scatter(df['Age'], df['Experience'], color=colors)
X = df.drop('Badass', axis=1).values
Y = df['Badass'].values
# Case to predict
x = [21.2, 4.3]
```
## Using sklearn
### Fit
```
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(C=1e20, solver='liblinear', random_state=0)
%time model.fit(X, Y)
print(model.intercept_, model.coef_)
```
### Plot Decision Boundary
<details>
<summary>Where does the equation come from? ↓</summary>
<img src="https://i.imgur.com/YxSDJZA.png?1">
</details>
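In case the image above does not load, here is the short derivation (a sketch of the standard result): with $p(x_1, x_2) = \sigma(b_0 + b_1 x_1 + b_2 x_2)$ and a threshold of $0.5$,
$$p = 0.5 \;\Longleftrightarrow\; b_0 + b_1 x_1 + b_2 x_2 = 0 \;\Longleftrightarrow\; x_2 = -\frac{b_1}{b_2}\,x_1 - \frac{b_0}{b_2},$$
which is exactly the line computed below as `_Y = (-b1/b2)*_X + (-b0/b2)` (here $x_1$ is Age and $x_2$ is Experience).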
```
b0 = model.intercept_[0]
b1 = model.coef_[0][0]
b2 = model.coef_[0][1]
plt.scatter(df['Age'], df['Experience'], color=colors)
# Decision boundary (with threshold 0.5)
_X = np.linspace(df['Age'].min(), df['Age'].max(),10)
_Y = (-b1/b2)*_X + (-b0/b2)
plt.plot(_X, _Y, '-k')
# Plot using contour
_X1 = np.linspace(df['Age'].min(), df['Age'].max(),10)
_X2 = np.linspace(df['Experience'].min(), df['Experience'].max(),10)
xx1, xx2 = np.meshgrid(_X1, _X2)
grid = np.c_[xx1.ravel(), xx2.ravel()]
preds = model.predict_proba(grid)[:, 1].reshape(xx1.shape)
plt.scatter(df['Age'], df['Experience'], color=colors)
plt.contour(xx1, xx2, preds, levels=[.5], cmap="Greys", vmin=0, vmax=.6)
```
### Predict
```
print('Badass probability:', model.predict_proba([x])[0][1])
print('Prediction:', model.predict([x])[0])
```
## From scratch
### Fit
Source: https://github.com/martinpella/logistic-reg/blob/master/logistic_reg.ipynb
```
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def loss(h, y):
return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()
def gradientDescent(X, y, theta, alpha, epochs, verbose=True):
m = len(y)
for i in range(epochs):
h = sigmoid(X.dot(theta))
gradient = (X.T.dot(h - y)) / m
theta -= alpha * gradient
if(verbose and i % 1000 == 0):
z = np.dot(X, theta)
h = sigmoid(z)
print('loss:', loss(h, y))
return theta
# Add intercept
m = len(X)
b = np.ones((m,1))
Xb = np.concatenate([b, X], axis=1)
# Fit
theta = np.random.rand(3)
theta = gradientDescent(Xb, Y, theta=theta, alpha=0.1, epochs=10000)
theta
```
### Plot
```
b0 = theta[0]
b1 = theta[1]
b2 = theta[2]
plt.scatter(df['Age'], df['Experience'], color=colors)
# Decision boundary (with threshold 0.5)
_X = np.linspace(df['Age'].min(), df['Age'].max(),10)
_Y = (-b1/b2)*_X + (-b0/b2)
plt.plot(_X, _Y, '-k')
```
### Predict
```
z = b0 + b1 * x[0] + b2 * x[1]
p = 1 / (1 + np.exp(-z))
print('Badass probability:', p)
print('Prediction:', (1 if p > 0.5 else 0))
```
|
github_jupyter
|
### Introduction
An example of implementing the Metapath2Vec representation learning algorithm using components from the `stellargraph` and `gensim` libraries.
**References**
**1.** Metapath2Vec: Scalable Representation Learning for Heterogeneous Networks. Yuxiao Dong, Nitesh V. Chawla, and Ananthram Swami. ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), 135–144, 2017. ([link](https://ericdongyx.github.io/papers/KDD17-dong-chawla-swami-metapath2vec.pdf))
**2.** Distributed representations of words and phrases and their compositionality. T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean. In Advances in Neural Information Processing Systems (NIPS), pp. 3111-3119, 2013. ([link](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf))
**3.** Gensim: Topic modelling for humans. ([link](https://radimrehurek.com/gensim/))
**4.** Social Computing Data Repository at ASU [http://socialcomputing.asu.edu]. R. Zafarani and H. Liu. Tempe, AZ: Arizona State University, School of Computing, Informatics and Decision Systems Engineering. 2009.
```
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import os
import networkx as nx
import numpy as np
import pandas as pd
from stellargraph.data.loader import load_dataset_BlogCatalog3
%matplotlib inline
```
### Load the dataset
The dataset is the BlogCatalog3 network.
It can be downloaded from [here.](http://socialcomputing.asu.edu/datasets/BlogCatalog3)
The following is the description of the dataset from the publisher [4]:
> This is the data set crawled from BlogCatalog ( http://www.blogcatalog.com ). BlogCatalog is a social blog directory website. This contains the friendship network crawled and group memberships. For easier understanding, all the contents are organized in CSV file format.
The statistics of this network are,
- Number of bloggers : 10,312
- Number of friendship pairs: 333,983
- Number of groups: 39
We assume that the dataset file `BlogCatalog-dataset.zip` has been downloaded and unzipped in the directory,
`~/data`
and the data in `csv` format (the files `edges.csv`, `nodes.csv`, `groups.csv`, and `group-edges.csv`) can be found in the directory,
`~/data/BlogCatalog-dataset/data/`
```
dataset_location = os.path.expanduser("~/data/BlogCatalog-dataset/data")
g_nx = load_dataset_BlogCatalog3(location=dataset_location)
print("Number of nodes {} and number of edges {} in graph.".format(g_nx.number_of_nodes(), g_nx.number_of_edges()))
```
### The Metapath2Vec algorithm
The Metapath2Vec algorithm introduced in [1] is a 2-step representation learning algorithm. The two steps are:
1. Use uniform random walks to generate sentences from a graph. A sentence is a list of node IDs. The set of all sentences makes a corpus. The random walk is driven by a metapath that defines the node type order by which the random walker explores the graph.
2. The corpus is then used to learn an embedding vector for each node in the graph. Each node ID is considered a unique word/token in a dictionary that has size equal to the number of nodes in the graph. The Word2Vec algorithm [2] is used for calculating the embedding vectors.
## Corpus generation using random walks
The `stellargraph` library provides an implementation for uniform, first order, random walks as required by Metapath2Vec. The random walks have fixed maximum length and are controlled by the list of metapath schemas specified in parameter `metapaths`.
A metapath schema defines the type of node that the random walker is allowed to transition to from its current location. In the `stellargraph` implementation of metapath-driven random walks, the metapath schemas are given as a list of node types under the assumption that the input graph is not a multi-graph, i.e., two nodes are only connected by one edge type.
See [1] for a detailed description of metapath schemas and metapath-driven random walks.
For the **BlogCatalog3** dataset we use the following 3 metapaths.
- "user", "group", "user"
- "user", "group", "user", "user"
- "user", "user"
```
from stellargraph.data import UniformRandomMetaPathWalk
from stellargraph import StellarGraph
# Create the random walker
rw = UniformRandomMetaPathWalk(StellarGraph(g_nx))
# specify the metapath schemas as a list of lists of node types.
metapaths = [
["user", "group", "user"],
["user", "group", "user", "user"],
["user", "user"],
]
walks = rw.run(nodes=list(g_nx.nodes()), # root nodes
length=100, # maximum length of a random walk
n=1, # number of random walks per root node
metapaths=metapaths # the metapaths
)
print("Number of random walks: {}".format(len(walks)))
```
### Representation Learning using Word2Vec
We use the Word2Vec [2] implementation in the free Python library gensim [3] to learn representations for each node in the graph.
We set the dimensionality of the learned embedding vectors to 128 as in [1].
```
from gensim.models import Word2Vec
model = Word2Vec(walks, size=128, window=5, min_count=0, sg=1, workers=2, iter=1)
model.wv.vectors.shape # 128-dimensional vector for each node in the graph
```
### Visualise Node Embeddings
We retrieve the Word2Vec node embeddings that are 128-dimensional vectors and then we project them down to 2 dimensions using the [t-SNE](http://scikit-learn.org/stable/modules/generated/sklearn.manifold.TSNE.html) algorithm.
```
# Retrieve node embeddings and corresponding subjects
node_ids = model.wv.index2word # list of node IDs
node_embeddings = model.wv.vectors # numpy.ndarray of size number of nodes times embeddings dimensionality
node_targets = [ g_nx.node[node_id]['label'] for node_id in node_ids]
```
Transform the embeddings to 2d space for visualisation
```
transform = TSNE #PCA
trans = transform(n_components=2)
node_embeddings_2d = trans.fit_transform(node_embeddings)
# draw the points
label_map = { l: i for i, l in enumerate(np.unique(node_targets))}
node_colours = [ label_map[target] for target in node_targets]
plt.figure(figsize=(20,16))
plt.axes().set(aspect="equal")
plt.scatter(node_embeddings_2d[:,0],
node_embeddings_2d[:,1],
c=node_colours, alpha=0.3)
plt.title('{} visualization of node embeddings'.format(transform.__name__))
plt.show()
```
### Downstream task
The node embeddings calculated using Metapath2Vec can be used as feature vectors in a downstream task such as node attribute inference (e.g., inferring the gender or age attribute of 'user' nodes), community detection (e.g., clustering of 'user' nodes based on the similarity of their embedding vectors), and link prediction (e.g., prediction of friendship relation between 'user' nodes).
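As a minimal illustration only (not part of the original workflow, and the number of clusters is an arbitrary assumption), the 'user' embeddings computed above could for instance be clustered with scikit-learn's `KMeans`:
```
from sklearn.cluster import KMeans
# keep only the embeddings of 'user' nodes
user_idx = [i for i, label in enumerate(node_targets) if label == "user"]
user_embeddings = node_embeddings[user_idx]
# cluster them into an (arbitrarily chosen) number of communities
kmeans = KMeans(n_clusters=10, random_state=42).fit(user_embeddings)
print(np.bincount(kmeans.labels_))  # community sizes
```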
|
github_jupyter
|
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Neural machine translation with attention
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/text/nmt_with_attention">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/text/nmt_with_attention.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/text/nmt_with_attention.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/text/nmt_with_attention.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This notebook trains a sequence to sequence (seq2seq) model for Spanish to English translation. This is an advanced example that assumes some knowledge of sequence to sequence models.
After training the model in this notebook, you will be able to input a Spanish sentence, such as *"¿todavia estan en casa?"*, and return the English translation: *"are you still at home?"*
The translation quality is reasonable for a toy example, but the generated attention plot is perhaps more interesting. This shows which parts of the input sentence have the model's attention while translating:
<img src="https://tensorflow.org/images/spanish-english.png" alt="spanish-english attention plot">
Note: This example takes approximately 10 minutes to run on a single P100 GPU.
```
from __future__ import absolute_import, division, print_function, unicode_literals
try:
# %tensorflow_version only exists in Colab.
%tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from sklearn.model_selection import train_test_split
import unicodedata
import re
import numpy as np
import os
import io
import time
```
## Download and prepare the dataset
We'll use a language dataset provided by http://www.manythings.org/anki/. This dataset contains language translation pairs in the format:
```
May I borrow this book? ¿Puedo tomar prestado este libro?
```
There are a variety of languages available, but we'll use the English-Spanish dataset. For convenience, we've hosted a copy of this dataset on Google Cloud, but you can also download your own copy. After downloading the dataset, here are the steps we'll take to prepare the data:
1. Add a *start* and *end* token to each sentence.
2. Clean the sentences by removing special characters.
3. Create a word index and reverse word index (dictionaries mapping from word → id and id → word).
4. Pad each sentence to a maximum length.
```
# Download the file
path_to_zip = tf.keras.utils.get_file(
'spa-eng.zip', origin='http://storage.googleapis.com/download.tensorflow.org/data/spa-eng.zip',
extract=True)
path_to_file = os.path.dirname(path_to_zip)+"/spa-eng/spa.txt"
# Converts the unicode file to ascii
def unicode_to_ascii(s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
def preprocess_sentence(w):
w = unicode_to_ascii(w.lower().strip())
# creating a space between a word and the punctuation following it
# eg: "he is a boy." => "he is a boy ."
# Reference:- https://stackoverflow.com/questions/3645931/python-padding-punctuation-with-white-spaces-keeping-punctuation
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# replacing everything with space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
# adding a start and an end token to the sentence
# so that the model knows when to start and stop predicting.
w = '<start> ' + w + ' <end>'
return w
en_sentence = u"May I borrow this book?"
sp_sentence = u"¿Puedo tomar prestado este libro?"
print(preprocess_sentence(en_sentence))
print(preprocess_sentence(sp_sentence).encode('utf-8'))
# 1. Remove the accents
# 2. Clean the sentences
# 3. Return word pairs in the format: [ENGLISH, SPANISH]
def create_dataset(path, num_examples):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
word_pairs = [[preprocess_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return zip(*word_pairs)
en, sp = create_dataset(path_to_file, None)
print(en[-1])
print(sp[-1])
def max_length(tensor):
return max(len(t) for t in tensor)
def tokenize(lang):
lang_tokenizer = tf.keras.preprocessing.text.Tokenizer(
filters='')
lang_tokenizer.fit_on_texts(lang)
tensor = lang_tokenizer.texts_to_sequences(lang)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor,
padding='post')
return tensor, lang_tokenizer
def load_dataset(path, num_examples=None):
# creating cleaned input, output pairs
targ_lang, inp_lang = create_dataset(path, num_examples)
input_tensor, inp_lang_tokenizer = tokenize(inp_lang)
target_tensor, targ_lang_tokenizer = tokenize(targ_lang)
return input_tensor, target_tensor, inp_lang_tokenizer, targ_lang_tokenizer
```
### Limit the size of the dataset to experiment faster (optional)
Training on the complete dataset of >100,000 sentences will take a long time. To train faster, we can limit the size of the dataset to 30,000 sentences (of course, translation quality degrades with less data):
```
# Try experimenting with the size of that dataset
num_examples = 30000
input_tensor, target_tensor, inp_lang, targ_lang = load_dataset(path_to_file, num_examples)
# Calculate max_length of the target tensors
max_length_targ, max_length_inp = max_length(target_tensor), max_length(input_tensor)
# Creating training and validation sets using an 80-20 split
input_tensor_train, input_tensor_val, target_tensor_train, target_tensor_val = train_test_split(input_tensor, target_tensor, test_size=0.2)
# Show length
print(len(input_tensor_train), len(target_tensor_train), len(input_tensor_val), len(target_tensor_val))
def convert(lang, tensor):
for t in tensor:
if t!=0:
print ("%d ----> %s" % (t, lang.index_word[t]))
print ("Input Language; index to word mapping")
convert(inp_lang, input_tensor_train[0])
print ()
print ("Target Language; index to word mapping")
convert(targ_lang, target_tensor_train[0])
```
### Create a tf.data dataset
```
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train)//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word_index)+1
vocab_tar_size = len(targ_lang.word_index)+1
dataset = tf.data.Dataset.from_tensor_slices((input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
example_input_batch, example_target_batch = next(iter(dataset))
example_input_batch.shape, example_target_batch.shape
```
## Write the encoder and decoder model
Implement an encoder-decoder model with attention, which you can read about in the TensorFlow [Neural Machine Translation (seq2seq) tutorial](https://github.com/tensorflow/nmt). This example uses a more recent set of APIs. This notebook implements the [attention equations](https://github.com/tensorflow/nmt#background-on-the-attention-mechanism) from the seq2seq tutorial. The following diagram shows that each input word is assigned a weight by the attention mechanism, which is then used by the decoder to predict the next word in the sentence. The picture and formulas below are an example of an attention mechanism from [Luong's paper](https://arxiv.org/abs/1508.04025v5).
<img src="https://www.tensorflow.org/images/seq2seq/attention_mechanism.jpg" width="500" alt="attention mechanism">
The input is put through an encoder model which gives us the encoder output of shape *(batch_size, max_length, hidden_size)* and the encoder hidden state of shape *(batch_size, hidden_size)*.
Here are the equations that are implemented:
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_0.jpg" alt="attention equation 0" width="800">
<img src="https://www.tensorflow.org/images/seq2seq/attention_equation_1.jpg" alt="attention equation 1" width="800">
This tutorial uses [Bahdanau attention](https://arxiv.org/pdf/1409.0473.pdf) for the encoder. Let's decide on notation before writing the simplified form:
* FC = Fully connected (dense) layer
* EO = Encoder output
* H = hidden state
* X = input to the decoder
And the pseudo-code:
* `score = FC(tanh(FC(EO) + FC(H)))`
* `attention weights = softmax(score, axis = 1)`. Softmax by default is applied on the last axis, but here we want to apply it on the *1st axis*, since the shape of score is *(batch_size, max_length, 1)*. `Max_length` is the length of our input. Since we are trying to assign a weight to each input, softmax should be applied on that axis.
* `context vector = sum(attention weights * EO, axis = 1)`. Same reason as above for choosing axis as 1.
* `embedding output` = The input to the decoder X is passed through an embedding layer.
* `merged vector = concat(embedding output, context vector)`
* This merged vector is then given to the GRU
The shapes of all the vectors at each step have been specified in the comments in the code:
```
class Encoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, enc_units, batch_sz):
super(Encoder, self).__init__()
self.batch_sz = batch_sz
self.enc_units = enc_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.enc_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
def call(self, x, hidden):
x = self.embedding(x)
output, state = self.gru(x, initial_state = hidden)
return output, state
def initialize_hidden_state(self):
return tf.zeros((self.batch_sz, self.enc_units))
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
# sample input
sample_hidden = encoder.initialize_hidden_state()
sample_output, sample_hidden = encoder(example_input_batch, sample_hidden)
print ('Encoder output shape: (batch size, sequence length, units) {}'.format(sample_output.shape))
print ('Encoder Hidden state shape: (batch size, units) {}'.format(sample_hidden.shape))
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
def call(self, query, values):
# hidden shape == (batch_size, hidden size)
# hidden_with_time_axis shape == (batch_size, 1, hidden size)
# we are doing this to perform addition to calculate the score
hidden_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(
self.W1(values) + self.W2(hidden_with_time_axis)))
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
attention_layer = BahdanauAttention(10)
attention_result, attention_weights = attention_layer(sample_hidden, sample_output)
print("Attention result shape: (batch size, units) {}".format(attention_result.shape))
print("Attention weights shape: (batch_size, sequence_length, 1) {}".format(attention_weights.shape))
class Decoder(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, dec_units, batch_sz):
super(Decoder, self).__init__()
self.batch_sz = batch_sz
self.dec_units = dec_units
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
self.gru = tf.keras.layers.GRU(self.dec_units,
return_sequences=True,
return_state=True,
recurrent_initializer='glorot_uniform')
self.fc = tf.keras.layers.Dense(vocab_size)
# used for attention
self.attention = BahdanauAttention(self.dec_units)
def call(self, x, hidden, enc_output):
# enc_output shape == (batch_size, max_length, hidden_size)
context_vector, attention_weights = self.attention(hidden, enc_output)
# x shape after passing through embedding == (batch_size, 1, embedding_dim)
x = self.embedding(x)
# x shape after concatenation == (batch_size, 1, embedding_dim + hidden_size)
x = tf.concat([tf.expand_dims(context_vector, 1), x], axis=-1)
# passing the concatenated vector to the GRU
output, state = self.gru(x)
# output shape == (batch_size * 1, hidden_size)
output = tf.reshape(output, (-1, output.shape[2]))
# output shape == (batch_size, vocab)
x = self.fc(output)
return x, state, attention_weights
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)
sample_decoder_output, _, _ = decoder(tf.random.uniform((BATCH_SIZE, 1)),
sample_hidden, sample_output)
print ('Decoder output shape: (batch_size, vocab size) {}'.format(sample_decoder_output.shape))
```
## Define the optimizer and the loss function
```
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction='none')
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
```
## Checkpoints (Object-based saving)
```
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer,
encoder=encoder,
decoder=decoder)
```
## Training
1. Pass the *input* through the *encoder* which return *encoder output* and the *encoder hidden state*.
2. The encoder output, encoder hidden state and the decoder input (which is the *start token*) is passed to the decoder.
3. The decoder returns the *predictions* and the *decoder hidden state*.
4. The decoder hidden state is then passed back into the model and the predictions are used to calculate the loss.
5. Use *teacher forcing* to decide the next input to the decoder.
6. *Teacher forcing* is the technique where the *target word* is passed as the *next input* to the decoder.
7. The final step is to calculate the gradients and apply them to the model variables via the optimizer.
```
@tf.function
def train_step(inp, targ, enc_hidden):
loss = 0
with tf.GradientTape() as tape:
enc_output, enc_hidden = encoder(inp, enc_hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']] * BATCH_SIZE, 1)
# Teacher forcing - feeding the target as the next input
for t in range(1, targ.shape[1]):
# passing enc_output to the decoder
predictions, dec_hidden, _ = decoder(dec_input, dec_hidden, enc_output)
loss += loss_function(targ[:, t], predictions)
# using teacher forcing
dec_input = tf.expand_dims(targ[:, t], 1)
batch_loss = (loss / int(targ.shape[1]))
variables = encoder.trainable_variables + decoder.trainable_variables
gradients = tape.gradient(loss, variables)
optimizer.apply_gradients(zip(gradients, variables))
return batch_loss
EPOCHS = 10
for epoch in range(EPOCHS):
start = time.time()
enc_hidden = encoder.initialize_hidden_state()
total_loss = 0
for (batch, (inp, targ)) in enumerate(dataset.take(steps_per_epoch)):
batch_loss = train_step(inp, targ, enc_hidden)
total_loss += batch_loss
if batch % 100 == 0:
print('Epoch {} Batch {} Loss {:.4f}'.format(epoch + 1,
batch,
batch_loss.numpy()))
# saving (checkpoint) the model every 2 epochs
if (epoch + 1) % 2 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print('Epoch {} Loss {:.4f}'.format(epoch + 1,
total_loss / steps_per_epoch))
print('Time taken for 1 epoch {} sec\n'.format(time.time() - start))
```
## Translate
* The evaluate function is similar to the training loop, except we don't use *teacher forcing* here. The input to the decoder at each time step is its previous predictions along with the hidden state and the encoder output.
* Stop predicting when the model predicts the *end token*.
* And store the *attention weights for every time step*.
Note: The encoder output is calculated only once for one input.
```
def evaluate(sentence):
attention_plot = np.zeros((max_length_targ, max_length_inp))
sentence = preprocess_sentence(sentence)
inputs = [inp_lang.word_index[i] for i in sentence.split(' ')]
inputs = tf.keras.preprocessing.sequence.pad_sequences([inputs],
maxlen=max_length_inp,
padding='post')
inputs = tf.convert_to_tensor(inputs)
result = ''
hidden = [tf.zeros((1, units))]
enc_out, enc_hidden = encoder(inputs, hidden)
dec_hidden = enc_hidden
dec_input = tf.expand_dims([targ_lang.word_index['<start>']], 0)
for t in range(max_length_targ):
predictions, dec_hidden, attention_weights = decoder(dec_input,
dec_hidden,
enc_out)
# storing the attention weights to plot later on
attention_weights = tf.reshape(attention_weights, (-1, ))
attention_plot[t] = attention_weights.numpy()
predicted_id = tf.argmax(predictions[0]).numpy()
result += targ_lang.index_word[predicted_id] + ' '
if targ_lang.index_word[predicted_id] == '<end>':
return result, sentence, attention_plot
# the predicted ID is fed back into the model
dec_input = tf.expand_dims([predicted_id], 0)
return result, sentence, attention_plot
# function for plotting the attention weights
def plot_attention(attention, sentence, predicted_sentence):
fig = plt.figure(figsize=(10,10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 14}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, rotation=90)
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
plt.show()
def translate(sentence):
result, sentence, attention_plot = evaluate(sentence)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
plot_attention(attention_plot, sentence.split(' '), result.split(' '))
```
## Restore the latest checkpoint and test
```
# restoring the latest checkpoint in checkpoint_dir
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
translate(u'hace mucho frio aqui.')
translate(u'esta es mi vida.')
translate(u'¿todavia estan en casa?')
# wrong translation
translate(u'trata de averiguarlo.')
```
## Next steps
* [Download a different dataset](http://www.manythings.org/anki/) to experiment with translations, for example, English to German, or English to French.
* Experiment with training on a larger dataset, or using more epochs
|
github_jupyter
|
##### Training and Tuning
The main purpose of the previous notebook was to try several models as quickly as possible, look at their metrics, and see the impact of various changes. The main problem (so far) with the PyCaret version is that the deployed model is an object from that same library, which means PyCaret must be installed in production; this is very inefficient and complicates things considerably. In addition, PyCaret does its hyperparameter tuning with RandomizedSearchCV, which is not bad, but it would be more optimal to do it in a Bayesian way. With that in mind, this notebook retrains the model(s), saves them, and later deploys them quickly and simply, with the priority of keeping the model as lightweight as possible.
```
import pandas as pd
import numpy as np
import warnings
import lightgbm as lgb
import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor
from bayes_opt import BayesianOptimization
csv_path = (
"../data/train_encoded.csv",
"../data/test_encoded.csv"
)
train = pd.read_csv(csv_path[0]).drop(["latitud","longitud"], axis=1)
test = pd.read_csv(csv_path[1]).drop(["latitud","longitud"], axis=1)
```
##### For LightGBM.
Since we have already tuned it with PyCaret, the parameters are:
```
Untuned:
LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
importance_type='split', learning_rate=0.1, max_depth=-1,
min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
n_estimators=100, n_jobs=-1, num_leaves=31, objective=None,
random_state=104, reg_alpha=0.0, reg_lambda=0.0, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
Tuned:
LGBMRegressor(bagging_fraction=1.0, bagging_freq=6, boosting_type='gbdt',
class_weight=None, colsample_bytree=1.0, feature_fraction=0.9,
importance_type='split', learning_rate=0.15, max_depth=-1,
min_child_samples=46, min_child_weight=0.001, min_split_gain=0,
n_estimators=150, n_jobs=-1, num_leaves=2, objective=None,
random_state=104, reg_alpha=0.7, reg_lambda=5, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
```
```
import warnings
warnings.filterwarnings('ignore')
random_state = 104 # For the benchmark.
def bayes_parameter_opt_lgb(X, y, init_points=15, opt_round=25, n_folds=5, random_seed=6, n_estimators=10000, learning_rate=0.05, output_process=False):
def lgb_eval(num_leaves, bagging_fraction, lambda_l1, lambda_l2, min_split_gain):
"""
Define the parameters to be tuned, as well as the fixed parameters.
"""
params = {'application':'regression','num_iterations':5000, 'learning_rate':0.05, 'early_stopping_round':100, 'metric':'rmse',
'feature_fraction':0.9,'n_estimators':200,'feature_fraction':0.9, 'max_depth':-1,'min_child_weight':0.001,'verbose':-1}
params["num_leaves"] = round(num_leaves)
params['bagging_fraction'] = max(min(bagging_fraction, 1), 0)
params['max_depth'] = -1
params['lambda_l1'] = max(lambda_l1, 0)
params['lambda_l2'] = max(lambda_l2, 0)
params['min_split_gain'] = min_split_gain
train_data = lgb.Dataset(data=X, label=y)
cv_result = lgb.cv(params, train_data, nfold=5, seed=random_state, verbose_eval =200, metrics=['mae'], shuffle=False,
stratified=False)
return -max(cv_result['l1-mean'])
# Set the range for each parameter
lgbm_optimization = BayesianOptimization(lgb_eval, {'num_leaves': (2, 25),
'bagging_fraction':(0.8,1),
'lambda_l1':(0.5,3),
'lambda_l2':(3,20),
'min_split_gain': (0.001, 0.1)
})
lgbm_optimization.maximize(init_points=init_points, n_iter=opt_round) #CHECK
if output_process == True:
lgbm_optimization.points_to_csv('lgbm_bayers_opt_result.csv')
return lgbm_optimization
X = train.select_dtypes(exclude='object').drop('Precio_m2_total',axis=1)
y = train['Precio_m2_total']
opt_params = bayes_parameter_opt_lgb(X=X,y=y, init_points= 30, opt_round=100)
min_ = min([res['target'] for res in opt_params.res])
[res['params'] for res in opt_params.res if res['target'] == min_]
#Fit model
train_data = lgb.Dataset(X,y)
params = {'application':'regression','num_iterations':5000, 'learning_rate':0.05, 'metric':'rmse',
'feature_fraction':0.9,'n_estimators':200,'feature_fraction':0.9, 'max_depth':-1,'min_child_weight':0.001,'verbose':-1,
'bagging_fraction': 0.9164810602504456,'lambda_l1': 0.5005454948781294,'lambda_l2': 6.60276585681876,
'min_split_gain': 0.07385271072078259,'num_leaves': 3}
model = lgb.cv(params, train_data, nfold=5, seed=random_state, verbose_eval =200, metrics=['mae'], shuffle=False,
stratified=False)
#l1_error = Mae
X_test = test.select_dtypes(exclude='object').drop('Precio_m2_total',axis=1)
y_test = test['Precio_m2_total']
model = lgb.train(params, train_data)
preds = model.predict(X_test)
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
r2 = r2_score(y_test, preds)
mae = mean_absolute_error(y_test, preds)
mse = mean_squared_error(y_test, preds)
print('r2:{}\nmae:{}\nmse:{}'.format(r2, mae, mse))
```
Training the final model:
```
data_x = pd.concat([X,X_test])
data_y = pd.concat([y,y_test])
data = lgb.Dataset(data_x,data_y)
model_final = lgb.train(params, data)
import pickle
with open('../webapp/artifacts/models/lgbm_base.pkl','wb') as handle:
pickle.dump(model_final, handle, protocol=pickle.HIGHEST_PROTOCOL)
```
#### Random Forest:
```
from sklearn.model_selection import cross_val_score
def rf_cv(min_impurity_decrease, min_samples_split, max_features,max_depth, data, target):
"""Random Forest Cross Validation
This function instantiates a Random Forest regressor with the parameters to be optimized:
min_samples_split, max_features, min_impurity_decrease.
"""
model = RandomForestRegressor(
n_estimators = 150,
min_impurity_decrease=min_impurity_decrease,
min_samples_split = min_samples_split,
max_features = max_features,
max_depth = max_depth, # Don't forget to keep this as an integer.
random_state = 123,
n_jobs=-1
)
cross_val = cross_val_score(model, data, target,
scoring='neg_mean_absolute_error', cv=4)
return cross_val.mean()
def optimize_rf(data, target):
"""Aplicamos Optimización Bayesiana a los parámetros del Random Forest Regressor"""
def inside_rf_cv(min_impurity_decrease, min_samples_split, max_features, max_depth):
"""Wrapper of RandomForest cross validation.
Tenemos que evitar que los parametros que toman valores enteros no se repitan, además de tener que
restringir aquellos parámetros que van de 0 a 1.
"""
return rf_cv(
min_samples_split = int(min_samples_split),
min_impurity_decrease = max(min(min_impurity_decrease, 0.999), 1e-3),
max_features = max(min(max_features, 0.999), 1e-3),
max_depth = int(max_depth),
data = data,
target = target,
)
optimizer = BayesianOptimization(
f = inside_rf_cv,
pbounds={
"min_samples_split":(2,25),
"min_impurity_decrease":(0.1,0.999),
"max_features":(0.1, 0.999),
"max_depth":(5, 25),
},
random_state=123,
verbose=2
)
optimizer.maximize(init_points = 30, n_iter=100)
print("Resultado Final", optimizer.max)
return optimizer
X_train = train.select_dtypes(exclude='object').drop('Precio_m2_total',axis=1)
y_train = train['Precio_m2_total']
from bayes_opt.util import Colours
print(Colours.yellow("----Random Forest Regressor Optimizer----"))
optimize_rf(X_train, y_train)
from sklearn.metrics import r2_score, mean_absolute_error
rf_reg = RandomForestRegressor(n_estimators = 300, n_jobs = -1, max_depth = 15, max_features = 0.67, min_impurity_decrease=0.1, min_samples_split=6)
rf_reg.fit(X_train, y_train)
preds = rf_reg.predict(X_test)
r2_score(y_test, preds) #0.38?
final_model_rf = rf_reg.fit(pd.concat([X_train,X_test]), pd.concat([y_train, y_test]))
import pickle
with open('../webapp/artifacts/models/rf_base.pkl','wb') as handle:
pickle.dump(final_model_rf, handle, protocol=pickle.HIGHEST_PROTOCOL)
```
|
github_jupyter
|
# Work Done by Friction
Sparisoma Viridi<sup>1</sup>, Muhammad Ervandy Rachmat<sup>2</sup> <br>
Program Studi Sarjana Fisika, Institut Teknologi Bandung <br>
Jalan Gensha 10, Bandung 40132, Indonesia <br>
<sup>1</sup>[email protected], https://github.com/dudung <br>
<sup>2</sup>[email protected], https://github.com/ErvandyR
The work done by friction is an undesired form of work, because the energy expended, usually released to the environment as heat or sound, can no longer be used by the system, so the energy of the system decreases.
## Motion of an object on a rough horizontal floor
The system under consideration is an object moving on a rough horizontal floor. The object is given a certain initial velocity and decelerates until it stops because of the kinetic friction between the object and the rough floor.
## Parameters
The parameters used are listed in the following table.
Table <a name='tab1'>1</a>. Symbols with their units and meanings.
Symbol | Unit | Meaning
:- | :- | :-
$t$ | s | time
$v_0$ | m/s | initial velocity
$x_0$ | m | initial position
$v$ | m/s | velocity at time $t$
$x$ | m | position at time $t$
$a$ | m/s<sup>2</sup> | acceleration
$\mu_k$ | - | coefficient of kinetic friction
$f_k$ | N | kinetic friction force
$m$ | kg | mass of the object
$F$ | N | total force acting on the object
$N$ | N | normal force
$w$ | N | gravitational force (weight)
The symbols in Table [1](#tab1) will be assigned values later, when they are implemented in the program.
## Equations
The equations that will be used are listed in this section.
### Kinematics
The relation between the velocity $v$, the initial velocity $v_0$, the acceleration $a$, and the time $t$ is given by
<a name='eqn1'></a>
\begin{equation}\label{eqn:kinematics-v-a-t}\tag{1}
v = v_0 + at.
\end{equation}
The position of the object $x$ depends on the initial position $x_0$, the initial velocity $v_0$, the acceleration $a$, and the time $t$ through
<a name='eqn2'></a>
\begin{equation}\label{eqn:kinematics-x-v-a-t}\tag{2}
x = x_0 + v_0 t + \tfrac12 at^2.
\end{equation}
In addition to the two previous equations, there is also the following equation
<a name='eqn3'></a>
\begin{equation}\label{eqn:kinematics-v-x-a}\tag{3}
v^2 = v_0^2 + 2a(x - x_0),
\end{equation}
which relates the velocity $v$ to the initial velocity $v_0$, the acceleration $a$, and the distance traveled $x - x_0$.
### Dynamics
Newton's first law states that an object initially at rest remains at rest, and an object initially moving with constant velocity keeps moving with constant velocity, if no force acts on it or if the forces acting on it sum to zero,
<a name='eqn4'></a>
\begin{equation}\label{eqn:newtons-law-1}\tag{4}
\sum F = 0.
\end{equation}
When a force does act on an object of mass $m$, or the sum of the forces is non-zero,
<a name='eqn5'></a>
\begin{equation}\label{eqn:newtons-law-2}\tag{5}
\sum F = ma,
\end{equation}
then the object's state of motion changes through the acceleration $a$, with $m > 0$ and $a \ne 0$.
### Work
The work done by a force $F$ between an initial position $x_0$ and a final position $x$ can be obtained from
<a name='eqn6'></a>
\begin{equation}\label{eqn:work-1}\tag{6}
W = \int_{x_0}^x F dx
\end{equation}
or from
<a name='eqn7'></a>
\begin{equation}\label{eqn:work-2}\tag{7}
W = \Delta K
\end{equation}
where $K$ is the kinetic energy. Equation ([7](#eqn7)) gives the work done by all forces acting on the object; therefore, when $F$ is the only force acting, it reduces to Equation ([6](#eqn6)).
## System
An illustration of the system is needed so that it can be visualized and the problem becomes easier to solve. A diagram of the forces acting on the object also needs to be presented.
### Illustration
The system, in which an object of mass $m$ moves on a rough floor, can be depicted as follows.
```
%%html
<svg
width="320"
height="140"
viewBox="0 0 320 140.00001"
id="svg2"
version="1.1"
inkscape:version="1.1.2 (b8e25be833, 2022-02-05)"
sodipodi:docname="mass-horizontal-rough-surface.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<defs
id="defs4">
<marker
style="overflow:visible"
id="TriangleOutM"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479" />
</marker>
<marker
style="overflow:visible"
id="marker11604"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow2Mend"
inkscape:isstock="true">
<path
transform="scale(-0.6)"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:0.625;stroke-linejoin:round"
id="path11602" />
</marker>
<marker
style="overflow:visible"
id="Arrow2Mend"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow2Mend"
inkscape:isstock="true">
<path
transform="scale(-0.6)"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:0.625;stroke-linejoin:round"
id="path11361" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-3"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-1" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-35"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-0" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-0"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-4" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-37"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-9" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.5"
inkscape:cx="173"
inkscape:cy="97.333333"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:snap-bbox="false"
inkscape:snap-global="false"
units="px"
showborder="true"
inkscape:showpageshadow="true"
borderlayer="false"
inkscape:window-width="1366"
inkscape:window-height="705"
inkscape:window-x="-8"
inkscape:window-y="-8"
inkscape:window-maximized="1"
inkscape:pagecheckerboard="0">
<inkscape:grid
type="xygrid"
id="grid970" />
</sodipodi:namedview>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(0,-732.36216)">
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="120.0725"
y="759.6109"
id="text2711-6-2-9"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2"
x="120.0725"
y="759.6109"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '"><tspan
style="font-style:italic"
id="tspan9923">v</tspan><tspan
style="font-size:65%;baseline-shift:sub"
id="tspan1668">0</tspan></tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM)"
d="m 84.656156,757.55169 25.738704,1.3e-4"
id="path11252" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-opacity:1"
id="rect1007"
width="59"
height="59"
x="56.5"
y="772.86218"
rx="0"
ry="0" />
<path
style="fill:none;stroke:#000000;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 20,832.86218 280,-2e-5"
id="path1386" />
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#c8c8c8;stroke-width:0.5;stroke-linecap:round;stroke-miterlimit:4;stroke-dasharray:2, 2;stroke-dashoffset:0;stroke-opacity:1"
id="rect1007-2"
width="59"
height="59"
x="225.16667"
y="772.86218"
rx="0"
ry="0" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#c8c8c8;fill-opacity:1;stroke:none"
x="236.05922"
y="759.6109"
id="text2711-6-2-9-9"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-8"
x="236.05922"
y="759.6109"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, ';fill:#c8c8c8;fill-opacity:1"><tspan
style="font-style:italic;fill:#c8c8c8;fill-opacity:1"
id="tspan9923-8">v</tspan> = 0</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="149.18359"
y="824.54877"
id="text2711-6-2-9-96"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-6"
x="149.18359"
y="824.54877"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '"><tspan
style="font-style:italic"
id="tspan3028">μ<tspan
style="font-size:65%;baseline-shift:sub"
id="tspan3074">k</tspan></tspan> > 0</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="79.505844"
y="806.37714"
id="text2711-6-2-9-2"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-84"
x="79.505844"
y="806.37714"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">m</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-37)"
d="m 33.785239,770.82609 -1.3e-4,25.7387"
id="path11252-5" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="29.173132"
y="759.45776"
id="text2711-6-2-9-8"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-2"
x="29.173132"
y="759.45776"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">g</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="79.368446"
y="849.21539"
id="text2711-6-2-9-23"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-3"
x="79.368446"
y="849.21539"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '"><tspan
style="font-style:italic"
id="tspan9923-0">x</tspan><tspan
style="font-size:65%;baseline-shift:sub"
id="tspan1668-9">0</tspan></tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="250.91145"
y="849.21539"
id="text2711-6-2-9-23-0"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-3-0"
x="250.91145"
y="849.21539"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '"><tspan
style="font-style:italic"
id="tspan9923-0-3">x</tspan><tspan
style="font-size:65%;baseline-shift:sub"
id="tspan1668-9-3" /></tspan></text>
</g>
</svg>
<br/>
Figure <a name='fig1'>1</a>. A system consisting of an object of mass $m$ moving on a rough
horizontal floor with coefficient of kinetic friction $\mu_k$.
```
The final state of the object, i.e. when the velocity $v = 0$, is shown in grey on the right-hand side of Figure [1](#fig1).
### Force diagram
A diagram of the forces acting on the object needs to be drawn based on the information in Figure [1](#fig1) and Table [1](#tab1); it is given below.
```
%%html
<svg
width="320"
height="200"
viewBox="0 0 320 200.00001"
id="svg2"
version="1.1"
inkscape:version="1.1.2 (b8e25be833, 2022-02-05)"
sodipodi:docname="mass-horizontal-rough-surface-fbd.svg"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/">
<defs
id="defs4">
<marker
style="overflow:visible"
id="TriangleOutM"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479" />
</marker>
<marker
style="overflow:visible"
id="marker11604"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow2Mend"
inkscape:isstock="true">
<path
transform="scale(-0.6)"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:0.625;stroke-linejoin:round"
id="path11602" />
</marker>
<marker
style="overflow:visible"
id="Arrow2Mend"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="Arrow2Mend"
inkscape:isstock="true">
<path
transform="scale(-0.6)"
d="M 8.7185878,4.0337352 -2.2072895,0.01601326 8.7185884,-4.0017078 c -1.7454984,2.3720609 -1.7354408,5.6174519 -6e-7,8.035443 z"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:0.625;stroke-linejoin:round"
id="path11361" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-3"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-1" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-35"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-0" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-0"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-4" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-37"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-9" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-9"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-8" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-9-3"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-8-3" />
</marker>
<marker
style="overflow:visible"
id="TriangleOutM-37-5"
refX="0"
refY="0"
orient="auto"
inkscape:stockid="TriangleOutM"
inkscape:isstock="true">
<path
transform="scale(0.4)"
style="fill:context-stroke;fill-rule:evenodd;stroke:context-stroke;stroke-width:1pt"
d="M 5.77,0 -2.88,5 V -5 Z"
id="path11479-9-9" />
</marker>
</defs>
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.2079428"
inkscape:cx="159.36185"
inkscape:cy="35.597712"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
inkscape:snap-bbox="false"
inkscape:snap-global="false"
units="px"
showborder="true"
inkscape:showpageshadow="true"
borderlayer="false"
inkscape:window-width="1366"
inkscape:window-height="705"
inkscape:window-x="-8"
inkscape:window-y="-8"
inkscape:window-maximized="1"
inkscape:pagecheckerboard="0">
<inkscape:grid
type="xygrid"
id="grid970" />
</sodipodi:namedview>
<metadata
id="metadata7">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(0,-732.36216)">
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="148.01953"
y="766.72156"
id="text2711-6-2-9-23"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-3"
x="148.01953"
y="766.72156"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">N</tspan></text>
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="251.40584"
y="806.94421"
id="text2711-6-2-9"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2"
x="251.40584"
y="806.94421"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '"><tspan
style="font-style:italic"
id="tspan9923">v</tspan><tspan
style="font-size:65%;baseline-shift:sub"
id="tspan1668" /></tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM)"
d="m 215.98949,804.88502 25.7387,1.3e-4"
id="path11252" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="153.68098"
y="915.71051"
id="text2711-6-2-9-2"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-84"
x="153.68098"
y="915.71051"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">w</tspan></text>
<path
style="fill:none;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-37)"
d="m 31.113403,791.97918 -1.3e-4,25.7387"
id="path11252-5" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="26.501303"
y="780.6109"
id="text2711-6-2-9-8"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-2"
x="26.501303"
y="780.6109"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">g</tspan></text>
<rect
style="fill:#ffffff;fill-opacity:1;stroke:#000000;stroke-width:1;stroke-linecap:round;stroke-opacity:1"
id="rect1007"
width="59"
height="59"
x="130.5"
y="792.86218"
rx="0"
ry="0" />
<g
id="g1363"
transform="translate(-6,20)">
<path
style="fill:none;stroke:#ff0000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-9)"
d="m 161.00001,831.69534 -45.73871,1.3e-4"
id="path11252-4" />
<path
style="fill:none;stroke:#0000ff;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-9-3)"
d="m 160.79738,832.36215 -1.3e-4,-75.7387"
id="path11252-4-6" />
</g>
<path
style="fill:none;stroke:#000000;stroke-width:1.5;stroke-linecap:butt;stroke-linejoin:miter;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;marker-end:url(#TriangleOutM-37-5)"
d="m 159.99967,822.02879 3.4e-4,75.73871"
id="path11252-5-0" />
<text
xml:space="preserve"
style="font-style:normal;font-weight:normal;font-size:18.6667px;line-height:1.25;font-family:sans-serif;fill:#000000;fill-opacity:1;stroke:none"
x="85.624084"
y="854.51099"
id="text2711-6-2-9-8-4"><tspan
sodipodi:role="line"
id="tspan2709-5-9-2-2-1"
x="85.624084"
y="854.51099"
style="font-style:italic;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:18.6667px;font-family:'Times New Roman';-inkscape-font-specification:'Times New Roman, '">f<tspan
style="font-size:65%;baseline-shift:sub"
id="tspan2197">k</tspan></tspan></text>
</g>
</svg>
<br>
Figure <a name='fig2'>2</a>. Diagram of the forces acting on the object of mass $m$.
```
It can be seen that in the $y$ direction there are the normal force $N$ and the gravitational force $w$, while in the $x$ direction there is only the kinetic friction force $f_k$, which opposes the motion. The direction of motion is given by the direction of the velocity $v$.
## Numerical method
The integral of a function $f(x)$ of the form
<a name='eqn8'></a>
\begin{equation}\label{eqn:integral-1}\tag{8}
A = \int_a^b f(x) dx
\end{equation}
can be approximated by
<a name='eqn9'></a>
\begin{equation}\label{eqn:integral-2}\tag{9}
A \approx \sum_{i = 0}^{N-1} f\left[ \tfrac12(x_i + x_{i+1}) \right] \Delta x
\end{equation}
which is known as the midpoint (rectangle) rule, where
<a name='eqn10'></a>
\begin{equation}\label{eqn:integral-3}\tag{10}
\Delta x = \frac{b - a}{N}
\end{equation}
with $N$ the number of partitions. The variable $x_i$ in Equation ([9](#eqn9)) is given by
<a name='eqn11'></a>
\begin{equation}\label{eqn:integral-4}\tag{11}
x_i = a + i\Delta x
\end{equation}
with $i = 0, \dots, N$.
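A minimal Python sketch of Equations ([9](#eqn9))–([11](#eqn11)); the test function $x^2$ is an arbitrary choice used only to check the implementation against a known integral.
```
import numpy as np

def midpoint_integral(f, a, b, N):
    # midpoint rectangle rule: Eq. (9), with dx from Eq. (10) and x_i from Eq. (11)
    dx = (b - a) / N
    xi = a + np.arange(N + 1) * dx      # grid points x_0, ..., x_N
    xm = 0.5 * (xi[:-1] + xi[1:])       # midpoint of each partition
    return np.sum(f(xm) * dx)

# check against a known result: the integral of x^2 over [0, 1] equals 1/3
print(midpoint_integral(lambda x: x**2, 0.0, 1.0, 100))   # approximately 0.33332
```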
## Solution
Applying Equations ([1](#eqn1)), ([2](#eqn2)), ([3](#eqn3)), ([4](#eqn4)), and ([5](#eqn5)) to Figure [2](#fig2) yields
<a name='eqn12'></a>
\begin{equation}\label{eqn:friction}\tag{12}
f_k = \mu_k mg
\end{equation}
and the work it does is
<a name='eqn13'></a>
\begin{equation}\label{eqn:friction-work}\tag{13}
\begin{array}{rcl}
W & = & \displaystyle \int_{x_0}^x f_k dx \newline
& = & \displaystyle \int_{x_0}^x \mu_k m g dx \newline
& = & \displaystyle m g \int_{x_0}^x \mu_k dx
\end{array}
\end{equation}
where the coefficient of kinetic friction may itself be a function of position, $\mu_k = \mu_k(x)$.
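For the special case of a constant $\mu_k$, the integral in Equation ([13](#eqn13)) is elementary,
\begin{equation}\notag
W = m g \int_{x_0}^{x} \mu_k \, dx = \mu_k m g \, (x - x_0),
\end{equation}
so $W$ then grows linearly with the distance travelled; any curvature of $W(x)$ therefore comes from the position dependence of $\mu_k(x)$.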
```
import numpy as np
import matplotlib.pyplot as plt
plt.ion()

# example parameter values (assumed for illustration; symbols follow Table 1)
m = 1.0        # kg, mass of the object
g = 9.8        # m/s^2, gravitational acceleration
mu0 = 0.2      # base value of the kinetic friction coefficient

def mu_k(x):
    # position-dependent kinetic friction coefficient (an assumed example form)
    return mu0 * (1 + x)

# set integral lower and upper bounds (x0 = a)
a = 0.0
b = 1.0
N = 100                            # number of partitions, Eq. (10)
dx = (b - a) / N
xi = a + np.arange(N + 1) * dx     # grid points, Eq. (11)
xm = 0.5 * (xi[:-1] + xi[1:])      # midpoints of each partition

# generate x (distance travelled) and y = W(x) from the midpoint rule, Eq. (9)
x = xi[1:] - a
y = m * g * np.cumsum(mu_k(xm) * dx)

## plot results
fig, ax = plt.subplots()
ax.scatter(x, y, s=10)
ax.set_xlabel("$x - x_0$")
ax.set_ylabel("$W$ (J)")

from IPython.core.display import HTML
HTML('''
<div>
Figure <a name='fig3'>3</a>. Work $W$ done by friction against distance travelled $x - x_0$.
</div>
''')
```
## Discussion
Based on Figure [3](#fig3), it can be seen that with $\mu_k = \mu_k(x)$ the curve $W(x)$ is no longer linear, because the work depends on how far along the path it is evaluated.
## Conclusion
The work done by friction with a position-dependent coefficient $\mu_k = \mu_k(x)$ has been computed.
## References
1. J. A. C. Martins, J. T. Oden, F. M. F. Simões, "A study of static and kinetic friction", International Journal of Engineering Science, vol 28, no 1, p 29-92, 1990, url <https://doi.org/10.1016/0020-7225(90)90014-A>.
2. Carl Rod Nave, "Friction", HyperPhysics, 2017, url <http://hyperphysics.phy-astr.gsu.edu/hbase/frict.html#fri> [20220419].
3. Wikipedia contributors, "Friction", Wikipedia, The Free Encyclopedia, 12 April 2022, 00:33 UTC, url <https://en.wikipedia.org/w/index.php?oldid=1082223658> [20220419].
4. Tia Ghose, Ailsa Harvey, "What is friction?", Live Science, 8 Feb 2022, url <https://www.livescience.com/37161-what-is-friction.html> [20220419].
|
github_jupyter
|
## Progressive Elaboration of Tasks
[Progressive elaboration](https://project-management-knowledge.com/definitions/p/progressive-elaboration/)
is the process of adding additional detail and fidelity to the project plan
as additional or more complete information becomes available. Progressive elaboration allows the project team to begin with a sketch of their project plan that becomes more detailed as needed and as development priorities emerge from the increasingly detailed picture.
In this demonstration and exercise we will focus on the creation of subtasks, and identifying dependencies between tasks that impact the sequence of work. Our next demo and exercise will focus on assigning tasks to resources and developing an effort instead of duration-focused view of our project.
[foundation-03.tjp file](Sample-Files/foundation-03.tjp)
project foundation "Foundation Project" 2018-07-01 - 2019-06-30 {
currency "USD"
timeformat "%Y-%m-%d"
numberformat "-" "" "," "." 1
currencyformat "(" ")" "," "." 0
now 2018-07-01-00:00
weekstartsmonday
workinghours mon - fri 9:00 - 12:00, 13:00 - 18:00
workinghours sat, sun off
}
############## accounts ################
############## resources ###############
############## tasks ###################
task projectstart "Project Start" {
start ${projectstart}
}
task doing "Making the Goods" {
start ${projectstart}
task buy_materials "Buy the materials" {
duration 1m
}
task glue_together "Glue everything together" {
depends !buy_materials # relative reference to task
duration 8w
}
task cleanup "Clean up the mess" {
depends doing.glue_together # absolute reference to task
duration 30d
}
}
task refining "Refining the Goods" {
depends doing
task paint "Paint the items" {
duration 3w
}
task attach_bells "Screw bells onto items" {
depends !paint
task buy_bells "Buy bells"{duration 1w}
task use_screwdriver "Use screwdriver" {
depends !buy_bells
duration 1m
}
}
task repaint "Repaint the items" {
depends !attach_bells
duration 2m
}
task explain "Explain to Boss what you spent the last 3 months doing" {
depends !repaint
duration 2h
}
}
task selling "Selling the Goods" {
depends refining.explain
duration 4m
}
############## reports #################
taskreport "reports/foundation-03_taskreport" {
formats html, csv
headline "Project Breakdown"
}
```
%%bash
cd Sample-Files
tj3 foundation-03.tjp
```
producing this HTML report:
[Sample-Files/reports/foundation-03_taskreport.html](Sample-Files/reports/foundation-03_taskreport.html)
and the following CSV file:
[Sample-Files/reports/foundation-03_taskreport.csv](Sample-Files/reports/foundation-03_taskreport.csv)
## Practice ...
Based on the fleshed out skeleton (collection of high-level tasks) you previously developed, you can now add subtasks and dependencies to those tasks to develop a more detailed plan. When done, you can generate new HTML and CSV reports that illustrate/contain the results of the project scheduling process in TaskJuggler.
### Activity:
1. Modify your previously created fleshed out `.tjp` file or create a new file based on the content of your "skeleton" file and add some sub-tasks and dependencies to your plan.
2. Run the TaskJuggler scheduler to test your skeleton to make sure that it does not generate any errors. If it does, see if you can fix them and re-run the scheduler.
<video controls src="images/Timer10Minutestory.mov" />
-------------------
[(0)](TJ-00%20What%20is%20TaskJuggler.ipynb) --
[(1)](TJ-01%20Project%20Skeleton.ipynb) --
[(2)](TJ-02%20A%20Fleshed%20Out%20TaskJuggler%20Outline.ipynb) --
(3) --
[(4)](TJ-04%20Assigning%20Resources%20%26%20Cost%20Estimation%20in%20TaskJuggler.ipynb) --
[(5)](TJ-05%20Project%20Tracking%20in%20TaskJuggler.ipynb) --
[(6)](TJ-06%20Visualization%20%26%20Reporting%20in%20TaskJuggler.ipynb)
|
github_jupyter
|
# *Import Libraries*
```
import scipy.io
import numpy as np
from matplotlib import pyplot as plt
import sys
sys.path.append('/home/bhustali/.conda/envs/tf2/svcca-master')
import cca_core
```
# Simple Example
```
# # assume A_fake has 20 neurons and we have their activations on 2000 datapoints
# A_fake = np.random.randn(20, 2000)
# # B_fake has 50 neurons with activations on the same 2000 datapoints
# # Note A and B do *not* have to have the same number of neurons
# B_fake = np.random.randn(50, 2000)
# # computing CCA simliarty between A_fake, B_fake
# # We expect similarity should be very low, because the random activations are not correlated
# results = cca_core.get_cca_similarity(A_fake, B_fake,compute_dirns=True, verbose=True)
# print("Returned Information: \n")
# print(results.keys())
# print("Single number for summarizing similarity")
# print('{:.4f}'.format(np.mean(results["cca_coef1"])))
```
# SVCCA Layers
```
for k in range(1,587):
data_A = scipy.io.loadmat('/home/bhustali/data/mat/neuron_output (586).mat')
data_B = scipy.io.loadmat('/home/bhustali/data/mat/neuron_output ' + '(' + str(k)+ ')' + '.mat')
S = np.zeros((8,8))
for i in range (1,9):
for j in range (1,9):
#extract layer outputs l_1 and l_2
A_layer = data_A['layer_' + str(i)].transpose()
B_layer = data_B['layer_' + str(j) ].transpose()
# Mean subtract activations
'''
a = np.array([[1, 2], [3, 4]])
np.mean(a, axis=1) = array([1.5, 3.5])
'''
c_A_layer = A_layer - np.mean(A_layer, axis=1, keepdims=True)
c_B_layer = B_layer - np.mean(B_layer, axis=1, keepdims=True)
#Singular Value Decomposition(SVD)
#obtain l_1' and l_2'
'''
U, S, Vh = np.linalg.svd(A, full_matrices=False)
U = [mxs] = Left-singular vector of A
S = [sxs] = Singular values of A
Vh = [sxp] = Right-singular vectors of A
'''
u_A, s_A, vh_A = np.linalg.svd(c_A_layer, full_matrices=False)
u_B, s_B, vh_B = np.linalg.svd(c_B_layer, full_matrices=False)
sv = 10 #select an appropriate value
# print("Percentage of variance explained by 'sv' singular vectors", 100*np.sum(s_A[:sv])/np.sum(s_A))
'''singular vectors = S * Vh
Equivalent to Uh * A = Uh* (U*S*Vh) = S*Vh
'''
#We compute the subspace with 'sv' largest singular values
sv_A_layer = np.matmul(s_A[:sv]*np.eye(sv), vh_A[:sv])
sv_B_layer = np.matmul(s_B[:sv]*np.eye(sv), vh_B[:sv])
svcca_results = cca_core.get_cca_similarity(sv_A_layer, sv_B_layer, epsilon=0, threshold=1,compute_dirns=True, verbose=False)
S[i-1,j-1] = np.mean(svcca_results["cca_coef1"])
fig, ax = plt.subplots()
ax.matshow(S, cmap=plt.cm.Blues)
ax.set_xticklabels([0,'B1','B2','B3','B4','B5','B6','B7','B8'])
ax.set_yticklabels([0,'A1','A2','A3','A4','A5','A6','A7','A8'])
for l in range(8):
for m in range(8):
c = S[m,l]
ax.text(l, m, f"{c:.2f}", va='center', ha='center')
plt.savefig('/home/bhustali/data/movie/' + str(k) + '.png',dpi = 100)
plt.close('all')
```
# How do layer outputs change during optimization?
Here, we measure the correlation of the output after each iteration (data_B) with the outputs of the layers before training (data_A)
```
# s = np.zeros((587,8))
# #outputs of the layers before training
# data_A = scipy.io.loadmat('data/mat/neuron_output (0).mat')
# for i in range (1,9):
# for k in range(0,587):
# #output after each iteration
# data_B = scipy.io.loadmat('data/mat/neuron_output ' + '(' + str(k)+ ')' + '.mat')
# j = i
# A_layer = data_A['layer_' + str(i)].transpose()
# B_layer = data_B['layer_' + str(j) ].transpose()
# #shift mean to 0
# c_A_layer = A_layer - np.mean(A_layer, axis=1, keepdims=True)
# c_B_layer = B_layer - np.mean(B_layer, axis=1, keepdims=True)
# # Singular Value Decomposition(SVD)
# u_A, s_A, vh_A = np.linalg.svd(c_A_layer, full_matrices=False)
# u_B, s_B, vh_B = np.linalg.svd(c_B_layer, full_matrices=False)
# sv = 10 #select an appropriate value
# sv_A_layer = np.matmul(s_A[:sv]*np.eye(sv), vh_A[:sv])
# sv_B_layer = np.matmul(s_B[:sv]*np.eye(sv), vh_B[:sv])
# #compute similarity
# svcca_results = cca_core.get_cca_similarity(sv_A_layer, sv_B_layer, epsilon=0, threshold=1,
# compute_dirns=True, verbose=False)
# s[k,i-1] = np.mean(svcca_results["cca_coef1"])
# #Plotting
# fig, ax = plt.subplots()
# for i in range (0,8):
# ax.plot(s[:,i], label = str(i+1))
# ax.set(xlabel='iterations', ylabel='rho',
# title='Convergence')
# ax.grid()
# plt.legend()
# # fig.savefig('divergence.png', dpi = 500)
```
|
github_jupyter
|
# Walk through all streets in a city
Preparation of the examples for the challenge: find the shortest path through a set of streets.
```
import matplotlib.pyplot as plt
%matplotlib inline
from jyquickhelper import add_notebook_menu
add_notebook_menu()
```
## Problem description
What is the shortest route that goes through every street in a given set of streets? This problem is known as the *Route inspection problem* (also called the Chinese postman problem).
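As a quick illustration of the underlying graph problem, here is a toy sketch using the `networkx` library (not used elsewhere in this notebook): it tests whether a street graph admits an Eulerian circuit, i.e. a walk that covers every street exactly once, and constructs one when it exists.
```
import networkx as nx

# toy street network: nodes are intersections, edges are streets
G = nx.Graph()
G.add_edges_from([(0, 1), (1, 2), (2, 0), (0, 3), (3, 4), (4, 0)])

# an Eulerian circuit exists if and only if the graph is connected
# and every node has even degree
print(nx.is_eulerian(G))              # True for this toy graph
print(list(nx.eulerian_circuit(G)))   # a walk traversing each street exactly once
```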
## Data
[Seattle streets](https://data.seattle.gov/dataset/Street-Network-Database/afip-2mzr/data) from [data.seattle.gov](https://data.seattle.gov/)
### Read the data
```
import shapefile, os
if os.path.exists("Street_Network_Database/WGS84/Street_Network_Database.shp"):
rshp = shapefile.Reader("Street_Network_Database/WGS84/Street_Network_Database.shp")
shapes = rshp.shapes()
records = rshp.records()
else:
from pyensae.datasource import download_data
files = download_data("WGS84_seattle_street.zip")
rshp = shapefile.Reader("Street_Network_Database.shp")
shapes = rshp.shapes()
records = rshp.records()
shapes[0].__dict__
{k[0]:v for k,v in zip(rshp.fields[1:], records[0])}
from ensae_projects.datainc.data_geo_streets import get_fields_description
get_fields_description()
```
### Display the streets
```
streets5 = list(zip(records[:5], shapes[:5]))
streets5[2][1].points
import folium
from random import randint
from pyensae.notebookhelper import folium_html_map
c = streets5[0][1]
map_osm = folium.Map(location=[c.bbox[1], c.bbox[0]], zoom_start=9)
for rec, shape in streets5:
d = {k[0]: v for k,v in zip(rshp.fields[1:], rec)}
map_osm.add_child(folium.Marker([shape.points[0][1], shape.points[0][0]], popup=str(d["ORD_STNAME"])))
map_osm.add_child(folium.PolyLine(locations=[[_[1], _[0]] for _ in shape.points], weight=10))
folium_html_map(map_osm, width="60%")
```
## Find connected streets
```
street0 = streets5[0][1].points
street0
def connect_streets(st1, st2):
a1, b1 = st1[0], st1[-1]
a2, b2 = st2[0], st2[-1]
connect = []
if a1 == a2:
connect.append((0, 0))
if a1 == b2:
connect.append((0, 1))
if b1 == a2:
connect.append((1, 0))
if b1 == b2:
connect.append((1, 1))
return tuple(connect) if connect else None
neighbours = []
for i, street in enumerate(shapes):
points = street.points
con = connect_streets(street0, points)
if con:
neighbours.append(i)
neighbours
import folium
from pyensae.notebookhelper import folium_html_map
c = shapes[neighbours[0]]
map_osm = folium.Map(location=[c.bbox[1], c.bbox[0]], zoom_start=15)
points = set()
for index in neighbours:
rec, shape = records[index], shapes[index]
corners = [(_[1], _[0]) for _ in shape.points]
map_osm.add_child(folium.PolyLine(locations=corners, weight=10))
points |= set([corners[0], corners[-1]])
for x, y in points:
map_osm.add_child(folium.Marker((x, y), popup=str(index)))
folium_html_map(map_osm, width="50%")
c = shapes[neighbours[0]]
map_osm = folium.Map(location=[c.bbox[1], c.bbox[0]], zoom_start=15)
points = set()
for index in neighbours:
rec, shape = records[index], shapes[index]
corners = [(_[1], _[0]) for _ in shape.points]
map_osm.add_child(folium.PolyLine(locations=corners, weight=10))
points |= set([corners[0], corners[-1]])
for x, y in points:
map_osm.add_child(folium.CircleMarker((x, y), popup=str(index), radius=8, fill_color="yellow"))
folium_html_map(map_osm, width="50%")
```
## Extraction of all streets in a short perimeter
```
from shapely.geometry import Point, LineString
def enumerate_close(x, y, shapes, th=None):
p = Point(x,y)
for i, shape in enumerate(shapes):
obj = LineString(shape.points)
d = p.distance(obj)
if th is None or d <= th:
yield d, i
x, y = shapes[0].points[0]
closes = list(enumerate_close(x, y, shapes))
closes.sort()
closes[:10]
import folium
from ensae_projects.datainc.data_geo_streets import folium_html_street_map
folium_html_street_map([_[1] for _ in closes[:20]], shapes, html_width="50%", zoom_start=15)
def complete_subset_streets(subset, shapes):
extension = []
for i, shape in enumerate(shapes):
add = []
for s in subset:
to = shapes[s]
if s != i:
con = connect_streets(shapes[s].points, shapes[i].points)
if con is not None:
add.extend([_[1] for _ in con])
if len(set(add)) == 2:
extension.append(i)
return extension
subset = [index for dist, index in closes[:20]]
newset = set(subset + complete_subset_streets(subset, shapes))
print(list(sorted(newset)))
folium_html_street_map(newset, shapes, html_width="50%", zoom_start=15)
from ensae_projects.datainc.data_geo_streets import build_streets_vertices
vertices, edges = build_streets_vertices(newset, shapes)
vertices[:3], edges[:3]
from ensae_projects.datainc.data_geo_streets import plot_streets_network
plot_streets_network(newset, edges, vertices, shapes, figsize=(10,10));
```
|
github_jupyter
|
### Data Visualization
#### `matplotlib` - from the documentation:
https://matplotlib.org/3.1.1/tutorials/introductory/pyplot.html
`matplotlib.pyplot` is a collection of command style functions <br>
Each pyplot function makes some change to a figure <br>
`matplotlib.pyplot` preserves state across function calls
```
%matplotlib inline
import matplotlib.pyplot as plt
```
Call signatures::
```
plot([x], y, [fmt], data=None, **kwargs)
plot([x], y, [fmt], [x2], y2, [fmt2], ..., **kwargs)
```
Quick plot
The main usage of `plt` is the `plot()` and `show()` functions
https://matplotlib.org/3.1.1/api/pyplot_summary.html <br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html <br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.show.html <br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.legend.html<br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.figure.html<br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.subplot.html<br>
https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.annotate.html<br>
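A minimal quick plot using `plot()` and `show()` with the call signature above (the data points are arbitrary):
```
# x values, y values and a format string ('ro-' = red circles joined by a line)
plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro-', label='squares')
plt.xlabel('x')
plt.ylabel('y')
plt.legend()
plt.show()
```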
```
import pandas as pd
df_iris = pd.read_csv('https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv')
df_iris.head()
colors = {'setosa':'red', 'versicolor':'orange', 'virginica':'blue'}
def get_col(spec):
return colors[spec]
colors_col = df_iris.species.apply(get_col)
plt.scatter("petal_length","petal_width", data=df_iris, c = colors_col, s = 7, marker = "o")
legend_elements = [plt.Line2D([0], [0], marker='o', linestyle="", color=colors["setosa"], label="setosa"),
plt.Line2D([0], [0], marker='o', linestyle="", color=colors["versicolor"], label="versicolor"),
plt.Line2D([0], [0], marker='o', linestyle="", color=colors["virginica"], label="virginica")]
plt.legend(handles=legend_elements,loc="upper left", title="Species")
plt.show()
```
https://python-graph-gallery.com/matplotlib/
#### Using pandas `.plot()`
```
df_iris.groupby("species").mean().plot(kind='bar')
plt.show()
df_iris.plot(x= "petal_length", y = "petal_width" ,kind = "scatter", color = colors_col)
plt.savefig('output1.png')
```
https://github.com/pandas-dev/pandas/blob/v0.25.0/pandas/plotting/_core.py#L504-L1533
https://python-graph-gallery.com/wp-content/uploads/Matplotlib_cheatsheet_datacamp.png
<img src = "https://python-graph-gallery.com/wp-content/uploads/Matplotlib_cheatsheet_datacamp.png" width = "1000"/>
### `seaborn` - dataset-oriented plotting
Seaborn is a library that specializes in making *prettier* `matplotlib` plots of statistical data. <br>
It is built on top of matplotlib and closely integrated with pandas data structures.
https://seaborn.pydata.org/introduction.html<br>
https://python-graph-gallery.com/seaborn/
```
import seaborn as sns
```
`seaborn` lets users *style* their plotting environment.<br>
There are 5 preset themes: darkgrid (default), whitegrid, dark, white, and ticks.<br>
https://seaborn.pydata.org/tutorial/aesthetics.html
However, you can always use `matplotlib`'s `plt.style`
https://matplotlib.org/3.1.1/gallery/style_sheets/style_sheets_reference.html
```
sns.set(style='whitegrid')
#dir(sns)
sns.scatterplot(x='petal_length',y='petal_width',data=df_iris)
plt.show()
with plt.style.context(('ggplot')):
sns.scatterplot(x='petal_length',y='petal_width',data=df_iris)
plt.show()
sns.scatterplot(x='petal_length',y='petal_width', hue = "species",data=df_iris)
plt.show()
```
#### Violin plot
Fancier box plot that gets rid of the need for 'jitter' to show the inherent distribution of the data points
```
sns.set(style="dark")
fig, axes = plt.subplots(figsize=(7, 3))
sns.violinplot(data=df_iris, ax=axes)
axes.set_ylabel('value')
axes.set_xlabel('feature')
plt.show()
```
#### Distplot
```
sns.set(style='dark', palette='muted')
# 1 column, 4 rows
f, axes = plt.subplots(4,1, figsize=(10,10), sharex=True)
# Regular displot
sns.distplot(df_iris.petal_length, ax=axes[0])
# Change the color
sns.distplot(df_iris.petal_width, kde=False, ax=axes[1], color='orange')
# Show the Kernel density estimate
sns.distplot(df_iris.sepal_width, hist=False, kde_kws={'shade':True}, ax=axes[2], color='purple')
# Show the rug
sns.distplot(df_iris.sepal_length, hist=False, rug=True, ax=axes[3], color='green')
plt.show()
```
#### FacetGrid
```
sns.set()
columns = ['species', 'petal_length', 'petal_width']
facet_column = 'species'
g = sns.FacetGrid(df_iris.loc[:,columns], col=facet_column, hue=facet_column)
g.map(plt.scatter, 'petal_length', 'petal_width')
sns.relplot(x="petal_length", y="petal_width", col="species",
hue="species", style="species", size="sepal_width",
data=df_iris)
plt.show()
```
https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html
```
sns.catplot(x="species", y="petal_length", data=df_iris)
plt.show()
sns.catplot(kind="box", data=df_iris)
plt.show()
# https://seaborn.pydata.org/tutorial/categorical.html
tips = sns.load_dataset("tips")
print(tips.head())
sns.catplot(x="day", y="total_bill", hue="smoker", kind="box", data=tips)
plt.show()
```
Plot the tips by day with two side by side box plots for males and females and different subplots for the time of the meal (lunch/dinner).
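One possible solution sketch for this exercise, assuming the column names of the `tips` dataset loaded above (`day`, `tip`, `sex`, `time`):
```
# side-by-side boxes per sex, one subplot per meal time (lunch/dinner)
sns.catplot(x="day", y="tip", hue="sex", col="time", kind="box", data=tips)
plt.show()
```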
```
# help(sns.catplot)
sns.pairplot(df_iris, hue='species', height=2.5);
```
https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf
<img src = "https://s3.amazonaws.com/assets.datacamp.com/blog_assets/Python_Seaborn_Cheat_Sheet.pdf" width = "1000"/>
### `plotnine` - R ggplot2 in python
plotnine is an implementation of a grammar of graphics in Python, it is based on ggplot2. The grammar allows users to compose plots by explicitly mapping data to the visual objects that make up the plot.
Plotting with a grammar is powerful, it makes custom (and otherwise complex) plots are easy to think about and then create, while the simple plots remain simple.
```
#!pip install plotnine
```
https://plotnine.readthedocs.io/en/stable/
```
from plotnine import *
```
https://plotnine.readthedocs.io/en/stable/api.html
```
p = ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point()
# add transparency - to address overlapping points
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(size = 5, alpha=0.5)
# change point size
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point(size = 0.7, alpha=0.7)
# more parameters
ggplot(data=df_iris) + aes(x="petal_length", y = "petal_width") + geom_point() + scale_x_log10() + xlab("Petal Length")
n = "3"
features = "length and width"
title = f'species : {n}; petal : {features}'
#title = 'species : {}; petal : {}'.format(n,features)
ggplot(data=df_iris) +aes(x='petal_length',y='petal_width',color="species") \
+ geom_point(size=0.7) + facet_wrap('~species',nrow=3) \
+ theme(figure_size=(7,9)) + ggtitle(title)
p = ggplot(data=df_iris) + aes(x='petal_length') \
+ geom_histogram(binwidth=1,color='black',fill='grey')
p
ggsave(plot=p, filename='hist_plot_with_plotnine.png')
tips = sns.load_dataset("tips")
print(tips.head())
ggplot(aes(x="day", y="tip",\
color="smoker"), data=tips) \
+ geom_boxplot()\
+ geom_jitter(width=0.05, alpha=0.4) \
+ facet_grid(".~smoker")
```
http://cmdlinetips.com/2018/05/plotnine-a-python-library-to-use-ggplot2-in-python/ <br>
https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
<img src = "https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf" width = "1000"/>
Use ggplot to plot the sepal_length in boxplots separated by species, add new axis labels and make the y axis log10-scaled.
* Write a function that takes a row of the dataframe as a parameter and, depending on the species, returns:
  * setosa: the petal_length
  * versicolor: the petal_width
  * virginica: the sepal_length
Apply this function to every row in the dataset. <br>
Use ggplot to make a histogram of the resulting values (a possible solution sketch is given below).
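A possible solution sketch, reusing `df_iris` and the `plotnine` names imported above (the bin width is an arbitrary choice):
```
# 1) boxplot of sepal_length by species with a log10 y axis
p1 = (ggplot(df_iris) + aes(x='species', y='sepal_length')
      + geom_boxplot() + scale_y_log10()
      + xlab('Species') + ylab('Sepal length'))
print(p1)

# 2) pick a different feature depending on the species, row by row
def pick_feature(row):
    if row['species'] == 'setosa':
        return row['petal_length']
    elif row['species'] == 'versicolor':
        return row['petal_width']
    return row['sepal_length']   # virginica

values = df_iris.apply(pick_feature, axis=1)

# 3) histogram of the resulting values
print(ggplot(pd.DataFrame({'value': values})) + aes(x='value') + geom_histogram(binwidth=0.5))
```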
```
#dir()
```
https://plotnine.readthedocs.io/en/stable/api.html
Look for scale functions.
More resources:
https://github.com/swyder/plotnine_tutorial/blob/master/plotnine_demo_sw.ipynb <br>
https://datacarpentry.org/python-ecology-lesson/07-visualization-ggplot-python/
|
github_jupyter
|
<h1>Phi K Correlation</h1>
Phi K correlation is a newly emerging correlation coefficient with the following advantages:
- it works consistently between categorical, ordinal and interval variables
- it can capture non-linear dependency (see the toy sketch after this list)
- it reverts to the Pearson correlation coefficient in case of a bi-variate normal input distribution
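A toy sketch of the non-linearity point, assuming only that `phik`, `pandas` and `numpy` are installed: for $y = x^2$ the Pearson correlation is close to zero, while $\phi_K$ picks up the dependency.
```
import numpy as np
import pandas as pd
import phik  # importing phik adds the .phik_matrix() accessor to DataFrames

# purely non-linear (quadratic) dependency between x and y
x = np.random.uniform(-1, 1, 2000)
toy = pd.DataFrame({'x': x, 'y': x ** 2})

print(toy.corr().loc['x', 'y'])                                  # Pearson: close to 0
print(toy.phik_matrix(interval_cols=['x', 'y']).loc['x', 'y'])   # phi_k: substantially higher
```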
```
import phik
from phik import resources
from phik.binning import bin_data
from phik.decorators import *
from phik.report import plot_correlation_matrix
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, StandardScaler, MinMaxScaler
import seaborn as sns
import matplotlib.pyplot as plt
import networkx as nx
#loading the SalePrice dataset
df=pd.read_csv('dataset.csv')
df.drop(['Id'], axis=1,inplace=True)
```
**Preprocessing**
```
#Preprocessing the data
class PreProcessor:
def __init__(self):
#treating certain categorical columns as ordinal
self.encoder={}
self.encoder['LotShape']={'Reg':0,'IR1':1,'IR2':2,'IR3':3}
self.encoder['LandSlope']={'Gtl':1, 'Mod':2, 'Sev':3}
self.encoder['GarageFinish']={'Fin':3, 'RFn':2, 'Unf':1, 'VNA':0}
self.encoder['BsmtExposure']={'Gd':4,'Av':3,'Mn':2,'No':1,'VNA':0}
self.encoder['Functional']={'Typ':0,'Min1':1,'Min2':2,'Mod':3,'Maj1':4,'Maj2':5,'Sev':6,'Sal':7}
self.encoder['PavedDrive']={'Y':2,'P':1,'N':0}
#columns with values as Ex,Gd,TA,Fa,Po,VNA can be treated as ordinal
ratings={'Ex':5,'Gd':4,'TA':3,'Fa':2,'Po':1,'VNA':0}
rated_cols=['ExterQual', 'ExterCond','BsmtQual','BsmtCond','KitchenQual','FireplaceQu','GarageQual', 'GarageCond']
for col in rated_cols:
self.encoder[col]=ratings
self.categorical_encoded=self.encoder.keys()
def preprocessing1(self,df):
#drop columns with mostly one value or mostly missing values
dropped_cols=['Street', 'Alley', 'Utilities', 'Condition2', 'RoofMatl', 'Heating','LowQualFinSF', '3SsnPorch', 'PoolArea', 'PoolQC', 'Fence', 'MiscFeature', 'MiscVal']
df.drop(dropped_cols, axis=1, inplace=True)
#treating missing values
#Filling missing values with median
col1=['LotFrontage','MasVnrArea']
for col in col1:
df[col].fillna(df[col].median(), inplace=True)
#Fill missing values with new category "VNA"
col2=['BsmtQual','BsmtCond','BsmtExposure','BsmtFinType1','BsmtFinType2','GarageType','GarageFinish','GarageQual','GarageCond','FireplaceQu','MasVnrType', 'Electrical']
for col in col2:
df[col].fillna('VNA', inplace=True)
#Replacing Na values in GarageYrBlt with corresponding values in YearBuilt
df.loc[(pd.isnull(df.GarageYrBlt)), 'GarageYrBlt'] = df.YearBuilt
#encoding categorical columns to ordinal
for col in self.categorical_encoded:
df[col]=df[col].apply(lambda val: self.encoder[col][val])
#apply lable encoder
for col in df.select_dtypes(include=['object']).columns:
df[col] = LabelEncoder().fit_transform(df[col])
return df
def preprocessing2(self,df):
df=self.preprocessing1(df)
#filtered columns
numerical_filtered=['YearBuilt','TotRmsAbvGrd','GrLivArea','1stFlrSF','GarageYrBlt','YearRemodAdd','GarageArea','SalePrice']
ordinal_filtered=['GarageCars','OverallQual','Fireplaces','GarageFinish','BsmtFullBath','KitchenQual','FullBath','FireplaceQu','BsmtQual','TotalBsmtSF']
categorical_filtered=['MSZoning', 'Neighborhood', 'Foundation', 'BsmtFinType1', 'HeatingQC', 'CentralAir', 'GarageType', 'SaleCondition', 'MSSubClass', 'MasVnrType']
return df[numerical_filtered+ordinal_filtered+categorical_filtered], numerical_filtered
#create pre processor object
pre_processor=PreProcessor()
#preprocess the data and get interval column
preprocessed_df, interval_cols=pre_processor.preprocessing2(df)
```
**PhiK correlation**
```
# get the phi_k correlation matrix between all variables
coerr_mat=preprocessed_df.phik_matrix(interval_cols=interval_cols)
#colour map
cmap = sns.diverging_palette(220, 10, as_cmap=True)
#plotting phik correlation
plot_correlation_matrix(coerr_mat.values, x_labels=coerr_mat.columns, y_labels=coerr_mat.index,
vmin=0, vmax=1, color_map=cmap, title=r'correlation $\phi_K$', fontsize_factor=1,
figsize=(7*3,5.5*3))
plt.tight_layout()
plt.show()
```
**Finding highly correlated features based on the above heat map and visualizing them as a graph**
```
class GraphVisualization:
def __init__(self):
# visual is a list which stores all
# the set of edges that constitutes a
# graph
self.visual = []
# addEdge function inputs the vertices of an
# edge and appends it to the visual list
def addEdge(self, a, b):
temp = [a, b]
self.visual.append(temp)
# In visualize function G is an object of
# class Graph given by networkx G.add_edges_from(visual)
# creates a graph with a given list
# nx.draw_networkx(G) - plots the graph
# plt.show() - displays the graph
    def visualize(self):
        G = nx.Graph()
        G.add_edges_from(self.visual)
        # set the style and figure size before drawing so they apply to this plot
        plt.style.use('ggplot')
        plt.figure(figsize=(8, 5))
        nx.draw_shell(G, alpha = 0.7, with_labels = True, edge_color ='.4', cmap = cmap, font_size=12 )
        plt.title("correlation visualization as graph")
        plt.show()
G = GraphVisualization()
for col1 in preprocessed_df.columns:
for col2 in preprocessed_df.columns:
if col1!=col2:
#if the correlation is greater than 0.9, add an edge to the graph
if coerr_mat[col1][col2]>0.9:
G.addEdge(col1,col2)
G.visualize()
```
Based on the graph plot of the PhiK correlation, the following features are highly correlated:
- GarageArea and GarageCars
- GarageYrBlt and YearBuilt
- Neighborhood and MSZoning
- TotalBsmtSF is highly correlated with 1stFlrSF, SalePrice, BsmtQual, GrLivArea and Neighborhood
**Global PhiK Correlations**
This metric signifies how much a column is correlated with all other columns in the dataset
```
# get global correlations based on phi_k correlation matrix
global_coerr=preprocessed_df.global_phik(interval_cols=interval_cols)
#plotting global phik correlation
plot_correlation_matrix(global_coerr[0], x_labels=["correlation"], y_labels=global_coerr[1],vmin=0, vmax=1, color_map=cmap, title=r'global correlation $\phi_K$', fontsize_factor=1,figsize=(7*3,5.5*3))
```
|
github_jupyter
|
Author: Saeed Amen (@thalesians) - Managing Director & Co-founder of [the Thalesians](http://www.thalesians.com)
## Introduction
With the UK general election in early May 2015, we thought it would be a fun exercise to demonstrate how you can investigate market price action over historical elections. We shall be using Python, together with Plotly for plotting. Plotly is a free web-based platform for making graphs. You can keep graphs private, make them public, and run [Chart Studio Enterprise](https://plot.ly/product/enterprise/) on your own servers. You can find more details [here](https://plot.ly/python/getting-started/).
## Getting market data with Bloomberg
To get market data, we shall be using Bloomberg. As a starting point, we have used bbg_py from [Brian Smith's TIA project](https://github.com/bpsmith/tia/tree/master/tia/bbg), which allows you to access Bloomberg via COM (an older method), modifying it to make it compatible with Python 3.4. Whilst we shall use it here to access historical daily data, there are also functions which enable us to download intraday data. This method is only compatible with 32 bit versions of Python and assumes you are running the code on a Bloomberg terminal (it won't work without a valid Bloomberg licence).
In my opinion a better way to access Bloomberg via Python is via the official Bloomberg open source Python API; however, at the time of writing the official version is not yet compatible with Python 3.4. Fil Mackay has created a Python 3.4 compatible version of this [here](https://github.com/filmackay/blpapi-py), which I have used successfully. Whilst it takes slightly more time to configure (and compile using Windows SDK 7.1), it has the benefit of being compatible with 64 bit Python, which I have found invaluable in my analysis (have a read of [this](http://ta.speot.is/2012/04/09/visual-studio-2010-sp1-windows-sdk-7-1-install-order/) in case of failed installations of Windows SDK 7.1).
Quandl can be used as an alternative data source, if you don't have access to a Bloomberg terminal, which I have also included in the code.
## Breaking down the steps in Python
Our project will consist of several parts:
- bbg_com - low level interaction with BBG COM object (adapted for Python 3.4) (which we are simply calling)
- datadownloader - wrapper for BBG COM, Quandl and CSV access to data
- eventplot - reusuable functions for interacting with Plotly and creating event studies
- ukelection - kicks off the whole script process
### Downloading the market data
As with any sort of financial market analysis, the first step is obtaining market data. We create the DataDownloader class, which acts as a wrapper for Bloomberg, Quandl and CSV market data. We write a single function "download_time_series" for this. We could of course extend this for other data sources such as Yahoo Finance. Our output will be Pandas based dataframes. We want to make this code generic, so the tickers are not hard coded.
```
# for time series manipulation
import pandas
class DataDownloader:
def download_time_series(self, vendor_ticker, pretty_ticker, start_date, source, csv_file = None):
if source == 'Quandl':
import Quandl
# Quandl requires API key for large number of daily downloads
# https://www.quandl.com/help/api
spot = Quandl.get(vendor_ticker) # Bank of England's database on Quandl
spot = pandas.DataFrame(data=spot['Value'], index=spot.index)
spot.columns = [pretty_ticker]
elif source == 'Bloomberg':
from bbg_com import HistoricalDataRequest
req = HistoricalDataRequest([vendor_ticker], ['PX_LAST'], start = start_date)
req.execute()
spot = req.response_as_single()
spot.columns = [pretty_ticker]
elif source == 'CSV':
dateparse = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d')
# in case you want to use a source other than Bloomberg/Quandl
spot = pandas.read_csv(csv_file, index_col=0, parse_dates=0, date_parser=dateparse)
return spot
```
### Generic functions for event study and Plotly plotting
We now focus our efforts on the EventPlot class. Here we shall do our basic analysis. We shall also create functions for creating Plotly traces and layouts that we shall reuse a number of times. The analysis we shall conduct is fairly simple. Given a time series of spot prices and a number of dates, we shall create an event study around these times for that asset. We also include the "Mean" move over all the various dates.
```
# for dates
import datetime
# time series manipulation
import pandas
# for plotting data
import plotly
from plotly.graph_objs import *
class EventPlot:
def event_study(self, spot, dates, pre, post, mean_label = 'Mean'):
# event_study - calculates the asset price moves over windows around event days
#
# spot = price of asset to study
# dates = event days to anchor our event study
# pre = days before the event day to start our study
# post = days after the event day to start our study
#
data_frame = pandas.DataFrame()
# for each date grab spot data the days before and after
for i in range(0, len(dates)):
mid_index = spot.index.searchsorted(dates[i])
start_index = mid_index + pre
finish_index = mid_index + post + 1
x = (spot.ix[start_index:finish_index])[spot.columns.values[0]]
data_frame[dates[i]] = x.values
data_frame.index = range(pre, post + 1)
data_frame = data_frame / data_frame.shift(1) - 1 # returns
# add the mean on to the end
data_frame[mean_label] = data_frame.mean(axis=1)
data_frame = 100.0 * (1.0 + data_frame).cumprod() # index
data_frame.ix[pre,:] = 100
return data_frame
```
We write a function to convert dates represented in a string format to Python format.
```
def parse_dates(self, str_dates):
# parse_dates - parses string dates into Python format
#
# str_dates = dates to be parsed in the format of day/month/year
#
dates = []
for d in str_dates:
dates.append(datetime.datetime.strptime(d, '%d/%m/%Y'))
return dates
EventPlot.parse_dates = parse_dates
```
Our next focus is on the Plotly functions which create a layout. This enables us to specify axes labels, the width and height of the final plot and so on. We could of course add further properties into it.
```
def create_layout(self, title, xaxis, yaxis, width = -1, height = -1):
# create_layout - populates a layout object
# title = title of the plot
# xaxis = xaxis label
# yaxis = yaxis label
# width (optional) = width of plot
# height (optional) = height of plot
#
layout = Layout(
title = title,
xaxis = plotly.graph_objs.XAxis(
title = xaxis,
showgrid = False
),
yaxis = plotly.graph_objs.YAxis(
title= yaxis,
showline = False
)
)
if width > 0 and height > 0:
layout['width'] = width
layout['height'] = height
return layout
EventPlot.create_layout = create_layout
```
Earlier, in the DataDownloader class, our output was Pandas based dataframes. Our convert_df_plotly function will convert each series of a Pandas dataframe into Plotly traces. Along the way, we shall add various properties such as markers with varying levels of opacity, graduated coloring of lines (which uses colorlover) and so on.
```
def convert_df_plotly(self, dataframe, axis_no = 1, color_def = ['default'],
special_line = 'Mean', showlegend = True, addmarker = False, gradcolor = None):
# convert_df_plotly - converts a Pandas data frame to Plotly format for line plots
# dataframe = data frame due to be converted
# axis_no = axis for plot to be drawn (default = 1)
# special_line = make lines named this extra thick
# color_def = color scheme to be used (default = ['default']), colour will alternate in the list
# showlegend = True or False to show legend of this line on plot
# addmarker = True or False to add markers
# gradcolor = Create a graduated color scheme for the lines
#
# Also see http://nbviewer.ipython.org/gist/nipunreddevil/7734529 for converting dataframe to traces
# Also see http://moderndata.plot.ly/color-scales-in-ipython-notebook/
x = dataframe.index.values
traces = []
# will be used for the opacity of the markers
increments = 0.95 / float(len(dataframe.columns))
if gradcolor is not None:
try:
import colorlover as cl
color_def = cl.scales[str(len(dataframe.columns))]['seq'][gradcolor]
except:
print('Check colorlover installation...')
i = 0
for key in dataframe:
scatter = plotly.graph_objs.Scatter(
x = x,
y = dataframe[key].values,
name = key,
xaxis = 'x' + str(axis_no),
yaxis = 'y' + str(axis_no),
showlegend = showlegend)
# only apply color/marker properties if not "default"
if color_def[i % len(color_def)] != "default":
if special_line in str(key):
# special case for lines labelled "mean"
# make line thicker
scatter['mode'] = 'lines'
scatter['line'] = plotly.graph_objs.Line(
color = color_def[i % len(color_def)],
width = 2
)
else:
line_width = 1
# set properties for the markers which change opacity
# for markers make lines thinner
if addmarker:
opacity = 0.05 + (increments * i)
scatter['mode'] = 'markers+lines'
scatter['marker'] = plotly.graph_objs.Marker(
color=color_def[i % len(color_def)], # marker color
opacity = opacity,
size = 5)
line_width = 0.2
else:
scatter['mode'] = 'lines'
scatter['line'] = plotly.graph_objs.Line(
color = color_def[i % len(color_def)],
width = line_width)
i = i + 1
traces.append(scatter)
return traces
EventPlot.convert_df_plotly = convert_df_plotly
```
### UK election analysis
We've now created several generic functions for downloading data, doing an event study and also for helping us out with plotting via Plotly. We now start work on the ukelection.py script, for pulling it all together. As a very first step we need to provide credentials for Plotly (you can get your own Plotly key and username [here](https://plot.ly/python/getting-started/)).
```
# for time series/maths
import pandas
# for plotting data
import plotly
import plotly.plotly as py
from plotly.graph_objs import *
def ukelection():
# Learn about API authentication here: https://plot.ly/python/getting-started
# Find your api_key here: https://plot.ly/settings/api
plotly_username = "thalesians"
plotly_api_key = "XXXXXXXXX"
plotly.tools.set_credentials_file(username=plotly_username, api_key=plotly_api_key)
```
Let's download our market data that we need (GBP/USD spot data) using the DataDownloader class. As a default, I've opted to use Bloomberg data. You can try other currency pairs or markets (for example FTSE), to compare results for the event study. Note that obviously each data vendor will have a different ticker in their system for what could well be the same asset. With FX, care must be taken to know which close the vendor is snapping. As a default we have opted for BGN, which for GBP/USD is the NY close value.
```
ticker = 'GBPUSD' # will use in plot titles later (and for creating Plotly URL)
##### download market GBP/USD data from Quandl, Bloomberg or CSV file
source = "Bloomberg"
# source = "Quandl"
# source = "CSV"
csv_file = None
event_plot = EventPlot()
data_downloader = DataDownloader()
start_date = event_plot.parse_dates(['01/01/1975'])
if source == 'Quandl':
vendor_ticker = "BOE/XUDLUSS"
elif source == 'Bloomberg':
vendor_ticker = 'GBPUSD BGN Curncy'
elif source == 'CSV':
vendor_ticker = 'GBPUSD'
csv_file = 'D:/GBPUSD.csv'
spot = data_downloader.download_time_series(vendor_ticker, ticker, start_date[0], source, csv_file = csv_file)
```
The most important part of the study is getting the historical UK election dates! We can obtain these from Wikipedia. We then convert them into Python format. We also need to filter the UK election dates to those for which we have spot data available.
```
labour_wins = ['28/02/1974', '10/10/1974', '01/05/1997', '07/06/2001', '05/05/2005']
conservative_wins = ['03/05/1979', '09/06/1983', '11/06/1987', '09/04/1992', '06/05/2010']
# convert to more easily readable format
labour_wins_d = event_plot.parse_dates(labour_wins)
conservative_wins_d = event_plot.parse_dates(conservative_wins)
# only takes those elections where we have data
labour_wins_d = [d for d in labour_wins_d if d > spot.index[0].to_pydatetime()]
conservative_wins_d = [d for d in conservative_wins_d if d > spot.index[0].to_pydatetime()]
spot.index.name = 'Date'
```
We then call our event study function in EventPlot on our spot data, which comprises the 20 days before through the 20 days after each UK general election. We shall plot these lines later.
```
# number of days before and after for our event study
pre = -20
post = 20
# calculate spot path during Labour wins
labour_wins_spot = event_plot.event_study(spot, labour_wins_d, pre, post, mean_label = 'Labour Mean')
# calculate spot path during Conservative wins
conservative_wins_spot = event_plot.event_study(spot, conservative_wins_d, pre, post, mean_label = 'Conservative Mean')
```
Define our xaxis and yaxis labels, as well as our source, which we shall later include in the title.
```
##### Create separate plots of price action during Labour and Conservative wins
xaxis = 'Days'
yaxis = 'Index'
source_label = "Source: @thalesians/BBG/Wikipedia"
```
We're finally ready for our first plot! We shall plot GBP/USD moves over Labour election wins, using the default palette, and then we shall embed it into the notebook, using the URL given to us by the Plotly website.
```
###### Plot market reaction during Labour UK election wins
###### Using default color scheme
title = ticker + ' during UK gen elect - Lab wins' + '<BR>' + source_label
fig = Figure(data=event_plot.convert_df_plotly(labour_wins_spot),
layout=event_plot.create_layout(title, xaxis, yaxis)
)
py.iplot(fig, filename='labour-wins-' + ticker)
```
The "iplot" function will send it to Plotly's server (provided we have all the dependencies installed).
Alternatively, we could embed the HTML as an image, which we have taken from the Plotly website. Note this approach will yield a static image which is fetched from Plotly's servers. It also possible to write the image to disk. Later we shall show the embed function.
<div>
<a href="https://plot.ly/~thalesians/244/" target="_blank" title="GBPUSD during UK gen elect - Lab wins<br>Source: @thalesians/BBG/Wikipedia" style="display: block; text-align: center;"><img src="https://plot.ly/~thalesians/244.png" alt="GBPUSD during UK gen elect - Lab wins<br>Source: @thalesians/BBG/Wikipedia" style="max-width: 100%;" onerror="this.onerror=null;this.src='https://plot.ly/404.png';" /></a>
<script data-plotly="thalesians:244" src="https://plot.ly/embed.js" async></script>
</div>
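As an aside, if we also wanted the static image written to disk rather than just embedded, the legacy Plotly cloud API used throughout this post offered `py.image.save_as`; a minimal sketch (assuming that function is available in your installed version, with a made-up output filename) would be:
```
# hedged sketch: ask Plotly's server to render the figure and save a PNG locally
# (requires the plotly.plotly cloud API and the credentials set earlier)
py.image.save_as(fig, filename='labour-wins-' + ticker + '.png')
```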
We next plot GBP/USD over Conservative wins. In this instance, however, we use a graduated 'Blues' color scheme, given obviously that blue is the color of the Conservative party in the UK!
```
###### Plot market reaction during Conservative UK election wins
###### Using varying shades of blue for each line (helped by colorlover library)
title = ticker + ' during UK gen elect - Con wins ' + '<BR>' + source_label
# also apply graduated color scheme of blues (from light to dark)
# see http://moderndata.plot.ly/color-scales-in-ipython-notebook/ for details on colorlover package
# which allows you to set scales
fig = Figure(data=event_plot.convert_df_plotly(conservative_wins_spot, gradcolor='Blues', addmarker=False),
layout=event_plot.create_layout(title, xaxis, yaxis),
)
plot_url = py.iplot(fig, filename='conservative-wins-' + ticker)
```
Embed the chart into the document using "embed". This essentially embeds the JavaScript code necessary to make it interactive.
```
import plotly.tools as tls
tls.embed("https://plot.ly/~thalesians/245")
```
Our final plot will consist of three subplots: Labour wins, Conservative wins, and the average moves for both. We also add a grid and a grey background for each plot.
```
##### Plot market reaction during Conservative UK election wins
##### create a plot consisting of 3 subplots (from left to right)
##### 1. Labour wins, 2. Conservative wins, 3. Conservative/Labour mean move
# create a dataframe which grabs the mean from the respective Lab & Con election wins
mean_wins_spot = pandas.DataFrame()
mean_wins_spot['Labour Mean'] = labour_wins_spot['Labour Mean']
mean_wins_spot['Conservative Mean'] = conservative_wins_spot['Conservative Mean']
fig = plotly.tools.make_subplots(rows=1, cols=3)
# apply different color scheme (red = Lab, blue = Con)
# also add markers, which will have varying levels of opacity
fig['data'] += Data(
event_plot.convert_df_plotly(conservative_wins_spot, axis_no=1,
color_def=['blue'], addmarker=True) +
event_plot.convert_df_plotly(labour_wins_spot, axis_no=2,
color_def=['red'], addmarker=True) +
event_plot.convert_df_plotly(mean_wins_spot, axis_no=3,
color_def=['red', 'blue'], addmarker=True, showlegend = False)
)
fig['layout'].update(title=ticker + ' during UK gen elects by winning party ' + '<BR>' + source_label)
# use the scheme from https://plot.ly/python/bubble-charts-tutorial/
# can use dict approach, rather than specifying each separately
axis_style = dict(
gridcolor='#FFFFFF', # white grid lines
ticks='outside', # draw ticks outside axes
ticklen=8, # tick length
tickwidth=1.5 # and width
)
# create the various axes for the three separate charts
fig['layout'].update(xaxis1=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis1=plotly.graph_objs.YAxis(axis_style, title=yaxis))
fig['layout'].update(xaxis2=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis2=plotly.graph_objs.YAxis(axis_style))
fig['layout'].update(xaxis3=plotly.graph_objs.XAxis(axis_style, title=xaxis))
fig['layout'].update(yaxis3=plotly.graph_objs.YAxis(axis_style))
fig['layout'].update(plot_bgcolor='#EFECEA') # set plot background to grey
plot_url = py.iplot(fig, filename='labour-conservative-wins-'+ ticker + '-subplot')
```
This time we use "embed", which grab the plot from Plotly's server, we did earlier (given we have already uploaded it).
```
import plotly.tools as tls
tls.embed("https://plot.ly/~thalesians/246")
```
<B>That's about it!</B> I hope the code I've written proves fruitful for creating some very cool Plotly plots and also for doing some very timely analysis ahead of the UK general election! I hope this will be the first of many blogs on using Plotly data.
The analysis in this blog is based on a report I wrote for Thalesians, a quant finance thinktank. If you are interested in getting access to the full copy of the report (Thalesians: My kingdom for a vote - The definitive quant guide to UK general elections), feel free to e-mail me at <b>[email protected]</b> or tweet me <b>@thalesians</b>
## Want to hear more about global macro and UK election developments?
If you're interested in FX and the UK general election, come to our Thalesians panel in London on April 29th 2015 at 7.30pm in Canary Wharf, which will feature Eric Burroughs (Reuters - FX Buzz Editor), Mark Cudmore (Bloomberg - First Word EM Strategist), Jordan Rochester (Nomura - FX strategist), Jeremy Wilkinson-Smith (Independent FX trader) and myself as the moderator. Tickets are available [here](http://www.meetup.com/thalesians/events/221147156/)
## Biography
<b>Saeed Amen</b> is the managing director and co-founder of the Thalesians. He has a decade of experience creating and successfully running systematic trading models at Lehman Brothers, Nomura and now at the Thalesians. Independently, he runs a systematic trading model with proprietary capital. He is the author of Trading Thalesians – What the ancient world can teach us about trading today (Palgrave Macmillan). He graduated with a first class honours master’s degree from Imperial College in Mathematics & Computer Science. He is also a fan of Python and has written an extensive library for financial market backtesting called PyThalesians.
<BR>
Follow the Thalesians on Twitter @thalesians and get my book on Amazon [here](http://www.amazon.co.uk/Trading-Thalesians-Saeed-Amen/dp/113739952X)
All the code here is available to download from the [Thalesians GitHub page](https://github.com/thalesians/pythalesians)
```
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install publisher --upgrade
import publisher
publisher.publish(
'ukelectionbbg.ipynb', 'ipython-notebooks/ukelectionbbg/', 'Plotting GBP/USD price action around UK general elections',
'Create interactive graphs with market data, IPython Notebook and Plotly', name='Plot MP Action in GBP/USD around UK General Elections')
```
# Amazon SageMaker - Debugging with custom rules
[Amazon SageMaker](https://aws.amazon.com/sagemaker/) is a managed platform to build, train and host machine learning models. Amazon SageMaker Debugger is a new feature which offers the capability to debug machine learning models during training by identifying and detecting problems with the models in near real-time.
In this notebook, we'll show you how to use a custom rule to monitor your training job, all through a tf.keras ResNet example.
## How does Amazon SageMaker Debugger work?
Amazon SageMaker Debugger lets you go beyond just looking at scalars like losses and accuracies during training and gives you full visibility into all tensors 'flowing through the graph' during training. Furthermore, it helps you monitor your training in near real-time using rules and provides you with alerts once it has detected an inconsistency in the training flow.
### Concepts
* **Tensors**: These represent the state of the training network at intermediate points during its execution
* **Debug Hook**: Hook is the construct with which Amazon SageMaker Debugger looks into the training process and captures the tensors requested at the desired step intervals
* **Rule**: A logical construct, implemented as Python code, which helps analyze the tensors captured by the hook and report anomalies, if any
With these concepts in mind, let's understand the overall flow of things that Amazon SageMaker Debugger uses to orchestrate debugging.
### Saving tensors during training
The tensors captured by the debug hook are stored in the S3 location specified by you. There are two ways you can configure Amazon SageMaker Debugger to save tensors:
#### With no changes to your training script
If you use one of the Amazon SageMaker provided [Deep Learning Containers](https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html) for TensorFlow 1.15, then you don't need to make any changes to your training script for the tensors to be stored. Amazon SageMaker Debugger will use the configuration you provide through the Amazon SageMaker SDK's TensorFlow `Estimator` when creating your job to save the tensors in the fashion you specify. You can review the script we are going to use at [src/tf_keras_resnet_zerocodechange.py](src/tf_keras_resnet_zerocodechange.py). You will note that this is an untouched TensorFlow Keras script which uses the `tf.keras` interface. Please note that Amazon SageMaker Debugger only supports `tf.keras`, `tf.estimator` and `tf.MonitoredSession` interfaces for the zero script change experience. A full description of support is available at [Amazon SageMaker Debugger with TensorFlow](https://github.com/awslabs/sagemaker-debugger/tree/master/docs/tensorflow.md)
#### Orchestrating your script to store tensors
For other containers, you need to make a couple of lines of changes to your training script. Amazon SageMaker Debugger exposes a library, `smdebug`, which allows you to capture these tensors and save them for analysis. It's highly customizable and allows you to save the specific tensors you want at different frequencies and possibly with other configurations. Refer to the [Developer Guide](https://github.com/awslabs/sagemaker-debugger/tree/master/docs) for details on how to use the Amazon SageMaker Debugger library with your choice of framework in your training script. Here we have an example script orchestrated at [src/tf_keras_resnet_byoc.py](src/tf_keras_resnet_byoc.py). In addition to this, you will need to ensure that your container has the `smdebug` library installed in this case, and specify your container image URI when creating the SageMaker Estimator below. Please refer to the [SageMaker Documentation](https://sagemaker.readthedocs.io/en/stable/sagemaker.tensorflow.html) on how to do that.
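To give a feel for what those few lines might look like in a `tf.keras` script, here is a minimal, hedged sketch; the output directory, save interval and collection names are illustrative assumptions rather than values taken from the example scripts above:
```
# illustrative sketch of wiring an smdebug hook into a tf.keras training script
# (out_dir, save_interval and the collections listed here are assumptions)
import smdebug.tensorflow as smd
import tensorflow as tf

hook = smd.KerasHook(
    out_dir="/opt/ml/output/tensors",                  # local path where tensors are written
    save_config=smd.SaveConfig(save_interval=100),     # save every 100 steps
    include_collections=["gradients", "losses"],       # collections our rule will look at
)

model = tf.keras.Sequential([tf.keras.layers.Dense(10, input_shape=(20,))])
optimizer = hook.wrap_optimizer(tf.keras.optimizers.Adam())  # wrapping lets the hook see gradients
model.compile(optimizer=optimizer, loss="mse")
# the hook is then passed as a Keras callback, e.g.:
# model.fit(x_train, y_train, epochs=1, callbacks=[hook])
```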
### Analysis of tensors
Amazon SageMaker Debugger can be configured to run debugging ***Rules*** on the tensors saved from the training job. At a very broad level, a rule is Python code used to detect certain conditions during training. Some of the conditions that a data scientist training an algorithm may care about are monitoring for gradients getting too large or too small, detecting overfitting, and so on. Amazon SageMaker Debugger comes pre-packaged with certain built-in rules. Users can write their own rules using the APIs provided by Amazon SageMaker Debugger through the `smdebug` library. You can also analyze raw tensor data outside of the Rules construct in say, a SageMaker notebook, using these APIs. Please refer [Analysis Developer Guide](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md) for more on these APIs.
## Training TensorFlow Keras models with Amazon SageMaker Debugger
### Amazon SageMaker TensorFlow as a framework
Train a TensorFlow Keras model in this notebook with Amazon SageMaker Debugger enabled and monitor the training job with rules. This is done using the Amazon SageMaker [TensorFlow 1.15.0](https://docs.aws.amazon.com/sagemaker/latest/dg/pre-built-containers-frameworks-deep-learning.html) container as a framework.
```
import boto3
import os
import sagemaker
from sagemaker.tensorflow import TensorFlow
```
Import the libraries needed for the demo of Amazon SageMaker Debugger.
```
from sagemaker.debugger import Rule, DebuggerHookConfig, TensorBoardOutputConfig, CollectionConfig
import smdebug_rulesconfig as rule_configs
```
Now define the entry point for the training script
```
# define the entrypoint script
entrypoint_script='src/tf_keras_resnet_zerocodechange.py'
```
### Setting up the Estimator
Now it's time to set up our SageMaker TensorFlow Estimator. There are new parameters on the estimator to enable your training job for debugging through Amazon SageMaker Debugger. These new parameters are explained below.
* **debugger_hook_config**: This new parameter accepts a local path where you wish your tensors to be written and an S3 URI where you wish your tensors to be uploaded. It also accepts CollectionConfigurations, which specify which tensors will be saved from the training job (a short sketch follows this list).
* **rules**: This new parameter accepts a list of rules you wish to evaluate against the tensors output by this training job. Amazon SageMaker Debugger supports two types of rules:
* **Amazon SageMaker Rules**: These are rules curated by the Amazon SageMaker team and you can choose to evaluate them against your training job.
* **Custom Rules**: You can optionally choose to write your own rule as a Python source file and have it evaluated against your training job. To have SageMaker Debugger evaluate this rule, you would have to provide the S3 location of the rule source and the evaluator image.
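Although this notebook only passes `rules` to the estimator, a `debugger_hook_config` for the first parameter above might look roughly like the following sketch; the S3 path and save interval are placeholders, not values used later in this notebook:
```
# hypothetical hook configuration (bucket name and save interval are placeholders)
hook_config = DebuggerHookConfig(
    s3_output_path="s3://my-bucket/smdebug-tensors",   # where the captured tensors get uploaded
    collection_configs=[
        CollectionConfig(
            name="gradients",
            parameters={"save_interval": "100"}        # save the collection every 100 steps
        )
    ],
)
# it would then be passed to the estimator as debugger_hook_config=hook_config
```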
#### Creating your own custom rule
Let us look at how you can create your custom rule briefly before proceeding to use it with your training job. Please see the [documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md) to learn more about structuring your rules and other related concepts.
##### **Summary of what the custom rule evaluates**
For demonstration purposes, below is a rule that tries to track whether gradients are too large. The custom rule looks at the tensors in the collection "gradients" saved during training and attempts to get the absolute value of the gradients at each step of training. If the mean of the absolute values of the gradients in any step is greater than a specified threshold, the rule is marked as 'triggering'. Let us look at how to structure the rule source.
Any custom rule logic you want to be evaluated should extend the `Rule` interface provided by Amazon SageMaker Debugger
```python
from smdebug.rules.rule import Rule
class CustomGradientRule(Rule):
```
Now implement the class methods for the rule. Doing this allows Amazon SageMaker to understand the intent of the rule and evaluate it against your training tensors.
##### Rule class constructor
In order for Amazon SageMaker to instantiate your rule, your rule class constructor must conform to the following signature.
```python
def __init__(self, base_trial, other_trials, <other parameters>)
```
###### Arguments
- `base_trial (Trial)`: This defines the primary [Trial](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md#trial) that your rule is anchored to. This is an object of class type `Trial`.
- `other_trials (list[Trial])`: *(Optional)* This defines a list of 'other' trials you want your rule to look at. This is useful in the scenarios when you're comparing tensors from the base_trial to tensors from some other trials.
- `<other parameters>`: This is similar to `**kwargs`, in that you can add as many string parameters to your constructor signature as you need. Note that SageMaker can only supply string values for these parameters at runtime (we will see how later; a short constructor sketch follows this list).
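Putting this together for the gradient rule in this example, the constructor might look like the sketch below; the default value for `threshold` is illustrative, and since rule parameters arrive as strings we cast it to a float:
```python
# sketch of a possible constructor for CustomGradientRule (default threshold value is made up)
def __init__(self, base_trial, other_trials=None, threshold="20.0"):
    super().__init__(base_trial, other_trials=other_trials)
    # rule_parameters are supplied as strings at runtime, hence the cast
    self.threshold = float(threshold)
```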
##### Defining the rule logic to be invoked at each step:
This defines the logic to be invoked at each step. Essentially, this is where you decide whether the rule should trigger or not. In this case, you're concerned about the gradients getting too large. So, get the [tensor reduction](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md#reduction_value) "mean" for each step and see if its value is larger than a threshold.
```python
def invoke_at_step(self, step):
for tname in self.base_trial.tensor_names(collection="gradients"):
t = self.base_trial.tensor(tname)
abs_mean = t.reduction_value(step, "mean", abs=True)
if abs_mean > self.threshold:
return True
return False
```
#### Using your custom rule with SageMaker Estimator
Below we create the rule configuration using the `Rule.custom` method, and then pass it to the SageMaker TensorFlow estimator to kick off the job. Note that you need to pass the rule evaluator container image for custom rules. Please refer to the AWS SageMaker documentation to find the image URI for your region. We will soon have this be automatically taken care of by the SageMaker SDK. You can also provide your own image; please refer to [this repository](https://github.com/awslabs/sagemaker-debugger-rules-container) for instructions on how to build such a container.
```
custom_rule = Rule.custom(
name='MyCustomRule', # used to identify the rule
# rule evaluator container image
image_uri='759209512951.dkr.ecr.us-west-2.amazonaws.com/sagemaker-debugger-rule-evaluator:latest',
instance_type='ml.t3.medium', # instance type to run the rule evaluation on
source='rules/my_custom_rule.py', # path to the rule source file
rule_to_invoke='CustomGradientRule', # name of the class to invoke in the rule source file
volume_size_in_gb=30, # EBS volume size required to be attached to the rule evaluation instance
collections_to_save=[CollectionConfig("gradients")],
# collections to be analyzed by the rule. since this is a first party collection we fetch it as above
rule_parameters={
"threshold": "20.0" # this will be used to intialize 'threshold' param in your constructor
}
)
```
Before we proceed and create our training job, let us take a closer look at the parameters used to create the Rule configuration above:
* `name`: This is used to identify this particular rule among the suite of rules you specified to be evaluated.
* `image_uri`: This is the image of the container that has the logic of understanding your custom rule sources and evaluating them against the collections you save in the training job. You can get the list of open sourced SageMaker rule evaluator images [here]()
* `instance_type`: The type of the instance you want to run the rule evaluation on
* `source`: This is the local path or the Amazon S3 URI of your rule source file.
* `rule_to_invoke`: This specifies the particular Rule class implementation in your source file which you want to be evaluated. SageMaker supports only 1 rule to be evaluated at a time in a rule job. Your source file can have multiple Rule class implementations, though.
* `collections_to_save`: This specifies which collections need to be saved for this rule to run. Note that providing a collection here does not necessarily mean the rule will actually use it. You might want to pass such choices to the rule through the next argument, `rule_parameters`.
* `rule_parameters`: This provides the runtime values of the parameter in your constructor. You can still choose to pass in other values which may be necessary for your rule to be evaluated. Any value in this map is available as an environment variable and can be accessed by your rule script using `$<rule_parameter_key>`
You can read more about custom rule evaluation in Amazon SageMaker in this [documentation](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/analysis.md)
Let us now create the estimator and call `fit()` on our estimator to start the training job and rule evaluation job in parallel.
```
estimator = TensorFlow(
role=sagemaker.get_execution_role(),
base_job_name='smdebug-custom-rule-demo-tf-keras',
train_instance_count=1,
train_instance_type='ml.p2.xlarge',
entry_point=entrypoint_script,
framework_version='1.15',
py_version='py3',
train_max_run=3600,
script_mode=True,
## New parameter
rules = [custom_rule]
)
# After calling fit, Amazon SageMaker starts one training job and one rule job for you.
# The rule evaluation status is visible in the training logs
# at regular intervals
estimator.fit(wait=False)
```
## Result
As a result of calling `fit(wait=False)`, two jobs were kicked off in the background. Amazon SageMaker Debugger kicked off a rule evaluation job for our custom gradient logic in parallel with the training job. You can review the status of the rule job as follows.
```
import time
status = estimator.latest_training_job.rule_job_summary()
while status[0]['RuleEvaluationStatus'] == 'InProgress':
status = estimator.latest_training_job.rule_job_summary()
print(status)
time.sleep(10)
```
Once the rule job starts and you see the RuleEvaluationJobArn above, we can see the logs for the rule job in CloudWatch. To do that, we'll use this utility function to get a link to the rule job logs.
```
def _get_rule_job_name(training_job_name, rule_configuration_name, rule_job_arn):
"""Helper function to get the rule job name with correct casing"""
return "{}-{}-{}".format(
training_job_name[:26], rule_configuration_name[:26], rule_job_arn[-8:]
)
def _get_cw_url_for_rule_job(rule_job_name, region):
return "https://{}.console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix".format(region, region, rule_job_name)
def get_rule_jobs_cw_urls(estimator):
training_job = estimator.latest_training_job
training_job_name = training_job.describe()["TrainingJobName"]
rule_eval_statuses = training_job.describe()["DebugRuleEvaluationStatuses"]
result={}
for status in rule_eval_statuses:
if status.get("RuleEvaluationJobArn", None) is not None:
rule_job_name = _get_rule_job_name(training_job_name, status["RuleConfigurationName"], status["RuleEvaluationJobArn"])
result[status["RuleConfigurationName"]] = _get_cw_url_for_rule_job(rule_job_name, boto3.Session().region_name)
return result
get_rule_jobs_cw_urls(estimator)
```
# Bayesian Hierarchical Linear Regression
Author: [Carlos Souza](mailto:[email protected])
Probabilistic Machine Learning models can not only make predictions about future data, but also **model uncertainty**. In areas such as **personalized medicine**, there might be a large amount of data, but there is still a relatively **small amount of data for each patient**. To customize predictions for each person it becomes necessary to **build a model for each person** — with its inherent **uncertainties** — and to couple these models together in a **hierarchy** so that information can be borrowed from other **similar people** [1].
The purpose of this tutorial is to demonstrate how to **implement a Bayesian Hierarchical Linear Regression model using NumPyro**. To motivate the tutorial, I will use [OSIC Pulmonary Fibrosis Progression](https://www.kaggle.com/c/osic-pulmonary-fibrosis-progression) competition, hosted at Kaggle.
## 1. Understanding the task
Pulmonary fibrosis is a disorder with no known cause and no known cure, in which scarring builds up in the lungs. In this competition, we were asked to predict a patient’s severity of decline in lung function. Lung function is assessed based on output from a spirometer, which measures the forced vital capacity (FVC), i.e. the volume of air exhaled.
In medical applications, it is useful to **evaluate a model's confidence in its decisions**. Accordingly, the metric used to rank the teams was designed to reflect **both the accuracy and certainty of each prediction**. It's a modified version of the Laplace Log Likelihood (more details on that later).
Let's explore the data and see what's that all about:
```
!pip install -q numpyro@git+https://github.com/pyro-ppl/numpyro arviz
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
train = pd.read_csv(
"https://gist.githubusercontent.com/ucals/"
"2cf9d101992cb1b78c2cdd6e3bac6a4b/raw/"
"43034c39052dcf97d4b894d2ec1bc3f90f3623d9/"
"osic_pulmonary_fibrosis.csv"
)
train.head()
```
In the dataset, we were provided with a baseline chest CT scan and associated clinical information for a set of patients. A patient has an image acquired at time Week = 0 and has numerous follow up visits over the course of approximately 1-2 years, at which time their FVC is measured. For this tutorial, I will use only the Patient ID, the weeks and the FVC measurements, discarding all the rest. Using only these columns enabled our team to achieve a competitive score, which shows the power of Bayesian hierarchical linear regression models especially when gauging uncertainty is an important part of the problem.
Since this is real medical data, the relative timing of FVC measurements varies widely, as shown in the 3 sample patients below:
```
def chart(patient_id, ax):
data = train[train["Patient"] == patient_id]
x = data["Weeks"]
y = data["FVC"]
ax.set_title(patient_id)
ax = sns.regplot(x, y, ax=ax, ci=None, line_kws={"color": "red"})
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart("ID00007637202177411956430", axes[0])
chart("ID00009637202177434476278", axes[1])
chart("ID00010637202177584971671", axes[2])
```
On average, each of the 176 provided patients made 9 visits at which FVC was measured. The visits happened in specific weeks in the [-12, 133] interval. The decline in lung capacity is very clear. We see, though, that it is very different from patient to patient.
We were asked to predict every patient's FVC measurement for every possible week in the [-12, 133] interval, and the confidence for each prediction. In other words: we were asked to fill a matrix like the one below, and provide a confidence score for each prediction:
<img src="https://i.ibb.co/0Z9kW8H/matrix-completion.jpg" alt="drawing" width="600"/>
The task was perfect for applying Bayesian inference. However, the vast majority of solutions shared by the Kaggle community used discriminative machine learning models, disregarding the fact that most discriminative methods are very poor at providing realistic uncertainty estimates. Because they are typically trained in a manner that optimizes the parameters to minimize some loss criterion (e.g. the predictive error), they do not, in general, encode any uncertainty in either their parameters or the subsequent predictions. Though many methods can produce uncertainty estimates either as a by-product or from a post-processing step, these are typically heuristic based, rather than stemming naturally from a statistically principled estimate of the target uncertainty distribution [2].
## 2. Modelling: Bayesian Hierarchical Linear Regression with Partial Pooling
The simplest possible linear regression, not hierarchical, would assume all FVC decline curves have the same $\alpha$ and $\beta$. That's the **pooled model**. In the other extreme, we could assume a model where each patient has a personalized FVC decline curve, and **these curves are completely unrelated**. That's the **unpooled model**, where each patient has completely separate regressions.
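For contrast, the fully pooled model is only a few lines in NumPyro. The sketch below is illustrative (the function name and the priors mirror the partial-pooling model defined later), not code used in the rest of this tutorial:
```
# illustrative sketch of the fully pooled alternative: one shared α and β for every patient
import numpyro
import numpyro.distributions as dist

def pooled_model(Weeks, FVC_obs=None):
    α = numpyro.sample("α", dist.Normal(0.0, 100.0))
    β = numpyro.sample("β", dist.Normal(0.0, 100.0))
    σ = numpyro.sample("σ", dist.HalfNormal(100.0))
    FVC_est = α + β * Weeks
    with numpyro.plate("data", len(Weeks)):
        numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```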
Here, I'll use the middle ground: **Partial pooling**. Specifically, I'll assume that while $\alpha$'s and $\beta$'s are different for each patient as in the unpooled case, **the coefficients all share similarity**. We can model this by assuming that each individual coefficient comes from a common group distribution. The image below represents this model graphically:
<img src="https://i.ibb.co/H7NgBfR/Artboard-2-2x-100.jpg" alt="drawing" width="600"/>
Mathematically, the model is described by the following equations:
\begin{align}
\mu_{\alpha} &\sim \mathcal{N}(0, 100) \\
\sigma_{\alpha} &\sim |\mathcal{N}(0, 100)| \\
\mu_{\beta} &\sim \mathcal{N}(0, 100) \\
\sigma_{\beta} &\sim |\mathcal{N}(0, 100)| \\
\alpha_i &\sim \mathcal{N}(\mu_{\alpha}, \sigma_{\alpha}) \\
\beta_i &\sim \mathcal{N}(\mu_{\beta}, \sigma_{\beta}) \\
\sigma &\sim \mathcal{N}(0, 100) \\
FVC_{ij} &\sim \mathcal{N}(\alpha_i + t \beta_i, \sigma)
\end{align}
where *t* is the time in weeks. Those are very uninformative priors, but that's ok: our model will converge!
Implementing this model in NumPyro is pretty straightforward:
```
import numpyro
from numpyro.infer import MCMC, NUTS, Predictive
import numpyro.distributions as dist
from jax import random
assert numpyro.__version__.startswith("0.8.0")
def model(PatientID, Weeks, FVC_obs=None):
μ_α = numpyro.sample("μ_α", dist.Normal(0.0, 100.0))
σ_α = numpyro.sample("σ_α", dist.HalfNormal(100.0))
μ_β = numpyro.sample("μ_β", dist.Normal(0.0, 100.0))
σ_β = numpyro.sample("σ_β", dist.HalfNormal(100.0))
unique_patient_IDs = np.unique(PatientID)
n_patients = len(unique_patient_IDs)
with numpyro.plate("plate_i", n_patients):
α = numpyro.sample("α", dist.Normal(μ_α, σ_α))
β = numpyro.sample("β", dist.Normal(μ_β, σ_β))
σ = numpyro.sample("σ", dist.HalfNormal(100.0))
FVC_est = α[PatientID] + β[PatientID] * Weeks
with numpyro.plate("data", len(PatientID)):
numpyro.sample("obs", dist.Normal(FVC_est, σ), obs=FVC_obs)
```
That's all for modelling!
## 3. Fitting the model
A great achievement of Probabilistic Programming Languages such as NumPyro is to decouple model specification and inference. After specifying my generative model, with priors, conditioning statements and data likelihood, I can leave the hard work to NumPyro's inference engine.
Calling it requires just a few lines. Before we do it, let's add a numerical Patient ID for each patient code. That can be easily done with scikit-learn's LabelEncoder:
```
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
train["PatientID"] = le.fit_transform(train["Patient"].values)
FVC_obs = train["FVC"].values
Weeks = train["Weeks"].values
PatientID = train["PatientID"].values
```
Now, calling NumPyro's inference engine:
```
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_samples=2000, num_warmup=2000)
rng_key = random.PRNGKey(0)
mcmc.run(rng_key, PatientID, Weeks, FVC_obs=FVC_obs)
posterior_samples = mcmc.get_samples()
```
## 4. Checking the model
### 4.1. Inspecting the learned parameters
First, let's inspect the parameters learned. To do that, I will use [ArviZ](https://arviz-devs.github.io/arviz/), which perfectly integrates with NumPyro:
```
import arviz as az
data = az.from_numpyro(mcmc)
az.plot_trace(data, compact=True);
```
Looks like our model learned personalized alphas and betas for each patient!
### 4.2. Visualizing FVC decline curves for some patients
Now, let's visually inspect FVC decline curves predicted by our model. We will completely fill in the FVC table, predicting all missing values. The first step is to create a table to fill:
```
pred_template = []
for i in range(train["Patient"].nunique()):
df = pd.DataFrame(columns=["PatientID", "Weeks"])
df["Weeks"] = np.arange(-12, 134)
df["PatientID"] = i
pred_template.append(df)
pred_template = pd.concat(pred_template, ignore_index=True)
```
Predicting the missing values in the FVC table and confidence (sigma) for each value becomes really easy:
```
PatientID = pred_template["PatientID"].values
Weeks = pred_template["Weeks"].values
predictive = Predictive(model, posterior_samples, return_sites=["σ", "obs"])
samples_predictive = predictive(random.PRNGKey(0), PatientID, Weeks, None)
```
Let's now put the predictions together with the true values, to visualize them:
```
df = pd.DataFrame(columns=["Patient", "Weeks", "FVC_pred", "sigma"])
df["Patient"] = le.inverse_transform(pred_template["PatientID"])
df["Weeks"] = pred_template["Weeks"]
df["FVC_pred"] = samples_predictive["obs"].T.mean(axis=1)
df["sigma"] = samples_predictive["obs"].T.std(axis=1)
df["FVC_inf"] = df["FVC_pred"] - df["sigma"]
df["FVC_sup"] = df["FVC_pred"] + df["sigma"]
df = pd.merge(
df, train[["Patient", "Weeks", "FVC"]], how="left", on=["Patient", "Weeks"]
)
df = df.rename(columns={"FVC": "FVC_true"})
df.head()
```
Finally, let's see our predictions for 3 patients:
```
def chart(patient_id, ax):
data = df[df["Patient"] == patient_id]
x = data["Weeks"]
ax.set_title(patient_id)
ax.plot(x, data["FVC_true"], "o")
ax.plot(x, data["FVC_pred"])
ax = sns.regplot(x, data["FVC_true"], ax=ax, ci=None, line_kws={"color": "red"})
ax.fill_between(x, data["FVC_inf"], data["FVC_sup"], alpha=0.5, color="#ffcd3c")
ax.set_ylabel("FVC")
f, axes = plt.subplots(1, 3, figsize=(15, 5))
chart("ID00007637202177411956430", axes[0])
chart("ID00009637202177434476278", axes[1])
chart("ID00011637202177653955184", axes[2])
```
The results are exactly what we expected to see! Highlight observations:
- The model adequately learned Bayesian Linear Regressions! The orange line (learned predicted FVC mean) is very much in line with the red line (deterministic linear regression). But most importantly: it learned to predict uncertainty, shown in the light orange region (one sigma above and below the mean FVC line)
- The model predicts higher uncertainty where the data points are more dispersed (1st and 3rd patients). Conversely, where the points are closely grouped together (2nd patient), the model predicts higher confidence (a narrower light orange region)
- Finally, in all patients, we can see that the uncertainty grows as we look further into the future: the light orange region widens as the number of weeks grows!
### 4.3. Computing the modified Laplace Log Likelihood and RMSE
As mentioned earlier, the competition was evaluated on a modified version of the Laplace Log Likelihood. In medical applications, it is useful to evaluate a model's confidence in its decisions. Accordingly, the metric is designed to reflect both the accuracy and certainty of each prediction.
For each true FVC measurement, we predicted both an FVC and a confidence measure (standard deviation $\sigma$). The metric was computed as:
\begin{align}
\sigma_{clipped} &= max(\sigma, 70) \\
\delta &= min(|FVC_{true} - FVC_{pred}|, 1000) \\
metric &= -\dfrac{\sqrt{2}\delta}{\sigma_{clipped}} - \ln(\sqrt{2} \sigma_{clipped})
\end{align}
The error was thresholded at 1000 ml to avoid large errors adversely penalizing results, while the confidence values were clipped at 70 ml to reflect the approximate measurement uncertainty in FVC. The final score was calculated by averaging the metric across all (Patient, Week) pairs. Note that metric values will be negative and higher is better.
Next, we calculate the metric and RMSE:
```
y = df.dropna()
rmse = ((y["FVC_pred"] - y["FVC_true"]) ** 2).mean() ** (1 / 2)
print(f"RMSE: {rmse:.1f} ml")
sigma_c = y["sigma"].values
sigma_c[sigma_c < 70] = 70
delta = (y["FVC_pred"] - y["FVC_true"]).abs()
delta[delta > 1000] = 1000
lll = -np.sqrt(2) * delta / sigma_c - np.log(np.sqrt(2) * sigma_c)
print(f"Laplace Log Likelihood: {lll.mean():.4f}")
```
What do these numbers mean? It means if you adopted this approach, you would **outperform most of the public solutions** in the competition. Curiously, the vast majority of public solutions adopt a standard deterministic Neural Network, modelling uncertainty through a quantile loss. **Most of the people still adopt a frequentist approach**.
**Uncertainty** for single predictions becomes more and more important in machine learning and is often a requirement. **Especially when the consequences of a wrong prediction are high**, we need to know what the probability distribution of an individual prediction is. For perspective, Kaggle just launched a new competition sponsored by Lyft, to build motion prediction models for self-driving vehicles. "We ask that you predict a few trajectories for every agent **and provide a confidence score for each of them**."
Finally, I hope the great work done by Pyro/NumPyro developers help democratize Bayesian methods, empowering an ever growing community of researchers and practitioners to create models that can not only generate predictions, but also assess uncertainty in their predictions.
## References
1. Ghahramani, Z. Probabilistic machine learning and artificial intelligence. Nature 521, 452–459 (2015). https://doi.org/10.1038/nature14541
2. Rainforth, Thomas William Gamlen. Automating Inference, Learning, and Design Using Probabilistic Programming. University of Oxford, 2017.
# 1- Importing libraries
```
import ast
import json
import requests
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib.ticker import StrMethodFormatter
from matplotlib.dates import DateFormatter
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation, Dense, Dropout, LSTM
from sklearn import metrics
```
# 2- Getting real-time cryptocurrency data
```
#!/usr/bin/python
# -*- coding: utf-8 -*-
import requests
def run_query(query): # A simple function to use requests.post to make the API call.
headers = {'X-API-KEY': 'BQYjLXSsm32NnV6FM4eudu9xYt2L3AsW'}
request = requests.post('https://graphql.bitquery.io/',
json={'query': query}, headers=headers)
if request.status_code == 200:
return request.json()
else:
raise Exception('Query failed and return code is {}. {}'.format(request.status_code,
query))
# The GraphQL query
query = """
query
{
ethereum(network: ethereum) {
dexTrades(
options: {limit: 100000, asc: "timeInterval.minute"}
date: {since: "2021-04-21"}
exchangeName: {is: "Uniswap"}
baseCurrency: {is: "0xdac17f958d2ee523a2206206994597c13d831ec7"}
quoteCurrency: {is: "0xc02aaa39b223fe8d0a0e5c4f27ead9083c756cc2"}
) {
timeInterval {
minute(count: 5)
}
baseCurrency {
symbol
address
}
baseAmount
quoteCurrency {
symbol
address
}
quoteAmount
trades: count
quotePrice
maximum_price: quotePrice(calculate: maximum)
minimum_price: quotePrice(calculate: minimum)
open_price: minimum(of: block, get: quote_price)
close_price: maximum(of: block, get: quote_price)
}
}
}
"""
result = run_query(query) # Execute the query
data=pd.DataFrame(result['data']['ethereum']['dexTrades'])
data.tail(2)
```
# 3- Data cleaning
```
data.isnull().sum()
time=[]
for x in range(0, data.shape[0]):
time.append(data['timeInterval'].iloc[x]['minute'])
data['timeInterval']= time
data.head(2)
type(data['close_price'].iloc[0])
data['close_price']= data['close_price'].apply(lambda x: float(x))
type(data['close_price'].iloc[0])
```
# 4- Setting time as index
```
data=data.set_index('timeInterval')
data.head(2)
```
# 5- Converting time to timestamp
```
type(data.index[0])
data.index=pd.to_datetime(data.index)
type(data.index[0])
data.shape
```
# 6- Splitting train & test sets
```
def train_test_split(df, test_size):
split = df.shape[0] - int(test_size * df.shape[0])
train_set = df.iloc[:split]
test_set = df.iloc[split:]
return train_set, test_set
train_set, test_set =train_test_split(data, 0.3) #checked test size 0.2 but the result for 0.3 is better
print('train_set.shape: ', train_set.shape)
print('test_set.shape: ', test_set.shape)
```
# 7- Plotting train & test sets
```
plt.figure(figsize=(13,7))
train_set['close_price'].plot(color='b')
test_set['close_price'].plot(color='r')
plt.xlabel('Time', fontsize=14)
plt.ylabel('Close Price ', fontsize=14)
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.6f}'))
plt.legend(['Train', 'Test'], loc='best',fontsize=14 )
plt.show()
```
# 9- Normalizing data- zero scaling
```
def zero_scaling(df):
return df / df.iloc[0] - 1
def sliding_window(df, len_window, zero):
window = []
for a in range(df.shape[0] - len_window):
sub = df[a: (a + len_window)].copy()
if zero:
sub = zero_scaling(sub)
window.append(sub.values)
return np.array(window)
def prepare_data(df, column, len_window, zero):
train_data = train_set[[column]]
test_data = test_set[[column]]
X_train = sliding_window(train_data, len_window, zero)
X_test = sliding_window(test_data, len_window, zero)
y_train = train_data[column][len_window:].values
y_test = test_data[column][len_window:].values
if zero:
y_train = y_train / train_data[column][:-len_window].values - 1
y_test = y_test / test_data[column][:-len_window].values - 1
return train_data, test_data, X_train, X_test, y_train, y_test
train_data, test_data, X_train, X_test, y_train, y_test = prepare_data(data, 'close_price', len_window=5, zero=True)
X_train.shape
```
# 10- Building LSTM model- 2 layers
```
model_1 = Sequential()
#use input_shape (tuple of integers) when using this layer as the first layer in a model
model_1.add(LSTM(units=100, input_shape=(X_train.shape[1], X_train.shape[2])) )
model_1.add(Dropout(0.2))
model_1.add(Dense(units=1)) # number of neurons in the output layer
model_1.add(Activation('linear')) # adding the activation as a separate layer gives a better result
model_1.compile(loss='mse', optimizer='adam')
# Fitting to the training set
model_1.fit(X_train,y_train,epochs=30,batch_size=32)
pd.DataFrame(model_1.history.history).plot(figsize=(8,6))
plt.xlabel('Epoch', fontsize=12)
plt.ylabel('Loss', fontsize=12)
plt.title('Training Loss Per Epoch', fontsize=14)
plt.show()
prediction_1=model_1.predict(X_test).squeeze() # use squeeze to convert to 1d array
assert (len(prediction_1)==len(y_test))
plt.figure(figsize=(8,5))
plt.plot(y_test, y_test, color='b')
plt.scatter(y_test, prediction_1, color='r')
plt.xlabel('y_test', fontsize=12)
plt.ylabel('Prediction', fontsize=12)
plt.title('Close Price Prediction, 2-Layer Model, zero scaling', fontsize=14)
plt.show()
print('Mean Absolute Error: ', metrics.mean_absolute_error(y_test, prediction_1))
predicted_close_price_1= pd.DataFrame(data=(prediction_1 + 1) * (test_data['close_price'][:-5].values) , index=test_data[5:].index ,columns=['predicted_close_price'] )
predicted_close_price_1;
merged_1=pd.merge(test_data, predicted_close_price_1, on='timeInterval', how='left')
merged_1[5:]
merged_1.isnull().sum()
plt.figure(figsize=(13,7))
merged_1['close_price'].plot(color='r')
merged_1['predicted_close_price'].plot(color='g')
plt.title('Close Price Prediction, 2-Layer Model, Zero Scaling',fontsize=16)
plt.xlabel('Time', fontsize=13)
plt.ylabel('Close Price', fontsize=13)
plt.legend(['Actual Close Price', 'Predicted Close Price'], loc='best',fontsize=13)
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.6f}'))
plt.show()
```
# 11- Predicting on brand new data
```
#size of the data we use to predict should always be at least one unit bigger than window_len
from random import randint
def rand(len_window, df):
return randint(len_window + 1 , df.shape[0])
random_shape=rand(5, data)
random_shape
new=data[['close_price']].iloc[0: random_shape]
sliding_window(new, 5, True);
prediction=model_1.predict(sliding_window(new, 5, True)).squeeze()
assert(len(prediction)==len( new['close_price'][:-5]))
predicted_close_price= pd.DataFrame(data=(prediction + 1) * (new['close_price'][:-5].values) , index=new[5:].index ,columns=['predicted close'] )
pd.merge(new, predicted_close_price, on='timeInterval', how='left')[5:]
```
# 12- Backup scenarios
### 12-1- LSTM model- 6 layers + zero scaling
```
# The LSTM architecture
model_2 = Sequential()
# First LSTM layer with Dropout regularisation
# default activation == 'tanh'
# default recurrent_activation == 'sigmoid'
# return_sequences: Boolean. Whether to return the last output in the output sequence, or the full sequence. Default: False.
model_2.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1],X_train.shape[2])))
model_2.add(Dropout(0.2))
# Second LSTM layer
model_2.add(LSTM(units=50, return_sequences=True))
model_2.add(Dropout(0.2))
# Third LSTM layer
model_2.add(LSTM(units=50, return_sequences=True))
model_2.add(Dropout(0.2))
# Fourth LSTM layer
model_2.add(LSTM(units=50))
model_2.add(Dropout(0.2))
# The output layer
model_2.add(Dense(units=1))
# Compiling the RNN
model_2.compile(optimizer='rmsprop',loss='mean_squared_error')
# Fitting to the training set
model_2.fit(X_train,y_train,epochs=30,batch_size=32)
pd.DataFrame(model_2.history.history).plot(figsize=(8,6))
plt.xlabel('Epoch', fontsize=12)
plt.ylabel('Loss', fontsize=12)
plt.title('Training Loss Per Epoch', fontsize=14)
plt.show()
prediction_2=model_2.predict(X_test).squeeze() # use squeeze to convert to 1d array
assert (len(prediction_2)==len(y_test))
plt.figure(figsize=(8,5))
plt.plot(y_test, y_test, color='b')
plt.scatter(y_test, prediction_2, color='r')
plt.xlabel('y_test', fontsize=12)
plt.ylabel('Prediction', fontsize=12)
plt.title('Close Price Prediction, 6-Layer Model, zero scaling', fontsize=14)
plt.show()
print('Mean Absolute Error: ', metrics.mean_absolute_error(y_test, prediction_2))
predicted_close_price_2= pd.DataFrame(data=(prediction_2 + 1) * (test_data['close_price'][:-5].values) , index=test_data[5:].index ,columns=['predicted_close_price'] )
predicted_close_price_2;
merged_2=pd.merge(test_data, predicted_close_price_2, on='timeInterval', how='left')
merged_2;
plt.figure(figsize=(13,7))
merged_2['close_price'].plot(color='r')
merged_2['predicted_close_price'].plot(color='g')
plt.title('Close Price Prediction, 6-Layer Model, Zero Scaling',fontsize=16)
plt.xlabel('Time', fontsize=13)
plt.ylabel('Close Price', fontsize=13)
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.6f}'))
plt.legend(['Actual Close Price', 'Predicted Close Price'], loc='best',fontsize=13)
plt.show()
```
### 12-2- LSTM model- 2 layers + MinMaxScaler
```
train_data = train_set[['close_price']]
test_data = test_set[['close_price']]
train_data_values=train_data.values
test_data_values=test_data.values
#Scaling/Normalizing the whole Training set
sc = MinMaxScaler(feature_range=(0,1))
train_data_values_scaled = sc.fit_transform(train_data_values)
# Since LSTMs store long term memory state, we create a data structure with 5 timesteps and 1 output
# So for each element of training set, we have 5 previous training set elements
X_train = []
y_train = []
for i in range(5,train_data_values.shape[0]):
X_train.append(train_data_values_scaled[i-5:i,0]) # window of the 5 values before index i
y_train.append(train_data_values_scaled[i,0]) # the single value at index i, right after the window
X_train, y_train = np.array(X_train), np.array(y_train)
print(X_train.shape)
print(y_train.shape)
# Reshaping X_train for efficient modelling
X_train=X_train.reshape(X_train.shape[0],X_train.shape[1],1)
X_train.shape
model_3 = Sequential()
#use input_shape (tuple of integers) when using this layer as the first layer in a model
model_3.add(LSTM(units=100, input_shape=(X_train.shape[1], X_train.shape[2])) )
model_3.add(Dropout(0.2))
model_3.add(Dense(units=1 ))
model_3.add(Activation('linear'))
model_3.compile(loss='mse', optimizer='adam')
# Fitting to the training set
model_3.fit(X_train,y_train,epochs=30,batch_size=32)
pd.DataFrame(model_3.history.history).plot(figsize=(8,6))
plt.xlabel('Epoch', fontsize=12)
plt.ylabel('Loss', fontsize=12)
plt.title('Training Loss Per Epoch', fontsize=14)
plt.show()
test_data_values_scaled = sc.transform(test_data_values) # we only transform the test set, using the scaler fitted on the training set
X_test = []
y_test = []
for i in range(5,test_set.shape[0]):
X_test.append(test_data_values_scaled[i-5:i,0]) # window of the 5 values before index i
y_test.append(test_data_values_scaled[i,0]) # the single value at index i, right after the window
X_test, y_test = np.array(X_test), np.array(y_test)
X_test.shape
X_test=X_test.reshape(X_test.shape[0],X_test.shape[1],1)
X_test.shape
prediction_3=model_3.predict(X_test) #do not use squeeze, otherwise will get error in inverse scaler
assert (len(prediction_3)==len(y_test))
plt.figure(figsize=(8,5))
plt.plot(y_test, y_test, color='b')
plt.scatter(y_test, prediction_3, color='r')
plt.xlabel('y_test', fontsize=12)
plt.ylabel('Prediction', fontsize=12)
plt.title('Close Price Prediction, 2-Layer Model, MinMax scaling', fontsize=14)
plt.show()
print('Mean Absolute Error: ', metrics.mean_absolute_error(y_test, prediction_3))
predicted_close_price_3 = sc.inverse_transform(prediction_3)
predicted_close_price_3= pd.DataFrame(data= predicted_close_price_3, index=test_set[5:].index ,columns=['predicted_close_price'] )
predicted_close_price_3;
merged_3=pd.merge(test_data, predicted_close_price_3, on='timeInterval', how='left')
merged_3
merged_3.isnull().sum()
plt.figure(figsize=(13,7))
merged_3['close_price'].plot(color='r')
merged_3['predicted_close_price'].plot(color='g')
plt.title('Close Price Prediction, 2-Layer Model, MinMax Scaling',fontsize=16)
plt.xlabel('Time', fontsize=13)
plt.ylabel('Close Price', fontsize=13)
plt.gca().yaxis.set_major_formatter(StrMethodFormatter('{x:,.6f}'))
plt.legend(['Actual Close Price', 'Predicted Close Price'], loc='best',fontsize=13)
plt.show()
```
# 13- Conclusion
Based on the Mean Absolute Error values and the Close Price Prediction plots, the 2-layer predictive model trained on data normalized by the zero_scaling function has the best performance.
<a href="https://colab.research.google.com/github/st24hour/tutorial/blob/master/Neural_Style_Transfer_with_Eager_Execution_question.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neural Style Transfer with tf.keras
## Overview
In this tutorial, we will learn how to use deep learning to compose one image in the style of another (ever wished you could paint like Picasso or Van Gogh?). This is known as **neural style transfer**. It is described in Leon A. Gatys' paper, [A Neural Algorithm of Artistic Style](https://arxiv.org/abs/1508.06576), which is well worth reading.
So, what is neural style transfer?
Neural style transfer is an optimization technique that takes three images: a **content** image, a **style reference** image (such as an artwork by a famous painter), and the **input image** you want to stylize. It blends them so that the input image is transformed to look like the content image, but "painted" in the style of the style image.
For example, let's take this image of a turtle and Katsushika Hokusai's *The Great Wave off Kanagawa*:
<img src="https://github.com/tensorflow/models/blob/master/research/nst_blogpost/Green_Sea_Turtle_grazing_seagrass.jpg?raw=1" alt="Drawing" style="width: 200px;"/>
<img src="https://github.com/tensorflow/models/blob/master/research/nst_blogpost/The_Great_Wave_off_Kanagawa.jpg?raw=1" alt="Drawing" style="width: 200px;"/>
[Image of Green Sea Turtle](https://commons.wikimedia.org/wiki/File:Green_Sea_Turtle_grazing_seagrass.jpg)
-By P.Lindgren [CC BY-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0)], from Wikimedia Common
What would it look like if Hokusai decided to paint the turtle in his own style? Something like this?
<img src="https://github.com/tensorflow/models/blob/master/research/nst_blogpost/wave_turtle.png?raw=1" alt="Drawing" style="width: 500px;"/>
Neural style transfer is a fun and interesting technique that showcases the capabilities and internal representations of neural networks.
The principle behind neural style transfer is to define two distance functions: one that describes how different the content of two images is, $L_{content}$, and one that describes how different the style of two images is, $L_{style}$. Then, given three images (the desired style image, the desired content image, and the input image, initialized with the content image), we transform the input image so as to minimize its content distance from the content image and its style distance from the style image. In summary, we take a base input image, a content image we want to match, and a style image we want to match. We minimize the content and style distances (losses) with backpropagation, creating an image that matches the content of the content image and the style of the style image.
### 다루게 될 개념들:
이 튜토리얼에서 우리는 실제 경험을 쌓고 다음 개념을 중심으로 실습할 것입니다.
* **Eager Execution** - Operation을 즉각적으로 평가하는 TensorFlow의 imperative programming 환경 사용
* [Learn more about eager execution](https://www.tensorflow.org/programmers_guide/eager)
* [See it in action](https://www.tensorflow.org/get_started/eager)
* **Using the [Functional API](https://keras.io/getting-started/functional-api-guide/) to define a model** - we will build a model with the Functional API that gives us access to the intermediate activations we need.
* **Leveraging the features of a pretrained model** - we will learn how to use a pretrained model and its feature maps.
* **Implementing custom training loops** - we will look at how to set up an optimizer to minimize a given loss with respect to the input parameters.
### General steps of style transfer:
1. Visualize data
2. Basic Preprocessing/preparing our data
3. Set up loss functions
4. Create model
5. Optimize for loss function
## Setup
### Download Images
```
import os
img_dir = '/tmp/nst'
if not os.path.exists(img_dir):
os.makedirs(img_dir)
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/d/d7/Green_Sea_Turtle_grazing_seagrass.jpg
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/0/0a/The_Great_Wave_off_Kanagawa.jpg
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/b/b4/Vassily_Kandinsky%2C_1913_-_Composition_7.jpg
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/0/00/Tuebingen_Neckarfront.jpg
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/6/68/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg
!wget --quiet -P /tmp/nst/ https://upload.wikimedia.org/wikipedia/commons/thumb/e/ea/Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg
```
### Import and configure modules
```
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['figure.figsize'] = (10,10)
mpl.rcParams['axes.grid'] = False
import numpy as np
from PIL import Image
import time
import functools
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.python.keras.preprocessing import image as kp_image
from tensorflow.python.keras import models
from tensorflow.python.keras import losses
from tensorflow.python.keras import layers
from tensorflow.python.keras import backend as K
```
We will start by enabling [eager execution](https://www.tensorflow.org/guide/eager). Eager execution lets us work through this technique in the clearest and most readable way.
```
"""
Start eager execution
"""
print("Eager execution: {}".format(tf.executing_eagerly()))
# Set up some global values here
content_path = '/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg'
style_path = '/tmp/nst/The_Great_Wave_off_Kanagawa.jpg'
```
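The cell above leaves the actual call as an exercise; a minimal sketch of the intended completion, assuming TensorFlow 1.x as imported above, would be:
```
tf.enable_eager_execution()
```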
## Visualize the input
```
def load_img(path_to_img):
max_dim = 512
img = Image.open(path_to_img)
long = max(img.size)
scale = max_dim/long
img = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)
img = kp_image.img_to_array(img)
# We need to broadcast the image array such that it has a batch dimension
img = np.expand_dims(img, axis=0)
return img
def imshow(img, title=None):
# Remove the batch dimension
out = np.squeeze(img, axis=0)
# Normalize for display
out = out.astype('uint8')
plt.imshow(out)
if title is not None:
plt.title(title)
plt.imshow(out)
```
These are the content and style input images. We hope to "create" an image with the content of the content image, but with the style of the style image.
```
plt.figure(figsize=(10,10))
content = load_img(content_path).astype('uint8')
style = load_img(style_path).astype('uint8')
plt.subplot(1, 2, 1)
imshow(content, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style, 'Style Image')
plt.show()
```
## Prepare the data
Let's create a method that will allow us to load and preprocess our images easily. We perform the same preprocessing as in the VGG training process. VGG networks are trained on images with each channel normalized by `mean = [103.939, 116.779, 123.68]` and with the channels in BGR order.
```
def load_and_process_img(path_to_img):
img = load_img(path_to_img)
img = tf.keras.applications.vgg19.preprocess_input(img)
return img
```
In order to view the output of our optimization, we need to perform the inverse preprocessing step. Furthermore, since the optimized image may take values between $-\infty$ and $\infty$, we must clip it to keep its values in the 0-255 range.
```
def deprocess_img(processed_img):
x = processed_img.copy()
if len(x.shape) == 4:
x = np.squeeze(x, 0)
assert len(x.shape) == 3, ("Input to deprocess image must be an image of "
"dimension [1, height, width, channel] or [height, width, channel]")
if len(x.shape) != 3:
raise ValueError("Invalid input to deprocessing image")
# perform the inverse of the preprocessiing step
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
```
### Define content and style representations
In order to get both the content and style representations of our image, we will look at some intermediate layers within our model. These intermediate layers represent higher-level features. We will use VGG19, a network pretrained for image classification. Its intermediate layers are needed to define the content and style representations of our images. For an input image, we will try to match the corresponding style and content target representations at these intermediate layers.
#### Why intermediate layers?
You may be wondering why these intermediate outputs of a pretrained image classification network allow us to define style and content representations. At a high level, this phenomenon can be explained by the fact that, in order to perform image classification (which this network was trained to do), the network must understand the image. This involves taking the raw image as input pixels and building an internal representation through transformations that turn the raw pixels into a complex understanding of the features present within the image. This is also partly why convolutional neural networks are able to generalize well: they capture the invariances and defining features within classes (e.g., cat vs. dog) that are agnostic to background noise and other nuisances. Thus, somewhere between where the raw image is fed in and the classification label is output, the model serves as a complex feature extractor; by accessing intermediate layers, we are able to describe the content and style of input images.
Specifically, we will pull out the following intermediate layers from the network.
Reference: VGG19 architecture
<img src="https://www.researchgate.net/profile/Clifford_Yang/publication/325137356/figure/fig2/AS:670371271413777@1536840374533/llustration-of-the-network-architecture-of-VGG-19-model-conv-means-convolution-FC-means.jpg" alt="Drawing" style="width: 200px;"/>
```
# Content layer where will pull our feature maps
content_layers = ['block5_conv2']
# Style layer we are interested in
style_layers = ['block1_conv1',
'block2_conv1',
'block3_conv1',
'block4_conv1',
'block5_conv1'
]
num_content_layers = len(content_layers)
num_style_layers = len(style_layers)
```
## Build the Model
We load [VGG19](https://keras.io/applications/#vgg19) and feed our input tensor to the model. This allows us to extract the feature maps (and subsequently the content and style representations) of the content, style, and generated images.
We use VGG19, as in the original paper. In addition, since VGG19 is a relatively simple model compared to ResNet, Inception, etc., its feature maps actually work better for style transfer.
In order to access the intermediate layers corresponding to our style and content feature maps, we use the Keras [**Functional API**](https://keras.io/getting-started/functional-api-guide/) to define our model with the desired output activations.
With the Functional API, defining a model simply involves specifying the inputs and outputs:
`model = Model(inputs, outputs)`
See: [tf.keras.applications.vgg19.VGG19()](https://keras.io/applications/#vgg19)
```
def get_model():
""" Creates our model with access to intermediate layers.
This function will load the VGG19 model and access the intermediate layers.
These layers will then be used to create a new model that will take input image
and return the outputs from these intermediate layers from the VGG model.
Returns:
returns a keras model that takes image inputs and outputs the style and
content intermediate layers.
"""
# Load our model. We load pretrained VGG, trained on imagenet data
"""
Load Imagenet pretrained VGG19 network. You don't need to load FC layers
vgg =
"""
vgg.trainable = False
# Get output layers corresponding to style and content layers
style_outputs = [vgg.get_layer(name).output for name in style_layers]
content_outputs = [vgg.get_layer(name).output for name in content_layers]
model_outputs = style_outputs + content_outputs
# Build model
return models.Model(vgg.input, model_outputs)
```
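The function above leaves loading the network as an exercise. A plausible completion, assuming the standard Keras applications API and skipping the fully connected layers as the docstring suggests, would be:
```
vgg = tf.keras.applications.vgg19.VGG19(include_top=False, weights='imagenet')
```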
In the code above, we load our pretrained image classification network. Then we grab the layers of interest, as defined earlier. We then define a model by setting the model's inputs to an image and its outputs to the outputs of the style and content layers. In other words, we created a model that takes an input image and outputs the content and style intermediate layers.
## Define and create our loss functions (content and style distances)
### Content Loss
Our content loss definition is actually quite simple. We pass the network both the desired content image and our base input image. This returns the intermediate layer outputs (from the layers defined above) of our model. Then we simply take the Euclidean distance between the two intermediate representations of those images.
More formally, the content loss is a function that describes the distance of the content between our output image $x$ and our content image $p$. Let $C_{nn}$ be a pretrained deep convolutional neural network; we will use [VGG19](https://keras.io/applications/#vgg19). Let $X$ be any image; then $C_{nn}(X)$ is the network fed by $X$. Let $F^l_{ij}(x) \in C_{nn}(x)$ and $P^l_{ij}(p) \in C_{nn}(p)$ denote the intermediate feature representations of the network at layer $l$ with inputs $x$ and $p$, respectively. Then we can describe the content distance (loss) formally as: $$L^l_{content}(p, x) = \sum_{i, j} (F^l_{ij}(x) - P^l_{ij}(p))^2$$
We minimize this content loss by performing backpropagation in the usual way. We thus change the initial image until it generates a response at a certain layer (defined in content_layer) similar to that of the original content image.
This can be implemented quite simply. The function takes as input the feature maps at a layer $l$ of a network fed by $x$, our input image, and $p$, our content image, and returns the content distance.
### Computing content loss
In practice, we will add the content losses at each desired layer. This way, each time we feed an input image through the model (which in eager execution is simply `model(input_image)`!), all the content losses through the model will be properly computed, and because we are executing eagerly, all the gradients will be computed as well.
```
def get_content_loss(base_content, target):
return tf.reduce_mean(tf.square(base_content - target))
```
### Style Loss
Computing the style loss is a bit more involved, but follows the same principle; this time we feed the network our base input image and the style image. However, instead of comparing the raw intermediate outputs of the base input image and the style image, we compare the Gram matrices of the two outputs.
Mathematically, we describe the style loss of the base input image $x$ and the style image $a$ as the distance between the style representations (Gram matrices) of these images. We describe the style representation of an image as the correlation between different filter responses, given by the Gram matrix $G^l$, where $G^l_{ij}$ is the inner product between the vectorized feature maps $i$ and $j$. We can see that $G^l_{ij}$, computed over the feature maps of a given image, represents the correlation between feature maps $i$ and $j$.
To generate a style for our base input image, we perform gradient descent from the content image to transform it into an image that matches the style representation of the style image. We do so by minimizing the mean squared distance between the style representations of the style image and the input image. The contribution of each layer to the total style loss is given by:
$$E_l = \frac{1}{4N_l^2M_l^2} \sum_{i,j}(G^l_{ij} - A^l_{ij})^2$$
where $G^l_{ij}$ and $A^l_{ij}$ are the respective style representations of $x$ and $a$ at layer $l$, and $N_l$ denotes the number of feature maps, each of size $M_l = height * width$. Thus, the total style loss is
$$L_{style}(a, x) = \sum_{l \in L} w_l E_l$$
where we weight the contribution of each layer's loss by a factor $w_l$. In our case, we weight each layer equally ($w_l =\frac{1}{|L|}$).
### Total loss
The image we want to generate is one whose $L_{content}$ with respect to the content image and $L_{style}$ with respect to the style image are both small. Thus, the total objective function (loss) is:
$$L_{total}(p, a, x) = \alpha L_{content}(p, x)+\beta L_{style}(a, x)$$
where $\alpha$ and $\beta$ are the weights applied to the content and style losses, respectively.
### Computing style loss
Again, we implement the style loss as a distance metric.
get_style_loss is the function that computes $E_l$.
```
def gram_matrix(input_tensor):
# We make the image channels first
channels = int(input_tensor.shape[-1])
a = tf.reshape(input_tensor, [-1, channels])
n = tf.shape(a)[0]
gram = tf.matmul(a, a, transpose_a=True)
return gram / tf.cast(n, tf.float32)
def get_style_loss(base_style, gram_target):
"""Expects two images of dimension h, w, c"""
# height, width, num filters of each layer
# We scale the loss at a given layer by the size of the feature map and the number of filters
height, width, channels = base_style.get_shape().as_list()
gram_style = gram_matrix(base_style)
return tf.reduce_mean(tf.square(gram_style - gram_target))# / (4. * (channels ** 2) * (width * height) ** 2)
```
## Apply style transfer to our images
### Run Gradient Descent
We use the [Adam](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam) optimizer to minimize our loss. We iteratively update the output image so as to minimize the loss: we don't update the weights associated with the network, but instead we adjust the input image to minimize the loss. In order to do this, we must know how to compute the loss and the gradients.
We will define a little helper function that loads our content and style images, feeds them forward through the network, and outputs the content and style feature representations from our model.
```
def get_feature_representations(model, content_path, style_path):
"""Helper function to compute our content and style feature representations.
This function will simply load and preprocess both the content and style
images from their path. Then it will feed them through the network to obtain
the outputs of the intermediate layers.
Arguments:
model: The model that we are using.
content_path: The path to the content image.
style_path: The path to the style image
Returns:
returns the style features and the content features.
"""
# Load our images in
content_image = load_and_process_img(content_path)
style_image = load_and_process_img(style_path)
# batch compute content and style features
style_outputs = model(style_image)
content_outputs = model(content_image)
# Get the style and content feature representations from our model
style_features = [style_layer[0] for style_layer in style_outputs[:num_style_layers]]
content_features = [content_layer[0] for content_layer in content_outputs[num_style_layers:]]
return style_features, content_features
```
### Computing the loss and gradients
Here we use [**tf.GradientTape**](https://www.tensorflow.org/programmers_guide/eager#computing_gradients) to compute the gradients. It enables automatic differentiation by tracing the operations needed to compute the gradients later. It records the operations during the forward pass, and is then able to compute the gradient of our loss function with respect to the input image for the backward pass.
```
def compute_loss(model, loss_weights, init_image, gram_style_features, content_features):
"""This function will compute the loss total loss.
Arguments:
model: The model that will give us access to the intermediate layers
loss_weights: The weights of each contribution of each loss function.
(style weight, content weight, and total variation weight)
init_image: Our initial base image. This image is what we are updating with
our optimization process. We apply the gradients wrt the loss we are
calculating to this image.
gram_style_features: Precomputed gram matrices corresponding to the
defined style layers of interest.
content_features: Precomputed outputs from defined content layers of
interest.
Returns:
returns the total loss, style loss, content loss, and total variational loss
"""
style_weight, content_weight = loss_weights
# Feed our init image through our model. This will give us the content and
# style representations at our desired layers. Since we're using eager
# our model is callable just like any other function!
model_outputs = model(init_image)
style_output_features = model_outputs[:num_style_layers]
content_output_features = model_outputs[num_style_layers:]
style_score = 0
content_score = 0
# Accumulate style losses from all layers
# Here, we equally weight each contribution of each loss layer
weight_per_style_layer = 1.0 / float(num_style_layers)
for target_style, comb_style in zip(gram_style_features, style_output_features):
style_score += weight_per_style_layer * get_style_loss(comb_style[0], target_style)
# Accumulate content losses from all layers
weight_per_content_layer = 1.0 / float(num_content_layers)
for target_content, comb_content in zip(content_features, content_output_features):
content_score += weight_per_content_layer* get_content_loss(comb_content[0], target_content)
style_score *= style_weight
content_score *= content_weight
# Get total loss
loss = style_score + content_score
return loss, style_score, content_score
```
Computing the gradients is easy:
```
def compute_grads(cfg):
with tf.GradientTape() as tape:
all_loss = compute_loss(**cfg)
# Compute gradients wrt input image
total_loss = all_loss[0]
return tape.gradient(total_loss, cfg['init_image']), all_loss
```
### Optimization loop
[Adam optimizer](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
```
import IPython.display
def run_style_transfer(content_path,
style_path,
num_iterations=1000,
content_weight=1e3,
style_weight=1e-2):
# We don't need to (or want to) train any layers of our model, so we set their
# trainable to false.
model = get_model()
for layer in model.layers:
layer.trainable = False
# Get the style and content feature representations (from our specified intermediate layers)
style_features, content_features = get_feature_representations(model, content_path, style_path)
gram_style_features = [gram_matrix(style_feature) for style_feature in style_features]
# Set initial image
init_image = load_and_process_img(content_path)
init_image = tfe.Variable(init_image, dtype=tf.float32)
# Create our optimizer
opt = tf.train.AdamOptimizer(learning_rate=5, beta1=0.99, epsilon=1e-1)
# For displaying intermediate images
iter_count = 1
# Store our best result
best_loss, best_img = float('inf'), None
# Create a nice config
loss_weights = (style_weight, content_weight)
cfg = {
'model': model,
'loss_weights': loss_weights,
'init_image': init_image,
'gram_style_features': gram_style_features,
'content_features': content_features
}
# For displaying
num_rows = 2
num_cols = 5
display_interval = num_iterations/(num_rows*num_cols)
start_time = time.time()
global_start = time.time()
norm_means = np.array([103.939, 116.779, 123.68])
min_vals = -norm_means
max_vals = 255 - norm_means
imgs = []
for i in range(num_iterations):
grads, all_loss = compute_grads(cfg)
loss, style_score, content_score = all_loss
"""
Apply_gradients
"""
clipped = tf.clip_by_value(init_image, min_vals, max_vals)
init_image.assign(clipped)
end_time = time.time()
if loss < best_loss:
# Update best loss and best image from total loss.
best_loss = loss
best_img = deprocess_img(init_image.numpy())
if i % display_interval== 0:
start_time = time.time()
# Use the .numpy() method to get the concrete numpy array
plot_img = init_image.numpy()
plot_img = deprocess_img(plot_img)
imgs.append(plot_img)
IPython.display.clear_output(wait=True)
IPython.display.display_png(Image.fromarray(plot_img)) # convert the NumPy array to a PIL Image
print('Iteration: {}'.format(i))
print('Total loss: {:.4e}, '
'style loss: {:.4e}, '
'content loss: {:.4e}, '
'time: {:.4f}s'.format(loss, style_score, content_score, time.time() - start_time))
print('Total time: {:.4f}s'.format(time.time() - global_start))
IPython.display.clear_output(wait=True)
plt.figure(figsize=(14,4))
for i,img in enumerate(imgs):
plt.subplot(num_rows,num_cols,i+1)
plt.imshow(img)
plt.xticks([])
plt.yticks([])
return best_img, best_loss
best, best_loss = run_style_transfer(content_path,
style_path, num_iterations=1000)
Image.fromarray(best)
```
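The `Apply_gradients` step inside the training loop above is left as an exercise. With the `tf.train.AdamOptimizer` instance created as `opt`, a plausible completion is:
```
opt.apply_gradients([(grads, init_image)])
```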
## Visualize outputs
We "deprocess" the output image in order to remove the preprocessing that was applied to it.
```
def show_results(best_img, content_path, style_path, show_large_final=True):
plt.figure(figsize=(10, 5))
content = load_img(content_path)
style = load_img(style_path)
plt.subplot(1, 2, 1)
imshow(content, 'Content Image')
plt.subplot(1, 2, 2)
imshow(style, 'Style Image')
if show_large_final:
plt.figure(figsize=(10, 10))
plt.imshow(best_img)
plt.title('Output Image')
plt.show()
show_results(best, content_path, style_path)
```
## Try it on other images
Image of Tuebingen
Photo By: Andreas Praefcke [GFDL (http://www.gnu.org/copyleft/fdl.html) or CC BY 3.0 (https://creativecommons.org/licenses/by/3.0)], from Wikimedia Commons
### Starry night + Tuebingen
```
best_starry_night, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg')
show_results(best_starry_night, '/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg')
```
### Pillars of Creation + Tuebingen
```
best_poc_tubingen, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')
show_results(best_poc_tubingen,
'/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')
```
### Kandinsky Composition 7 + Tuebingen
```
best_kandinsky_tubingen, best_loss = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/Vassily_Kandinsky,_1913_-_Composition_7.jpg')
show_results(best_kandinsky_tubingen,
'/tmp/nst/Tuebingen_Neckarfront.jpg',
'/tmp/nst/Vassily_Kandinsky,_1913_-_Composition_7.jpg')
```
### Pillars of Creation + Sea Turtle
```
best_poc_turtle, best_loss = run_style_transfer('/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg',
'/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')
show_results(best_poc_turtle,
'/tmp/nst/Green_Sea_Turtle_grazing_seagrass.jpg',
'/tmp/nst/Pillars_of_creation_2014_HST_WFC3-UVIS_full-res_denoised.jpg')
```
## Key Takeaways
### What we covered:
* We built several different loss functions and used backpropagation to transform our input image in order to minimize these losses.
* In order to do this, we loaded a **pretrained model** and used its learned feature maps to describe the content and style representations of our images.
* Our main loss functions were primarily computing distances between these different representations.
* We implemented this with a custom model and **eager execution**.
* We built our custom model with the Functional API.
* Eager execution allows us to work dynamically with tensors, using a natural Python control flow.
* We manipulated tensors directly, which makes debugging and working with tensors easier.
* We iteratively updated our image by applying our optimizer's update rules using **tf.gradient**. The optimizer minimized the given loss with respect to the input image.
**[Image of Tuebingen](https://commons.wikimedia.org/wiki/File:Tuebingen_Neckarfront.jpg)**
Photo By: Andreas Praefcke [GFDL (http://www.gnu.org/copyleft/fdl.html) or CC BY 3.0 (https://creativecommons.org/licenses/by/3.0)], from Wikimedia Commons
**[Image of Green Sea Turtle](https://commons.wikimedia.org/wiki/File:Green_Sea_Turtle_grazing_seagrass.jpg)**
By P.Lindgren [CC BY-SA 3.0 (https://creativecommons.org/licenses/by-sa/3.0)], from Wikimedia Commons
# Report
1. Transform the Tuebingen photo into the style of Van Gogh's Starry Night. content_weight=1e3, style_weight=1e-2
2. Transform the Tuebingen photo into the style of Van Gogh's Starry Night. content_weight=1e3, style_weight=1e-0
3. Transform the Tuebingen photo into the style of Van Gogh's Starry Night. content_weight=1e3, style_weight=1e-4
4. Transform the Tuebingen photo into the style of Van Gogh's Starry Night. content_weight=1e1, style_weight=1e-2
5. Transform the Tuebingen photo into the style of Van Gogh's Starry Night. content_weight=1e5, style_weight=1e-2
Q) What are the roles of $\alpha$ (content_weight) and $\beta$ (style_weight)?
#### Note) file paths and names
> Tuebingen: '/tmp/nst/Tuebingen_Neckarfront.jpg'
> Starry Night: '/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg'
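A minimal sketch for experiment 1, using the paths above and the `run_style_transfer` and `show_results` functions defined earlier (the other experiments only change the two weights):
```
best_1, best_loss_1 = run_style_transfer('/tmp/nst/Tuebingen_Neckarfront.jpg',
                                         '/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg',
                                         content_weight=1e3, style_weight=1e-2)
show_results(best_1, '/tmp/nst/Tuebingen_Neckarfront.jpg',
             '/tmp/nst/1024px-Van_Gogh_-_Starry_Night_-_Google_Art_Project.jpg')
```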
|
github_jupyter
|
# Node2Vec representation learning with Stellargraph components
<table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/embeddings/keras-node2vec-embeddings.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/embeddings/keras-node2vec-embeddings.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
This example demonstrates how to apply components from the stellargraph library to perform representation learning via Node2Vec. This uses a Keras implementation of Node2Vec available in stellargraph instead of the reference implementation provided by ``gensim``. This implementation provides flexible interfaces to downstream tasks for end-to-end learning.
<a name="refs"></a>
**References**
[1] Node2Vec: Scalable Feature Learning for Networks. A. Grover, J. Leskovec. ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD), 2016. ([link](https://snap.stanford.edu/node2vec/))
[2] Distributed representations of words and phrases and their compositionality. T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean. In Advances in Neural Information Processing Systems (NIPS), pp. 3111-3119, 2013. ([link](https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf))
[3] word2vec Parameter Learning Explained. X. Rong. arXiv preprint arXiv:1411.2738. 2014 Nov 11. ([link](https://arxiv.org/pdf/1411.2738.pdf))
## Introduction
Following word2vec [2,3], for each (``target``,``context``) node pair $(v_i,v_j)$ collected from random walks, we learn the representation for the target node $v_i$ by using it to predict the existence of context node $v_j$, with the following three-layer neural network.

Node $v_i$'s representation in the hidden layer is obtained by multiplying $v_i$'s one-hot representation in the input layer with the input-to-hidden weight matrix $W_{in}$, which is equivalent to looking up the $i$th row of the input-to-hidden weight matrix $W_{in}$. The existence probability of each node conditioned on node $v_i$ is produced in the output layer, which is obtained by multiplying $v_i$'s hidden-layer representation with the hidden-to-output weight matrix $W_{out}$ followed by a softmax activation. To capture the ``target-context`` relation between $v_i$ and $v_j$, we need to maximize the probability $\mathrm{P}(v_j|v_i)$. However, computing $\mathrm{P}(v_j|v_i)$ is time consuming, as it involves the matrix multiplication between $v_i$'s hidden-layer representation and the hidden-to-output weight matrix $W_{out}$.
To speed up the computation, we adopt the negative sampling strategy [2,3]. For each (``target``, ``context``) node pair, we sample a negative node $v_k$ that is not in $v_i$'s context. To obtain the output, instead of multiplying $v_i$'s hidden-layer representation with the hidden-to-output weight matrix $W_{out}$ followed by a softmax activation, we only calculate the dot products between $v_i$'s hidden-layer representation and the $j$th and $k$th columns of the hidden-to-output weight matrix $W_{out}$, each followed by a sigmoid activation. According to [3], the original objective of maximizing $\mathrm{P}(v_j|v_i)$ can be approximated by minimizing the cross entropy between $v_j$'s and $v_k$'s outputs and their ground-truth labels (1 for $v_j$ and 0 for $v_k$).
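Concretely, with one negative sample per pair, the cross-entropy objective described above reduces to the following per-pair loss, where $h_i$ is $v_i$'s hidden-layer representation, $u_j$ and $u_k$ are the $j$th and $k$th columns of $W_{out}$, and $\sigma$ is the sigmoid function:
$$L(v_i, v_j, v_k) = -\log \sigma(u_j^\top h_i) - \log\big(1 - \sigma(u_k^\top h_i)\big)$$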
Following [2,3], we denote the rows of the input-to-hidden weight matrix $W_{in}$ as ``input_embeddings`` and the columns of the hidden-to-output weight matrix $W_{out}$ as ``output_embeddings``. To build the Node2Vec model, we look up the ``input_embeddings`` for target nodes and the ``output_embeddings`` for context nodes and calculate their inner product, followed by a sigmoid activation.
```
# install StellarGraph if running on Google Colab
import sys
if 'google.colab' in sys.modules:
%pip install -q stellargraph[demos]==1.3.0b
# verify that we're using the correct version of StellarGraph for this notebook
import stellargraph as sg
try:
sg.utils.validate_notebook_version("1.3.0b")
except AttributeError:
raise ValueError(
f"This notebook requires StellarGraph version 1.3.0b, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>."
) from None
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
import os
import networkx as nx
import numpy as np
import pandas as pd
from tensorflow import keras
from stellargraph import StellarGraph
from stellargraph.data import BiasedRandomWalk
from stellargraph.data import UnsupervisedSampler
from stellargraph.data import BiasedRandomWalk
from stellargraph.mapper import Node2VecLinkGenerator, Node2VecNodeGenerator
from stellargraph.layer import Node2Vec, link_classification
from stellargraph import datasets
from IPython.display import display, HTML
%matplotlib inline
```
### Dataset
For clarity, we use only the largest connected component, ignoring isolated nodes and subgraphs; having these in the data does not prevent the algorithm from running and producing valid results.
```
dataset = datasets.Cora()
display(HTML(dataset.description))
G, subjects = dataset.load(largest_connected_component_only=True)
print(G.info())
```
### The Node2Vec algorithm
The Node2Vec algorithm introduced in [[1]](#refs) is a 2-step representation learning algorithm. The two steps are:
1. Use random walks to generate sentences from a graph. A sentence is a list of node ids. The set of all sentences makes a corpus.
2. The corpus is then used to learn an embedding vector for each node in the graph. Each node id is considered a unique word/token in a dictionary that has size equal to the number of nodes in the graph. The Word2Vec algorithm [[2]](#refs) is used for calculating the embedding vectors.
In this implementation, we train the Node2Vec algorithm in the following two steps:
1. Generate a set of (`target`, `context`) node pairs by starting a biased random walk of fixed length at each node. The starting nodes are taken as the target nodes and the subsequent nodes in the biased random walks are taken as context nodes. For each (`target`, `context`) node pair, we generate 1 negative node pair.
2. Train the Node2Vec algorithm through minimizing cross-entropy loss for `target-context` pair prediction, with the predictive value obtained by performing the dot product of the 'input embedding' of the target node and the 'output embedding' of the context node, followed by a sigmoid activation.
Specify the optional parameter values: the number of walks to take per node and the length of each walk. Here, to keep the running time reasonable, we set `walk_number` to 100 and `walk_length` to 5. Larger values can be used to achieve better performance.
```
walk_number = 100
walk_length = 5
```
Create the biased random walker to perform context node sampling, with the specified parameters.
```
walker = BiasedRandomWalk(
G,
n=walk_number,
length=walk_length,
p=0.5, # defines probability, 1/p, of returning to source node
q=2.0, # defines probability, 1/q, for moving to a node away from the source node
)
```
Create the UnsupervisedSampler instance with the biased random walker.
```
unsupervised_samples = UnsupervisedSampler(G, nodes=list(G.nodes()), walker=walker)
```
Set the batch size and the number of epochs.
```
batch_size = 50
epochs = 2
```
Define a Node2Vec link generator, which generates a batch of (index of target node, index of context node, label of node pair) samples per iteration.
```
generator = Node2VecLinkGenerator(G, batch_size)
```
Build the Node2Vec model, with the dimension of learned node representations set to 128.
```
emb_size = 128
node2vec = Node2Vec(emb_size, generator=generator)
x_inp, x_out = node2vec.in_out_tensors()
```
Use the link_classification function to generate the prediction, with the 'dot' edge embedding generation method and the 'sigmoid' activation, which actually performs the dot product of the ``input embedding`` of the target node and the ``output embedding`` of the context node followed by a sigmoid activation.
```
prediction = link_classification(
output_dim=1, output_act="sigmoid", edge_embedding_method="dot"
)(x_out)
```
Stack the Node2Vec encoder and prediction layer into a Keras model. Our generator will produce batches of positive and negative context pairs as inputs to the model. Minimizing the binary crossentropy between the outputs and the provided ground truth is much like a regular binary classification task.
```
model = keras.Model(inputs=x_inp, outputs=prediction)
model.compile(
optimizer=keras.optimizers.Adam(lr=1e-3),
loss=keras.losses.binary_crossentropy,
metrics=[keras.metrics.binary_accuracy],
)
```
Train the model.
```
history = model.fit(
generator.flow(unsupervised_samples),
epochs=epochs,
verbose=1,
use_multiprocessing=False,
workers=4,
shuffle=True,
)
```
## Visualise Node Embeddings
Build the node based model for predicting node representations from node ids and the learned parameters. Below a Keras model is constructed, with `x_inp[0]` as input and `x_out[0]` as output. Note that this model's weights are the same as those of the corresponding node encoder in the previously trained node pair classifier.
```
x_inp_src = x_inp[0]
x_out_src = x_out[0]
embedding_model = keras.Model(inputs=x_inp_src, outputs=x_out_src)
```
Get the node embeddings from node ids.
```
node_gen = Node2VecNodeGenerator(G, batch_size).flow(subjects.index)
node_embeddings = embedding_model.predict(node_gen, workers=4, verbose=1)
```
Transform the embeddings to 2d space for visualisation.
```
transform = TSNE # PCA
trans = transform(n_components=2)
node_embeddings_2d = trans.fit_transform(node_embeddings)
# draw the embedding points, coloring them by the target label (paper subject)
alpha = 0.7
label_map = {l: i for i, l in enumerate(np.unique(subjects))}
node_colours = [label_map[target] for target in subjects]
plt.figure(figsize=(7, 7))
plt.axes().set(aspect="equal")
plt.scatter(
node_embeddings_2d[:, 0],
node_embeddings_2d[:, 1],
c=node_colours,
cmap="jet",
alpha=alpha,
)
plt.title("{} visualization of node embeddings".format(transform.__name__))
plt.show()
```
### Downstream task
The node embeddings calculated using Node2Vec can be used as feature vectors in a downstream task such as node attribute inference (e.g., inferring the subject of a paper in Cora), community detection (clustering of nodes based on the similarity of their embedding vectors), and link prediction (e.g., prediction of citation links between papers).
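As a minimal sketch of such a downstream task (node attribute inference), assuming the `node_embeddings` and `subjects` computed above, we could train a simple classifier on the embeddings; the `train_size` below is an arbitrary choice for illustration:
```
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# use the learned embeddings as features and the paper subjects as labels
X_train, X_test, y_train, y_test = train_test_split(
    node_embeddings, subjects, train_size=0.1, stratify=subjects, random_state=42
)

clf = LogisticRegression(max_iter=1000)
clf.fit(X_train, y_train)
print("Test accuracy:", clf.score(X_test, y_test))
```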
<table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/embeddings/keras-node2vec-embeddings.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/embeddings/keras-node2vec-embeddings.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
|
github_jupyter
|
## [Bag of Words Meets Bags of Popcorn | Kaggle](https://www.kaggle.com/c/word2vec-nlp-tutorial#part-3-more-fun-with-word-vectors)
# Tutorial parts 3 and 4
* [DeepLearningMovies/KaggleWord2VecUtility.py at master · wendykan/DeepLearningMovies](https://github.com/wendykan/DeepLearningMovies/blob/master/KaggleWord2VecUtility.py)
* This was written with reference to the GitHub tutorial linked from Kaggle; parts of the Python 2 source were modified to run on Python 3.
### First attempt (average feature vectors)
- Average the word vectors using the code from tutorial 2.
### Second attempt (K-means)
- Since Word2Vec creates clusters of semantically related words, we can exploit the similarity of words within a cluster.
- Grouping vectors in this way is known as "vector quantization".
- To do this, we need to find the centers of the word clusters using a clustering algorithm such as K-means.
- We cluster with K-means (unsupervised learning) and then use a random forest (supervised learning) to predict whether a review is positive or not.
```
import pandas as pd
import numpy as np
from gensim.models import Word2Vec
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestClassifier
from bs4 import BeautifulSoup
import re
import time
from nltk.corpus import stopwords
import nltk.data
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
model = Word2Vec.load('300features_40minwords_10text')
model
# Representing words with numbers
# A Word2Vec model consists of a feature vector for each word in the vocabulary,
# stored in a NumPy array called 'syn0'.
# The number of rows in syn0 is the number of words in the model's vocabulary;
# the number of columns is the feature vector size we set in part 2.
type(model.wv.syn0)
# The number of rows in syn0 is the number of words in the model's vocabulary;
# the number of columns is the feature vector size set in part 2.
model.wv.syn0.shape
# Accessing an individual word vector
model.wv['flower'].shape
model.wv['flower'][:10]
```
## Clustering the data with K-means
* [K-means algorithm - Wikipedia (Korean)](https://ko.wikipedia.org/wiki/K-%ED%8F%89%EA%B7%A0_%EC%95%8C%EA%B3%A0%EB%A6%AC%EC%A6%98)
- Clustering is an unsupervised learning technique.
- Clustering groups samples based on notions such as similarity.
- The goal of clustering is to partition the samples (n-dimensional vectors of real numbers) into groups that are internally similar but have little in common with each other.
- If the range of one dimension differs greatly from the others, the data should be rescaled before clustering.
1. Randomly select k vectors as the initial centroids.
2. Assign each sample to its nearest centroid.
3. Recompute the position of each centroid.
4. Repeat steps 2 and 3 until the centroids no longer move. (A minimal NumPy sketch of these steps follows below.)
Reference: [book] 모두의 데이터 과학 (with 파이썬)
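A minimal NumPy sketch of the assignment/update steps above, for illustration only (the code below uses scikit-learn's KMeans on the actual word vectors):
```
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(100, 2))                          # 100 toy samples, 2 dimensions
k = 3
centroids = X[rng.choice(len(X), k, replace=False)]    # step 1: random initial centroids

for _ in range(10):                                    # steps 2-4 (fixed number of iterations here)
    # step 2: assign each sample to its nearest centroid
    labels = np.argmin(((X[:, None, :] - centroids[None, :, :]) ** 2).sum(-1), axis=1)
    # step 3: recompute each centroid as the mean of its assigned samples
    # (a production implementation would also handle empty clusters)
    centroids = np.array([X[labels == j].mean(axis=0) for j in range(k)])
```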
```
# Run k-means on the word vectors and print out a few clusters.
start = time.time() # start time
# Set the number of clusters "k" to 1/5 of the vocabulary size, i.e. an average of 5 words per cluster.
word_vectors = model.wv.syn0 # feature vectors of the vocabulary
num_clusters = word_vectors.shape[0] / 5
num_clusters = int(num_clusters)
# Define and fit the K-means model.
kmeans_clustering = KMeans( n_clusters = num_clusters )
idx = kmeans_clustering.fit_predict( word_vectors )
# Subtract the start time from the end time to get the elapsed time.
end = time.time()
elapsed = end - start
print("Time taken for K Means clustering: ", elapsed, "seconds.")
# Create a word/index dictionary, mapping each vocabulary word to a cluster number.
idx = list(idx)
names = model.wv.index2word
word_centroid_map = {names[i]: idx[i] for i in range(len(names))}
# word_centroid_map = dict(zip( model.wv.index2word, idx ))
# Print the first 10 clusters
for cluster in range(0,10):
# Print the cluster number
print("\nCluster {}".format(cluster))
# Print the words in this cluster.
words = []
for i in range(0,len(list(word_centroid_map.values()))):
if( list(word_centroid_map.values())[i] == cluster ):
words.append(list(word_centroid_map.keys())[i])
print(words)
"""
Read the data into a pandas DataFrame.
QUOTE_MINIMAL (0), QUOTE_ALL (1),
QUOTE_NONNUMERIC (2) or QUOTE_NONE (3).
Then, as in the previous tutorial, clean the text into
clean_train_reviews and clean_test_reviews.
"""
train = pd.read_csv('data/labeledTrainData.tsv',
header=0, delimiter="\t", quoting=3)
test = pd.read_csv('data/testData.tsv',
header=0, delimiter="\t", quoting=3)
# unlabeled_train = pd.read_csv( 'data/unlabeledTrainData.tsv', header=0, delimiter="\t", quoting=3 )
from KaggleWord2VecUtility import KaggleWord2VecUtility
# Clean the training reviews.
clean_train_reviews = []
for review in train["review"]:
clean_train_reviews.append(
KaggleWord2VecUtility.review_to_wordlist( review, \
remove_stopwords=True ))
# Clean the test reviews.
clean_test_reviews = []
for review in test["review"]:
clean_test_reviews.append(
KaggleWord2VecUtility.review_to_wordlist( review, \
remove_stopwords=True ))
# Create bags of centroids
# Pre-allocate the bag-of-centroids array for the training set, for speed.
train_centroids = np.zeros((train["review"].size, num_clusters), \
dtype="float32" )
train_centroids[:5]
# A centroid is the center point of a cluster; distances are measured to these center points.
def create_bag_of_centroids( wordlist, word_centroid_map ):
# The number of clusters is equal to the highest cluster index in the word/centroid map.
num_centroids = max( word_centroid_map.values() ) + 1
# Pre-allocate the bag of centroids vector, for speed.
bag_of_centroids = np.zeros( num_centroids, dtype="float32" )
# Loop over the words; if a word is in word_centroid_map,
# increment the count of its corresponding cluster by one.
for word in wordlist:
if word in word_centroid_map:
index = word_centroid_map[word]
bag_of_centroids[index] += 1
# Return the bag of centroids.
return bag_of_centroids
# Convert the training reviews into bags of centroids.
counter = 0
for review in clean_train_reviews:
train_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# Repeat the same for the test reviews.
test_centroids = np.zeros(( test["review"].size, num_clusters), \
dtype="float32" )
counter = 0
for review in clean_test_reviews:
test_centroids[counter] = create_bag_of_centroids( review, \
word_centroid_map )
counter += 1
# Train a random forest and make predictions
forest = RandomForestClassifier(n_estimators = 100)
# Fit on the labels of the train data and predict.
# This takes a while, so use %time to print the elapsed time.
print("Fitting a random forest to labeled training data...")
%time forest = forest.fit(train_centroids, train["sentiment"])
from sklearn.model_selection import cross_val_score
%time score = np.mean(cross_val_score(\
forest, train_centroids, train['sentiment'], cv=10,\
scoring='roc_auc'))
%time result = forest.predict(test_centroids)
score
# Save the result as a csv
output = pd.DataFrame(data={"id":test["id"], "sentiment":result})
output.to_csv("data/submit_BagOfCentroids_{0:.5f}.csv".format(score), index=False, quoting=3)
fig, axes = plt.subplots(ncols=2)
fig.set_size_inches(12,5)
sns.countplot(train['sentiment'], ax=axes[0])
sns.countplot(output['sentiment'], ax=axes[1])
output_sentiment = output['sentiment'].value_counts()
print(output_sentiment[0] - output_sentiment[1])
output_sentiment
# Kaggle score: 0.84908
print(330/528)
```
### Why does Bag of Words give better results in this tutorial?
Averaging the vectors and using centroids loses word order, which makes the approach very similar to the Bag of Words concept. Since the performance is similar (within the range of standard error), tutorials 1, 2 and 3 yield equivalent results.
First, training Word2Vec on more text improves performance. Google's results are based on word vectors learned from a corpus of more than a billion words; our labeled and unlabeled training sets together contain only about eighteen million words. Conveniently, Word2Vec provides functions to load pretrained models output by Google's original C tool, so it is also possible to train a model in C and then import it into Python.
Second, in published work, distributed word vector techniques have been shown to outperform Bag of Words models. In one such paper, an algorithm called Paragraph Vector is applied to the IMDB dataset to produce some of the best results to date. Paragraph Vectors are partly better than the approaches we try here because they preserve word-order information, whereas vector averaging and clustering lose it.
* Further study: Stanford NLP lecture: [Lecture 1 | Natural Language Processing with Deep Learning - YouTube](https://www.youtube.com/watch?v=OQQ-W_63UgQ&list=PL3FW7Lu3i5Jsnh1rnUwq_TcylNr7EkRe6)
|
github_jupyter
|
```
# Uncomment and run this cell if you're on Colab or Kaggle
# !git clone https://github.com/nlp-with-transformers/notebooks.git
# %cd notebooks
# from install import *
# install_requirements(is_chapter10=True)
# hide
from utils import *
setup_chapter()
```
# Training Transformers from Scratch
> **Note:** In this chapter we build a large dataset and the script to train a large language model on distributed infrastructure. As such, not all the steps in this notebook are executable on platforms such as Colab or Kaggle. Either downscale the steps at critical points or use this notebook as inspiration when building a script for distributed training.
## Large Datasets and Where to Find Them
### Challenges of Building a Large-Scale Corpus
```
#hide_output
from transformers import pipeline, set_seed
generation_gpt = pipeline("text-generation", model="openai-gpt")
generation_gpt2 = pipeline("text-generation", model="gpt2")
def model_size(model):
return sum(t.numel() for t in model.parameters())
print(f"GPT size: {model_size(generation_gpt.model)/1000**2:.1f}M parameters")
print(f"GPT2 size: {model_size(generation_gpt2.model)/1000**2:.1f}M parameters")
# hide
set_seed(1)
def enum_pipeline_ouputs(pipe, prompt, num_return_sequences):
out = pipe(prompt, num_return_sequences=num_return_sequences,
clean_up_tokenization_spaces=True)
return "\n".join(f"{i+1}." + s["generated_text"] for i, s in enumerate(out))
prompt = "\nWhen they came back"
print("GPT completions:\n" + enum_pipeline_ouputs(generation_gpt, prompt, 3))
print("")
print("GPT-2 completions:\n" + enum_pipeline_ouputs(generation_gpt2, prompt, 3))
```
### Building a Custom Code Dataset
#### Creating a dataset with Google BigQuery
#sidebar To Filter the Noise or Not?
### Working with Large Datasets
#### Memory mapping
> **Note:** The following code block assumes that you have downloaded the BigQuery dataset to a folder called `codeparrot`. We suggest skipping this step since it will unpack the compressed files and require ~180GB of disk space. This code is just for demonstration purposes and you can just continue below with the streamed dataset which will not consume that much disk space.
```
#hide_output
from datasets import load_dataset, DownloadConfig
download_config = DownloadConfig(delete_extracted=True)
dataset = load_dataset("./codeparrot", split="train",
download_config=download_config)
import psutil, os
print(f"Number of python files code in dataset : {len(dataset)}")
ds_size = sum(os.stat(f["filename"]).st_size for f in dataset.cache_files)
# os.stat.st_size is expressed in bytes, so we convert to GB
print(f"Dataset size (cache file) : {ds_size / 2**30:.2f} GB")
# Process.memory_info is expressed in bytes, so we convert to MB
print(f"RAM used: {psutil.Process(os.getpid()).memory_info().rss >> 20} MB")
```
#### Streaming
```
# hide_output
streamed_dataset = load_dataset('./codeparrot', split="train", streaming=True)
iterator = iter(streamed_dataset)
print(dataset[0] == next(iterator))
print(dataset[1] == next(iterator))
remote_dataset = load_dataset('transformersbook/codeparrot', split="train",
streaming=True)
```
### Adding Datasets to the Hugging Face Hub
## Building a Tokenizer
```
# hide_output
from transformers import AutoTokenizer
def tok_list(tokenizer, string):
input_ids = tokenizer(string, add_special_tokens=False)["input_ids"]
return [tokenizer.decode(tok) for tok in input_ids]
tokenizer_T5 = AutoTokenizer.from_pretrained("t5-base")
tokenizer_camembert = AutoTokenizer.from_pretrained("camembert-base")
print(f'T5 tokens for "sex": {tok_list(tokenizer_T5,"sex")}')
print(f'CamemBERT tokens for "being": {tok_list(tokenizer_camembert,"being")}')
```
### The Tokenizer Model
### Measuring Tokenizer Performance
### A Tokenizer for Python
```
from transformers import AutoTokenizer
python_code = r"""def say_hello():
print("Hello, World!")
# Print it
say_hello()
"""
tokenizer = AutoTokenizer.from_pretrained("gpt2")
print(tokenizer(python_code).tokens())
print(tokenizer.backend_tokenizer.normalizer)
print(tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(python_code))
a, e = u"a", u"€"
byte = ord(a.encode("utf-8"))
print(f'`{a}` is encoded as `{a.encode("utf-8")}` with a single byte: {byte}')
byte = [ord(chr(i)) for i in e.encode("utf-8")]
print(f'`{e}` is encoded as `{e.encode("utf-8")}` with three bytes: {byte}')
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
byte_to_unicode_map = bytes_to_unicode()
unicode_to_byte_map = dict((v, k) for k, v in byte_to_unicode_map.items())
base_vocab = list(unicode_to_byte_map.keys())
print(f'Size of our base vocabulary: {len(base_vocab)}')
print(f'First element: `{base_vocab[0]}`, last element: `{base_vocab[-1]}`')
# hide_input
#id unicode_mapping
#caption Examples of character mappings in BPE
#hide_input
import pandas as pd
from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
byte_to_unicode_map = bytes_to_unicode()
unicode_to_byte_map = dict((v, k) for k, v in byte_to_unicode_map.items())
base_vocab = list(unicode_to_byte_map.keys())
examples = [
['Regular characters', '`a` and `?`', f'{ord("a")} and {ord("?")}' , f'`{byte_to_unicode_map[ord("a")]}` and `{byte_to_unicode_map[ord("?")]}`'],
['Nonprintable control character (carriage return)', '`U+000D`', f'13', f'`{byte_to_unicode_map[13]}`'],
['A space', '` `', f'{ord(" ")}', f'`{byte_to_unicode_map[ord(" ")]}`'],
['A nonbreakable space', '`\\xa0`', '160', f'`{byte_to_unicode_map[ord(chr(160))]}`'],
['A newline character', '`\\n`', '10', f'`{byte_to_unicode_map[ord(chr(10))]}`'],
]
pd.DataFrame(examples, columns = ['Description', 'Character', 'Bytes', 'Mapped bytes'])
print(tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(python_code))
print(f"Size of the vocabulary: {len(tokenizer)}")
print(tokenizer(python_code).tokens())
```
### Training a Tokenizer
```
tokens = sorted(tokenizer.vocab.items(), key=lambda x: len(x[0]), reverse=True)
print([f'{tokenizer.convert_tokens_to_string(t)}' for t, _ in tokens[:8]]);
tokens = sorted(tokenizer.vocab.items(), key=lambda x: x[1], reverse=True)
print([f'{tokenizer.convert_tokens_to_string(t)}' for t, _ in tokens[:12]]);
#hide_output
from tqdm.auto import tqdm
length = 100000
dataset_name = 'transformersbook/codeparrot-train'
dataset = load_dataset(dataset_name, split="train", streaming=True)
iter_dataset = iter(dataset)
def batch_iterator(batch_size=10):
for _ in tqdm(range(0, length, batch_size)):
yield [next(iter_dataset)['content'] for _ in range(batch_size)]
new_tokenizer = tokenizer.train_new_from_iterator(batch_iterator(),
vocab_size=12500,
initial_alphabet=base_vocab)
tokens = sorted(new_tokenizer.vocab.items(), key=lambda x: x[1], reverse=False)
print([f'{tokenizer.convert_tokens_to_string(t)}' for t, _ in tokens[257:280]]);
print([f'{new_tokenizer.convert_tokens_to_string(t)}' for t,_ in tokens[-12:]]);
print(new_tokenizer(python_code).tokens())
import keyword
print(f'There are in total {len(keyword.kwlist)} Python keywords.')
for keyw in keyword.kwlist:
if keyw not in new_tokenizer.vocab:
print(f'No, keyword `{keyw}` is not in the vocabulary')
# hide_output
length = 200000
new_tokenizer_larger = tokenizer.train_new_from_iterator(batch_iterator(),
vocab_size=32768, initial_alphabet=base_vocab)
tokens = sorted(new_tokenizer_larger.vocab.items(), key=lambda x: x[1],
reverse=False)
print([f'{tokenizer.convert_tokens_to_string(t)}' for t, _ in tokens[-12:]]);
print(new_tokenizer_larger(python_code).tokens())
for keyw in keyword.kwlist:
if keyw not in new_tokenizer_larger.vocab:
print(f'No, keyword `{keyw}` is not in the vocabulary')
```
### Saving a Custom Tokenizer on the Hub
```
#hide_output
model_ckpt = "codeparrot"
org = "transformersbook"
new_tokenizer_larger.push_to_hub(model_ckpt, organization=org)
reloaded_tokenizer = AutoTokenizer.from_pretrained(org + "/" + model_ckpt)
print(reloaded_tokenizer(python_code).tokens())
#hide_output
new_tokenizer.push_to_hub(model_ckpt+ "-small-vocabulary", organization=org)
```
## Training a Model from Scratch
### A Tale of Pretraining Objectives
<img alt="Code snippet" caption="An example of a Python function that could be found in our dataset" src="images/chapter10_code-snippet.png" id="code-snippet"/>
#### Causal language modeling
<img alt="CLM pretraining" caption="In causal language modeling, the future tokens are masked and the model has to predict them; typically a decoder model such as GPT is used for such a task" src="images/chapter10_pretraining-clm.png" id="pretraining-clm"/>
#### Masked language modeling
<img alt="MLM pretraining" caption="In masked language modeling some of the input tokens are either masked or replaced, and the model's task is to predict the original tokens; this is the architecture underlying the encoder branch of transformer models" src="images/chapter10_pretraining-mlm.png" id="pretraining-mlm"/>
#### Sequence-to-sequence training
<img alt="Seq2seq pretraining" caption="Using an encoder-decoder architecture for a sequence-to-sequence task where the inputs are split into comment/code pairs using heuristics: the model gets one element as input and needs to generate the other one" src="images/chapter10_pretraining-seq2seq.png" id="pretraining-seq2seq"/>
### Initializing the Model
> **NOTE**: In the following code block, a large GPT-2 checkpoint is loaded into memory. On platforms like Colab and Kaggle, this can cause the instance to crash due to insufficient RAM or GPU memory. You can still run the example if you use the small checkpoint by replacing the configuration with `config = AutoConfig.from_pretrained("gpt2", vocab_size=len(tokenizer))`.
```
#hide_output
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(org + "/" + model_ckpt)
config = AutoConfig.from_pretrained("gpt2-xl", vocab_size=len(tokenizer))
model = AutoModelForCausalLM.from_config(config)
print(f'GPT-2 (xl) size: {model_size(model)/1000**2:.1f}M parameters')
#hide_output
model.save_pretrained("models/" + model_ckpt, push_to_hub=True,
organization=org)
tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
config_small = AutoConfig.from_pretrained("gpt2", vocab_size=len(tokenizer))
model_small = AutoModelForCausalLM.from_config(config_small)
print(f'GPT-2 size: {model_size(model_small)/1000**2:.1f}M parameters')
#hide_output
model_small.save_pretrained("models/" + model_ckpt + "-small", push_to_hub=True,
organization=org)
```
### Implementing the Dataloader
<img alt="Preprocessing for CLM" caption="Preparing sequences of varying length for causal language modeling by concatenating several tokenized examples with an EOS token before chunking them" src="images/chapter10_preprocessing-clm.png" id="preprocessing-clm"/>
```
#hide_output
examples, total_characters, total_tokens = 500, 0, 0
dataset = load_dataset('transformersbook/codeparrot-train', split='train',
streaming=True)
for _, example in tqdm(zip(range(examples), iter(dataset)), total=examples):
total_characters += len(example['content'])
total_tokens += len(tokenizer(example['content']).tokens())
characters_per_token = total_characters / total_tokens
print(characters_per_token)
import torch
from torch.utils.data import IterableDataset
class ConstantLengthDataset(IterableDataset):
def __init__(self, tokenizer, dataset, seq_length=1024,
num_of_sequences=1024, chars_per_token=3.6):
self.tokenizer = tokenizer
self.concat_token_id = tokenizer.eos_token_id
self.dataset = dataset
self.seq_length = seq_length
self.input_characters = seq_length * chars_per_token * num_of_sequences
def __iter__(self):
iterator = iter(self.dataset)
more_examples = True
while more_examples:
buffer, buffer_len = [], 0
while True:
if buffer_len >= self.input_characters:
m=f"Buffer full: {buffer_len}>={self.input_characters:.0f}"
print(m)
break
try:
m=f"Fill buffer: {buffer_len}<{self.input_characters:.0f}"
print(m)
buffer.append(next(iterator)["content"])
buffer_len += len(buffer[-1])
except StopIteration:
iterator = iter(self.dataset)
all_token_ids = []
tokenized_inputs = self.tokenizer(buffer, truncation=False)
for tokenized_input in tokenized_inputs['input_ids']:
all_token_ids.extend(tokenized_input + [self.concat_token_id])
for i in range(0, len(all_token_ids), self.seq_length):
input_ids = all_token_ids[i : i + self.seq_length]
if len(input_ids) == self.seq_length:
yield torch.tensor(input_ids)
shuffled_dataset = dataset.shuffle(buffer_size=100)
constant_length_dataset = ConstantLengthDataset(tokenizer, shuffled_dataset,
num_of_sequences=10)
dataset_iterator = iter(constant_length_dataset)
lengths = [len(b) for _, b in zip(range(5), dataset_iterator)]
print(f"Lengths of the sequences: {lengths}")
```
### Defining the Training Loop
```
from argparse import Namespace
# Commented parameters correspond to the small model
config = {"train_batch_size": 2, # 12
"valid_batch_size": 2, # 12
"weight_decay": 0.1,
"shuffle_buffer": 1000,
"learning_rate": 2e-4, # 5e-4
"lr_scheduler_type": "cosine",
"num_warmup_steps": 750, # 2000
"gradient_accumulation_steps": 16, # 1
"max_train_steps": 50000, # 150000
"max_eval_steps": -1,
"seq_length": 1024,
"seed": 1,
"save_checkpoint_steps": 50000} # 15000
args = Namespace(**config)
from torch.utils.tensorboard import SummaryWriter
import logging
import wandb
def setup_logging(project_name):
logger = logging.getLogger(__name__)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, handlers=[
logging.FileHandler(f"log/debug_{accelerator.process_index}.log"),
logging.StreamHandler()])
if accelerator.is_main_process: # We only want to set up logging once
wandb.init(project=project_name, config=args)
run_name = wandb.run.name
tb_writer = SummaryWriter()
tb_writer.add_hparams(vars(args), {'0': 0})
logger.setLevel(logging.INFO)
datasets.utils.logging.set_verbosity_debug()
transformers.utils.logging.set_verbosity_info()
else:
tb_writer = None
run_name = ''
logger.setLevel(logging.ERROR)
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
return logger, tb_writer, run_name
def log_metrics(step, metrics):
logger.info(f"Step {step}: {metrics}")
if accelerator.is_main_process:
wandb.log(metrics)
[tb_writer.add_scalar(k, v, step) for k, v in metrics.items()]
#hide_output
from torch.utils.data.dataloader import DataLoader
def create_dataloaders(dataset_name):
train_data = load_dataset(dataset_name+'-train', split="train",
streaming=True)
train_data = train_data.shuffle(buffer_size=args.shuffle_buffer,
seed=args.seed)
valid_data = load_dataset(dataset_name+'-valid', split="validation",
streaming=True)
train_dataset = ConstantLengthDataset(tokenizer, train_data,
seq_length=args.seq_length)
valid_dataset = ConstantLengthDataset(tokenizer, valid_data,
seq_length=args.seq_length)
train_dataloader=DataLoader(train_dataset, batch_size=args.train_batch_size)
eval_dataloader=DataLoader(valid_dataset, batch_size=args.valid_batch_size)
return train_dataloader, eval_dataloader
def get_grouped_params(model, no_decay=["bias", "LayerNorm.weight"]):
params_with_wd, params_without_wd = [], []
for n, p in model.named_parameters():
if any(nd in n for nd in no_decay):
params_without_wd.append(p)
else:
params_with_wd.append(p)
return [{'params': params_with_wd, 'weight_decay': args.weight_decay},
{'params': params_without_wd, 'weight_decay': 0.0}]
def evaluate():
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(batch, labels=batch)
loss = outputs.loss.repeat(args.valid_batch_size)
losses.append(accelerator.gather(loss))
if args.max_eval_steps > 0 and step >= args.max_eval_steps: break
loss = torch.mean(torch.cat(losses))
try:
perplexity = torch.exp(loss)
except OverflowError:
perplexity = torch.tensor(float("inf"))
return loss.item(), perplexity.item()
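# The full training script defines a few additional names before this point;
# they are assumed here so the loop below has everything it references:
#   from accelerate import Accelerator
#   from huggingface_hub import Repository
#   from transformers import AdamW, get_scheduler
#   import datasets, transformers
#   project_name = "transformersbook/codeparrot"  # assumed repository name
#   dataset_name = "transformersbook/codeparrot"  # assumed dataset name prefix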
set_seed(args.seed)
# Accelerator
accelerator = Accelerator()
samples_per_step = accelerator.state.num_processes * args.train_batch_size
# Logging
logger, tb_writer, run_name = setup_logging(project_name.split("/")[1])
logger.info(accelerator.state)
# Load model and tokenizer
if accelerator.is_main_process:
hf_repo = Repository("./", clone_from=project_name, revision=run_name)
model = AutoModelForCausalLM.from_pretrained("./", gradient_checkpointing=True)
tokenizer = AutoTokenizer.from_pretrained("./")
# Load dataset and dataloader
train_dataloader, eval_dataloader = create_dataloaders(dataset_name)
# Prepare the optimizer and learning rate scheduler
optimizer = AdamW(get_grouped_params(model), lr=args.learning_rate)
lr_scheduler = get_scheduler(name=args.lr_scheduler_type, optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,)
def get_lr():
return optimizer.param_groups[0]['lr']
# Prepare everything with our `accelerator` (order of args is not important)
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader)
# Train model
model.train()
completed_steps = 0
for step, batch in enumerate(train_dataloader, start=1):
loss = model(batch, labels=batch).loss
log_metrics(step, {'lr': get_lr(), 'samples': step*samples_per_step,
'steps': completed_steps, 'loss/train': loss.item()})
loss = loss / args.gradient_accumulation_steps
accelerator.backward(loss)
if step % args.gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
completed_steps += 1
if step % args.save_checkpoint_steps == 0:
logger.info('Evaluating and saving model checkpoint')
eval_loss, perplexity = evaluate()
log_metrics(step, {'loss/eval': eval_loss, 'perplexity': perplexity})
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
if accelerator.is_main_process:
unwrapped_model.save_pretrained("./")
hf_repo.push_to_hub(commit_message=f'step {step}')
model.train()
if completed_steps >= args.max_train_steps:
break
# Evaluate and save the last checkpoint
logger.info('Evaluating and saving model after training')
eval_loss, perplexity = evaluate()
log_metrics(step, {'loss/eval': eval_loss, 'perplexity': perplexity})
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
if accelerator.is_main_process:
unwrapped_model.save_pretrained("./")
hf_repo.push_to_hub(commit_message=f'final model')
```
<img alt="DDP" caption="Illustration of the processing steps in DDP with four GPUs" src="images/chapter10_ddp.png" id="ddp"/>
### The Training Run
## Results and Analysis
```
#hide_output
from transformers import pipeline, set_seed
model_ckpt = 'transformersbook/codeparrot-small'
generation = pipeline('text-generation', model=model_ckpt, device=0)
import re
from transformers import set_seed
def first_block(string):
return re.split('\nclass|\ndef|\n#|\n@|\nprint|\nif', string)[0].rstrip()
def complete_code(pipe, prompt, max_length=64, num_completions=4, seed=1):
set_seed(seed)
gen_kwargs = {"temperature":0.4, "top_p":0.95, "top_k":0, "num_beams":1,
"do_sample":True,}
    code_gens = pipe(prompt, num_return_sequences=num_completions,
                     max_length=max_length, **gen_kwargs)
code_strings = []
for code_gen in code_gens:
generated_code = first_block(code_gen['generated_text'][len(prompt):])
code_strings.append(generated_code)
print(('\n'+'='*80 + '\n').join(code_strings))
prompt = '''def area_of_rectangle(a: float, b: float):
"""Return the area of the rectangle."""'''
complete_code(generation, prompt)
prompt = '''def get_urls_from_html(html):
"""Get all embedded URLs in a HTML string."""'''
complete_code(generation, prompt)
import requests
def get_urls_from_html(html):
return [url for url in re.findall(r'<a href="(.*?)"', html) if url]
print(" | ".join(get_urls_from_html(requests.get('https://hf.co/').text)))
```
> **NOTE**: In the following code block, a large GPT-2 checkpoint is loaded into memory. On platforms like Colab and Kaggle, this can cause the instance to crash due to insufficient RAM or GPU memory. You can still run the example if you replace the large model with the small one by using `model_ckpt = "transformersbook/codeparrot-small"`.
```
model_ckpt = 'transformersbook/codeparrot'
generation = pipeline('text-generation', model=model_ckpt, device=0)
prompt = '''# a function in native python:
def mean(a):
return sum(a)/len(a)
# the same function using numpy:
import numpy as np
def mean(a):'''
complete_code(generation, prompt, max_length=64)
prompt = '''X = np.random.randn(100, 100)
y = np.random.randint(0, 1, 100)
# fit random forest classifier with 20 estimators'''
complete_code(generation, prompt, max_length=96)
```
## Conclusion
# Introduction to Reinforcement Learning
This Jupyter notebook and the others in the same folder act as supporting materials for **Chapter 21 Reinforcement Learning** of the book *Artificial Intelligence: A Modern Approach*. The notebooks make use of the implementations in the `rl.py` module. We also make use of the implementation of MDPs in the `mdp.py` module to test our agents. It might be helpful if you have already gone through the Jupyter notebook dealing with the Markov decision process. Let us import everything from the `rl` module. It might be helpful to view the source of some of our implementations.
```
import os, sys
sys.path = [os.path.abspath("../../")] + sys.path
from rl4e import *
```
Before we start playing with the actual implementations let us review a couple of things about RL.
1. Reinforcement Learning is concerned with how software agents ought to take actions in an environment so as to maximize some notion of cumulative reward.
2. Reinforcement learning differs from standard supervised learning in that correct input/output pairs are never presented, nor sub-optimal actions explicitly corrected. Further, there is a focus on on-line performance, which involves finding a balance between exploration (of uncharted territory) and exploitation (of current knowledge).
-- Source: [Wikipedia](https://en.wikipedia.org/wiki/Reinforcement_learning)
In summary, we have a sequence of state action transitions with rewards associated with some states. Our goal is to find the optimal policy $\pi$ which tells us what action to take in each state.
# Passive Reinforcement Learning
In passive Reinforcement Learning the agent follows a fixed policy $\pi$. Passive learning attempts to evaluate the given policy $\pi$ without any knowledge of the Reward function $R(s)$ or the Transition model $P(s'\ |\ s, a)$.
This is usually done by some method of **utility estimation**. The agent attempts to directly learn the utility of each state that would result from following the policy. Note that at each step it has to *perceive* the reward and the state; it has no global knowledge of these. Thus, if the entire set of actions offers only a very low probability of attaining some state $s_+$, the agent may never perceive the reward $R(s_+)$.
Consider a situation where an agent is given the policy to follow. Thus, at any point, it knows only its current state and current reward, and the action it must take next. This action may lead it to more than one state, with different probabilities.
For a series of actions given by $\pi$, the estimated utility $U$ is:
$$U^{\pi}(s) = E\left[\sum_{t=0}^{\infty} \gamma^t R(s_t)\right]$$
Or the expected value of summed discounted rewards until termination.
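As a quick numerical illustration of this expectation, the sketch below sums discounted rewards along a single made-up trajectory; the reward sequence is hypothetical and not taken from the environment used later in this notebook.
```
# Minimal illustrative sketch: discounted return of one hypothetical trajectory.
gamma = 0.9
rewards = [-0.04, -0.04, -0.04, 1.0]   # made-up rewards observed until termination

U_estimate = sum(gamma**t * r for t, r in enumerate(rewards))
print(U_estimate)   # single-trial estimate of U^pi for the starting state
```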
Based on this concept, we discuss three methods of estimating utility: direct utility estimation, adaptive dynamic programming, and temporal-difference learning.
### Implementation
Passive agents are implemented in `rl4e.py` as various `Agent-Class`es.
To demonstrate these agents, we make use of the `GridMDP` object from the `MDP` module. `sequential_decision_environment` is similar to that used for the `MDP` notebook but has discounting with $\gamma = 0.9$.
The `Agent-Program` can be obtained by creating an instance of the relevant `Agent-Class`. The `__call__` method allows the `Agent-Class` to be called as a function. The class needs to be instantiated with a policy ($\pi$) and an `MDP` whose utility of states will be estimated.
```
from mdp import sequential_decision_environment
```
The `sequential_decision_environment` is a GridMDP object as shown below. The rewards are **+1** and **-1** in the terminal states, and **-0.04** in the rest. <img src="images/mdp.png"> Now we define actions and a policy similar to **Fig 21.1** in the book.
```
# Action Directions
north = (0, 1)
south = (0,-1)
west = (-1, 0)
east = (1, 0)
policy = {
(0, 2): east, (1, 2): east, (2, 2): east, (3, 2): None,
(0, 1): north, (2, 1): north, (3, 1): None,
(0, 0): north, (1, 0): west, (2, 0): west, (3, 0): west,
}
```
This environment will be used extensively in the following demonstrations.
## Direct Utility Estimation (DUE)
The first, most naive method of estimating utility comes from the simplest interpretation of the above definition. We construct an agent that follows the policy until it reaches the terminal state. At each step, it logs its current state, reward. Once it reaches the terminal state, it can estimate the utility for each state for *that* iteration, by simply summing the discounted rewards from that state to the terminal one.
It can now run this 'simulation' $n$ times and calculate the average utility of each state. If a state occurs more than once in a simulation, each of its utility values is counted separately.
Note that this method may be prohibitively slow for very large state-spaces. Besides, **it pays no attention to the transition probability $P(s'\ |\ s, a)$.** It misses out on information that it is capable of collecting (say, by recording the number of times an action from one state led to another state). The next method addresses this issue.
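The idea behind direct utility estimation can be sketched in a few lines of plain Python. This is an illustration only, not the actual `PassiveDUEAgent` code; trials are assumed to be given as lists of `(state, reward)` pairs ending at a terminal state.
```
from collections import defaultdict

def due_estimate(trials, gamma=0.9):
    """Average the discounted returns observed for each state over many trials."""
    totals, counts = defaultdict(float), defaultdict(int)
    for trial in trials:
        ret = 0.0
        for state, reward in reversed(trial):   # walk backwards through the trial
            ret = reward + gamma * ret          # discounted return from this state onward
            totals[state] += ret
            counts[state] += 1                  # repeat visits are counted separately
    return {s: totals[s] / counts[s] for s in totals}
```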
### Examples
The `PassiveDUEAgent` class in the `rl` module implements the Agent Program described in **Fig 21.2** of the AIMA Book. `PassiveDUEAgent` sums over rewards to find the estimated utility for each state. It thus requires running several iterations.
```
%psource PassiveDUEAgent
```
Now let's try the `PassiveDUEAgent` on the newly defined `sequential_decision_environment`:
```
DUEagent = PassiveDUEAgent(policy, sequential_decision_environment)
```
We can run 200 trials through the Markov model in order to get converged utility values:
```
for i in range(200):
run_single_trial(DUEagent, sequential_decision_environment)
DUEagent.estimate_U()
```
Now let's print our estimated utility for each position:
```
print('\n'.join([str(k)+':'+str(v) for k, v in DUEagent.U.items()]))
```
## Adaptive Dynamic Programming (ADP)
This method makes use of knowledge of the past state $s$, the action $a$, and the new perceived state $s'$ to estimate the transition probability $P(s'\ |\ s,a)$. It does this by the simple counting of new states resulting from previous states and actions.<br>
The program runs through the policy a number of times, keeping track of:
- each occurrence of state $s$ and the policy-recommended action $a$ in $N_{sa}$
- each occurrence of $s'$ resulting from $a$ on $s$ in $N_{s'|sa}$.
It can thus estimate $P(s'\ |\ s,a)$ as $N_{s'|sa}/N_{sa}$, which in the limit of infinite trials, will converge to the true value.<br>
Using the transition probabilities thus estimated, it can apply `POLICY-EVALUATION` to estimate the utilities $U(s)$ using properties of convergence of the Bellman functions.
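A rough sketch of the counting step is shown below; it is illustrative only, since the actual `PassiveADPAgent` additionally runs policy evaluation on top of these counts.
```
from collections import defaultdict

N_sa = defaultdict(int)                          # visits of each (state, action) pair
N_s1_sa = defaultdict(lambda: defaultdict(int))  # outcomes s' observed for (state, action)

def record_transition(s, a, s1):
    N_sa[(s, a)] += 1
    N_s1_sa[(s, a)][s1] += 1

def estimated_P(s, a, s1):
    # P(s' | s, a) is approximated by N_{s'|sa} / N_sa
    return N_s1_sa[(s, a)][s1] / N_sa[(s, a)] if N_sa[(s, a)] else 0.0
```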
### Examples
The `PassiveADPAgent` class in the `rl` module implements the Agent Program described in **Fig 21.2** of the AIMA Book. `PassiveADPAgent` uses state transition and occurrence counts to estimate $P$, and then $U$. Go through the source below to understand the agent.
```
%psource PassiveADPAgent
```
We instantiate a `PassiveADPAgent` below with the `GridMDP` shown and train it for 200 steps. The `rl` module has a simple implementation to simulate a single step of the iteration. The function is called `run_single_trial`.
```
ADPagent = PassiveADPAgent(policy, sequential_decision_environment)
for i in range(200):
run_single_trial(ADPagent, sequential_decision_environment)
```
The utilities are calculated as:
```
print('\n'.join([str(k)+':'+str(v) for k, v in ADPagent.U.items()]))
```
Comparing with the result of `PassiveDUEAgent`, both agents estimate a utility of -1.0 at (3,1) and 1.0 at (3,2). Another point to notice is that, apart from the terminal states, the state with the highest utility for both agents is (2,2), which lies right beside the +1 terminal state; this is easy to understand when referring to the map.
## Temporal-difference learning (TD)
Instead of explicitly building the transition model $P$, the temporal-difference model makes use of the expected closeness between the utilities of two consecutive states $s$ and $s'$.
For the transition $s$ to $s'$, the update is written as:
$$U^{\pi}(s) \leftarrow U^{\pi}(s) + \alpha \left( R(s) + \gamma U^{\pi}(s') - U^{\pi}(s) \right)$$
This model implicitly incorporates the transition probabilities, since each successor state is weighted by the number of times it is reached from the current state. Thus, over a number of iterations, it converges similarly to the Bellman equations.
The advantage of the TD learning model is its relatively simple computation at each step, rather than having to keep track of various counts.
For $n_s$ states and $n_a$ actions the ADP model would have $n_s \times n_a$ numbers $N_{sa}$ and $n_s^2 \times n_a$ numbers $N_{s'|sa}$ to keep track of. The TD model must only keep track of a utility $U(s)$ for each state.
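The update itself is a single assignment. The sketch below is illustrative only: it uses a plain dictionary `U` of utility estimates and the learning-rate schedule that is used later in this notebook.
```
def td_update(U, s, s1, r, n, gamma=0.9):
    """One temporal-difference backup for the observed transition s -> s1.

    r is the reward R(s) and n is the number of times s has been visited,
    used in the decaying learning rate alpha(n) = 60 / (59 + n).
    """
    alpha = 60.0 / (59 + n)
    U[s] = U[s] + alpha * (r + gamma * U.get(s1, 0.0) - U[s])
    return U
```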
### Examples
`PassiveTDAgent` uses temporal differences to learn utility estimates. We learn the difference between the states and back up the values to previous states. Let us look into the source before we see some usage examples.
```
%psource PassiveTDAgent
```
In creating the `TDAgent`, we use the **same learning rate** $\alpha$ as given in the footnote of the book: $\alpha(n)=60/(59+n)$
```
TDagent = PassiveTDAgent(policy, sequential_decision_environment, alpha = lambda n: 60./(59+n))
```
Now we run **200 trials** for the agent to estimate Utilities.
```
for i in range(200):
run_single_trial(TDagent,sequential_decision_environment)
```
The calculated utilities are:
```
print('\n'.join([str(k)+':'+str(v) for k, v in TDagent.U.items()]))
```
Comparing with the previous agents, the result of `PassiveTDAgent` is closer to that of `PassiveADPAgent`.
```
### duffing oscillator
import matplotlib
import numpy as np
from numpy import zeros, linspace, pi, cos, array
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
from matplotlib.path import Path
from matplotlib.patches import PathPatch
t0=0
tf=30*pi
omega=1.2
beta=1
delta=0.3
gamma=0.35
alpha=1
n=10000 #iteration
sampsize=100 #SampleSize
sampstart=5000 #SampleStart
sampend=n #SampleEnd
h=(tf-t0)/(n-1) #stepsize
print('the value of h is',h)
u0=0 #initial displacement
t=linspace(t0,tf,n)
v=zeros([n])
u=zeros([n])
u[0]=u0
v[0]=0 #initial velocity
##### DEFINING FUNCTIONS
def dudt(t,u,v): #### u' = v
return(v)
def funt(t,u,v): #### v' = -delta*v+alpha*u-beta*u**3+gamma*cos(omega*t)
return (-delta*v+alpha*u-beta*u**3+gamma*cos(omega*t))
###### RK4 ALGORITHM USING FOR LOOP
for i in range(1,n):
k1=h*dudt(t[i-1],u[i-1],v[i-1])
l1=h*funt(t[i-1],u[i-1],v[i-1])
k2=h*dudt(t[i-1]+(0.5*h),u[i-1]+(k1*0.5),v[i-1]+(l1*0.5))
l2=h*funt(t[i-1]+(0.5*h),u[i-1]+(k1*0.5),v[i-1]+(l1*0.5))
k3=h*dudt(t[i-1]+(0.5*h),u[i-1]+(k2*0.5),v[i-1]+(l2*0.5))
l3=h*funt(t[i-1]+(0.5*h),u[i-1]+(k2*0.5),v[i-1]+(l2*0.5))
k4=h*dudt(t[i-1]+h,u[i-1]+(k3),v[i-1]+(l3))
l4=h*funt(t[i-1]+h,u[i-1]+(k3),v[i-1]+(l3))
u[i]=u[i-1]+(1/6)*(k1+(2*k2)+(2*k3)+k4)
v[i]=v[i-1]+(1/6)*(l1+(2*l2)+(2*l3)+l4)
### PLOT
plt.plot(t,u,'-r')
plt.xlabel('time(t)')
plt.ylabel('displacement(u)')
plt.show()
print('The value of GAMMA =',gamma)
fig = plt.figure()
plt.plot(u[sampstart:sampend],v[sampstart:sampend],'-g')
plt.xlabel('displacement(u)')
plt.ylabel('velocity(v)')
plt.show()
#### InterPlay
import pandas as pd
xx = lambda a: np.interp(a, (a.min(), a.max()), (0, +1))
uu = xx(u[sampstart:sampend:int(sampsize/2)])
vv = xx(v[sampstart:sampend:int(sampsize/2)])
huh = np.array(list(zip(uu,vv)))
huh = huh[np.random.choice(huh.shape[0], sampsize, replace=False), :]
u1,v1 = zip(*huh)
#print(huh)
pd.DataFrame(huh).to_csv("data/seed1_data.csv", header=['X_value','Y_value'], index=True, index_label='point_id')
#### SAMPLING
print("SAMPLING")
fig = plt.figure()
plt.plot(u1,v1,'.g')
plt.xlabel('displacement(u)')
plt.ylabel('velocity(v)')
plt.show()
my_data = huh
from gudhi import *
import gudhi
rips_complex = gudhi.RipsComplex(points=my_data)
simplex_tree = rips_complex.create_simplex_tree(max_dimension=2)
result_str = 'Rips complex is of dimension ' + repr(simplex_tree.dimension()) + ' - ' + \
repr(simplex_tree.num_simplices()) + ' simplices - ' + \
repr(simplex_tree.num_vertices()) + ' vertices'
print(result_str)
BarCodes_RipsAll = simplex_tree.persistence()
BarCodes_Rips1 = list(filter(lambda BettiNum: BettiNum[0] == 1, BarCodes_RipsAll))
gudhi.plot_persistence_barcode(BarCodes_Rips1)
gudhi.plot_persistence_diagram(BarCodes_Rips1)
from gudhi import representations  # persistence entropy lives in gudhi.representations
entropy = representations.Entropy(normalized=True)
print("Entropy for Dim 1 is {}".format(entropy(np.array([j for i, j in BarCodes_Rips1]))))
max_filtration_value = np.array(list(simplex_tree.get_filtration()))[-1, 1]
pointss = [[1, 1], [7, 0], [4, 6], [9, 6], [0, 14], [2, 19], [9, 17]]
#for i in (gudhi.RipsComplex(points=pointss).create_simplex_tree().get_skeleton(2)):
# print(i[0])
simplex_tree = gudhi.RipsComplex(points=pointss, max_edge_length=6.0).create_simplex_tree()
#print(simplex_tree.persistence())
#list(simplex_tree.get_simplices())
def genDiagWithFilt(points, length):
rc = gudhi.RipsComplex(points=points, max_edge_length=length)
st = rc.create_simplex_tree(max_dimension=2)
BarCodes_RipsAll = st.persistence()
BarCodes_Rips1 = list(
filter(lambda BettiNum: BettiNum[0] == 1, BarCodes_RipsAll))
max_filtration_value = np.array(list(st.get_filtration()))[-1, 1]
# We are only going to plot the triangles
triangles = np.array([s[0] for s in st.get_skeleton(2) if len(s[0]) == 3])
return max_filtration_value, triangles, BarCodes_Rips1
from IPython.display import display
import ipywidgets as widgets
from ipywidgets import interact, interact_manual
@interact
def blah(length=(max_filtration_value/10,max_filtration_value,max_filtration_value/10)):
max_filtration_value, triangles, BarCodes_Rips1 = genDiagWithFilt(huh, length=length)
fig2, ax2 = plt.subplots()
ax2.set_aspect('equal')
ax2.triplot(u1, v1, triangles, 'go-', lw=1.0,
alpha=0.5)
ax2.set_title('triplot of user-specified triangulation, filtration: {}'.format(max_filtration_value))
ax2.set_xlabel('Longitude (degrees)')
ax2.set_ylabel('Latitude (degrees)')
plt.show()
gudhi.plot_persistence_barcode(BarCodes_Rips1)
fig2, ax2 = plt.subplots()
patches = []
hey = [i[0] for i in st.get_skeleton(2)]
kalel = [huh[j] for j in hey]
for x1, y1 in huh:
circle = Circle((x1, y1), max_filtration_value, alpha=0.1)
patches.append(circle)
for kkk in kalel:
if len(kkk) == 2:
path_data = [(Path.MOVETO, kkk[0]), (Path.LINETO, kkk[1]),]
codes, verts = zip(*path_data)
path = Path(verts, codes)
patch = PathPatch(path, edgecolor='black', alpha=0.7)
ax2.add_patch(patch)
if len(kkk) > 2:
polygon = Polygon(kkk, edgecolor='black', alpha=0.7)
patches.append(polygon)
p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.1)
colors = 100*np.random.rand(len(patches))
p.set_array(np.array(colors))
plt.ylim((-0.1, 1.1))
plt.xlim((-0.1, 1.1))
ax2.add_collection(p)
#plt.plot(u1,v1,'.g')
plt.show()
import numpy as np
import gudhi
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import numpy as np
points = huh
rc = gudhi.RipsComplex(points=points, max_edge_length=0.1)
st = rc.create_simplex_tree(max_dimension=2)
BarCodes_RipsAll = st.persistence()
BarCodes_Rips1 = list(filter(lambda BettiNum: BettiNum[0] == 1, BarCodes_RipsAll))
max_filtration_value = np.array(list(st.get_filtration()))[-1,1]
# We are only going to plot the triangles
triangles = np.array([s[0] for s in st.get_skeleton(2) if len(s[0])==3])
fig21, ax21 = plt.subplots()
ax21.set_aspect('equal')
ax21.triplot(u1, v1, triangles, 'go-', lw=1.0, alpha=0.5, ms=max_filtration_value*100)
ax21.set_title('triplot of user-specified triangulation')
ax21.set_xlabel('Longitude (degrees)')
ax21.set_ylabel('Latitude (degrees)')
plt.show()
print("Max Filtration is {}".format(max_filtration_value))
gudhi.plot_persistence_barcode(BarCodes_Rips1)
```
# Sample authors while controlling for year-of-first-publication
For each editor, this notebook samples a set of authors whose year-of-first-publication matches that of the editor. For the sake of demonstration, we picked a subset of authors to match against so that the code could finish in a reasonable amount of time.
```
import pandas as pd
import numpy as np
from tqdm.notebook import tqdm
editors = pd.read_csv("../data/SampleEditors.csv", sep='\t',
dtype={'issn':str,'NewAuthorId':int,'start_year':int,'end_year':int})
editors.shape
editor_career = pd.read_csv('../data/EditorCareerDiscipline.csv',sep='\t',
dtype={'NewAuthorId':int,'Yfp':int,'Ylp':int,'Parent':int})
editor_career.shape
%%time
# the first year that an author has a known affiliation
first_year = pd.read_csv('../data/figure_1/FirstYearWithKnownAff.csv',sep='\t',
dtype={'NewAuthorId':int,'Year':int})
first_year = first_year.rename(columns={'Year':'FirstYear'})
print(first_year.shape)
%%time
author_career = pd.read_csv('../data/figure_1/AuthorEraDisp.csv',
sep='\t', memory_map=True,
usecols=['NewAuthorId', 'Parent', 'Yfp', 'Ylp'], #
dtype={'NewAuthorId':int, 'Yfp':int, 'Ylp':int, 'Parent':int})
print(author_career.shape)
editors = editors.merge(editor_career, on='NewAuthorId')
print(editors.shape)
def sample(df, year):
dfs = []
for seed in range(50):
np.random.seed(seed)
sampled = df.groupby(['EditorsNewId','issn']).apply(
lambda x: x.filter([np.random.choice(x.index)], axis=0)).reset_index(drop=True)
dfs.append(sampled)
return pd.concat(dfs, ignore_index=True, sort=False)
def match(editors, author_career):
dfs = []
for year in tqdm(range(editors.Yfp.max(), editors.Yfp.min()-1, -1)):
edi = editors[editors.Yfp == year]
aut = author_career[author_career.Yfp == year]
if edi.shape[0] == 0 or aut.shape[0] == 0: continue
matched = edi.rename(columns={'NewAuthorId':'EditorsNewId'}).merge(aut, on='Yfp')
matched = matched[~matched.NewAuthorId.isin(editors.NewAuthorId)]
# make sure that at least one aff was known before
matched = matched.merge(first_year, on='NewAuthorId')
matched = matched[matched.start_year >= matched.FirstYear]
sampled = sample(matched, year)
dfs.append(sampled)
return pd.concat(dfs, ignore_index=True, sort=False)
%%time
matched = match(editors, author_career)
print(matched.shape)
matched.head()
```
# Part 4: Projects and Automated ML Pipeline
This part of the MLRun getting-started tutorial walks you through the steps for working with projects, source control (git), and automating the ML pipeline.
An MLRun project is a container for all your work on a particular activity: all the associated code, functions,
jobs/workflows and artifacts. Projects can be mapped to `git` repositories to enable versioning, collaboration, and CI/CD.
You can create project definitions using the SDK or a yaml file and store those in MLRun DB, file, or archive.
Once the project is loaded you can run jobs/workflows which refer to any project element by name, allowing separation between configuration and code. See the [Projects, Automation & CI/CD](../projects/overview.md) section for details.
Projects contain `workflows` that execute the registered functions in a sequence/graph (DAG), and which can reference project parameters, secrets and artifacts by name. MLRun currently supports two workflow engines, `local` (for simple tasks) and [Kubeflow Pipelines](https://www.kubeflow.org/docs/pipelines/pipelines-quickstart/) (for more complex/advanced tasks). MLRun also supports a real-time workflow engine (see [MLRun serving graphs](../serving/serving-graph.md)).
> **Note**: The Iguazio Data Science Platform has a default (pre-deployed) shared Kubeflow Pipelines service (`pipelines`).
An ML Engineer can gather the different functions created by the Data Engineer and Data Scientist and create this automated pipeline.
The tutorial consists of the following steps:
1. [Setting up Your Project](#gs-tutorial-4-step-setting-up-project)
2. [Updating Project and Function Definitions](#gs-tutorial-4-step-import-functions)
3. [Defining and Saving a Pipeline Workflow](#gs-tutorial-4-step-pipeline-workflow-define-n-save)
4. [Registering the Workflow](#gs-tutorial-4-step-register-workflow)
5. [Running A Pipeline](#gs-tutorial-4-step-run-pipeline)
6. [Viewing the Pipeline on the Dashboard (UI)](#gs-tutorial-4-step-ui-pipeline-view)
7. [Invoking the Model](#gs-tutorial-4-step-invoke-model)
By the end of this tutorial you'll learn how to:
- Create an operational pipeline using previously defined functions.
- Run the pipeline and track the pipeline results.
<a id="gs-tutorial-4-prerequisites"></a>
## Prerequisites
The following steps are a continuation of the previous parts of this getting-started tutorial and rely on the generated outputs.
Therefore, make sure to first run parts [1](01-mlrun-basics.ipynb)—[3](03-model-serving.ipynb) of the tutorial.
<a id="gs-tutorial-4-step-setting-up-project"></a>
## Step 1: Setting Up Your Project
To run a pipeline, you first need to create a Python project object and import the required functions for its execution.
Create a project by using one of:
- the `new_project` MLRun method
- the `get_or_create_project` method: loads a project from the MLRun DB or the archive/context if it exists, or creates a new project if it doesn't exist.
Both methods have the following parameters:
- **`name`** (required) — the project name.
- **`context`** — the path to a local project directory (the project's context directory).
The project directory contains a project-configuration file (default: **project.yaml**) that defines the project, and additional generated Python code.
The project file is created when you save your project (using the `save` MLRun project method or when saving your first function within the project).
- **`init_git`** — set to `True` to perform Git initialization of the project directory (`context`) in case it's not initialized.
> **Note:** It's customary to store project code and definitions in a Git repository.
The following code gets or creates a user project named "getting-started-<username>".
> **Note:** Platform projects are currently shared among all users of the parent tenant, to facilitate collaboration. Therefore:
>
> - Set `user_project` to `True` if you want to create a project unique to your user.
> You can easily change the default project name for this tutorial by changing the definition of the `project_name_base` variable in the following code.
> - Don't include in your project proprietary information that you don't want to expose to other users.
> Note that while projects are a useful tool, you can easily develop and run code in the platform without using projects.
```
import mlrun
# Set the base project name
project_name_base = 'getting-started'
# Initialize the MLRun project object
project = mlrun.get_or_create_project(project_name_base, context="./", user_project=True, init_git=True)
print(f'Project name: {project.metadata.name}')
```
<a id="gs-tutorial-4-step-import-functions"></a>
## Step 2: Updating Project and Function Definitions
You must save the definitions for the functions used in the project so that you can automatically convert code to functions, import external functions when you load new versions of MLRun code, or run automated CI/CD workflows. In addition, you might want to set other project attributes such as global parameters, secrets, and data.
The code can be stored in Python files, notebooks, external repositories, packaged containers, etc. Use the `project.set_function()` method to register the code in the project. The definitions are saved to the project object as well as in a YAML file in the root of the project.
Functions can also be imported from MLRun marketplace (using the `hub://` schema).
This tutorial uses the functions:
- `prep-data` — the first function, which ingests the Iris data set (in Notebook 01)
- `describe` — generates statistics on the data set (from the marketplace)
- `train-iris` — the model-training function (in Notebook 02)
- `test-classifier` — the model-testing function (from the marketplace)
- `mlrun-model` — the model-serving function (in Notebook 03)
> Note: `set_function` uses the `code_to_function` and `import_function` methods under the hood (used in the previous notebooks), but in addition it saves the function configurations in the project spec for use in automated workflows and CI/CD.
Add the function definitions to the project along with parameters and data artifacts, and save the project.
<a id="gs-tutorial-4-view-project-functions"></a>
```
project.set_function('01-mlrun-basics.ipynb', 'prep-data', kind='job', image='mlrun/mlrun')
project.set_function('02-model-training.ipynb', 'train', kind='job', image='mlrun/mlrun', handler='train_iris')
project.set_function('hub://describe', 'describe')
project.set_function('hub://test_classifier', 'test')
project.set_function('hub://v2_model_server', 'serving')
# set project level parameters and save
project.spec.params = {'label_column': 'label'}
project.save()
```
<br>When you save the project it stores the project definitions in the `project.yaml`. This means that you can load the project from the source control (GIT) and run it with a single command or API call.
The project YAML for this project can be printed using:
```
print(project.to_yaml())
```
### Saving and Loading Projects from GIT
After you save the project and its elements (functions, workflows, artifacts, etc.) you can commit all the changes to a GIT repository. Use the standard GIT tools or use the MLRun `project` methods such as `pull`, `push`, `remote`, which call the Git API for you.
Projects can then be loaded from Git using the MLRun `load_project` method, for example:
project = mlrun.load_project("./myproj", "git://github.com/mlrun/project-demo.git", name=project_name)
or using MLRun CLI:
mlrun project -n myproj -u "git://github.com/mlrun/project-demo.git" ./myproj
Read the [Projects, Automation & CI/CD](../projects/overview.md) section for more details.
<a id="gs-tutorial-4-kubeflow-pipelines"></a>
### Using Kubeflow Pipelines
You're now ready to create a full ML pipeline.
This is done by using [Kubeflow Pipelines](https://www.kubeflow.org/docs/pipelines/overview/pipelines-overview/) —
an open-source framework for building and deploying portable, scalable machine-learning workflows based on Docker containers.
MLRun leverages this framework to take your existing code and deploy it as steps in the pipeline.
> **Note:** When using the Iguazio Data Science Platform, Kubeflow Pipelines is available as a default (pre-deployed) shared platform service.
<a id="gs-tutorial-4-step-pipeline-workflow-define-n-save"></a>
## Step 3: Defining and Saving a Pipeline Workflow
A pipeline is created by running an MLRun **"workflow"**.
The following code defines a workflow and writes it to a file in your local directory, with the file name **workflow.py**.
The workflow describes a directed acyclic graph (DAG) for execution using Kubeflow Pipelines, and depicts the connections between the functions and the data as part of an end-to-end pipeline.
The workflow file has two parts: initialization of the function objects, and definition of a pipeline DSL (domain-specific language) for connecting the function inputs and outputs.
Examine the code to see how function objects are initialized and used (by name) within the workflow.
The defined pipeline includes the following steps:
- Ingest the Iris flower data set (`ingest`).
- Train the model (`train`).
- Test the model with its test data set.
- Deploy the model as a real-time serverless function (`deploy`).
> **Note**: A pipeline can also include continuous build integration and deployment (CI/CD) steps, such as building container images and deploying models.
```
%%writefile './workflow.py'
from kfp import dsl
from mlrun import run_function, deploy_function
DATASET = 'cleaned_data'
MODEL = 'iris'
LABELS = "label"
# Create a Kubeflow Pipelines pipeline
@dsl.pipeline(
name="Getting-started-tutorial",
description="This tutorial is designed to demonstrate some of the main "
"capabilities of the Iguazio Data Science Platform.\n"
"The tutorial uses the Iris flower data set."
)
def kfpipeline(source_url):
# Ingest the data set
ingest = run_function(
'prep-data',
handler='prep_data',
inputs={'source_url': source_url},
params={'label_column': LABELS},
outputs=[DATASET])
# Train a model
train = run_function(
"train",
params={"label_column": LABELS},
inputs={"dataset": ingest.outputs[DATASET]},
outputs=['my_model', 'test_set'])
# Test and visualize the model
test = run_function(
"test",
params={"label_column": LABELS},
inputs={"models_path": train.outputs['my_model'],
"test_set": train.outputs['test_set']})
# Deploy the model as a serverless function
deploy = deploy_function("serving", models={f"{MODEL}_v1": train.outputs['my_model']})
```
<a id="gs-tutorial-4-step-register-workflow"></a>
## Step 4: Registering the Workflow
Use the `set_workflow` MLRun project method to register your workflow with MLRun.
The following code sets the `name` parameter to the selected workflow name ("main") and the `code` parameter to the name of the workflow file that is found in your project directory (**workflow.py**).
```
# Register the workflow file as "main"
project.set_workflow('main', 'workflow.py')
```
<a id="gs-tutorial-4-step-run-pipeline"></a>
## Step 5: Running A Pipeline
First run the following code to save your project:
```
project.save()
```
Use the `run` MLRun project method to execute your workflow pipeline with Kubeflow Pipelines.
The tutorial code sets the following method parameters (for the full parameter list, see the [MLRun documentation](../api/mlrun.run.html#mlrun.run.run_pipeline) or the embedded help):
- **`name`** — the workflow name (in this case, "main" — see the previous step).
- **`arguments`** — A dictionary of Kubeflow Pipelines arguments (parameters).
The tutorial code passes the data-set URL as a `source_url` argument; you can edit the code to add more arguments.
- **`artifact_path`** — a path or URL that identifies a location for storing the workflow artifacts.
You can use `{{workflow.uid}}` in the path to signify the ID of the current workflow run iteration.
The tutorial code sets the artifacts path to a **<workflow ID>** directory (`{{workflow.uid}}`) in a **pipeline** directory under the project's container (**/v3io/projects/<project name>/pipeline/<workflow ID>**).
- **`dirty`** — set to `True` to allow running the workflow also when the project's Git repository is dirty (i.e., contains uncommitted changes).
(When the notebook that contains the execution code is in the same Git directory as the executed workflow, the directory will always be dirty during the execution.)
- **`watch`** — set to `True` to wait for the pipeline to complete and output the execution graph as it updates.
The `run` method returns the ID of the executed workflow, which the code stores in a `run_id` variable.
You can use this ID to track the progress of your workflow, as demonstrated in the following sections.
> **Note**: You can also run the workflow from a command-line shell by using the `mlrun` CLI.
> The following CLI command defines a similar execution logic as that of the `run` call in the tutorial:
> ```
> mlrun project /User/getting-started-tutorial/conf -r main -p "$V3IO_HOME_URL/getting-started-tutorial/pipeline/{{workflow.uid}}/"
> ```
```
source_url = mlrun.get_sample_path("data/iris/iris.data.raw.csv")
import os
pipeline_path = mlrun.mlconf.artifact_path
run_id = project.run(
'main',
arguments={'source_url' : source_url},
artifact_path=os.path.join(pipeline_path, "pipeline", '{{workflow.uid}}'),
dirty=True,
watch=True)
```
<a id="gs-tutorial-4-step-ui-pipeline-view"></a>
## Step 6: Viewing the Pipeline on the Dashboard (UI)
In the **Projects > Jobs and Workflows > Monitor Workflows** tab, press the workflow name to view a graph of the workflow. Press any step to open another pane with full details of the step: either the job's overview, inputs, artifacts, etc.; or the deploy / build function's overview, code, and log.
After the pipeline execution completes, you should be able to view the pipeline and see its functions:
- `prep-data`
- `train`
- `test`
- `deploy-serving`
The graph is refreshed while the pipeline is running.
<img src="../_static/images/job_pipeline.png" alt="pipeline" width="700"/>
<a id="gs-tutorial-4-step-invoke-model"></a>
## Step 7: Invoking the Model
Now that your model is deployed using the pipeline, you can invoke it as usual:
```
serving_func = project.func('serving')
my_data = {'inputs': [[5.1, 3.5, 1.4, 0.2],[7.7, 3.8, 6.7, 2.2]]}
serving_func.invoke('/v2/models/iris_v1/infer', my_data)
```
You can also make an HTTP call directly:
```
import requests
import json
predict_url = f'http://{serving_func.status.address}/v2/models/iris_v1/predict'
resp = requests.put(predict_url, json=json.dumps(my_data))
print(resp.json())
```
<a id="gs-tutorial-4-done"></a>
## Done!
Congratulations! You've completed the getting started tutorial.
You might also want to explore the following demos:
- For an example of distributed training of an image-classification pipeline using TensorFlow (versions 1 or 2), Keras, and Horovod, see the [**image-classification with distributed training demo**](https://github.com/mlrun/demos/tree/release/v0.6.x-latest/image-classification-with-distributed-training).
- To learn more about deploying live endpoints and concept drift, see the [**network-operations (NetOps) demo**](https://github.com/mlrun/demos/tree/release/v0.6.x-latest/network-operations).
- To learn how to deploy your model with streaming information, see the [**model-deployment pipeline demo**](https://github.com/mlrun/demos/tree/release/v0.6.x-latest/model-deployment-pipeline).
For additional information and guidelines, see the MLRun [**How-To Guides and Demos**](../howto/index.md).
# SGT ($\beta \neq 0 $) calculation for fluids mixtures with SAFT-$\gamma$-Mie
In this notebook, the SGT ($\beta \neq 0 $) calculations for fluid mixtures with ```saftgammamie``` EoS are illustrated.
When using $\beta \neq 0 $, the cross-influence parameters are computed as $c_{ij} = (1-\beta_{ij})\sqrt{c_{ii}c_{jj}}$.
First, all the needed modules are imported.
- numpy: numerical interface and work with arrays
- matplotlib: to plot results
- sgtpy: package with SAFT-$\gamma$-Mie EoS and SGT functions.
```
import numpy as np
import matplotlib.pyplot as plt
from sgtpy import component, mixture, saftgammamie
```
Now, pure components are configured and created with the ```component``` function. To use SGT it is required to set the influence parameter (```cii```) for the pure fluids. Then, a mixture is created with them using the ```mixture``` function or by adding (`+`) pure components. The interaction parameters are set up with the ```mixture.saftgammamie``` method. Finally, the ```eos``` object is created with the ```saftgammamie``` function.
The ```eos``` object includes all the necessary methods to compute phase equilibria and interfacial properties using SAFT-$\gamma$-Mie EoS.
For this notebook, the calculations are exemplified for the mixture of ethanol + water and the mixture of hexane + ethanol.
```
ethanol = component(GC={'CH3':1, 'CH2OH':1}, cii=4.1388468864244875e-20)
water = component(GC={'H2O':1}, cii=1.6033244745871344e-20)
# creating mixture with mixture class function
mix1 = mixture(ethanol, water)
# or creating mixture by adding pure components
mix1 = ethanol + water
mix1.saftgammamie()
eos1 = saftgammamie(mix1)
```
Now, it is required to compute the phase equilibria (VLE, LLE or VLLE). See Notebooks 5 to 10 for more information about phase equilibria computation.
In this example, the bubble point of the mixture of ethanol and water at $x_1=0.2$ and 298.15K is computed.
```
from sgtpy.equilibrium import bubblePy
T = 298.15 # K
# liquid composition
x = np.array([0.2, 0.8])
# initial guesses
P0 = 1e4 # Pa
y0 = np.array([0.8, 0.2])
sol = bubblePy(y0, P0, x, T, eos1, full_output=True)
y, P = sol.Y, sol.P
vl, vv = sol.v1, sol.v2
rhol = x/vl
rhov = y/vv
```
In order to set the $\beta$ correction, it is necessary to create a symmetric matrix of shape (`nc, nc`) and then pass it to the ```eos.beta_sgt``` method of the eos. The $\beta_{ij}$ correction is computed as follows:
$$ \beta_{ij} = \beta_{ij,0} + \beta_{ij,1} \cdot T + \beta_{ij,2} \cdot T^2 + \frac{\beta_{ij,3}}{T} $$
Alternatively, you can modify just the pair $ij$ using the `eos.set_betaijsgt` method. In both methods, only $\beta_{ij,0}$ is required by default. The temperature-dependent parameters are optional; if they are not provided, they are assumed to be zero.
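As a quick numerical check of these two formulas, the sketch below evaluates $\beta_{ij}(T)$ and the resulting cross-influence parameter $c_{ij} = (1-\beta_{ij})\sqrt{c_{ii}c_{jj}}$. The $\beta$ coefficients are made-up illustrative values, while the `cii` values are the ones used above for ethanol and water.
```
# Illustrative only: hypothetical beta coefficients, cii values taken from above.
import numpy as np

beta0, beta1, beta2, beta3 = 0.2, 0.0, 0.0, 0.0   # made-up coefficients
T = 298.15                                         # K
cii_ethanol = 4.1388468864244875e-20
cii_water = 1.6033244745871344e-20

beta_ij = beta0 + beta1 * T + beta2 * T**2 + beta3 / T
c_ij = (1.0 - beta_ij) * np.sqrt(cii_ethanol * cii_water)
print(beta_ij, c_ij)
```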
The function ```sgt_mix_beta0``` is used to study the interfacial behavior with SGT and $\beta=0$. As shown in Notebook 12, the Liang method can compute the density paths correctly.
```
from sgtpy.sgt import sgt_mix_beta0
bij = 0.0
beta = np.array([[0, bij], [bij, 0]])
eos1.beta_sgt(beta)
# or by setting the beta correction by pair i=0 (ethanol), j=1 (water)
eos1.set_betaijsgt(i=0, j=1, beta0=bij)
soll = sgt_mix_beta0(rhov, rhol, T, P, eos1, n=300, method='liang', full_output=True)
```
When using $\beta \neq 0$ two options are available to solve SGT.
- ```sgt_mix```: solves SGT system as a boundary value problem using orthogonal collocation (increasing interfacial length).
- ```msgt_mix```: solves a stabilized SGT system as a boundary value problem using orthogonal collocation (fixed interfacial length).
```
from sgtpy.sgt import sgt_mix
bij = 0.2
beta = np.array([[0, bij], [bij, 0]])
eos1.beta_sgt(beta)
# or by setting the beta correction by pair i=0 (ethanol), j=1 (water)
eos1.set_betaijsgt(i=0, j=1, beta0=bij)
solbeta = sgt_mix(rhov, rhol, T, P, eos1, full_output=True)
from sgtpy.sgt import msgt_mix
bij = 0.5
beta = np.array([[0, bij], [bij, 0]])
eos1.beta_sgt(beta)
# or by setting the beta correction by pair i=0 (ethanol), j=1 (water)
eos1.set_betaijsgt(i=0, j=1, beta0=bij)
msolbeta = msgt_mix(rhov, rhol, T, P, eos1, rho0 = solbeta, full_output=True)
```
The interfacial tension results are shown below.
```
print('Liang path Function: ', soll.tension, 'mN/m')
print('SGT BVP: ', solbeta.tension, 'mN/m')
print('Modified SGT BVP: ', msolbeta.tension, 'mN/m')
```
The density profiles are plotted below. It can be seen that using a $\beta$ correction smooths the density profiles.
```
rhobeta = solbeta.rho / 1000 # kmol/m3
mrhobeta = msolbeta.rho / 1000 # kmol/m3
rholiang = soll.rho / 1000 # kmol/m3
alphas = soll.alphas
path = soll.path
fig = plt.figure(figsize = (10, 4))
fig.subplots_adjust( wspace=0.3)
ax1 = fig.add_subplot(121)
ax1.plot(rholiang[0], rholiang[1], color = 'red')
ax1.plot(rhobeta[0], rhobeta[1], 's', color = 'blue')
ax1.plot(mrhobeta[0], mrhobeta[1], '--', color = 'black')
ax1.plot(rhov[0]/1000, rhov[1]/1000, 'o', color = 'k')
ax1.plot(rhol[0]/1000, rhol[1]/1000, 'o', color = 'k')
ax1.set_xlabel(r'$\rho_1$ / kmol m$^{-3}$')
ax1.set_ylabel(r'$\rho_2$ / kmol m$^{-3}$')
ax2 = fig.add_subplot(122)
ax2.plot(path/1000, alphas)
ax2.axhline(y = 0, linestyle = '--',color = 'r')
ax2.set_ylabel(r'$\alpha$')
ax2.set_xlabel(r'path function / 1000')
```
## Hexane - Ethanol
The interfacial behavior of this mixture is well known to be difficult to study, as it displays multiple stationary points in the inhomogeneous zone.
```
hexane = component(GC={'CH3':2, 'CH2':4}, cii=3.288396028761707e-19)
mix2 = mixture(hexane, ethanol)
mix2.saftgammamie()
eos2 = saftgammamie(mix2)
```
In this example, the bubble point of the mixture at $x_1=0.3$ and 298.15K is computed with the ```bubblePy``` function.
```
T = 298.15 # K
x = np.array([0.3, 0.7])
y0 = 1.*x
P0 = 8000. # Pa
sol = bubblePy(y0, P0, x, T, eos2, full_output=True)
y, P = sol.Y, sol.P
vl, vv = sol.v1, sol.v2
rhox = x/vl
rhoy = y/vv
sol
```
The function ```sgt_mix_beta0``` is used to study the interfacial behavior with SGT and $\beta=0$. As shown in Notebook 12, the Liang method can compute the density paths correctly.
```
soll2 = sgt_mix_beta0(rhoy, rhox, T, P, eos2, n=300, method='liang', full_output=True)
```
SGT is solved with $\beta = 0.2$ and $\beta = 0.5$ using the ```sgt_mix``` and ```msgt_mix``` function.
```
bij = 0.2
beta = np.array([[0, bij], [bij, 0]])
eos2.beta_sgt(beta)
# or by setting the beta correction by pair i=0 (hexane), j=1 (ethanol)
eos2.set_betaijsgt(i=0, j=1, beta0=bij)
solbeta = sgt_mix(rhoy, rhox, T, P, eos2, full_output=True)
bij = 0.5
beta = np.array([[0, bij], [bij, 0]])
eos2.beta_sgt(beta)
# or by setting the beta correction by pair i=0 (hexane), j=1 (ethanol)
eos2.set_betaijsgt(i=0, j=1, beta0=bij)
msolbeta = msgt_mix(rhoy, rhox, T, P, eos2, rho0=solbeta, full_output=True)
```
The interfacial tension results are shown below.
```
print('Liang path Function: ', soll2.tension, 'mN/m')
print('SGT BVP: ', solbeta.tension, 'mN/m')
print('Modified SGT BVP: ', msolbeta.tension, 'mN/m')
```
The density profiles are plotted below. It can be seen that using a $\beta$ correction smooths the density profiles and reduces the number of stationary points.
```
rhobeta = solbeta.rho / 1000 # kmol/m3
mrhobeta = msolbeta.rho / 1000 # kmol/m3
rholiang = soll2.rho / 1000 # kmol/m3
alphas = soll2.alphas
path = soll2.path
fig = plt.figure(figsize = (10, 4))
fig.subplots_adjust( wspace=0.3)
ax1 = fig.add_subplot(121)
ax1.plot(rholiang[0], rholiang[1], color = 'red')
ax1.plot(rhobeta[0], rhobeta[1], 's', color = 'blue')
ax1.plot(mrhobeta[0], mrhobeta[1], '--', color = 'black')
ax1.plot(rhoy[0]/1000, rhoy[1]/1000, 'o', color = 'k')
ax1.plot(rhox[0]/1000, rhox[1]/1000, 'o', color = 'k')
ax1.set_xlabel(r'$\rho_1$ / kmol m$^{-3}$')
ax1.set_ylabel(r'$\rho_2$ / kmol m$^{-3}$')
ax2 = fig.add_subplot(122)
ax2.plot(path/1000, alphas)
ax2.axhline(y = 0, linestyle = '--',color = 'r')
ax2.set_ylabel(r'$\alpha$')
ax2.set_xlabel(r'path function / 1000')
ax1.tick_params(direction='in')
ax2.tick_params(direction='in')
# fig.savefig('sgt_mix.pdf')
```
For further information on any of these functions, just run: ```function?```
# Water Risk Classification: Data Wrangling
## Setup
```
import numpy as np
import pandas as pd
import geopandas as gpd
import requests, zipfile, io, os, tarfile
import rasterio as rio
from rasterio import plot
from rasterstats import zonal_stats
import rasterio.warp, rasterio.shutil
import rioxarray # for the extension to load
import xarray
import missingno as msno
from shapely.geometry import Polygon
from matplotlib import pyplot
import folium
from matplotlib import pyplot as plt
%matplotlib inline
```
## Download Data
**ONLY RUN IF YOU DON'T HAVE THE DATA FOLDER YET. IT WILL TAKE A LONG TIME.**
Download and unzip all the datasets.
```
# create data folder
os.mkdir('./data')
```
### Aqueduct Database
```
# download and extract
# DON'T RUN IF DATA IS IN ./DATA FOLDER
url_aq = 'https://wri-projects.s3.amazonaws.com/Aqueduct30/finalData/Y2019M07D12_Aqueduct30_V01.zip'
r = requests.get(url_aq) # download zipped directory
z = zipfile.ZipFile(io.BytesIO(r.content)) # create zipfile object
z.extractall(path='data') # unzip into data subdirectory
```
### Global Human Settlements Layer
```
# download and extract
# DON'T RUN IF DATA IS IN ./DATA FOLDER
url_ghs = 'http://cidportal.jrc.ec.europa.eu/ftp/jrc-opendata/GHSL/GHS_POP_MT_GLOBE_R2019A/GHS_POP_E2015_GLOBE_R2019A_54009_1K/V1-0/GHS_POP_E2015_GLOBE_R2019A_54009_1K_V1_0.zip'
r = requests.get(url_ghs) # download zipped directory
z = zipfile.ZipFile(io.BytesIO(r.content)) # create zipfile object
z.extractall(path='data/ghs') # unzip into data subdirectory
```
### Infant Mortality
The SEDAC infant mortality data requires user authentication, so we did not download it programmatically. The data is available for download [here](https://sedac.ciesin.columbia.edu/downloads/data/povmap/povmap-global-subnational-infant-mortality-rates-v2/povmap-global-subnational-infant-mortality-rates-v2-geotiff.zip) and is unzipped to `./data/sedac/`.
```
# This download requires user authentication and isn't currently working
# DON'T RUN IF DATA IS IN ./DATA FOLDER
url_inf_mort = 'https://sedac.ciesin.columbia.edu/downloads/data/povmap/povmap-global-subnational-infant-mortality-rates-v2/povmap-global-subnational-infant-mortality-rates-v2-geotiff.zip'
r = requests.get(url_inf_mort) # download zipped directory
z = zipfile.ZipFile("./data/povmap-global-subnational-infant-mortality-rates-v2-geotiff.zip") # create zipfile object
z.extractall(path='./data/sedac') # unzip into data subdirectory
z.close()
```
### Nighttime Light
```
# download and extract
# DON'T RUN IF DATA IS IN ./DATA FOLDER
url_light = 'https://ngdc.noaa.gov/eog/data/web_data/v4avg_lights_x_pct/F182013.v4c.avg_lights_x_pct.tgz'
r = requests.get(url_light, allow_redirects=True)
open('./data/F182013.v4c.avg_lights_x_pct.tgz', 'wb').write(r.content)
temp_path = './data/F182013.v4c.avg_lights_x_pct.tgz'
z = tarfile.open(temp_path, mode='r:gz') # create zipfile object
z.extractall(path='data/light') # unzip into data subdirectory
z.close()
os.remove(temp_path)
```
## Load Data
WRI Aqueduct metadata with column name explanations is available [here](https://github.com/wri/aqueduct30_data_download/blob/master/metadata.md).
```
path_aq = './data/Y2019M07D12_Aqueduct30_V01/baseline/annual/y2019m07d11_aqueduct30_annual_v01.gpkg'
aq = gpd.read_file(path_aq, layer='y2019m07d11_aqueduct30_annual_v01')
# Select just the columns that will be used for the analysis
data_cols = ['string_id', 'geometry','bws_raw', 'bwd_raw', 'iav_raw', 'sev_raw', 'gtd_raw', 'rfr_raw', 'cfr_raw', 'drr_raw', 'ucw_raw', 'udw_raw', 'usa_raw']
data = aq.loc[aq['gid_0'] != 'GRL'].copy()
data = data.loc[data['gid_0'].notnull()]
data = data[data_cols].copy()
data.shape
path_ghs = './data/ghs/GHS_POP_E2015_GLOBE_R2019A_54009_1K_V1_0.tif'
ghs_meta = None
ghs_t = None
with rio.open(path_ghs) as tif:
ghs_meta = tif.meta
    ghs_t = tif.transform
oviews = tif.overviews(1) # list of overviews from biggest to smallest
oview = oviews[-1]
thumbnail = tif.read(1, out_shape=(1, int(tif.height // oview), int(tif.width // oview)))
ghs_meta['width'], ghs_meta['height'], ghs_meta['width']*ghs_meta['height']
path_inf_mort = './data/sedac/povmap_global_subnational_infant_mortality_rates_v2.tif'
inf_mort = rio.open(path_inf_mort)  # keep the dataset open; its metadata and CRS are used below
path_light = './data/light/F182013.v4c.avg_lights_x_pct.tif'
light = rio.open(path_light)  # keep the dataset open; its metadata and CRS are used below
```
## Explore Data
```
with rio.open(path_ghs) as im:
rio.plot.show_hist(im, bins=50, lw=0.0, stacked=False, alpha=1, histtype='stepfilled', title="Population")
ghs_meta
# plot of infant mortality
with rio.open(path_inf_mort) as im:
rio.plot.show_hist(im, bins=50, lw=0.0, stacked=False, alpha=1, histtype='stepfilled', title="Infant Mortality")
inf_mort.meta
# plot of nighttime light
with rio.open(path_light) as im:
    rio.plot.show_hist(im, bins=50, lw=0.0, stacked=False, alpha=1, histtype='stepfilled', title="Nighttime Light")
light.meta
# check crs
aq.crs == ghs_meta['crs'], aq.crs == inf_mort.crs, aq.crs == light.crs
```
## Join Data
```
with rio.open(path_inf_mort) as inf_mort:
inf_mort_array = inf_mort.read(1)
# Reclassify from two values for no data to just one
inf_mort_array[inf_mort_array<0] = -999
# Need to set nodata value explicitly
mortality_stats = zonal_stats(data, inf_mort_array,
affine = inf_mort.transform,
stats = ['mean', 'median'],
nodata = -999)
data['mean_infant_mort'] = [s['mean'] for s in mortality_stats]
#aq_join['median_infant_mort'] = [t['median'] for t in mortality_stats]
# aq_join.loc[np.isinf(aq_join['mean_infant_mort']) == True, 'mean_infant_mort'] = float('NaN')
# aq_join.loc[np.isinf(aq_join['median_infant_mort']) == True, 'median_infant_mort'] = float('NaN')
# aq_join.loc[np.isinf(aq_join['sum_infant_mort']) == True, 'sum_infant_mort'] = float('NaN')
with rio.open(path_light) as light:
light_array = light.read(1)
light_stats = zonal_stats(data, light_array,
affine = light.transform,
stats = ['mean'],
nodata = -999)
data['mean_light'] = [s['mean'] for s in light_stats]
data.loc[np.isnan(data['mean_light']) == True, 'mean_light'] = 0
with rio.open(path_ghs) as ghs:
ghs_array = ghs.read(1)
ghs_stats = zonal_stats(data.to_crs(ghs.crs.data), ghs_array,
affine = ghs.transform,
stats = ['sum'],
nodata = -200.0)
data['sum_pop'] = [u['sum'] for u in ghs_stats]
data['sum_pop'].isna().sum()
s = data.loc[data['sum_pop'] < 0]
t = data.loc[np.isnan(data['sum_pop'])]
s.shape, t.shape
```
## Engineer Features
```
# Calculate the area of each Aqueduct polygon
# project to equal area projection to calculate densities
# NSIDC EASE-Grid 2.0 Global Equal area to calculate densities, https://epsg.io/6933
data['area_sqkm'] = data.to_crs({'init':'epsg:6933'}).area/10000000
# Calculate population density
data['pop_density'] = data['sum_pop']/data['area_sqkm']
# reclassify NAs as zero
data.loc[np.isnan(data['pop_density']) == True, 'pop_density'] = 0
data.sum_pop.min()
msno.bar(data)
```
## Save GeoJSON for Modeling
```
export_cols = ['string_id', 'geometry', 'bws_raw', 'bwd_raw', 'iav_raw', 'sev_raw', 'gtd_raw', 'rfr_raw', 'cfr_raw',
'drr_raw', 'ucw_raw', 'udw_raw', 'usa_raw',
'mean_infant_mort', 'mean_light', 'pop_density'
]
# select columns to export and project back to WGS84
data_export = data[export_cols].to_crs(aq.crs)
# Save data
data_export.to_file("./data/data.geojson", driver='GeoJSON')
```
$\newcommand{\To}{\Rightarrow}$
```
import os
os.chdir('..')
from kernel.type import TFun, BoolType, NatType
from kernel import term
from kernel.term import Term, Var, Const, Lambda, Abs, Bound, Nat, Or, Eq, Forall, Exists, Implies, And
from data import nat
from logic import basic
from syntax.settings import settings
basic.load_theory('nat')
```
## Lambda calculus
In the previous section, we discussed how to construct terms consisting of variables, constants, and function application. The relevant constructors are `Var`, `Const`, and `Comb`. In this section, we discuss construction of *lambda terms*, which completes the representation of terms in *lambda calculus*.
The motivation is as follows: we have already noted that terms can have function type. For example, in the previous section, we can declare a variable $f$ of type $nat \To nat$ by `Var("f", TFun(NatType, NatType))`. We have also encountered constants that have function type, for example the addition operator. However, we have not said anything about how to construct new examples of such functions.
In principle, any well-defined rule for computing the output from the input should be representable as a function. For example, there should be a function that takes as input a natural number $n$, and outputs $n+2$. In higher-order logic (also known as *simply-typed lambda calculus*), we can represent such functions as *lambda terms*. The above function can be written (in mathematical notation) as:
$$ \lambda n. n + 2 $$
Here $n$ (the variable right after $\lambda$) is known as a *bound variable*, in the sense that it is associated to the $\lambda$ sign directly in front of it, and is valid only in the scope of that $\lambda$ sign. It is important to note that *the name of the bound variable does not matter*. The expression $\lambda n. n + 2$ means the same thing as the expression $\lambda m. m + 2$. Both represent functions that add 2 to its input. We say that two terms are *$\alpha$-equivalent* if one can be changed to the other by changing the names of some bound variables.
We can construct a function term using `Lambda`.
```
n = Var('n', NatType)
f = Lambda(n, n + 2)
print(f)
```
Note $\lambda$ is printed in ASCII using `%`. As before, we turn on unicode printing, so the Greek letter $\lambda$ is printed properly.
```
settings.unicode = True
print(f)
```
We can test that the name of bound variable does not matter by constructing $f$ in another way:
```
m = Var('m', NatType)
f2 = Lambda(m, m + 2)
print(f2)
assert f == f2
```
Functions taking several arguments can be constructed using multiple Lambdas. The following constructs a function that takes two natural numbers $x$ and $y$ as input, and returns $x + 2y$.
```
x = Var('x', NatType)
y = Var('y', NatType)
g = Lambda(x, Lambda(y, x + 2 * y))
print(g)
```
This can be written more simply as follows:
```
g2 = Lambda(x, y, x + 2 * y)
print(g2)
```
The types of $f$ and $g$ are as expected (recall `checked_get_type` will perform type-checking on the term, in addition to returning the type of the term).
```
print(f.checked_get_type())
print(g.checked_get_type())
```
`Lambda` can also be used to construct predicates or binary relations.
```
P = Lambda(x, Or(Eq(x, 0), Eq(x, 2)))
print(P)
R = Lambda(x, y, Eq(x, y + 2))
print(R)
```
## $\beta$-conversion
In the previous section, we constructed lambda terms using the `Lambda` constructor. These are supposed to represent functions. What happens when we apply such a function to an argument? Well, initially nothing happens:
```
print(f)
t = f(Nat(3))
print(t)
```
The `Comb` constructor (invoked through the `__call__` method of $f$) simply combines its two arguments, performing no function evaluation. To actually evaluate a function application, we need to use the `beta_conv` method, so named because function evaluation in lambda calculus is called *$\beta$-conversion*.
```
t2 = t.beta_conv()
print(t2)
```
Now, the argument 3 is substituted into the function. More precisely, the function `beta_conv` assumes the input term is in the form `f x`, where `f` is a lambda term, and substitutes `x` for the bound variable of `f`. The addition $3+2$ is still not evaluated: the general rule is that no evaluation is performed unless explicitly called for. We will discuss evaluation of arithmetic on natural numbers in a later section.
Let's see a more complicated example. In the code below, the first call to `beta_conv` fails: the function part of $t_3$ is not itself a lambda term, but a lambda term already applied to 3. To fully evaluate $g$ on the two arguments 3 and 4, we need to apply them one at a time, performing a $\beta$-conversion after each application:
```
print('g: ', g)
t3 = g(Nat(3), Nat(4))
print('t3:', t3)
t4 = t3.beta_conv() # raises TermException
t3 = g(Nat(3)).beta_conv()
print('t3:', t3)
t4 = t3(Nat(4)).beta_conv()
print('t4:', t4)
```
A more convenient method is `beta_norm`, which performs all $\beta$-conversions on subterms:
```
t5 = g(Nat(3),Nat(4)).beta_norm()
print(t5)
```
## Quantifiers in predicate logic
Predicate logic extends propositional logic by adding two quantifiers: forall ($\forall$) and exists ($\exists$). In higher-order logic, both operators are represented as constants of type $('a \To bool) \To bool$. This can be explained as follows, taking the forall quantifier as an example. A forall expression in mathematics has the form
$$ \forall x. P(x) $$
Here $x$ is a bound variable. In (untyped) first-order logic, there are only two types of terms: objects and propositions, and $x$ can only range over objects. The main distinction between higher-order and first-order logic is that in higher-order logic, the bound variable of quantifiers can be of any type, including function types. Hence, we designate the type of the bound variable by the type variable $'a$. Then, the predicate $P$ has type $'a \To bool$. Any forall expression is a function taking a predicate $P$ of type $'a \To bool$ as input, and outputs a boolean value (whether $P$ is true on all of $'a$). Hence, its type must be $('a \To bool) \To bool$.
Forall and exists expressions are constructed as follows.
```
x = Var("x", NatType)
t1 = Forall(x, Implies(x > 2, x > 1))
print('t1:', t1)
t2 = Exists(x, And(x > 2, x < 4))
print('t2:', t2)
```
The types of $t_1$ and $t_2$ are booleans, as expected.
```
print(t1.checked_get_type())
print(t2.checked_get_type())
```
Forall and exists can take more than two arguments as well:
```
print(Forall(x, y, Implies(x < y, x < y + 1)))
print(Exists(x, y, x < y))
```
Forall and exists can alternate in a term. Make sure you understand the difference between the two propositions below. The first statement says for any natural number, there is a greater natural number (which is true). The second says there exists a natural number that is greater than all natural numbers (which is false).
```
print('P1:', Forall(x, Exists(y, x < y)))
print('P2:', Exists(y, Forall(x, x < y)))
```
## de Bruijn indices
When representing terms in higher-order logic, we would like to be able to quickly tell whether two terms are $\alpha$-equivalent. This motivates the use of *de Bruijn indices* (named after the Dutch mathematician Nicolaas Govert de Bruijn). Following this method, the bound variables are (in principle) unnamed, and whenever one needs to refer to a bound variable, one uses a sign $B_i$, where $i$ counts the depth of the location of reference with respect to the lambda sign of that variable. We follow the convention that the counting begins at 0. For example, the function $\lambda n.\, n + 2$ from earlier is represented using de Bruijn indices as:
$$ \lambda\_. B_0 + 2 $$
Here we use an underscore to denote a bound variable that is unnamed. Another example: the expression $\lambda x. \lambda y. x + y$ is represented as $\lambda\_. \lambda\_. B_1 + B_0$ using de Bruijn indices. This is because the location where $x$ occurs is separated from the $\lambda$ sign that binds it (the first $\lambda$ sign) by one $\lambda$ sign in the middle, while the location where $y$ occurs is directly after the $\lambda$ sign that binds it (the second $\lambda$ sign).
The use of de Bruijn indices is revealed by looking at the `repr` of a lambda term:
```
x = Var('x', NatType)
t = Lambda(x, x + 1)
print(repr(t))
```
Here, `Abs` is the constructor for a lambda term. The first argument is the *suggested* name of the bound variable. It is used for printing only (and perhaps as a starting point when names of new variables need to be invented during proof). The second argument is the type of the bound variable, which *is* significant (different types of bound variables give different terms). The third argument is the body of the lambda term. In the body, bound variables are referred to by `Bound(n)`, where $n$ is a natural number.
Let us examine a more complex lambda expression:
```
x = Var('x', NatType)
y = Var('y', NatType)
t = Lambda(x, Lambda(y, x + y))
print(t)
print(repr(t))
```
While we are at it, let us also examine the representation of forall and exists terms:
```
print(repr(Forall(x, x >= 0)))
print(repr(Exists(x, x < 1)))
```
After understanding the de Bruijn representation, we can also create lambda terms directly using the `Abs` and `Bound` constructors. This is seldom necessary, but we show it here to illustrate the concepts:
```
t = Abs('x', NatType, nat.plus(Bound(0), nat.one))
print(t)
assert t == Lambda(x, x + 1)
```
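As a further illustration of the de Bruijn convention, the two-argument function $\lambda x.\, \lambda y.\, x + y$ discussed above should correspond to nested `Abs` terms whose body refers to the outer variable as `Bound(1)` and the inner one as `Bound(0)`. A minimal sketch, assuming the constructors behave as in the examples above:
```
t2 = Abs('x', NatType, Abs('y', NatType, nat.plus(Bound(1), Bound(0))))
print(t2)
assert t2 == Lambda(x, y, x + y)
```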
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Deep Convolutional Generative Adversarial Network
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/generative/dcgan">
<img src="https://www.tensorflow.org/images/tf_logo_32px.png" />
View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/generative/dcgan.ipynb">
<img src="https://www.tensorflow.org/images/colab_logo_32px.png" />
Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/generative/dcgan.ipynb">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/generative/dcgan.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
This tutorial demonstrates how to generate images of handwritten digits using a [Deep Convolutional Generative Adversarial Network](https://arxiv.org/pdf/1511.06434.pdf) (DCGAN). The code is written using the [Keras Sequential API](https://www.tensorflow.org/guide/keras) with a `tf.GradientTape` training loop.
## What are GANs?
[Generative Adversarial Networks](https://arxiv.org/abs/1406.2661) (GANs) are one of the most interesting ideas in computer science today. Two models are trained simultaneously by an adversarial process. A *generator* ("the artist") learns to create images that look real, while a *discriminator* ("the art critic") learns to tell real images apart from fakes.

During training, the *generator* progressively becomes better at creating images that look real, while the *discriminator* becomes better at telling them apart. The process reaches equilibrium when the *discriminator* can no longer distinguish real images from fakes.

This notebook demonstrates this process on the MNIST dataset. The following animation shows a series of images produced by the *generator* as it was trained for 50 epochs. The images begin as random noise, and increasingly resemble hand written digits over time.

To learn more about GANs, we recommend MIT's [Intro to Deep Learning](http://introtodeeplearning.com/) course.
### Import TensorFlow and other libraries
```
import tensorflow as tf
tf.__version__
# To generate GIFs
!pip install imageio
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
from tensorflow.keras import layers
import time
from IPython import display
```
### Load and prepare the dataset
You will use the MNIST dataset to train the generator and the discriminator. The generator will generate handwritten digits resembling the MNIST data.
```
(train_images, train_labels), (_, _) = tf.keras.datasets.mnist.load_data()
train_images = train_images.reshape(train_images.shape[0], 28, 28, 1).astype('float32')
train_images = (train_images - 127.5) / 127.5 # Normalize the images to [-1, 1]
BUFFER_SIZE = 60000
BATCH_SIZE = 256
# Batch and shuffle the data
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(BUFFER_SIZE).batch(BATCH_SIZE)
```
## Create the models
Both the generator and discriminator are defined using the [Keras Sequential API](https://www.tensorflow.org/guide/keras#sequential_model).
### The Generator
The generator uses `tf.keras.layers.Conv2DTranspose` (upsampling) layers to produce an image from a seed (random noise). Start with a `Dense` layer that takes this seed as input, then upsample several times until you reach the desired image size of 28x28x1. Notice the `tf.keras.layers.LeakyReLU` activation for each layer, except the output layer which uses tanh.
```
def make_generator_model():
model = tf.keras.Sequential()
model.add(layers.Dense(7*7*256, use_bias=False, input_shape=(100,)))
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Reshape((7, 7, 256)))
assert model.output_shape == (None, 7, 7, 256) # Note: None is the batch size
model.add(layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False))
assert model.output_shape == (None, 7, 7, 128)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False))
assert model.output_shape == (None, 14, 14, 64)
model.add(layers.BatchNormalization())
model.add(layers.LeakyReLU())
model.add(layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh'))
assert model.output_shape == (None, 28, 28, 1)
return model
```
Use the (as yet untrained) generator to create an image.
```
generator = make_generator_model()
noise = tf.random.normal([1, 100])
generated_image = generator(noise, training=False)
plt.imshow(generated_image[0, :, :, 0], cmap='gray')
```
### The Discriminator
The discriminator is a CNN-based image classifier.
```
def make_discriminator_model():
model = tf.keras.Sequential()
model.add(layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same',
input_shape=[28, 28, 1]))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
model.add(layers.LeakyReLU())
model.add(layers.Dropout(0.3))
model.add(layers.Flatten())
model.add(layers.Dense(1))
return model
```
Use the (as yet untrained) discriminator to classify the generated images as real or fake. The model will be trained to output positive values for real images, and negative values for fake images.
```
discriminator = make_discriminator_model()
decision = discriminator(generated_image)
print (decision)
```
## Define the loss and optimizers
Define loss functions and optimizers for both models.
```
# This method returns a helper function to compute cross entropy loss
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
```
### Discriminator loss
This method quantifies how well the discriminator is able to distinguish real images from fakes. It compares the discriminator's predictions on real images to an array of 1s, and the discriminator's predictions on fake (generated) images to an array of 0s.
```
def discriminator_loss(real_output, fake_output):
real_loss = cross_entropy(tf.ones_like(real_output), real_output)
fake_loss = cross_entropy(tf.zeros_like(fake_output), fake_output)
total_loss = real_loss + fake_loss
return total_loss
```
### Generator loss
The generator's loss quantifies how well it was able to trick the discriminator. Intuitively, if the generator is performing well, the discriminator will classify the fake images as real (or 1). Here, we will compare the discriminator's decisions on the generated images to an array of 1s.
```
def generator_loss(fake_output):
return cross_entropy(tf.ones_like(fake_output), fake_output)
```
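As a rough sanity check: if training reaches the idealized equilibrium where the discriminator outputs probability 0.5 for both real and fake images (logits near 0), each cross-entropy term equals $-\log 0.5 = \log 2 \approx 0.69$, so the discriminator's `total_loss` would hover around $2\log 2 \approx 1.39$ and the generator loss around $0.69$.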
The discriminator and the generator optimizers are different since we will train two networks separately.
```
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
```
### Save checkpoints
This notebook also demonstrates how to save and restore models, which can be helpful in case a long running training task is interrupted.
```
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
```
## Define the training loop
```
EPOCHS = 50
noise_dim = 100
num_examples_to_generate = 16
# We will reuse this seed over time (so it's easier
# to visualize progress in the animated GIF)
seed = tf.random.normal([num_examples_to_generate, noise_dim])
```
The training loop begins with the generator receiving a random seed as input. That seed is used to produce an image. The discriminator is then used to classify real images (drawn from the training set) and fake images (produced by the generator). The loss is calculated for each of these models, and the gradients are used to update the generator and discriminator.
```
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
@tf.function
def train_step(images):
noise = tf.random.normal([BATCH_SIZE, noise_dim])
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)
generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
def train(dataset, epochs):
for epoch in range(epochs):
start = time.time()
for image_batch in dataset:
train_step(image_batch)
# Produce images for the GIF as we go
display.clear_output(wait=True)
generate_and_save_images(generator,
epoch + 1,
seed)
# Save the model every 15 epochs
if (epoch + 1) % 15 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time for epoch {} is {} sec'.format(epoch + 1, time.time()-start))
# Generate after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator,
epochs,
seed)
```
**Generate and save images**
```
def generate_and_save_images(model, epoch, test_input):
# Notice `training` is set to False.
# This is so all layers run in inference mode (batchnorm).
predictions = model(test_input, training=False)
fig = plt.figure(figsize=(4,4))
for i in range(predictions.shape[0]):
plt.subplot(4, 4, i+1)
plt.imshow(predictions[i, :, :, 0] * 127.5 + 127.5, cmap='gray')
plt.axis('off')
plt.savefig('image_at_epoch_{:04d}.png'.format(epoch))
plt.show()
```
## Train the model
Call the `train()` method defined above to train the generator and discriminator simultaneously. Note, training GANs can be tricky. It's important that the generator and discriminator do not overpower each other (e.g., that they train at a similar rate).
At the beginning of the training, the generated images look like random noise. As training progresses, the generated digits will look increasingly real. After about 50 epochs, they resemble MNIST digits. This may take about one minute / epoch with the default settings on Colab.
```
train(train_dataset, EPOCHS)
```
Restore the latest checkpoint.
```
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
```
## Create a GIF
```
# Display a single image using the epoch number
def display_image(epoch_no):
return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))
display_image(EPOCHS)
```
Use `imageio` to create an animated gif using the images saved during training.
```
anim_file = 'dcgan.gif'
with imageio.get_writer(anim_file, mode='I') as writer:
filenames = glob.glob('image*.png')
filenames = sorted(filenames)
last = -1
for i,filename in enumerate(filenames):
frame = 2*(i**0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
import IPython
if IPython.version_info > (6,2,0,''):
display.Image(filename=anim_file)
```
If you're working in Colab you can download the animation with the code below:
```
try:
from google.colab import files
except ImportError:
pass
else:
files.download(anim_file)
```
## Next steps
This tutorial has shown the complete code necessary to write and train a GAN. As a next step, you might like to experiment with a different dataset, for example the Large-scale Celeb Faces Attributes (CelebA) dataset [available on Kaggle](https://www.kaggle.com/jessicali9530/celeba-dataset). To learn more about GANs we recommend the [NIPS 2016 Tutorial: Generative Adversarial Networks](https://arxiv.org/abs/1701.00160).
# Perturbation cost trajectories for gaussian noise of different sizes vs uniform noise of different sizes
```
import os
os.chdir("../")
import sys
import json
from argparse import Namespace
import numpy as np
from sklearn import metrics
from sklearn.metrics import pairwise_distances as dist
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='paper')
import provable_robustness_max_linear_regions.data as dt
from generate_perturbation_cost_trajectories import calculate_perturbation_cost_data
from utils import NumpyEncoder
```
## Plot settings:
```
SMALL_SIZE = 14
MEDIUM_SIZE = 18
BIGGER_SIZE = 26
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.rc('text', usetex=True)
# dictionary that maps color string to 'good looking' seaborn colors that are easily distinguishable
colors = {
"orange": sns.xkcd_rgb["yellowish orange"],
"red": sns.xkcd_rgb["pale red"],
"green": sns.xkcd_rgb["medium green"],
"blue": sns.xkcd_rgb["denim blue"],
"yellow": sns.xkcd_rgb["amber"],
"purple": sns.xkcd_rgb["dusty purple"],
"cyan": sns.xkcd_rgb["cyan"]
}
```
## Calculate perturbation cost data:
Estimated runtime (if no file with data is present): 12 hours
```
def load_from_json(file_name):
if not os.path.exists("res/" + file_name + ".json"):
return None
else:
with open("res/" + file_name + ".json", 'r') as fp:
return json.load(fp)
def save_to_json(dictionary, file_name):
if not os.path.exists("res"):
os.makedirs("res")
with open("res/" + file_name + ".json", 'w') as fp:
json.dump(dictionary, fp, cls = NumpyEncoder)
n_points = 1000
perturbation_cost_data = load_from_json("pc_croce_adversarial_noise_plus_gaussian_noise_n_points={}".format(n_points))
croce_model_paths = ["provable_robustness_max_linear_regions/models/plain/2019-02-24 00:50:45 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=0.0 gamma_rb=0.0 gamma_db=0.0 ae_frac=0.0 lr=0.001 epoch=100.mat","provable_robustness_max_linear_regions/models/mmr+at/2019-02-17 01:54:16 dataset=mnist nn_type=cnn_lenet_small p_norm=inf lmbd=0.5 gamma_rb=0.2 gamma_db=0.2 ae_frac=0.5 epoch=100.mat", "provable_robustness_max_linear_regions/models/mmr+at/2019-02-24 00:04:27 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=6.0 gamma_rb=0.45 gamma_db=0.45 ae_frac=0.5 lr=5e-05 epoch=50.mat"]
if not perturbation_cost_data:
perturbation_cost_data = dict()
for model_path in croce_model_paths:
args = Namespace()
args.dataset = "mnist"
args.n_points = n_points
args.model_path = model_path
args.adversarial_model_paths = [model_path]
args.nn_type = "cnn"
args.norms = ["inf", "2"]
args.noise_types = ["gaussian", "uniform"]
args.noise_sizes = [0.1, 0.3, 0.6]
args.splits = [{"inf": [0.0, np.inf], "2": [0.0, np.inf]}]
args.save = False
args.plot = False
file_name = model_path.split("/")[3]
model_name = file_name.split(".mat")[0]
perturbation_cost_data[model_name] = calculate_perturbation_cost_data(args)
save_to_json(perturbation_cost_data, "pc_croce_adversarial_noise_plus_gaussian_noise_n_points={}".format(n_points))
```
## Plot:
```
# name to save the plot
save_name = "fig_pc_comparing_noise_levels"
model_names = [
"2019-02-17 01:54:16 dataset=mnist nn_type=cnn_lenet_small p_norm=inf lmbd=0.5 gamma_rb=0.2 gamma_db=0.2 ae_frac=0.5 epoch=100",
"2019-02-24 00:04:27 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=6.0 gamma_rb=0.45 gamma_db=0.45 ae_frac=0.5 lr=5e-05 epoch=50"
]
model_name_dict = {
"2019-02-24 00:50:45 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=0.0 gamma_rb=0.0 gamma_db=0.0 ae_frac=0.0 lr=0.001 epoch=100":
"Training: Standard Training",
"2019-02-17 01:54:16 dataset=mnist nn_type=cnn_lenet_small p_norm=inf lmbd=0.5 gamma_rb=0.2 gamma_db=0.2 ae_frac=0.5 epoch=100":
"Training: MMR+AT\nThreat Model: $\ell_\infty(\epsilon=0.1)$",
"2019-02-24 00:04:27 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=6.0 gamma_rb=0.45 gamma_db=0.45 ae_frac=0.5 lr=5e-05 epoch=50":
"Training: MMR+AT\nThreat Model: $\ell_2(\epsilon=0.3)$"
}
base_color_dict_for_noise_type = {
"noise": {
"inf": {
"gaussian": colors["red"],
"uniform": colors["blue"]
},
"2": {
"gaussian": colors["red"],
"uniform": colors["blue"]
}
}
}
# number of model types and parameter combinations
n_cols = 2
n_rows = 1
fig, ax = plt.subplots(n_rows,
n_cols,
figsize=(6 * n_cols, 5 * n_rows),
dpi=400)
linestyle = "-"
base_color_dict = {
"adv": {
"inf": colors["blue"],
"2": colors["green"]
},
"noise": {
"inf": colors["red"],
"2": colors["yellow"]
}
}
norm_to_latex = {"inf": "\infty", "2": "2"}
model_name = "2019-02-24 00:50:45 dataset=mnist nn_type=cnn_lenet_small p_norm=2 lmbd=0.0 gamma_rb=0.0 gamma_db=0.0 ae_frac=0.0 lr=0.001 epoch=100"
pert_costs_data = perturbation_cost_data[model_name][model_name]
for i, norm in enumerate(["inf", "2"]):
pert_cost_norm = norm
perturbation_norm = norm
noise_types = ["uniform", "gaussian"]
noise_sizes = ["0.1", "0.3", "0.6"]
split = json.dumps({"inf": [0.0, np.inf], "2": [0.0, np.inf]})
for noise_type in noise_types:
for noise_size in noise_sizes:
pert_costs_noise = np.array(
pert_costs_data[pert_cost_norm][perturbation_norm]
[noise_type][noise_size][split]["pert_costs_2"])
linestyle = "-"
color = base_color_dict_for_noise_type["noise"][
perturbation_norm][noise_type]
ax[i].plot(np.mean(pert_costs_noise, axis=0),
c=color,
linestyle=linestyle,
label="{} noise, size $\ell_{}={}$".format(
noise_type, norm_to_latex[perturbation_norm],
noise_size),
alpha=0.4 + float(noise_size))
ax[i].set_xlim(0.0, 8.0)
ax[i].set_xticks(np.arange(0, 9.0, 1.0))
ax[i].set_xticklabels([
"INPUT", "CONV", "RELU", "CONV", "RELU", "FC", "RELU", "FC",
"ARGMAX"
])
ax[i].set_xlabel("layer")
ax[i].legend()
fig.suptitle(
"mean perturbation cost curves for different noise sizes in $\ell_\infty$ and $\ell_2$ norm"
)
ax[0].set_ylabel("perturbation costs")
ax[0].set_ylim(0.0, 2.0)
ax[1].set_ylim(0.0, 0.1)
fig.tight_layout()
fig.subplots_adjust(top=0.88)
fig.savefig('res/{}.pdf'.format(save_name))
```
# Introduction: Writing Patent Abstracts with a Recurrent Neural Network
The purpose of this notebook is to develop a recurrent neural network using LSTM cells that can generate patent abstracts. We will look at using a _word level_ recurrent neural network and _embedding_ the vocab, both with pre-trained vectors and training our own embeddings. We will train the model by feeding in as the features a long sequence of words (for example 50 words) and then using the next word as the label. Over time, the network will (hopefully) learn to predict the next word in a given sequence and we can use the model predictions to generate entirely novel patent abstracts.
## Approach
The approach to solving this problem is:
1. Read in training data: thousands of "neural network" patents
2. Convert patents to integer sequences: `tokenization`
3. Create training dataset using next word following a sequence as label
4. Build a recurrent neural network using word embeddings and LSTM cells
5. Load in pre-trained embeddings
6. Train network to predict next word from sequence
7. Generate new abstracts by feeding network a seed sequence
8. Repeat steps 2 - 7 using pre-trained embeddings
9. Try different model architecture to see if performance improves
10. For fun, create a simple game where we must guess if the output is human or computer!
Each of these steps is relatively simple by itself, so don't be intimidated. We'll walk through the entire process, and by the end we'll have a working application of deep learning!
```
# Set up IPython to show all outputs from a cell
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = 'all'
import warnings
warnings.filterwarnings('ignore', category = RuntimeWarning)
RANDOM_STATE = 50
EPOCHS = 150
BATCH_SIZE = 2048
TRAINING_LENGTH = 50
TRAIN_FRACTION = 0.7
VERBOSE = 0
SAVE_MODEL = True
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
```
## Read in Data
Our data consists of patent abstracts obtained by searching for the term "neural networks" on the [patentsview query](http://www.patentsview.org/querydev) web interface. The data can be downloaded in a number of formats and can include a number of patent attributes (I only kept 4).
```
import pandas as pd
import numpy as np
# Read in data
data = pd.read_csv('../data/neural_network_patent_query.csv', parse_dates = ['patent_date'])
# Extract abstracts
original_abstracts = list(data['patent_abstract'])
len(original_abstracts)
data.head()
```
### Brief Data Exploration
This data is extremely clean, which means we don't need to do any manual munging. We can still make a few simple plots out of curiosity though!
```
data['patent_abstract'][100]
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('fivethirtyeight')
data['year-month'] = [pd.Timestamp(year=year, month=month, day=1) for year, month in zip(data['patent_date'].dt.year,
                                                                                          data['patent_date'].dt.month)]
monthly = data.groupby('year-month')['patent_number'].count().reset_index()
monthly.set_index('year-month')['patent_number'].plot(figsize = (16, 8))
plt.ylabel('Number of Patents'); plt.xlabel('Date');
plt.title('Neural Network Patents over Time');
monthly.groupby(monthly['year-month'].dt.year)['patent_number'].sum().plot.bar(color = 'red', edgecolor = 'k',
figsize = (12, 6))
plt.xlabel('Year'); plt.ylabel('Number of Patents'); plt.title('Neural Network Patents by Year');
```
The distribution of patents over time is interesting. I would expect 2018 to come out on top once the patents have been accepted.
## Data Cleaning
Our preprocessing is going to involve using a `Tokenizer` to convert the patents from sequences of words (strings) into sequences of integers. We'll get to that in a bit, but even with neural networks, having a clean dataset is paramount. The data quality is already high, but there are some idiosyncrasies of patents as well as general text improvements to make. For example, let's consider the following two sentences.
>'This is a short sentence (1) with one reference to an image. This next sentence, while non-sensical, does not have an image and has two commas.'
If we choose to remove all punctuation with the default Tokenizer settings, we get the following.
```
from keras.preprocessing.text import Tokenizer
example = 'This is a short sentence (1) with one reference to an image. This next sentence, while non-sensical, does not have an image and has two commas.'
tokenizer = Tokenizer(filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts([example])
s = tokenizer.texts_to_sequences([example])[0]
' '.join(tokenizer.index_word[i] for i in s)
```
This removes all the punctuation, but now we have a stray number in the sentence (left over from the image reference). If we choose not to remove the punctuation, the sentence looks better, but then we have some interesting words in the vocabulary.
```
tokenizer = Tokenizer(filters='"#$%&*+/:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts([example])
s = tokenizer.texts_to_sequences([example])[0]
' '.join(tokenizer.index_word[i] for i in s)
tokenizer.word_index.keys()
```
Notice that `image` and `image.` are classified as distinct words. This is because the period is attached to one and not the other and the same with `sentence` and `sentence,`. To alleviate this issue, we can add spaces around the punctuation using regular expressions. We will also remove the image references.
```
import re
def format_patent(patent):
"""Add spaces around punctuation and remove references to images/citations."""
# Add spaces around punctuation
patent = re.sub(r'(?<=[^\s0-9])(?=[.,;?])', r' ', patent)
# Remove references to figures
patent = re.sub(r'\((\d+)\)', r'', patent)
# Remove double spaces
patent = re.sub(r'\s\s', ' ', patent)
return patent
f = format_patent(example)
f
```
Now when we do the tokenization, we get separate entries in the vocab for the punctuation, but _not_ for words with punctuation attached.
```
tokenizer = Tokenizer(filters='"#$%&*+/:;<=>?@[\\]^_`{|}~\t\n')
tokenizer.fit_on_texts([f])
s = tokenizer.texts_to_sequences([f])[0]
' '.join(tokenizer.index_word[i] for i in s)
tokenizer.word_index.keys()
```
We no longer have the `image` and `image.` problem but we do have separate symbols for `.` and `,`. This means the network will be forced to learn a representation for these punctuation marks (they are also in the pre-trained embeddings). When we want to get back to the original sentence (without image references) we simply have to remove the spaces.
```
def remove_spaces(patent):
"""Remove spaces around punctuation"""
patent = re.sub(r'\s+([.,;?])', r'\1', patent)
return patent
remove_spaces(' '.join(tokenizer.index_word[i] for i in s))
```
We can apply this operation to all of the original abstracts.
```
formatted = []
# Iterate through all the original abstracts
for a in original_abstracts:
formatted.append(format_patent(a))
len(formatted)
```
# Convert Text to Sequences
A neural network cannot process words, so we must convert the patent abstracts into integers. This is done using the Keras utility `Tokenizer`. By default, this will convert all words to lowercase and remove punctuation. Therefore, our model will not be able to write complete sentences. However, this can be beneficial for a first model because it limits the size of the vocabulary and means that more of the words (converted into tokens) will have pre-trained embeddings.
Later, we will not remove the capitalization and punctuation when we train our own embeddings.
## Features and Labels
This function takes a few parameters, including a training length, which is the number of words we will feed into the network as features, with the next word as the label. For example, if we set `training_length = 50`, then the model will take in 50 words as features and the 51st word as the label.
For each abstract, we can make multiple training examples by slicing at different points. We can use the first 50 words as features with the 51st as a label, then the 2nd through 51st words as features and the 52nd as the label, then the 3rd through 52nd with the 53rd as the label, and so on. This gives us much more data to train on, and the model's performance generally improves with more training data.
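As a quick illustration of this slicing before the full function below (using a hypothetical toy sequence rather than patent data):
```
# Toy example: training_length = 3, so each example is 3 feature tokens plus the next token as the label
toy_seq = [10, 11, 12, 13, 14, 15]
training_length = 3
examples = [(toy_seq[i - training_length:i], toy_seq[i])
            for i in range(training_length, len(toy_seq))]
examples
# [([10, 11, 12], 13), ([11, 12, 13], 14), ([12, 13, 14], 15)]
```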
```
def make_sequences(texts, training_length = 50,
lower = True, filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n'):
"""Turn a set of texts into sequences of integers"""
# Create the tokenizer object and train on texts
tokenizer = Tokenizer(lower=lower, filters=filters)
tokenizer.fit_on_texts(texts)
# Create look-up dictionaries and reverse look-ups
word_idx = tokenizer.word_index
idx_word = tokenizer.index_word
num_words = len(word_idx) + 1
word_counts = tokenizer.word_counts
print(f'There are {num_words} unique words.')
# Convert text to sequences of integers
sequences = tokenizer.texts_to_sequences(texts)
# Limit to sequences with more than training length tokens
seq_lengths = [len(x) for x in sequences]
over_idx = [i for i, l in enumerate(seq_lengths) if l > (training_length + 20)]
new_texts = []
new_sequences = []
# Only keep sequences with more than training length tokens
for i in over_idx:
new_texts.append(texts[i])
new_sequences.append(sequences[i])
training_seq = []
labels = []
# Iterate through the sequences of tokens
for seq in new_sequences:
# Create multiple training examples from each sequence
for i in range(training_length, len(seq)):
# Extract the features and label
extract = seq[i - training_length: i + 1]
# Set the features and label
training_seq.append(extract[:-1])
labels.append(extract[-1])
print(f'There are {len(training_seq)} training sequences.')
# Return everything needed for setting up the model
return word_idx, idx_word, num_words, word_counts, new_texts, new_sequences, training_seq, labels
```
Now let's see how our function generates data. For using pre-trained embeddings, we'll remove a fair amount of the punctuation and lowercase all letters but leave in periods and commas. This is because there are no capitalized words in the pre-trained embeddings but there is some punctuation. Our model will not learn how to capitalize words, but it may learn how to end a sentence and insert commas.
```
filters = '!"#$%&()*+/:<=>@[\\]^_`{|}~\t\n'
word_idx, idx_word, num_words, word_counts, abstracts, sequences, features, labels = make_sequences(formatted,
TRAINING_LENGTH,
lower = True,
filters = filters)
```
Each patent is now represented as a sequence of integers. Let's look at an example of a few features and the corresponding labels. The label is the next word in the sequence after the first 50 words.
```
n = 3
features[n][:10]
def find_answer(index):
"""Find label corresponding to features for index in training data"""
# Find features and label
feats = ' '.join(idx_word[i] for i in features[index])
answer = idx_word[labels[index]]
print('Features:', feats)
print('\nLabel: ', answer)
find_answer(n)
original_abstracts[0]
find_answer(100)
```
Our patents are no longer correct English, but, by removing capital letters, we do reduce the size of the vocabulary.
__Deciding which pre-processing steps to take is, in general, the most important aspect of a machine learning project.__
```
sorted(word_counts.items(), key = lambda x: x[1], reverse = True)[:15]
```
The most common words make sense in the context of the patents we are using and the general English language.
## Training Data
Next we need to take the features and labels and convert them into training and validation data. The following function does this by splitting the data - after random shuffling, because the features were made in sequential order - based on the `train_fraction` specified. All the inputs are converted into numpy arrays, which is the format a Keras neural network expects.
### Encoding of Labels
One important step is to convert the labels to one hot encoded vectors because our network will be trained using `categorical_crossentropy` and makes a prediction for each word in the vocabulary (we can train with the labels represented as simple integers, but I found performance was better and training faster when using a one-hot representation of the labels). This is done by creating an array of rows of all zeros except for the index of the word which we want to predict - the label - which gets a 1.
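For intuition, here is a minimal standalone sketch of the one-hot step with a hypothetical vocabulary of 5 words (the full function below does the same thing for the real labels):
```
import numpy as np

toy_labels = [2, 0, 4]  # integer-encoded labels
toy_num_words = 5
y_toy = np.zeros((len(toy_labels), toy_num_words), dtype=np.int8)
for row, word_index in enumerate(toy_labels):
    y_toy[row, word_index] = 1  # a single 1 at the label's index
y_toy
```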
```
from sklearn.utils import shuffle
def create_train_valid(features, labels, num_words, train_fraction = TRAIN_FRACTION):
"""Create training and validation features and labels."""
# Randomly shuffle features and labels
features, labels = shuffle(features, labels, random_state = RANDOM_STATE)
# Decide on number of samples for training
train_end = int(train_fraction * len(labels))
train_features = np.array(features[:train_end])
valid_features = np.array(features[train_end:])
train_labels = labels[:train_end]
valid_labels = labels[train_end:]
# Convert to arrays
X_train, X_valid = np.array(train_features), np.array(valid_features)
# Using int8 for memory savings
y_train = np.zeros((len(train_labels), num_words), dtype = np.int8)
y_valid = np.zeros((len(valid_labels), num_words), dtype = np.int8)
# One hot encoding of labels
for example_index, word_index in enumerate(train_labels):
y_train[example_index, word_index] = 1
for example_index, word_index in enumerate(valid_labels):
y_valid[example_index, word_index] = 1
# Memory management
import gc
gc.enable()
del features, labels, train_features, valid_features, train_labels, valid_labels
gc.collect()
return X_train, X_valid, y_train, y_valid
X_train, X_valid, y_train, y_valid = create_train_valid(features, labels, num_words)
X_train.shape
y_train.shape
```
We do want to be careful about using up too much memory. One-hot encoding the labels creates massive numpy arrays, so I took care to delete the unused objects from the workspace.
```
import sys
sys.getsizeof(y_train) / 1e9
def check_sizes(gb_min = 1):
for x in globals():
size = sys.getsizeof(eval(x))/1e9
if size > gb_min:
print(f'Object: {x:10}\tSize: {size} GB.')
check_sizes(gb_min = 1)
```
# Pre-Trained Embeddings
Rather than training our own word embeddings, a very expensive operation, we can use word embeddings that were trained on a large corpus of words. The hope is that these embeddings will generalize from the training corpus to our needs.
This code downloads 100-dimensional word embeddings if you don't already have them. There are a number of different pre-trained word embeddings you can find from [Stanford online](https://nlp.stanford.edu/data/).
```
import os
from keras.utils import get_file
# Vectors to use
glove_vectors = '/home/ubuntu/.keras/datasets/glove.6B.zip'
# Download word embeddings if they are not present
if not os.path.exists(glove_vectors):
glove_vectors = get_file('glove.6B.zip', 'http://nlp.stanford.edu/data/glove.6B.zip')
os.system(f'unzip {glove_vectors}')
# Load in unzipped file
glove_vectors = '/home/ubuntu/.keras/datasets/glove.6B.100d.txt'
glove = np.loadtxt(glove_vectors, dtype='str', comments=None)
glove.shape
```
Now we separate the data into the words and the vectors.
```
vectors = glove[:, 1:].astype('float')
words = glove[:, 0]
del glove
vectors[100], words[100]
```
Next we want to keep only those words that appear in our vocabulary. Words that are in our vocabulary but don't have a pre-trained embedding will be represented as all 0s (a shortcoming we can address by training our own embeddings).
```
vectors.shape
word_lookup = {word: vector for word, vector in zip(words, vectors)}
embedding_matrix = np.zeros((num_words, vectors.shape[1]))
not_found = 0
for i, word in enumerate(word_idx.keys()):
# Look up the word embedding
vector = word_lookup.get(word, None)
# Record in matrix
if vector is not None:
embedding_matrix[i + 1, :] = vector
else:
not_found += 1
print(f'There were {not_found} words without pre-trained embeddings.')
import gc
gc.enable()
del vectors
gc.collect()
```
Each word is now represented by 100 numbers, although a number of words in our vocabulary could not be found in the pre-trained vectors. We can find the closest words to a given word in embedding space using cosine similarity, which requires first normalizing the vectors to have a magnitude of 1.
```
# Normalize and convert nan to 0
embedding_matrix = embedding_matrix / np.linalg.norm(embedding_matrix, axis = 1).reshape((-1, 1))
embedding_matrix = np.nan_to_num(embedding_matrix)
def find_closest(query, embedding_matrix, word_idx, idx_word, n = 10):
"""Find closest words to a query word in embeddings"""
idx = word_idx.get(query, None)
# Handle case where query is not in vocab
if idx is None:
print(f'{query} not found in vocab.')
return
else:
vec = embedding_matrix[idx]
# Handle case where word doesn't have an embedding
if np.all(vec == 0):
print(f'{query} has no pre-trained embedding.')
return
else:
# Calculate distance between vector and all others
dists = np.dot(embedding_matrix, vec)
# Sort indexes in reverse order
idxs = np.argsort(dists)[::-1][:n]
sorted_dists = dists[idxs]
closest = [idx_word[i] for i in idxs]
print(f'Query: {query}\n')
max_len = max([len(i) for i in closest])
# Print out the word and cosine distances
for word, dist in zip(closest, sorted_dists):
print(f'Word: {word:15} Cosine Similarity: {round(dist, 4)}')
find_closest('the', embedding_matrix, word_idx, idx_word)
find_closest('neural', embedding_matrix, word_idx, idx_word, 10)
find_closest('.', embedding_matrix, word_idx, idx_word, 10)
find_closest('wonder', embedding_matrix, word_idx, idx_word)
find_closest('dnn', embedding_matrix, word_idx, idx_word)
```
# Build Model
With data encoded as integers and an embedding matrix of pre-trained word vectors, we're ready to build the recurrent neural network. This model is relatively simple and uses an LSTM cell as the heart of the network. After converting the words into embeddings, we pass them through a single LSTM layer, then into a fully connected layer with `relu` activation before the final output layer with a `softmax` activation. The final layer produces a probability for every word in the vocab.
When training, these predictions are compared to the actual label using the `categorical_crossentropy` to calculate a loss. The parameters (weights) in the network are then updated using the Adam optimizer (a variant on Stochastic Gradient Descent) with gradients calculated through backpropagation. Fortunately, Keras handles all of this behind the scenes, so we just have to set up the network and then start the training. The most difficult part is figuring out the correct shapes for the inputs and outputs into the model.
```
from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout, Embedding, Masking, Bidirectional
from keras.optimizers import Adam
from keras.utils import plot_model
def make_word_level_model(num_words, embedding_matrix, bi_directional = False,
trainable = False, lstm_cells = 128, lstm_layers = 1):
"""Make a word level recurrent neural network with option for pretrained embeddings
and varying numbers of LSTM cell layers."""
model = Sequential()
# Map words to an embedding
if not trainable:
model.add(Embedding(input_dim=num_words,
output_dim=embedding_matrix.shape[1],
weights = [embedding_matrix], trainable = False,
mask_zero = True))
model.add(Masking())
else:
model.add(Embedding(input_dim = num_words,
output_dim = embedding_matrix.shape[1],
weights = [embedding_matrix],
trainable = True))
# If want to add multiple LSTM layers
if lstm_layers > 1:
for i in range(lstm_layers - 1):
model.add(LSTM(128, return_sequences=True, dropout=0.1, recurrent_dropout=0.1))
# Add final LSTM cell layer
if bi_directional:
model.add(Bidirectional(LSTM(lstm_cells, return_sequences = False, dropout = 0.1, recurrent_dropout=0.1)))
else:
model.add(LSTM(lstm_cells, return_sequences=False, dropout=0.1))
model.add(Dense(128, activation = 'relu'))
# Dropout for regularization
model.add(Dropout(0.5))
# Output layer
model.add(Dense(num_words, activation = 'softmax'))
# Compile the model
model.compile(optimizer = 'adam', loss = 'categorical_crossentropy',
metrics = ['accuracy'])
return model
model = make_word_level_model(num_words, embedding_matrix = embedding_matrix, bi_directional = True,
trainable = False, lstm_layers = 1, lstm_cells = 64)
model.summary()
```
The model needs a loss to minimize (`categorical_crossentropy`) as well as a method for updating the weights using the gradients (`Adam`). We will also monitor accuracy, which is not the quantity being optimized but gives us a more interpretable measure of model performance.
Using pre-trained embeddings means we have about half the parameters to train. However, this also means that the embeddings might not be the best for our data, and there are a number of words with no embeddings.
```
model_name = 'pre-trained-bi-directional-rnn'
model_dir = '../models/'
plot_model(model, to_file = f'{model_dir}{model_name}.png', show_shapes = True)
from IPython.display import Image
Image(f'{model_dir}{model_name}.png')
```
# Train Model
We can now train the model on our training examples. We'll make sure to use early stopping with a validation set to stop the training when the loss on the validation set is no longer decreasing. Also, we'll save the best model every time the validation loss decreases so we can then load in the best model to generate predictions.
### Callbacks
* Early Stopping: Stop training when validation loss no longer decreases
* Model Checkpoint: Save the best model on disk
```
from keras.callbacks import EarlyStopping, ModelCheckpoint
BATCH_SIZE = 2048
def make_callbacks(model_name, save = SAVE_MODEL):
"""Make list of callbacks for training"""
callbacks = [EarlyStopping(monitor = 'val_loss', patience = 5)]
if save:
callbacks.append(ModelCheckpoint(f'{model_dir}{model_name}.h5',
save_best_only = True, save_weights_only = False))
return callbacks
callbacks = make_callbacks(model_name)
def load_and_evaluate(model_name, return_model = False):
"""Load in a trained model and evaluate with log loss and accuracy"""
model = load_model(f'{model_dir}{model_name}.h5')
r = model.evaluate(X_valid, y_valid, batch_size = 2048, verbose = 1)
valid_crossentropy = r[0]
valid_accuracy = r[1]
print(f'Cross Entropy: {round(valid_crossentropy, 4)}')
print(f'Accuracy: {round(100 * valid_accuracy, 2)}%')
if return_model:
return model
```
__Depending on your machine, this may take several hours to run.__
```
history = model.fit(X_train, y_train, epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = 1,
callbacks=callbacks,
validation_data = (X_valid, y_valid))
model = load_and_evaluate(model_name, return_model = True)
model = make_word_level_model(num_words, embedding_matrix = embedding_matrix, bi_directional = False,
trainable = False, lstm_layers = 1, lstm_cells = 64)
model.summary()
model_name = 'pre-trained-nonbi-directional-rnn'
callbacks = make_callbacks(model_name)
history = model.fit(X_train, y_train, epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = 1,
callbacks=callbacks,
validation_data = (X_valid, y_valid))
model = load_and_evaluate(model_name, return_model = True)
```
The accuracy - both training and validation - increase over time and the loss decreases over time which gives us indication that our model is getting better with training.
We can load back in the model so we don't need to repeat the training.
```
def load_and_evaluate(model_name, return_model = False):
"""Load in a trained model and evaluate with log loss and accuracy"""
model = load_model(f'{model_dir}{model_name}.h5')
r = model.evaluate(X_valid, y_valid, batch_size = 2048, verbose = 1)
valid_crossentropy = r[0]
valid_accuracy = r[1]
print(f'Cross Entropy: {round(valid_crossentropy, 4)}')
print(f'Accuracy: {round(100 * valid_accuracy, 2)}%')
if return_model:
return model
model = load_and_evaluate(model_name, return_model = True)
```
To check how the model compares to just using the word frequencies to make predictions, we can compute the accuracy if we were to use the most frequent word for every guess. We can also choose from a multinomial distribution using the word frequencies as probabilities.
```
np.random.seed(40)
# Number of all words
total_words = sum(word_counts.values())
# Compute frequency of each word in vocab
frequencies = [word_counts[word]/total_words for word in word_idx.keys()]
frequencies.insert(0, 0)
frequencies[1:10], list(word_idx.keys())[0:9]
```
The most common word is 'the'. Let's see the accuracy of guessing this for every validation example.
```
print(f'The accuracy is {round(100 * np.mean(np.argmax(y_valid, axis = 1) == 1), 4)}%.')
```
Now we make a guess for each of the sequences in the validation set using the frequencies as probabilities. This is in some sense informed, but the multinomial also has randomness.
```
random_guesses = []
# Make a prediction based on frequencies for each example in validation data
for i in range(len(y_valid)):
random_guesses.append(np.argmax(np.random.multinomial(1, frequencies, size = 1)[0]))
from collections import Counter
# Create a counter from the guesses
c = Counter(random_guesses)
# Iterate through the 10 most common guesses
for i in c.most_common(10):
word = idx_word[i[0]]
word_count = word_counts[word]
print(f'Word: {word} \tCount: {word_count} \tPercentage: {round(100 * word_count / total_words, 2)}% \tPredicted: {i[1]}')
accuracy = np.mean(random_guesses == np.argmax(y_valid, axis = 1))
print(f'Random guessing accuracy: {100 * round(accuracy, 4)}%')
```
We can see that our model easily outperforms both guessing the most common word - 7.76% accuracy - as well as using relative word frequencies to guess the next word - 1.46% accuracy. Therefore, we can say that our model has learned something!
# Generating Output
Now for the fun part: we get to use our model to generate new abstracts. To do this, we feed the network a seed sequence, have it make a prediction, add the predicted word to the sequence, and make another prediction for the next word. We continue this for the number of words that we want. We compare the generated output to the actual abstract to see if we can tell the difference!
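The core of this procedure is a simple sliding-window loop; a minimal sketch is shown here (assuming the trained `model` and an integer-encoded `seed` window of the training length; the full `generate_output` function below additionally handles diversity sampling and the HTML formatting):
```
def generate_words(model, seed, new_words=10):
    """Greedy sketch of autoregressive generation from an integer seed window."""
    seed = list(seed)
    generated = []
    for _ in range(new_words):
        # Predict a distribution over the vocabulary from the current window
        preds = model.predict(np.array(seed).reshape(1, -1))[0]
        next_idx = int(np.argmax(preds))
        # Slide the window: drop the oldest token, append the prediction
        seed = seed[1:] + [next_idx]
        generated.append(next_idx)
    return generated
```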
```
from IPython.display import HTML
def header(text, color = 'black'):
raw_html = f'<h1 style="color: {color};"><center>' + str(text) + '</center></h1>'
return raw_html
def box(text):
raw_html = '<div style="border:1px inset black;padding:1em;font-size: 20px;">'+str(text)+'</div>'
return raw_html
def addContent(old_html, raw_html):
old_html += raw_html
return old_html
import random
def generate_output(model, sequences, training_length = 50, new_words = 50, diversity = 1,
return_output = False, n_gen = 1):
"""Generate `new_words` words of output from a trained model and format into HTML."""
# Choose a random sequence
seq = random.choice(sequences)
# Choose a random starting point
seed_idx = random.randint(0, len(seq) - training_length - 10)
# Ending index for seed
end_idx = seed_idx + training_length
gen_list = []
for n in range(n_gen):
# Extract the seed sequence
seed = seq[seed_idx:end_idx]
original_sequence = [idx_word[i] for i in seed]
generated = seed[:] + ['#']
# Find the actual entire sequence
actual = generated[:] + seq[end_idx:end_idx + new_words]
# Keep adding new words
for i in range(new_words):
# Make a prediction from the seed
preds = model.predict(np.array(seed).reshape(1, -1))[0].astype(np.float64)
# Diversify
preds = np.log(preds) / diversity
exp_preds = np.exp(preds)
# Softmax
preds = exp_preds / sum(exp_preds)
# Choose the next word
probas = np.random.multinomial(1, preds, 1)[0]
next_idx = np.argmax(probas)
# New seed adds on old word
seed = seed[1:] + [next_idx]
generated.append(next_idx)
# Showing generated and actual abstract
n = []
for i in generated:
n.append(idx_word.get(i, '< --- >'))
gen_list.append(n)
a = []
for i in actual:
a.append(idx_word.get(i, '< --- >'))
a = a[training_length:]
gen_list = [gen[training_length:training_length + len(a)] for gen in gen_list]
if return_output:
return original_sequence, gen_list, a
# HTML formatting
seed_html = ''
seed_html = addContent(seed_html, header('Seed Sequence', color = 'darkblue'))
seed_html = addContent(seed_html, box(remove_spaces(' '.join(original_sequence))))
gen_html = ''
gen_html = addContent(gen_html, header('RNN Generated', color = 'darkred'))
gen_html = addContent(gen_html, box(remove_spaces(' '.join(gen_list[0]))))
a_html = ''
a_html = addContent(a_html, header('Actual', color = 'darkgreen'))
a_html = addContent(a_html, box(remove_spaces(' '.join(a))))
return seed_html, gen_html, a_html
```
The `diversity` parameter determines how much randomness is added to the predictions. If we just use the most likely word for each prediction, the output sometimes gets stuck in loops. The diversity means the predicted text has a little more variation.
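To see numerically what the diversity does, here is a small sketch with a hypothetical three-word probability vector, mirroring the scaling inside `generate_output`: lower diversity sharpens the distribution toward the most likely word, while higher diversity flattens it.
```
import numpy as np

preds = np.array([0.6, 0.3, 0.1])
for diversity in [0.5, 1.0, 2.0]:
    scaled = np.exp(np.log(preds) / diversity)
    scaled = scaled / scaled.sum()
    print(diversity, np.round(scaled, 3))
```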
```
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 1)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.75)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
```
Increasing the diversity seems to increase the plausibility of the output. However, that could be because the patents themselves don't sound that realistic. This is especially true when we remove the punctuation. We'll fix that in the next section by keeping the punctuation and training our own embeddings.
# Training Own Embeddings
If we aren't happy with the output, especially the lack of punctuation, we can try training our own embeddings. This means the model will adapt the embeddings by itself to get better at the problem of predicting the next output. The final embeddings should place words that are more similar closer together in embedding space. The advantage of training our own embeddings is that they might be more relevant to the task. However, the downside is that training will take longer because the number of parameters significantly increases.
```
def clear_memory():
    import gc
    gc.enable()
    for i in ['model', 'X', 'y', 'word_idx', 'idx_word', 'X_train', 'X_valid', 'y_train', 'y_valid', 'embedding_matrix',
              'words', 'vectors', 'labels', 'random_guesses', 'training_seq', 'word_counts', 'data', 'frequencies']:
        # Check the notebook's global namespace (dir() inside the function would only see locals)
        if i in globals():
            del globals()[i]
    gc.collect()
clear_memory()
```
Now when we create the training data, we do not remove the punctuation or convert the words to lowercase.
```
TRAINING_LENGTH = 50
filters = '!"%;[\\]^_`{|}~\t\n'
word_idx, idx_word, num_words, word_counts, abstracts, sequences, features, labels = make_sequences(formatted,
TRAINING_LENGTH,
lower = False,
filters = filters)
embedding_matrix = np.zeros((num_words, len(word_lookup['the'])))
not_found = 0
for i, word in enumerate(word_idx.keys()):
# Look up the word embedding
vector = word_lookup.get(word, None)
# Record in matrix
if vector is not None:
embedding_matrix[i + 1, :] = vector
else:
not_found += 1
print(f'There were {not_found} words without pre-trained embeddings.')
embedding_matrix.shape
# Split into training and validation
X_train, X_valid, y_train, y_valid = create_train_valid(features, labels, num_words)
X_train.shape, y_train.shape
check_sizes(gb_min = 1)
```
Let's create a model with 100 dimensional embeddings, input sequences of length 50, and 1 LSTM layer as before.
```
model = make_word_level_model(num_words, embedding_matrix, trainable = True, bi_directional = True,
lstm_layers = 1, lstm_cells = 64)
model.summary()
model_name = 'training-rnn-bi-directional'
callbacks = make_callbacks(model_name)
model.compile(optimizer = Adam(), loss = 'categorical_crossentropy', metrics = ['accuracy'])
history = model.fit(X_train, y_train, batch_size = BATCH_SIZE, verbose = VERBOSE, epochs = EPOCHS, callbacks=callbacks,
validation_data = (X_valid, y_valid))
import json
with open('training-rnn.json', 'w') as f:
f.write(json.dumps(word_idx))
```
As before we load in the model and have it generate output.
```
model_dir = '../models/'
from keras.models import load_model
model = load_and_evaluate(model_name, return_model=True)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.75)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.75)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
```
The most realistic output seems to occur when the diversity is between 0.5 and 1.0. Sometimes it's difficult to tell the generated abstracts from the actual ones, a test we'll run a little later!
## Inspect Embeddings
We can take a look at our trained embeddings to figure out the closest words in the embedding space. These embeddings are trained for our task, which means they may differ slightly from the pre-trained versions.
```
model.summary()
def get_embeddings(model):
embedding_layer = model.get_layer(index = 0)
embedding_matrix = embedding_layer.get_weights()[0]
embedding_matrix = embedding_matrix / np.linalg.norm(embedding_matrix, axis = 1).reshape((-1, 1))
embedding_matrix = np.nan_to_num(embedding_matrix)
return embedding_matrix
embedding_matrix = get_embeddings(model)
embedding_matrix.shape
find_closest('the', embedding_matrix, word_idx, idx_word)
find_closest('neural', embedding_matrix, word_idx, idx_word)
find_closest('computer', embedding_matrix, word_idx, idx_word)
```
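`find_closest` is defined earlier in the notebook; a minimal sketch of such a helper, assuming the unit-normalised matrix returned by `get_embeddings` above, might look like this:
```
import numpy as np

def find_closest_sketch(query, embedding_matrix, word_idx, idx_word, n=10):
    """Print the n words whose embeddings are closest (by cosine similarity) to the query word."""
    idx = word_idx.get(query)
    if idx is None:
        print(f'{query} not found in vocabulary.')
        return
    # Rows were unit-normalised in get_embeddings, so a dot product between
    # rows is exactly the cosine similarity.
    dists = embedding_matrix @ embedding_matrix[idx]
    for i in np.argsort(dists)[::-1][:n]:
        print(f'{idx_word.get(int(i), "?"):20} cosine similarity: {dists[i]:.4f}')
```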
# Change Parameters of Network
Next, we can try to generate more accurate predictions by altering the network parameters. Primarily, we will increase the number of LSTM layers to 2. The first LSTM layer returns the sequences - the entire output for each input sequence instead of only the final one - before passing it on to the second. Training may take a little longer, but performance could also improve. There's no guarantee this model is better because we could just end up overfitting on the training data. There is no downside to trying though.
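A rough sketch of what such a stacked model might look like in Keras (the layer sizes here are illustrative; `make_word_level_model` presumably builds something along these lines when `lstm_layers = 2`):
```
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout

sketch = Sequential([
    Embedding(input_dim=num_words, output_dim=embedding_matrix.shape[1],
              weights=[embedding_matrix], trainable=True),
    # return_sequences=True hands the full sequence of hidden states
    # to the next LSTM layer instead of only the final state
    LSTM(64, return_sequences=True),
    LSTM(64),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_words, activation='softmax'),
])
```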
```
model = make_word_level_model(num_words, embedding_matrix, trainable = True, lstm_layers = 2)
model.summary()
model_name = 'training-rnn-2_layers'
callbacks = make_callbacks(model_name)
history = model.fit(X_train, y_train, batch_size = BATCH_SIZE, verbose = VERBOSE, epochs = EPOCHS, callbacks=callbacks,
validation_data = (X_valid, y_valid))
model = load_and_evaluate(model_name, return_model = True)
embedding_matrix = get_embeddings(model)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.75)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
```
# Change Training Length
Another option to try and improve the model is to change the length of the training sequences. The idea here is that using more previous words will give the network more context for predicting the next word. However, it could also be that including more words _hurts_ the model because some of them are irrelevant!
```
clear_memory()
TRAINING_LENGTH = 100
filters = '!"%;[\\]^_`{|}~\t\n'
word_idx, idx_word, num_words, word_counts, abstracts, sequences, features, labels = make_sequences(formatted,
TRAINING_LENGTH,
lower = False,
filters = filters)
X_train, X_valid, y_train, y_valid = create_train_valid(features, labels, num_words)
X_train.shape, y_train.shape
check_sizes()
model = make_word_level_model(num_words, embedding_matrix, trainable = True)
model.summary()
model_name = 'training-len100'
callbacks = make_callbacks(model_name)
history = model.fit(X_train, y_train, epochs = EPOCHS, callbacks=callbacks, batch_size = BATCH_SIZE, verbose = VERBOSE,
validation_data = (X_valid, y_valid))
model = load_and_evaluate(model_name, return_model=True)
embedding_matrix = get_embeddings(model)
word_lookup = {word: embedding_matrix[i] for i, word in idx_word.items()}
len(word_lookup)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 1.5)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
```
# Reduce Training Length
```
clear_memory()
TRAINING_LENGTH = 20
filters = '!"%[\\]^_`{|}~\t\n'
word_idx, idx_word, num_words, word_counts, abstracts, sequences, features, labels = make_sequences(formatted,
TRAINING_LENGTH,
lower = False,
filters = filters)
embedding_matrix = np.zeros((num_words, len(word_lookup['the'])))
not_found = 0
for i, word in enumerate(word_idx.keys()):
# Look up the word embedding
vector = word_lookup.get(word, None)
# Record in matrix
if vector is not None:
embedding_matrix[i + 1, :] = vector
else:
not_found += 1
print(f'There were {not_found} words without pre-trained embeddings.')
X_train, X_valid, y_train, y_valid = create_train_valid(features, labels, num_words)
X_train.shape, y_train.shape
check_sizes()
model = make_word_level_model(num_words, embedding_matrix, trainable = True, lstm_layers = 1)
model_name = 'training-len20'
callbacks = make_callbacks(model_name)
history = model.fit(X_train, y_train, epochs = EPOCHS, batch_size = BATCH_SIZE, verbose = VERBOSE,
callbacks=callbacks,
validation_data = (X_valid, y_valid))
model = load_and_evaluate(model_name, return_model = True)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.75)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
seed_html, gen_html, a_html = generate_output(model, sequences, TRAINING_LENGTH, diversity = 0.8)
HTML(seed_html)
HTML(gen_html)
HTML(a_html)
```
# Is Output from a human or machine?
```
def guess_human(model, sequences, training_length=50, new_words=50):
"""Produce 2 RNN sequences and play game to compare to actaul.
Diversity is randomly set between 0.5 and 1.25"""
diversity = np.random.uniform(0.5, 1.25)
sequence, gen_list, actual = generate_output(model, sequences, training_length,
diversity=diversity, return_output=True, n_gen = 2)
gen_0, gen_1 = gen_list
output = {'sequence': remove_spaces(' '.join(sequence)),
'c0': remove_spaces(' '.join(gen_0)),
'c1': remove_spaces(' '.join(gen_1)),
'h': remove_spaces(' '.join(actual))}
print(f"Seed Sequence: {output['sequence']}\n")
choices = ['h', 'c0', 'c1']
selected = []
i = 0
while len(selected) < 3:
choice = random.choice(choices)
selected.append(choice)
print('\n')
print(f'Option {i + 1} {output[choice]}')
choices.remove(selected[-1])
i += 1
print('\n')
guess = int(input('Enter option you think is human (1-3): ')) - 1
print('\n')
if guess == np.where(np.array(selected) == 'h')[0][0]:
print('Correct')
print('Correct Ordering', selected)
else:
print('Incorrect')
print('Correct Ordering', selected)
print('Diversity', round(diversity, 2))
guess_human(model, sequences)
guess_human(model, sequences)
```
# Conclusions
In this notebook, we saw how to build a recurrent neural network and used it to generate patent abstracts. Although the output is not always believable, this project gives us practice handling text sequences with neural networks. Deep learning has some advantages compared to traditional machine learning, especially in areas of computer vision and natural language processing. Hopefully you are now confident harnessing these powerful techniques to solve your own text problems!
This project covered a number of steps for working with text data including:
1. Cleaning data using regular expressions
2. Preparing data for a neural network
* Converting text strings to integers (tokenization)
* Encoding labels using one-hot encoding
* Building training and validation set
3. Building a recurrent neural network using LSTM cells
4. Using pre-trained word embeddings and training our own embeddings
5. Adjusting model parameters to improve performance
6. Inspecting model results
Although we didn't cover the theory in depth, we did see the implementation, which means we now have a framework to fit the concepts we study. Technical topics are best learned through practice, and this project gave us a great opportunity to explore the frontiers of natural language processing with deep learning.
# Appendix I: Training with A Data Generator
```
def data_gen(sequences, labels, batch_size, num_words):
"""Yield batches for training"""
i = 0
while True:
# Reset once all examples have been used
if i + batch_size > len(labels):
i = 0
X = np.array(sequences[i: i + batch_size])
# Create array of zeros for labels
        y = np.zeros((batch_size, num_words))
# Extract integer labels
ys = labels[i: i + batch_size]
# Convert to one hot representation
for example_num, word_num in enumerate(ys):
y[example_num, word_num] = 1
yield X, y
i += batch_size
gc.collect()
def create_train_valid_gen(features, labels, batch_size, num_words):
"""Create training and validation generators for training"""
# Randomly shuffle features and labels
features, labels = shuffle(features, labels, random_state = RANDOM_STATE)
# Decide on number of samples for training
train_end = int(0.7 * len(labels))
train_features = np.array(features[:train_end])
valid_features = np.array(features[train_end:])
train_labels = labels[:train_end]
valid_labels = labels[train_end:]
# Make training and validation generators
train_gen = data_gen(train_features, train_labels, batch_size, num_words)
valid_gen = data_gen(valid_features, valid_labels, batch_size, num_words)
return train_gen, valid_gen, train_end
BATCH_SIZE = 2048
train_gen, valid_gen, train_len = create_train_valid_gen(features, labels, BATCH_SIZE, num_words)
X, y = next(train_gen)
train_steps = train_len // BATCH_SIZE
valid_steps = (len(labels) - train_len) // BATCH_SIZE
X.shape
y.shape
train_steps
valid_steps
history = model.fit_generator(train_gen, steps_per_epoch= train_steps, epochs = 2,
callbacks=None,
validation_data = valid_gen,
validation_steps = valid_steps)
```
# Appendix II: Using a Keras Sequence for Training
```
from keras.utils import Sequence
class textSequence(Sequence):
"""Keras Sequence for training with a generator."""
def __init__(self, x_set, y_set, batch_size, num_words):
self.x, self.y = x_set, y_set
self.batch_size = batch_size
self.num_words = num_words
def __len__(self):
return int(np.ceil(len(self.x) / float(self.batch_size)))
def __getitem__(self, idx):
batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
X = np.array(batch_x)
y = np.zeros((len(batch_y), self.num_words))
for example_idx, word_idx in enumerate(batch_y):
y[example_idx, word_idx] = 1
return X, y
# Decide on number of samples for training
train_end = int(TRAIN_FRACTION * len(labels))
train_features = np.array(features[:train_end])
valid_features = np.array(features[train_end:])
train_labels = labels[:train_end]
valid_labels = labels[train_end:]
train_sequence = textSequence(train_features, train_labels, 2048, num_words)
valid_sequence = textSequence(valid_features, valid_labels, 2048, num_words)
history = model.fit_generator(train_sequence, epochs = 2,
callbacks=None,
validation_data = valid_sequence,
workers = 20)
```
|
github_jupyter
|
```
from pathlib import Path
import os
import os.path as op
from pkg_resources import resource_filename as pkgrf
import shutil
import cubids
TEST_DATA = pkgrf("cubids", "testdata")
def test_data(tmp_path):
data_root = tmp_path / "testdata"
shutil.copytree(TEST_DATA, str(data_root))
assert len(list(data_root.rglob("*"))) > 5
return data_root
workdir = os.getcwd()
def copy_testing_data(dirname):
newdir = op.join(workdir, dirname)
os.makedirs(newdir)
data_dir = test_data(Path(newdir))
return data_dir
# copy the data
data_root = copy_testing_data("test1")
!rm -rf test1
```
# Test the key / param groups
This test copies the data and makes sure we get the correct number of key and parameter groups out of it.
```
from cubids import CuBIDS
bod = CuBIDS(str(first_test / "complete"))
bod._cache_fieldmaps()
key_groups = bod.get_key_groups()
print(key_groups)
ibod = CuBIDS(str(first_test / "inconsistent"))
misfits = ibod._cache_fieldmaps()
len(misfits)
ikey_groups = ibod.get_key_groups()
ikey_groups == key_groups
```
# Working with datalad
Here we try to initialize a datalad repo on the test data
```
import datalad.api as dlapi
dl = dlapi.create(path=first_test / "inconsistent", force=True)
files_df, summary_df = bod.get_param_groups_dataframes()
%qtconsole
summary_df[["key_group", "ParamGroup", "Count"]]
import pandas as pd
param_group_cols = list(set(df.columns.to_list()) - set(["FilePath"]))
uniques = df.drop_duplicates(param_group_cols, ignore_index=True)
print(uniques.shape)
counts = df.groupby(["key_group", "ParamGroup"]).size().reset_index(name='Count')
print(counts.shape)
params_and_counts = pd.merge(uniques, counts)
print(params_and_counts.shape)
no_paths[["key_group", "ParamGroup"]].groupby(["key_group", "ParamGroup"]).count()
keyparam_df.groupby(["key_group", "ParamGroup"]).size().reset_index(name='Count')
fname = 'sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz'
bod.get_key_groups()
self = bod
from cubids.cubids import *
suffix = '(phase1|phasediff|epi|fieldmap)'
fmap_files = self.layout.get(suffix=suffix, regex_search=True,
extension=['.nii.gz', '.nii'])
files_to_fmaps = defaultdict(list)
print("\n".join([f.path for f in fmap_files]))
"""
for fmap_file in tqdm(fmap_files):
intentions = listify(fmap_file.get_metadata().get("IntendedFor"))
subject_prefix = "sub-%s/" % fmap_file.entities['subject']
for intended_for in intentions:
subject_relative_path = subject_prefix + intended_for
files_to_fmaps[subject_relative_path].append(fmap_file)
"""
fmap_file = fmap_files[0]
intentions = listify(fmap_file.get_metadata().get("IntendedFor"))
print("intentions:", intentions)
subject_prefix = "sub-%s/" % fmap_file.entities['subject']
print(subject_prefix)
suffix = '(phase1|phasediff|epi|fieldmap)'
fmap_files = self.layout.get(suffix=suffix, regex_search=True,
extension=['.nii.gz', '.nii'])
files_to_fmaps = defaultdict(list)
for fmap_file in tqdm(fmap_files):
intentions = listify(fmap_file.get_metadata().get("IntendedFor"))
subject_prefix = "sub-%s" % fmap_file.entities['subject']
for intended_for in intentions:
full_path = Path(self.path) / subject_prefix / intended_for
files_to_fmaps[str(full_path)].append(fmap_file)
for data_file, fmap_files in bod.fieldmap_lookup.items():
print(data_file[44:])
for fmap_file in fmap_files:
print(" ", fmap_file.path[44:])
files_to_fmaps.keys()
from cubids.cubids import *
files = [
'/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARAT581NDH/ses-HBNsiteRU/dwi/sub-NDARAT581NDH_ses-HBNsiteRU_acq-64dir_dwi.nii.gz',
'/Users/mcieslak/projects/test_bids_data/HBN/sub-NDARRP384BVX/ses-HBNsiteRU/dwi/sub-NDARRP384BVX_ses-HBNsiteRU_acq-64dir_dwi.nii.gz']
dfs = []
fieldmap_lookup = bod.fieldmap_lookup
key_group_name = "test"
# path needs to be relative to the root with no leading prefix
for path in files:
metadata = bod.layout.get_metadata(path)
wanted_keys = metadata.keys() & IMAGING_PARAMS
example_data = {key: metadata[key] for key in wanted_keys}
example_data["key_group"] = key_group_name
# Get the fieldmaps out and add their types
print(fieldmap_lookup[path])
fieldmap_types = sorted([fmap.entities['fmap'] for fmap in fieldmap_lookup[path]])
for fmap_num, fmap_type in enumerate(fieldmap_types):
example_data['fieldmap_type%02d' % fmap_num] = fmap_type
# Expand slice timing to multiple columns
SliceTime = example_data.get('SliceTiming')
if SliceTime:
# round each slice time to one place after the decimal
for i in range(len(SliceTime)):
SliceTime[i] = round(SliceTime[i], 1)
example_data.update(
{"SliceTime%03d" % SliceNum: time for
SliceNum, time in enumerate(SliceTime)})
del example_data['SliceTiming']
dfs.append(example_data)
example_data
```
|
github_jupyter
|
# Collaboration and Competition
---
You are welcome to use this coding environment to train your agent for the project. Follow the instructions below to get started!
### 1. Start the Environment
Run the next code cell to install a few packages. This line will take a few minutes to run!
```
!pip -q install ./python
```
The environment is already saved in the Workspace and can be accessed at the file path provided below.
```
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name="/data/Tennis_Linux_NoVis/Tennis")
```
Environments contain **_brains_** which are responsible for deciding the actions of their associated agents. Here we check for the first brain available, and set it as the default brain we will be controlling from Python.
```
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
```
### 2. Examine the State and Action Spaces
Run the code cell below to print some information about the environment.
```
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
```
### 3. Take Random Actions in the Environment
In the next code cell, you will learn how to use the Python API to control the agent and receive feedback from the environment.
Note that **in this coding environment, you will not be able to watch the agents while they are training**, and you should set `train_mode=True` to restart the environment.
```
for i in range(5): # play game for 5 episodes
env_info = env.reset(train_mode=False)[brain_name] # reset the environment
states = env_info.vector_observations # get the current state (for each agent)
scores = np.zeros(num_agents) # initialize the score (for each agent)
while True:
actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
actions = np.clip(actions, -1, 1) # all actions between -1 and 1
        env_info = env.step(actions)[brain_name]           # send all actions to the environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
scores += env_info.rewards # update the score (for each agent)
states = next_states # roll over states to next time step
if np.any(dones): # exit loop if episode finished
break
print('Total score (averaged over agents) this episode: {}'.format(np.mean(scores)))
```
When finished, you can close the environment.
### 4. It's Your Turn!
Now it's your turn to train your own agent to solve the environment! A few **important notes**:
- When training the environment, set `train_mode=True`, so that the line for resetting the environment looks like the following:
```python
env_info = env.reset(train_mode=True)[brain_name]
```
- To structure your work, you're welcome to work directly in this Jupyter notebook, or you might like to start over with a new file! You can see the list of files in the workspace by clicking on **_Jupyter_** in the top left corner of the notebook.
- In this coding environment, you will not be able to watch the agents while they are training. However, **_after training the agents_**, you can download the saved model weights to watch the agents on your own machine!
# Import necessary packages
```
import matplotlib.pyplot as plt
%matplotlib inline
import time, os
from collections import deque
import torch
from maddpg import MADDPG
```
# Instantiate agent
```
agent = MADDPG(seed=2, noise_start=0.5, update_every=2, gamma=1, t_stop_noise=30000)
episode_num = 6000
max_t = 1000
scores = []
scores_deque = deque(maxlen=100)
scores_avg = []
for i_episode in range(1, episode_num + 1):
rewards = []
env_info = env.reset(train_mode=False)[brain_name]
state = env_info.vector_observations
for t in range(max_t):
action = agent.act(state)
env_info = env.step(action)[brain_name]
next_state = env_info.vector_observations
rewards_vec = env_info.rewards
done = env_info.local_done
agent.step(state, action, rewards_vec, next_state, done)
state = next_state
rewards.append(rewards_vec)
if any(done):
break
episode_reward = np.max(np.sum(np.array(rewards), axis=0))
scores.append(episode_reward)
scores_deque.append(episode_reward)
current_avg_score = np.mean(scores_deque)
scores_avg.append(current_avg_score)
print('\rEpisode {}\tAverage Score: {:.3f}'.format(i_episode, current_avg_score), end="")
if i_episode % 200 == 0:
print('\rEpisode {}\tAverage Score: {:.3f}'.format(i_episode, current_avg_score))
agent.save_agents()
if np.mean(scores_deque) >= .5:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.3f}'.format(i_episode, np.mean(scores_deque)))
agent.save_agents()
break
```
# Training
```
%%time
import pandas as pd
pd.DataFrame({"scores":scores,"scores_avg":scores_avg}).to_csv("p3_score.csv",index=False)
```
# Plotting
```
fig = plt.figure(figsize=(16,8))
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores,'b',label='Episode Scores')
plt.plot(np.arange(1, len(scores)+1), scores_avg,'y',\
linewidth=5,label='Avg. score of last 100 episodes')
plt.ylabel('Score', fontsize=18)
plt.xlabel('Episode no', fontsize=18)
ax.legend(fontsize=14)
plt.show()
!tar -zcvf p3_.tar.gz *
```
# Closing env
```
env.close()
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import os
import glob
import nltk.data
from __future__ import division # Python 2 users only
import nltk, re, pprint
from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import linear_kernel
from nltk.corpus import stopwords
from sklearn.feature_extraction.text import TfidfVectorizer
from collections import Counter
%matplotlib inline
translations = glob.glob('/Users/sheldon/completed_podcasts/*/*.txt')
translations = list(filter(lambda x: 'DONE' not in x, translations))
translations = list(filter(lambda x: 'speech_notebook' not in x, translations))
translations
episode = [i.split('/')[5] for i in translations]
series = [i.split('/')[4] for i in translations]
locations = translations
transcribed = [open(i).read() for i in translations]
df = pd.DataFrame(data={'episode':episode,'series':series,'locations':locations,'transcribed':transcribed})
df['id'] = df.index
stop = set(stopwords.words('english'))
def tokenize_and_lower(textfile):
tokens = word_tokenize(textfile)
lower = [w.lower() for w in tokens]
filtered_words = [word for word in lower if word not in stop]
remove_contractions = [word for word in filtered_words if "'" not in word]
remove_periods = [word for word in remove_contractions if "." not in word]
count = Counter(remove_periods)
return count
#df['trans_token'] = df.transcribed.apply(tokenize_and_lower)
df['removed_stop_transcribed'] = df.transcribed.apply(tokenize_and_lower)
tf = TfidfVectorizer(stop_words=stop)
tfidf_matrix = tf.fit_transform(df['transcribed'])
tfidf_matrix
from sklearn.metrics.pairwise import linear_kernel
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
def get_related_podcasts(podcast_number,number_of_similarities):
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
related_pod_index = cosine_similarities.argsort()[podcast_number][::-1]
pod_dict = dict(zip(range(0, len(related_pod_index)),related_pod_index))
pod_dict = pd.DataFrame({'rank':pod_dict.keys()},index=pod_dict.values())
related_podcasts_df = pd.DataFrame.join(pod_dict, df, how='inner')
final_df = related_podcasts_df.sort_values('rank')[0:number_of_similarities+1][['rank','episode','series']]
return final_df
def get_related_podcasts_query(query, number_of_similarities):
query = query.lower()
query = query.split()
tfidf_matrix_test = tf.fit_transform(query)
tfidf_matrix_train = tf.transform(df['transcribed'])
tfidf_matrix_train.todense()
tfidf_matrix_test.todense()
query_similarities = linear_kernel(tfidf_matrix_test, tfidf_matrix_train)
query_similarities = query_similarities.argsort()[0][::-1]
pod_dict = dict(zip(range(0, len(query_similarities)),query_similarities))
pod_dict = pd.DataFrame({'rank':pod_dict.keys()},index=pod_dict.values())
related_podcasts_df = pd.DataFrame.join(pod_dict, df, how='inner')
final_df = related_podcasts_df.sort_values('rank')[0:number_of_similarities+1][['rank','episode','series']]
return final_df
get_related_podcasts_query('economics math statistics',5)
get_related_podcasts(17,5)
```
## Compute for queries
```
query = ['python tim ferris']
vectorizer = TfidfVectorizer(stop_words='english')
tfidf_matrix_test = tf.fit_transform(query)
tfidf_matrix_train = tf.transform(df['transcribed'])  # transform the corpus with the query-fitted vectorizer so the dimensions match
tfidf_matrix_test.todense()
cosine_similarities = linear_kernel(tfidf_matrix_test, tfidf_matrix_train)
cosine_similarities= cosine_similarities.argsort()[::-1]
cosine_similarities
```
|
github_jupyter
|
## Kaggle Advance House Price Prediction Using PyTorch
* https://docs.fast.ai/tabular.html
* https://www.fast.ai/2018/04/29/categorical-embeddings/
* https://yashuseth.blog/2018/07/22/pytorch-neural-network-for-tabular-data-with-categorical-embeddings/
```
import pandas as pd
```
### Importing the Dataset
```
df=pd.read_csv('houseprice.csv',usecols=["SalePrice", "MSSubClass", "MSZoning", "LotFrontage", "LotArea",
"Street", "YearBuilt", "LotShape", "1stFlrSF", "2ndFlrSF"]).dropna()
df.shape
df.head()
df.info()
```
### Unique Values in the Columns
```
for i in df.columns:
print("Column name {} and unique values are {}".format(i,len(df[i].unique())))
```
### Derived Features
```
import datetime
datetime.datetime.now().year
df['Total Years']=datetime.datetime.now().year-df['YearBuilt']
df.head()
df.drop("YearBuilt",axis=1,inplace=True)
df.columns
```
### Creating my Categorical Features
```
cat_features=["MSSubClass", "MSZoning", "Street", "LotShape"]
out_feature="SalePrice"
df["MSSubClass"].unique()
```
### Converting the categorical feature
```
from sklearn.preprocessing import LabelEncoder
lbl_encoders={}
lbl_encoders["MSSubClass"]=LabelEncoder()
lbl_encoders["MSSubClass"].fit_transform(df["MSSubClass"])
lbl_encoders
from sklearn.preprocessing import LabelEncoder
lbl_encoders={}
for feature in cat_features:
lbl_encoders[feature]=LabelEncoder()
df[feature]=lbl_encoders[feature].fit_transform(df[feature])
df.head()
```
### Stacking and Converting Into Tensors
```
import numpy as np
cat_features=np.stack([df['MSSubClass'],df['MSZoning'],df['Street'],df['LotShape']],1)
cat_features
```
### Convert numpy to Tensors
**Note: CATEGORICAL FEATURES SHOULD NEVER BE CONVERTED TO FLOAT; they are used as embedding indices, so they must stay integer (int64) tensors**
```
import torch
cat_features=torch.tensor(cat_features,dtype=torch.int64)
cat_features
```
### Creating continuous variables
```
cont_features=[]
for i in df.columns:
if i in ["MSSubClass", "MSZoning", "Street", "LotShape","SalePrice"]:
pass
else:
cont_features.append(i)
cont_features
```
### Stacking continuous variables to a tensor
```
cont_values=np.stack([df[i].values for i in cont_features],axis=1)
cont_values=torch.tensor(cont_values,dtype=torch.float)
cont_values
cont_values.dtype
```
### Dependent Feature
```
y=torch.tensor(df['SalePrice'].values,dtype=torch.float).reshape(-1,1)
y
df.info()
cat_features.shape,cont_values.shape,y.shape
len(df['MSSubClass'].unique())
```
## Embedding Size For Categorical columns
```
cat_dims=[len(df[col].unique()) for col in ["MSSubClass", "MSZoning", "Street", "LotShape"]]
cat_dims
```
### Dimension of Output from the Embedding Layer
* The output dimension is chosen based on the input dimension (the number of categories in the column)
* A common rule of thumb is min(50, (number of categories + 1) // 2)
* **The embedding output dimension is capped at 50**
```
embedding_dim= [(x, min(50, (x + 1) // 2)) for x in cat_dims]
embedding_dim
```
## Creating an Embedding Layer inside the Neural Network
* `nn.ModuleList` is used because we need one embedding layer per categorical feature (4 here).
* The embedding layers are created with `nn.Embedding` inside a list comprehension over the (input, output) dimension pairs.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
embed_representation=nn.ModuleList([nn.Embedding(inp,out) for inp,out in embedding_dim])
embed_representation
cat_features
cat_featuresz=cat_features[:4]
cat_featuresz
pd.set_option('display.max_rows', 500)
embedding_val=[]
for i,e in enumerate(embed_representation):
embedding_val.append(e(cat_features[:,i]))
embedding_val
len(embedding_val[0][0])
```
### Stacking the embedded values column wise
```
z = torch.cat(embedding_val, 1)
z
```
### Implement dropout - Regularization Method (Prevents Overfitting)
```
# 40% of the values are dropped out.
dropout=nn.Dropout(.4)
final_embed=dropout(z)
final_embed
```
## Create a Feed Forward Neural Network
```
import torch
import torch.nn as nn
import torch.nn.functional as F
class FeedForwardNN(nn.Module):
def __init__(self, embedding_dim, n_cont, out_sz, layers, p=0.5):
super().__init__()
self.embeds = nn.ModuleList([nn.Embedding(inp,out) for inp,out in embedding_dim])
self.emb_drop = nn.Dropout(p)
self.bn_cont = nn.BatchNorm1d(n_cont)
layerlist = []
n_emb = sum((out for inp,out in embedding_dim))
# Input feature = Embedding Layers + Continuous Variables
n_in = n_emb + n_cont
for i in layers:
layerlist.append(nn.Linear(n_in,i))
layerlist.append(nn.ReLU(inplace=True))
layerlist.append(nn.BatchNorm1d(i))
layerlist.append(nn.Dropout(p))
n_in = i
layerlist.append(nn.Linear(layers[-1],out_sz))
self.layers = nn.Sequential(*layerlist)
def forward(self, x_cat, x_cont):
embeddings = []
for i,e in enumerate(self.embeds):
embeddings.append(e(x_cat[:,i]))
x = torch.cat(embeddings, 1)
x = self.emb_drop(x)
x_cont = self.bn_cont(x_cont)
x = torch.cat([x, x_cont], 1)
x = self.layers(x)
return x
len(cont_features)
torch.manual_seed(100)
model=FeedForwardNN(embedding_dim,len(cont_features),1,[100,50],p=0.4)
```
* ReLU is used as the activation in the hidden layers; the final layer is a plain linear layer with no activation because this is a regression problem.
```
model
```
### Define Loss And Optimizer
```
model.parameters
# Later converted to Root Mean Squared Error
loss_function=nn.MSELoss()
optimizer=torch.optim.Adam(model.parameters(),lr=0.01)
df.shape
cont_values
cont_values.shape
batch_size=1200
test_size=int(batch_size*0.15)
train_categorical=cat_features[:batch_size-test_size]
test_categorical=cat_features[batch_size-test_size:batch_size]
train_cont=cont_values[:batch_size-test_size]
test_cont=cont_values[batch_size-test_size:batch_size]
y_train=y[:batch_size-test_size]
y_test=y[batch_size-test_size:batch_size]
len(train_categorical),len(test_categorical),len(train_cont),len(test_cont),len(y_train),len(y_test)
epochs=5000
final_losses=[]
for i in range(epochs):
i=i+1
y_pred=model(train_categorical,train_cont)
# RMSE
loss=torch.sqrt(loss_function(y_pred,y_train))
    final_losses.append(loss.item())
if i%10==1:
print("Epoch number: {} and the loss : {}".format(i,loss.item()))
optimizer.zero_grad()
loss.backward()
optimizer.step()
import matplotlib.pyplot as plt
%matplotlib inline
plt.plot(range(epochs), final_losses)
plt.ylabel('RMSE Loss')
plt.xlabel('Epoch')
```
### Validate the Test Data
```
y_pred=""
with torch.no_grad():
y_pred=model(test_categorical,test_cont)
loss=torch.sqrt(loss_function(y_pred,y_test))
print('RMSE: {}'.format(loss))
data_verify=pd.DataFrame(y_test.tolist(),columns=["Test"])
data_verify
data_predicted=pd.DataFrame(y_pred.tolist(),columns=["Prediction"])
data_predicted
final_output=pd.concat([data_verify,data_predicted],axis=1)
final_output['Difference']=final_output['Test']-final_output['Prediction']
final_output.head()
```
## Save the model
```
torch.save(model,'HousePrice.pt')
torch.save(model.state_dict(),'HouseWeights.pt')
```
### Loading the saved Model
```
embs_size=[(15, 8), (5, 3), (2, 1), (4, 2)]
model1=FeedForwardNN(embs_size,5,1,[100,50],p=0.4)
model1.load_state_dict(torch.load('HouseWeights.pt'))
model1.eval()
```
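With the weights restored, the re-created model can be scored the same way as before; a minimal sketch, assuming `embs_size` matches the category counts used during training and reusing the test tensors built earlier in this notebook:
```
with torch.no_grad():
    test_pred=model1(test_categorical,test_cont)
    # Compare against the RMSE of the original model on the same split
    print('RMSE of reloaded model: {}'.format(torch.sqrt(loss_function(test_pred,y_test))))
```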
|
github_jupyter
|
# Creating and grading assignments
This guide walks an instructor through the workflow for generating an assignment and preparing it for release to students.
## Accessing the formgrader extension
The formgrader extension provides the core access to nbgrader's instructor tools. After the extension has been installed, you can access it through the tab in the notebook list:

## Creating a new assignment
### From the formgrader
To create a new assignment, open the formgrader extension and click the "Add new assignment..." button at the bottom of the page. This will ask you to provide some information such as the name of the assignment and its due date. Then, you can add files to the assignment and edit them by clicking the name of the assignment:

### From the command line
To simplify this example, two notebooks of the assignment have already been stored in the `source/ps1` folder:
* [source/ps1/problem1.ipynb](source/ps1/problem1.ipynb)
* [source/ps1/problem2.ipynb](source/ps1/problem2.ipynb)
## Developing assignments with the assignment toolbar
**Note**: As you are developing your assignments, you should save them
into the `source/{assignment_id}/` folder of the nbgrader hierarchy,
where `assignment_id` is the name of the assignment you are creating
(e.g. "ps1").
Once the toolbar has been installed, you should see it in the drop down "View -> Cell Toolbar" menu:

Selecting the "Create Assignment" toolbar will create a separate toolbar
for each cell which by default will be a dropdown menu with the "-" item
selected. For markdown cells, there are two additional options to choose
from, either "Manually graded answer" or "Read-only":

For code cells, there are four options to choose from, including
"Manually graded answer", "Autograded answer", "Autograder tests", and
"Read-only":

The following sections go into detail about the different cell types,
and show cells that are taken from a complete example of an assignment
generated with the nbgrader toolbar extension:
- [source/ps1/problem1.ipynb](source/ps1/problem1.html)
- [source/ps1/problem2.ipynb](source/ps1/problem2.html)
### "Manually graded answer" cells
If you select the "Manually graded answer" option (available for both
markdown and code cells), the nbgrader extension will mark that cell as
a cell that contains an answer that must be manually graded by a human
grader. Here is an example of a manually graded answer cell:

The most common use case for this type of cell is for written
free-response answers (for example, which interpret the results of code
that may have been written and/or executed above).
*Note: the blue border only shows up when the nbgrader extension toolbar
is active; it will not be visible to students.*
### “Manually graded task” cells
If you select the “Manually graded task” option (available for markdown cells),
the nbgrader extension will mark that cell as
a cell that contains the description of a task that students have to perform.
These cells must be manually graded by a human
grader. Here is an example of a manually graded task cell:

The difference from a manually graded answer is that manually graded task cells are not edited by the student. A manually or automatically graded cell asks students to perform a task *in* one cell. A manually graded task asks students to perform a task *with* cells.
The common use case for this type of cell is for tasks that require the
student to create several cells such as "Process the data and create a plot to illustrate your results."
or to contain notebook-wide tasks such as "adhere to the PEP8 style convention."
*Note: the blue border only shows up when the nbgrader extension toolbar
is active; it will not be visible to students.*
### “Manually graded task” cells with mark scheme
A mark scheme can be created through the use of a
special syntax such as ``=== BEGIN MARK SCHEME ===`` and
``=== END MARK SCHEME ===``. The section of text between the two markers will be removed from the student version,
but will be visible at the grading stage and in the feedback.
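For example, the end of a task cell's text might contain a rubric between these markers (the rubric wording and point values below are just a hypothetical illustration):
```
Process the data and create a plot to illustrate your results.

=== BEGIN MARK SCHEME ===
2 points: the data are processed correctly.
1 point: the plot has clearly labelled axes.
=== END MARK SCHEME ===
```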
### "Autograded answer" cells
If you select the "Autograded answer" option (available only for code
cells), the nbgrader extension will mark that cell as a cell that
contains an answer which will be autograded. Here is an example of an
autograded graded answer cell:

Unlike manually graded answers, autograded answers aren't worth any
points: instead, the points for autograded answers are specified for the
particular tests that grade those answers. See the next section for
further details.
*Note: the blue border only shows up when the nbgrader extension toolbar
is active; it will not be visible to students.*
### "Autograder tests" cells
If you select the "Autograder tests" option (available only for code
cells), the nbgrader extension will mark that cell as a cell that
contains tests to be run during autograding. Here is an example of two
test cells:

The lock icon on the left side of the cell toolbar indicates that the
tests are "read-only". See the next section for further details on what
this means.
*Note: the blue border only shows up when the nbgrader extension toolbar
is active; it will not be visible to students.*
### "Autograder tests" cells with hidden tests
Tests in "Autograder tests" cells can be hidden through the use of a special syntax such as ``### BEGIN HIDDEN TESTS`` and ``### END HIDDEN TESTS``, for example:

### "Read-only" cells
If you select the "Read-only" option (available for both code and
markdown cells), the nbgrader extension will mark that cell as one that
cannot be modified. This is indicated by a lock icon on the left side of
the cell toolbar:

This functionality is particularly important for test cells, which are
always marked as read-only. Because the mechanism for autograding is
that students receive full credit if the tests pass, an easy way to get
around this would be to simply delete or comment out the tests. This
read-only functionality will reverse any such changes made by the
student.
## Validating the instructor version
### From the validate extension
Ideally, the solutions in the instructor version should be correct and pass all the test cases to ensure that you are giving your students tests that they can actually pass. To verify this is the case, you can use the validate extension:

If your assignment passes all the tests, you'll get a success pop-up:

If it doesn't pass all the tests, you'll get a message telling you which cells failed:

### From the command line
You can also validate assignments on the command line using the `nbgrader validate` command:
```
%%bash
nbgrader validate source/ps1/*.ipynb
```
## Generate and release an assignment
### From the formgrader
After an assignment has been created with the assignment toolbar, you will want to generate the version that students will receive. You can do this from the formgrader by clicking the "generate" button:

This should succeed with a pop-up window containing log output:

### From the command line
When generating the student version from the command line, nbgrader reads the source version of each notebook from:
```
{course_directory}/source/{assignment_id}/{notebook_id}.ipynb
```
Note: The `student_id` is not included here because the source and release versions of the assignment are the same for all students.
After running `nbgrader generate_assignment`, the release version of the notebooks will be:
```
{course_directory}/release/{assignment_id}/{notebook_id}.ipynb
```
As a reminder, the instructor is responsible for distributing this release version to their students using their institution's existing student communication and document distribution infrastructure.
When running `nbgrader generate_assignment`, the assignment name (which is "ps1") is passed. We also specify a *header* notebook (`source/header.ipynb`) to prepend at the beginning of each notebook in the assignment. By default, this command should be run from the root of the course directory:
```
%%bash
nbgrader generate_assignment "ps1" --IncludeHeaderFooter.header=source/header.ipynb --force
```
## Preview the student version
After generating the student version of assignment, you should preview it to make sure that it looks correct. You can do this from the formgrader extension by clicking the "preview" button:

Under the hood, there will be a new folder called `release` with the same structure as `source`. The `release` folder contains the actual release version of the assignment files:
* [release/ps1/problem1.ipynb](release/ps1/problem1.ipynb)
* [release/ps1/problem2.ipynb](release/ps1/problem2.ipynb)
If you are working on the command line, you may want to formally verify the student version as well. Ideally, all the tests should fail in the student version if the student hasn't implemented anything. To verify that this is in fact the case, we can use the `nbgrader validate --invert` command:
```
%%bash
nbgrader validate --invert release/ps1/*.ipynb
```
If the notebook fails all the test cases, you should see the message "Success! The notebook does not pass any tests."
## Releasing files to students and collecting submissions
nbgrader does not distribute or collect files for you in this workflow; once you have collected student work through your own infrastructure, each submission should be placed at the following location so that it can be autograded:
```
submitted/{student_id}/{assignment_id}/{notebook_id}.ipynb
```
**Please note**: Students must use version 3 or greater of the IPython/Jupyter notebook for nbgrader to work properly. If they are not using version 3 or greater, it is possible for them to delete cells that contain important metadata for nbgrader. With version 3 or greater, there is a feature in the notebook that prevents cells from being deleted. See [this issue](https://github.com/jupyter/nbgrader/issues/424) for more details.
To ensure that students have a recent enough version of the notebook, you can include a cell such as the following in each notebook of the assignment:
```python
import IPython
assert IPython.version_info[0] >= 3, "Your version of IPython is too old, please update it."
```
## Autograde assignments
In the following example, we have an assignment with two notebooks. There are two submissions of the assignment:
Submission 1:
* [submitted/bitdiddle/ps1/problem1.ipynb](submitted/bitdiddle/ps1/problem1.ipynb)
* [submitted/bitdiddle/ps1/problem2.ipynb](submitted/bitdiddle/ps1/problem2.ipynb)
Submission 2:
* [submitted/hacker/ps1/problem1.ipynb](submitted/hacker/ps1/problem1.ipynb)
* [submitted/hacker/ps1/problem2.ipynb](submitted/hacker/ps1/problem2.ipynb)
### From the formgrader
You can autograde individual submissions from the formgrader directly. To do so, click on the number of submissions in the "Manage Assignments" view:

This will take you to a new page where you can see all the submissions. For a particular submission, click the "autograde" button to autograde it:

After autograding completes, you will see a pop-up window with log output:

And back on the submissions screen, you will see that the status of the submission has changed to "needs manual grading" and there is now a reported score as well:

### From the command line
We can run the autograder for all students at once from the command line:
```
%%bash
nbgrader autograde "ps1" --force
```
When grading the submission for `Bitdiddle`, you'll see some warnings that look like "Checksum for grade cell correct_squares has changed!". What's happening here is that nbgrader recorded the *original* contents of the grade cell `correct_squares` (when `nbgrader generate_assignment` was run), and is checking the submitted version against this original version. It has found that the submitted version changed (perhaps this student tried to cheat by commenting out the failing tests), and has therefore overwritten the submitted version of the tests with the original version of the tests.
You may also notice that there is a note saying "ps1 for Bitdiddle is 21503.948203 seconds late". What is happening here is that nbgrader is detecting a file in Bitdiddle's submission called `timestamp.txt`, reading in that timestamp, and saving it into the database. From there, it can compare the timestamp to the duedate of the problem set, and compute whether the submission is at all late.
Once the autograding is complete, there will be new directories for the autograded versions of the submissions:
```
autograded/{student_id}/{assignment_id}/{notebook_id}.ipynb
```
Autograded submission 1:
* [autograded/bitdiddle/ps1/problem1.ipynb](autograded/bitdiddle/ps1/problem1.ipynb)
* [autograded/bitdiddle/ps1/problem2.ipynb](autograded/bitdiddle/ps1/problem2.ipynb)
Autograded submission 2:
* [autograded/hacker/ps1/problem1.ipynb](autograded/hacker/ps1/problem1.ipynb)
* [autograded/hacker/ps1/problem2.ipynb](autograded/hacker/ps1/problem2.ipynb)
## Manual grading
After running `nbgrader autograde`, the autograded version of the
notebooks will be:
```
autograded/{student_id}/{assignment_id}/{notebook_id}.ipynb
```
We can manually grade assignments through the formgrader as well, by clicking on the "Manual Grading" navigation button. This will provide you with an interface for hand grading assignments that it finds in the directory listed above. Note that this applies to *all* assignments as well -- as long as the autograder has been run on the assignment, it will be available for manual grading via the formgrader.
## Generate feedback on assignments
Feedback is generated from the autograded versions of the notebooks, which are located at:
```
autograded/{student_id}/{assignment_id}/{notebook_id}.ipynb
```
Creating feedback for students is divided into two parts:
* generate feedback
* release feedback
Generating feedback will create HTML files in the local instructor directory. Releasing feedback will copy those HTML files to the nbgrader exchange.
We can generate feedback based on the graded notebooks by running the `nbgrader generate_feedback` command, which will produce HTML versions of these notebooks at the following location:
```
feedback/{student_id}/{assignment_id}/{notebook_id}.html
```
The `nbgrader generate_feedback` is available by clicking the Generate Feedback button on either the Manage Assignments view (to generate feedback for all graded submissions), or on the individual student's Manage Submission page (to generate feedback for that specific individual).
We can release the generated feedback by running the `nbgrader release_feedback` command, which will send the generated HTML files to the nbgrader exchange.
The `nbgrader release_feedback` is available by clicking the Release Feedback button on either the Manage Assignments view (to release feedback for all generated feedback), or on the individual student's Manage Submission page (to release feedback for that specific individual).
### Workflow example: Instructor returning feedback to students
In some scenarios, you may not want to (or be able to) use the exchange to deliver student feedback. This sections describes a workflow for manually returning generated feedback.
In the following example, we have an assignment with two notebooks. There are two submissions of the assignment that have been graded:
Autograded submission 1:
* [autograded/bitdiddle/ps1/problem1.ipynb](autograded/bitdiddle/ps1/problem1.ipynb)
* [autograded/bitdiddle/ps1/problem2.ipynb](autograded/bitdiddle/ps1/problem2.ipynb)
Autograded submission 2:
* [autograded/hacker/ps1/problem1.ipynb](autograded/hacker/ps1/problem1.ipynb)
* [autograded/hacker/ps1/problem2.ipynb](autograded/hacker/ps1/problem2.ipynb)
Generating feedback is fairly straightforward (and as with the other nbgrader commands for instructors, this must be run from the root of the course directory):
```
%%bash
nbgrader generate_feedback "ps1"
```
Once the feedback has been generated, there will be new directories and HTML files corresponding to each notebook in each submission:
Feedback for submission 1:
* [feedback/bitdiddle/ps1/problem1.html](feedback/bitdiddle/ps1/problem1.html)
* [feedback/bitdiddle/ps1/problem2.html](feedback/bitdiddle/ps1/problem2.html)
Feedback for submission 2:
* [feedback/hacker/ps1/problem1.html](feedback/hacker/ps1/problem1.html)
* [feedback/hacker/ps1/problem2.html](feedback/hacker/ps1/problem2.html)
If the exchange is available, one would of course use `nbgrader release_feedback`. However if not available, you can now deliver these generated HTML feedback files via whatever mechanism you wish.
## Getting grades from the database
In addition to creating feedback for the students, you may need to upload grades to whatever learning management system your school uses (e.g. Canvas, Blackboard, etc.). nbgrader provides a way to export grades to CSV out of the box, with the `nbgrader export` command:
```
%%bash
nbgrader export
```
After running `nbgrader export`, you will see the grades in a CSV file called `grades.csv`:
```
%%bash
cat grades.csv
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/williamsdoug/CTG_RP/blob/master/CTG_RP_Train_Model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Generate Datasets and Train Model
```
#! rm -R images
! ls
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import config_local
from config_local import *
import numpy as np
import matplotlib.pyplot as plt
import gc
from fastai.vision import *
from fastai.metrics import error_rate
import torch
from torch import nn
import collections
import pprint
import random
from compute_metadata import get_splits, generate_label_file, generate_lists
from generate_recurrence_images import generate_rp_images, gen_recurrence_params
```
## Code
## Config
```
np.random.seed(1234)
random.seed(1234)
# Configure Recurrent Plot Parameters
POLICY='early_valid' # 'best_quality', 'early_valid', 'late_valid'
rp_params = gen_recurrence_params(dimensions=[2], time_delays=[1], percentages=[1,3, 10], use_clip_vals=[False])
rp_params
tfms=[]
size=64
bs=64
workers=4
path = Path() / 'images'
```
## Generate Recurrence Images
```
generate_rp_images(RECORDINGS_DIR, images_dir=IMAGES_DIR, rp_params=rp_params[:1],
policy=POLICY,
show_signal=False, show_image=True, verbose=True, cmap='binary',
limit=3,
)
generate_rp_images(RECORDINGS_DIR, images_dir=IMAGES_DIR, rp_params=rp_params,
policy=POLICY,
show_signal=False, show_image=False, verbose=True, cmap='binary',
)
#!ls images
```
## Generate Train and Valid Label Files
```
train_valid_groups_full = get_splits(image_dir='images', image_file='rp_images_index.json',
exclude=['_clipped'],
thresh = 7.15)
# Create valid_x.csv files for each split
for i in range(len(train_valid_groups_full)):
generate_lists(train_valid_groups_full[i], train_file='train_{}.csv'.format(i),
valid_file='valid_{}.csv'.format(i))
!ls images/*.csv
train = ImageList.from_csv(path, 'train_0.csv')
valid = ImageList.from_csv(path, 'valid_0.csv')
lls = ItemLists(path, train, valid).label_from_df(cols=1).transform(tfms, size=size)
#db = lls.databunch(bs=bs, num_workers=workers)#.normalize(binary_image_stats)
db = lls.databunch(bs=bs, num_workers=workers)
my_stats = db.batch_stats()
db = lls.databunch(bs=bs, num_workers=workers).normalize(my_stats)
db.batch_stats()
```
### Examine Results
```
print('nClass: {} classes: {}'.format(db.c, db.classes))
db
im = train.get(-1)
print(len(train), im.size)
im.show()
```
## Model
```
trial_model = nn.Sequential(
nn.Sequential(
nn.Conv2d(3,8,5), # 60 × 60 × 8
nn.ReLU(),
nn.AvgPool2d(3, stride=2), # 29 × 29 × 8
#nn.Dropout(p=0.25),
nn.Conv2d(8,8,5), # 25 × 25 × 8
nn.ReLU(),
nn.AvgPool2d(3, stride=2), # 12 × 12 × 8
Flatten() # 1152
),
# removed model head to compute flatten size
)
trial_learn = Learner(db, trial_model, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
trial_learn.summary()
del trial_model
trial_learn.destroy()
gc.collect()
mymodel = nn.Sequential(
nn.Sequential(
nn.Conv2d(3,8,5), # 60 × 60 × 8
nn.ReLU(),
nn.AvgPool2d(3, stride=2), # 29 × 29 × 8
#nn.Dropout(p=0.25),
nn.Conv2d(8,8,5), # 25 × 25 × 8
nn.ReLU(),
nn.AvgPool2d(3, stride=2), # 12 × 12 × 8
Flatten() # 1152
),
nn.Sequential(
# nn.Dropout(p=0.25),
nn.Linear(1152, 144),
nn.ReLU(),
nn.Dropout(p=0.8),
nn.Linear(144, db.c)
)
)
learn = Learner(db, mymodel, loss_func = nn.CrossEntropyLoss(), metrics=accuracy)
learn.summary()
learn.save('initial')
```
# Train Model
```
learn.fit_one_cycle(1, 1e-6) # learn.fit_one_cycle(1, 0.01)
# learn.save('save-1')
learn.lr_find(end_lr=1)
learn.recorder.plot()
learn.load('initial')
learn.fit_one_cycle(100, 3e-3) # learn.fit_one_cycle(1, 0.01)
learn.load('initial')
learn.fit_one_cycle(100, 1e-2) # learn.fit_one_cycle(1, 0.01)
learn.load('initial')
learn.fit_one_cycle(100, 1e-3) # learn.fit_one_cycle(1, 0.01)
learn.load('initial')
learn.fit_one_cycle(100, 1e-4) # learn.fit_one_cycle(1, 0.01)
#train an additional 100 epochs
learn.fit_one_cycle(100, 1e-4) # learn.fit_one_cycle(1, 0.01)
gc.collect()
```
|
github_jupyter
|
```
# Developer: Halmon Lui
# Implement a Hash Table using Linear Probing from scratch
class HashTable:
def __init__(self, length=11):
self.hash_list = [None for _ in range(length)]
self.length = length
self.item_count = 0
# hash key where m is size of table
def _hash(self, k, m):
return hash(k) % m
# if key exists, update value
def add(self, key, value):
# If we are full return immediately
if self.item_count==self.length:
return 'Error: HashTable is full'
hash_key = self._hash(key, self.length)
# If there is something in the location make sure we aren't colliding
if self.hash_list[hash_key]:
            # If the keys match, update the value in place (the item count does not change)
            if self.hash_list[hash_key][0] == key:
                self.hash_list[hash_key] = (key, value)
# Handle collision case
elif self.item_count < self.length:
# Linear probe for a free spot
for count, location in enumerate(self.hash_list):
if not location:
self.hash_list[count] = (key, value)
self.item_count += 1
return
# Slot is free to add the key, value pair
else:
self.hash_list[hash_key] = (key, value)
self.item_count += 1
# check if key exists
def exists(self, key):
hash_key = self._hash(key, self.length)
if self.hash_list[hash_key]:
# Return true if matching key
if self.hash_list[hash_key][0] == key:
return True
# Handle collision case
else:
# Linear probe for matching key
for item in self.hash_list:
if item and item[0]==key:
return True
return False
# get value from key
def get(self, key):
hash_key = self._hash(key, self.length)
if self.hash_list[hash_key]:
# Return value if matching key
if self.hash_list[hash_key][0] == key:
return self.hash_list[hash_key][1]
# Handle collision case
else:
for item in self.hash_list:
if item and item[0]==key:
return item[1]
return 'Error: Invalid Key'
# remove value at key
def remove(self, key):
hash_key = self._hash(key, self.length)
if self.hash_list[hash_key]:
            # Delete if key matches
            if self.hash_list[hash_key][0] == key:
                self.hash_list[hash_key] = None
                self.item_count -= 1
                return
            # Handle collision case: linear probe for the matching key
            else:
                for count, item in enumerate(self.hash_list):
                    if item and item[0]==key:
                        self.hash_list[count] = None
                        self.item_count -= 1
                        return
        return 'Error: Invalid Key'
# Test HashTable methods
# Initialize HashTable object
ht = HashTable()
print('Created HashTable: ', ht.hash_list)
# Add to table, check if exists and get it
print('Adding key1')
ht.add('key1', 'hello')
print('Check if key1 exists: ', ht.exists('key1'))
print('Get value of key1: ', ht.get('key1'))
print(ht.hash_list)
# Remove key1 from table and get it
print('Removing key1')
ht.remove('key1')
print('Check if key1 exists: ', ht.exists('key1'))
print('Get value of key1: ', ht.get('key1'))
print(ht.hash_list)
print('###########################################')
# Add to table, check if exists and get it
ht = HashTable()
print('Adding key1 and key2')
ht.add('key1', 'hello')
ht.add('key2', 'world')
print('Check if key1 exists: ', ht.exists('key1'))
print('Get value of key1: ', ht.get('key1'))
print('Check if key2 exists: ', ht.exists('key2'))
print('Get value of key2: ', ht.get('key2'))
print(ht.hash_list)
# Remove key1 from table and get it
print('Removing key1')
ht.remove('key1')
print('Check if key1 exists: ', ht.exists('key1'))
print('Get value of key1: ', ht.get('key1'))
print('Check if key1 exists: ', ht.exists('key2'))
print('Get value of key1: ', ht.get('key2'))
print(ht.hash_list)
print('###########################################')
# Add to table, check if exists and get it
ht = HashTable()
print('Fill up the table and check for collisions')
ht.add('key1', 'aaaa')
ht.add('key2', 'bbbb')
ht.add('key3', 'cccc')
ht.add('key4', 'dddd')
ht.add('key5', 'eeee')
ht.add('key6', 'ffff')
ht.add('key7', 'gggg')
ht.add('key8', 'hhhh')
ht.add('key9', 'iiii')
ht.add('key10', 'jjjj')
ht.add('key11', 'kkkk')
print('Check if key1 exists: ', ht.exists('key1'))
print('Get value of key1: ', ht.get('key1'))
print('Check if key2 exists: ', ht.exists('key2'))
print('Get value of key2: ', ht.get('key2'))
print('Check if key3 exists: ', ht.exists('key3'))
print('Get value of key3: ', ht.get('key3'))
print('Check if key4 exists: ', ht.exists('key4'))
print('Get value of key4: ', ht.get('key4'))
print('Check if key5 exists: ', ht.exists('key5'))
print('Get value of key5: ', ht.get('key5'))
print('Check if key6 exists: ', ht.exists('key6'))
print('Get value of key6: ', ht.get('key6'))
print('Check if key7 exists: ', ht.exists('key7'))
print('Get value of key7: ', ht.get('key7'))
print('Check if key8 exists: ', ht.exists('key8'))
print('Get value of key8: ', ht.get('key8'))
print('Check if key9 exists: ', ht.exists('key9'))
print('Get value of key9: ', ht.get('key9'))
print('Check if key10 exists: ', ht.exists('key10'))
print('Get value of key10: ', ht.get('key10'))
print('Check if key11 exists: ', ht.exists('key11'))
print('Get value of key11: ', ht.get('key11'))
print(ht.hash_list)
print('test removing key11')
ht.remove('key11')
print(ht.hash_list)
# Test bad cases
# Add to table, check if exists and get it
ht = HashTable()
ht.add('key1', 'aaaa')
ht.add('key2', 'bbbb')
ht.add('key3', 'cccc')
ht.add('key4', 'dddd')
ht.add('key5', 'eeee')
ht.add('key6', 'ffff')
ht.add('key7', 'gggg')
ht.add('key8', 'hhhh')
ht.add('key9', 'iiii')
ht.add('key10', 'jjjj')
ht.add('key11', 'kkkk')
print('Try adding over table size: ', ht.add('key12', 'no bueno'))
print('Try getting invalid key: ', ht.get('badkeyhere'))
print('Try removing invalid key: ', ht.remove('notpossible'))
```
|
github_jupyter
|
# Targeting Direct Marketing with Amazon SageMaker XGBoost
_**Supervised Learning with Gradient Boosted Trees: A Binary Prediction Problem With Unbalanced Classes**_
## Background
Direct marketing, whether through mail, email, phone, or other channels, is a common tactic to acquire customers. Because resources and a customer's attention are limited, the goal is to target only the subset of prospects who are likely to engage with a specific offer. Predicting those potential customers based on readily available information like demographics, past interactions, and environmental factors is a common machine learning problem.
This notebook presents an example problem to predict if a customer will enroll for a term deposit at a bank, after one or more phone calls. The steps include:
* Preparing your Amazon SageMaker notebook
* Downloading data from the internet into Amazon SageMaker
* Investigating and transforming the data so that it can be fed to Amazon SageMaker algorithms
* Estimating a model using the Gradient Boosting algorithm
* Evaluating the effectiveness of the model
* Setting the model up to make on-going predictions
---
## Preparation
_This notebook was created and tested on an ml.m4.xlarge notebook instance._
Let's start by specifying:
- The S3 bucket and prefix that you want to use for training and model data. This should be within the same region as the Notebook Instance, training, and hosting.
- The IAM role ARN used to give training and hosting access to your data. See the documentation for how to create these. Note: if more than one role is required for notebook instances, training, and/or hosting, please replace the boto regexp with the appropriate full IAM role ARN string(s).
```
# Define IAM role
import boto3
import sagemaker
import re
from sagemaker import get_execution_role
region = boto3.Session().region_name
session = sagemaker.Session()
bucket = session.default_bucket()
prefix = 'sagemaker/DEMO-xgboost-dm'
role = get_execution_role()
```
Now let's bring in the Python libraries that we'll use throughout the analysis
```
import numpy as np # For matrix operations and numerical processing
import pandas as pd # For munging tabular data
import matplotlib.pyplot as plt # For charts and visualizations
from IPython.display import Image # For displaying images in the notebook
from IPython.display import display # For displaying outputs in the notebook
from time import gmtime, strftime # For labeling SageMaker models, endpoints, etc.
import sys # For writing outputs to notebook
import math # For ceiling function
import json # For parsing hosting outputs
import os # For manipulating filepath names
import sagemaker # Amazon SageMaker's Python SDK provides many helper functions
from sagemaker.serializers import CSVSerializer          # Converts strings for HTTP POST requests on inference
! python -m pip install smdebug
```
---
## Data
Let's start by downloading the [direct marketing dataset](https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip) from the sample data s3 bucket.
\[Moro et al., 2014\] S. Moro, P. Cortez and P. Rita. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014
```
!wget https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip
!conda install -y -c conda-forge unzip
!unzip -o bank-additional.zip
```
Now let's read this into a pandas data frame and take a look.
```
data = pd.read_csv('./bank-additional/bank-additional-full.csv')
pd.set_option('display.max_rows',10)
data
```
Let's talk about the data. At a high level, we can see:
* We have a little over 40K customer records, and 20 features for each customer
* The features are mixed; some numeric, some categorical
* The data appears to be sorted, at least by `time` and `contact`, maybe more
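A quick way to confirm this mix of feature types is to inspect the column dtypes; a minimal sketch using the `data` frame loaded above (pandas only, nothing new is assumed):

```python
# Count numeric vs. object (categorical) columns
print(data.dtypes.value_counts())

# Number of distinct values in each categorical column
print(data.select_dtypes(include='object').nunique())
```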
_**Specifics on each of the features:**_
*Demographics:*
* `age`: Customer's age (numeric)
* `job`: Type of job (categorical: 'admin.', 'services', ...)
* `marital`: Marital status (categorical: 'married', 'single', ...)
* `education`: Level of education (categorical: 'basic.4y', 'high.school', ...)
*Past customer events:*
* `default`: Has credit in default? (categorical: 'no', 'unknown', ...)
* `housing`: Has housing loan? (categorical: 'no', 'yes', ...)
* `loan`: Has personal loan? (categorical: 'no', 'yes', ...)
*Past direct marketing contacts:*
* `contact`: Contact communication type (categorical: 'cellular', 'telephone', ...)
* `month`: Last contact month of year (categorical: 'may', 'nov', ...)
* `day_of_week`: Last contact day of the week (categorical: 'mon', 'fri', ...)
* `duration`: Last contact duration, in seconds (numeric). Important note: If duration = 0 then `y` = 'no'.
*Campaign information:*
* `campaign`: Number of contacts performed during this campaign and for this client (numeric, includes last contact)
* `pdays`: Number of days that passed by after the client was last contacted from a previous campaign (numeric)
* `previous`: Number of contacts performed before this campaign and for this client (numeric)
* `poutcome`: Outcome of the previous marketing campaign (categorical: 'nonexistent','success', ...)
*External environment factors:*
* `emp.var.rate`: Employment variation rate - quarterly indicator (numeric)
* `cons.price.idx`: Consumer price index - monthly indicator (numeric)
* `cons.conf.idx`: Consumer confidence index - monthly indicator (numeric)
* `euribor3m`: Euribor 3 month rate - daily indicator (numeric)
* `nr.employed`: Number of employees - quarterly indicator (numeric)
*Target variable:*
* `y`: Has the client subscribed a term deposit? (binary: 'yes','no')
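Since the notebook's subtitle highlights unbalanced classes, it is worth checking the imbalance of the target explicitly; a minimal sketch using the same `data` frame:

```python
# Fraction of customers who did / did not subscribe to a term deposit
print(data['y'].value_counts(normalize=True))
```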
### Transformation
Cleaning up data is part of nearly every machine learning project. It arguably presents the biggest risk if done incorrectly and is one of the more subjective aspects in the process. Several common techniques include:
* Handling missing values: Some machine learning algorithms are capable of handling missing values, but most would rather not. Options include:
* Removing observations with missing values: This works well if only a very small fraction of observations have incomplete information.
* Removing features with missing values: This works well if there are a small number of features which have a large number of missing values.
* Imputing missing values: Entire [books](https://www.amazon.com/Flexible-Imputation-Missing-Interdisciplinary-Statistics/dp/1439868247) have been written on this topic, but common choices are replacing the missing value with the mode or mean of that column's non-missing values (a small sketch follows this list).
* Converting categorical to numeric: The most common method is one hot encoding, which for each feature maps every distinct value of that column to its own feature which takes a value of 1 when the categorical feature is equal to that value, and 0 otherwise.
* Oddly distributed data: Although for non-linear models like Gradient Boosted Trees, this has very limited implications, parametric models like regression can produce wildly inaccurate estimates when fed highly skewed data. In some cases, simply taking the natural log of the features is sufficient to produce more normally distributed data. In others, bucketing values into discrete ranges is helpful. These buckets can then be treated as categorical variables and included in the model when one hot encoded.
* Handling more complicated data types: Manipulating images, text, or data at varying grains is left for other notebook templates.
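As a small illustration of the imputation option above, here is a hedged sketch on a hypothetical toy frame. It is not applied to this dataset, which marks missing categorical information with the string 'unknown' rather than NaN:

```python
import numpy as np
import pandas as pd

# Hypothetical toy frame with missing values (illustration only)
toy = pd.DataFrame({'age': [25, np.nan, 40, 31],
                    'job': ['admin.', 'services', None, 'admin.']})

toy['age'] = toy['age'].fillna(toy['age'].mean())     # mean-impute the numeric column
toy['job'] = toy['job'].fillna(toy['job'].mode()[0])  # mode-impute the categorical column
print(toy)
```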
Luckily, some of these aspects have already been handled for us, and the algorithm we are showcasing tends to do well at handling sparse or oddly distributed data. Therefore, let's keep pre-processing simple.
```
data['no_previous_contact'] = np.where(data['pdays'] == 999, 1, 0) # Indicator variable to capture when pdays takes a value of 999
data['not_working'] = np.where(np.in1d(data['job'], ['student', 'retired', 'unemployed']), 1, 0) # Indicator for individuals not actively employed
model_data = pd.get_dummies(data) # Convert categorical variables to sets of indicators
```
Another question to ask yourself before building a model is whether certain features will add value in your final use case. For example, if your goal is to deliver the best prediction, then will you have access to that data at the moment of prediction? Knowing it's raining is highly predictive for umbrella sales, but forecasting weather far enough out to plan inventory on umbrellas is probably just as difficult as forecasting umbrella sales without knowledge of the weather. So, including this in your model may give you a false sense of precision.
Following this logic, let's remove the economic features and `duration` from our data as they would need to be forecasted with high precision to use as inputs in future predictions.
Even if we were to use values of the economic indicators from the previous quarter, this value is likely not as relevant for prospects contacted early in the next quarter as those contacted later on.
```
model_data = model_data.drop(['duration', 'emp.var.rate', 'cons.price.idx', 'cons.conf.idx', 'euribor3m', 'nr.employed'], axis=1)
```
When building a model whose primary goal is to predict a target value on new data, it is important to understand overfitting. Supervised learning models are designed to minimize error between their predictions of the target value and actuals, in the data they are given. This last part is key, as frequently in their quest for greater accuracy, machine learning models bias themselves toward picking up on minor idiosyncrasies within the data they are shown. These idiosyncrasies then don't repeat themselves in subsequent data, meaning those predictions can actually be made less accurate, at the expense of more accurate predictions in the training phase.
The most common way of preventing this is to build models with the concept that a model shouldn't only be judged on its fit to the data it was trained on, but also on "new" data. There are several different ways of operationalizing this: holdout validation, cross-validation, leave-one-out validation, etc. For our purposes, we'll simply randomly split the data into 3 uneven groups. The model will be trained on 70% of the data; it will then be evaluated on 20% of the data to give us an estimate of the accuracy we hope to have on "new" data, and 10% will be held back as a final testing dataset which will be used later on.
```
train_data, validation_data, test_data = np.split(model_data.sample(frac=1, random_state=1729), [int(0.7 * len(model_data)), int(0.9 * len(model_data))]) # Randomly sort the data then split out first 70%, second 20%, and last 10%
```
Amazon SageMaker's XGBoost container expects data in the libSVM or CSV data format. For this example, we'll stick to CSV. Note that the first column must be the target variable and the CSV should not include headers. Also, notice that although repetitive it's easiest to do this after the train|validation|test split rather than before. This avoids any misalignment issues due to random reordering.
```
pd.concat([train_data['y_yes'], train_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('train.csv', index=False, header=False)
pd.concat([validation_data['y_yes'], validation_data.drop(['y_no', 'y_yes'], axis=1)], axis=1).to_csv('validation.csv', index=False, header=False)
```
Now we'll copy the files to S3 for Amazon SageMaker's managed training to pick up.
```
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'train/train.csv')).upload_file('train.csv')
boto3.Session().resource('s3').Bucket(bucket).Object(os.path.join(prefix, 'validation/validation.csv')).upload_file('validation.csv')
```
---
## Training
Now we know that most of our features have skewed distributions, some are highly correlated with one another, and some appear to have non-linear relationships with our target variable. Also, for targeting future prospects, good predictive accuracy is preferred to being able to explain why that prospect was targeted. Taken together, these aspects make gradient boosted trees a good candidate algorithm.
There are several intricacies to understanding the algorithm, but at a high level, gradient boosting works by combining predictions from many simple models, each of which tries to address the weaknesses of the previous models. By doing this, the collection of simple models can actually outperform large, complex models. Other Amazon SageMaker notebooks elaborate further on gradient boosted trees and how they differ from similar algorithms.
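To make that intuition concrete, here is a minimal, hedged sketch of the boosting idea on a toy regression problem. It assumes scikit-learn is available in the notebook environment (it is not otherwise used here) and is purely illustrative, not the exact algorithm the XGBoost container runs:

```python
import numpy as np
from sklearn.tree import DecisionTreeRegressor

rng = np.random.RandomState(0)
X = rng.uniform(0, 5, size=(200, 1))
y = np.sin(X).ravel() + rng.normal(scale=0.1, size=200)

learning_rate = 0.1
prediction = np.zeros_like(y)
for _ in range(50):
    residuals = y - prediction                     # what the ensemble still gets wrong
    tree = DecisionTreeRegressor(max_depth=2).fit(X, residuals)
    prediction += learning_rate * tree.predict(X)  # each shallow tree corrects its predecessors

print('Training MSE after boosting:', np.mean((y - prediction) ** 2))
```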
`xgboost` is an extremely popular, open-source package for gradient boosted trees. It is computationally powerful, fully featured, and has been successfully used in many machine learning competitions. Let's start with a simple `xgboost` model, trained using Amazon SageMaker's managed, distributed training framework.
First we'll need to specify the ECR container location for Amazon SageMaker's implementation of XGBoost.
```
container = sagemaker.image_uris.retrieve(region=boto3.Session().region_name, framework='xgboost', version='1.0-1')
```
Then, because we're training with the CSV file format, we'll create `s3_input`s that our training function can use as a pointer to the files in S3, which also specify that the content type is CSV.
```
s3_input_train = sagemaker.TrainingInput(s3_data='s3://{}/{}/train'.format(bucket, prefix), content_type='csv')
s3_input_validation = sagemaker.TrainingInput(s3_data='s3://{}/{}/validation/'.format(bucket, prefix), content_type='csv')
base_job_name = "demo-smdebug-xgboost-regression"
bucket_path='s3://{}/{}/output'.format(bucket, prefix)
```
### Enabling Debugger in Estimator object
#### DebuggerHookConfig
Enabling Amazon SageMaker Debugger in a training job can be accomplished by adding its configuration to the Estimator object constructor:
```python
from sagemaker.debugger import DebuggerHookConfig, CollectionConfig
estimator = Estimator(
...,
debugger_hook_config = DebuggerHookConfig(
s3_output_path="s3://{bucket_name}/{location_in_bucket}", # Required
collection_configs=[
CollectionConfig(
name="metrics",
parameters={
"save_interval": "10"
}
)
]
)
)
```
Here, the `DebuggerHookConfig` object tells the `Estimator` which data we are interested in.
Two parameters are provided in the example:
- `s3_output_path`: points to the S3 bucket/path where we intend to store the debugging tensors.
The amount of data saved depends on multiple factors; the major ones are the training job, the data set, the model, and the frequency of saving tensors.
This bucket should be in your AWS account, and you should have full access control over it.
**Important note**: this S3 bucket should be created in the same region where your training job will run; otherwise you might run into problems with cross-region access.
- `collection_configs`: enumerates the named collections of tensors we want to save.
Collections are a convenient way to organize related tensors under the same umbrella so they are easy to navigate during analysis.
In this particular example, you are instructing Amazon SageMaker Debugger that you are interested in a single collection named `metrics`, and that the metrics should be saved every 10 iterations.
See the [Collection](https://github.com/awslabs/sagemaker-debugger/blob/master/docs/api.md#collection) documentation for all parameters supported by Collections, and the DebuggerConfig documentation for more details about the parameters DebuggerConfig supports.
#### Rules
Enabling Rules in a training job can be accomplished by adding the `rules` configuration to the Estimator object constructor.
- `rules`: This new parameter will accept a list of rules you wish to evaluate against the tensors output by this training job.
For rules, Amazon SageMaker Debugger supports two types:
- SageMaker Rules: These are rules specially curated by the data science and engineering teams in Amazon SageMaker which you can opt to evaluate against your training job.
- Custom Rules: You can optionally choose to write your own rule as a Python source file and have it evaluated against your training job.
For Amazon SageMaker Debugger to evaluate such a rule, you have to provide the S3 location of the rule source and the evaluator image.
In this example, you will use Amazon SageMaker's LossNotDecreasing rule, which helps you identify situations where the training loss is not going down.
```python
from sagemaker.debugger import rule_configs, Rule
estimator = Estimator(
...,
rules=[
Rule.sagemaker(
rule_configs.loss_not_decreasing(),
rule_parameters={
"collection_names": "metrics",
"num_steps": "10",
},
),
],
)
```
- `rule_parameters`: In this parameter, you provide the runtime values of the parameters used by the rule.
You can still choose to pass in other values which may be necessary for your rule to be evaluated.
In this example, you will use Amazon SageMaker's LossNotDecreasing rule to monitor the `metrics` collection.
The rule will alert you if the tensors in `metrics` have not decreased for more than 10 steps.
First we'll need to specify training parameters to the estimator. This includes:
1. The `xgboost` algorithm container
1. The IAM role to use
1. Training instance type and count
1. S3 location for output data
1. Algorithm hyperparameters
And then a `.fit()` function which specifies:
1. S3 location for output data. In this case we have both a training and validation set which are passed in.
```
from sagemaker.debugger import rule_configs, Rule, DebuggerHookConfig, CollectionConfig
from sagemaker.estimator import Estimator
sess = sagemaker.Session()
save_interval = 5
xgboost_estimator = Estimator(
role=role,
base_job_name=base_job_name,
instance_count=1,
instance_type='ml.m5.4xlarge',
image_uri=container,
max_run=1800,
sagemaker_session=sess,
debugger_hook_config=DebuggerHookConfig(
s3_output_path=bucket_path, # Required
collection_configs=[
CollectionConfig(
name="metrics",
parameters={
"save_interval": str(save_interval)
}
),
CollectionConfig(
name="predictions",
parameters={
"save_interval": str(save_interval)
}
),
CollectionConfig(
name="feature_importance",
parameters={
"save_interval": str(save_interval)
}
),
CollectionConfig(
name="average_shap",
parameters={
"save_interval": str(save_interval)
}
)
],
)
)
xgboost_estimator.set_hyperparameters(max_depth=5,
eta=0.2,
gamma=4,
min_child_weight=6,
subsample=0.8,
silent=0,
objective='binary:logistic',
num_round=100)
xgboost_estimator.fit(
{"train": s3_input_train, "validation": s3_input_validation},
# This is a fire and forget event. By setting wait=False, you submit the job to run in the background.
# Amazon SageMaker starts one training job and releases control to the next cells in the notebook.
# Follow this notebook to see the status of the training job.
wait=False
)
```
### Result
As a result of the above command, Amazon SageMaker starts one training job and one rule job for you. The first one is the job that produces the tensors to be analyzed. The second one analyzes the tensors in the `metrics` collection to check whether the training and validation losses stop decreasing at any point during training.
Check the status of the training job below.
After your training job is started, Amazon SageMaker starts a rule-execution job to run the LossNotDecreasing rule.
**Note that the next cell blocks until the rule execution job ends. You can stop it at any point to proceed to the rest of the notebook. Once it says Rule Evaluation Status is Started, and shows the `RuleEvaluationJobArn`, you can look at the status of the rule being monitored.**
```
import time
from time import gmtime, strftime
# Below command will give the status of training job
job_name = xgboost_estimator.latest_training_job.name
client = xgboost_estimator.sagemaker_session.sagemaker_client
description = client.describe_training_job(TrainingJobName=job_name)
print('Training job name: ' + job_name)
print(description['TrainingJobStatus'])
if description['TrainingJobStatus'] != 'Completed':
while description['SecondaryStatus'] not in ['Training', 'Completed']:
description = client.describe_training_job(TrainingJobName=job_name)
primary_status = description['TrainingJobStatus']
secondary_status = description['SecondaryStatus']
print("{}: {}, {}".format(strftime('%X', gmtime()), primary_status, secondary_status))
time.sleep(15)
```
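To check on the rule evaluation job itself, the SageMaker SDK exposes a summary on the latest training job; this is a hedged sketch assuming the SDK version in use provides `rule_job_summary()`, as in the Debugger example notebooks:

```python
# Summarize the status of the attached rule evaluation jobs (e.g. LossNotDecreasing)
for summary in xgboost_estimator.latest_training_job.rule_job_summary():
    print(summary['RuleConfigurationName'], '-', summary['RuleEvaluationStatus'])
```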
## Data Analysis - Manual
Now that the model has been trained, you can analyze the data that was saved.
Here, the focus is on after-the-fact analysis.
You import the `smdebug` analysis library, which defines the concept of a trial, representing a single training run whose saved tensors you can query.
```
from smdebug.trials import create_trial
description = client.describe_training_job(TrainingJobName=job_name)
s3_output_path = xgboost_estimator.latest_job_debugger_artifacts_path()
# This is where we create a Trial object that allows access to saved tensors.
trial = create_trial(s3_output_path)
```
You can list all the tensors that were saved. Each of these names is the name of a tensor; the name is a combination of the feature name (which, in this case, is auto-assigned by XGBoost) and whether it is an evaluation metric, a feature importance, or a SHAP value.
```
trial.tensor_names()
```
For each tensor, you can ask for the steps where data was saved; in this case, every five steps.
```
trial.tensor("predictions").values()
```
You can obtain each tensor at each step as a NumPy array.
```
type(trial.tensor("predictions").value(10))
```
### Performance metrics
You can also create a simple function that visualizes the training and validation errors as the training progresses.
Both errors should decrease over time as the model converges to a good solution.
Remember that this is an interactive analysis; these tensors are shown to give an idea of the saved data.
```
import matplotlib.pyplot as plt
import seaborn as sns
import re
def get_data(trial, tname):
"""
For the given tensor name, walks though all the iterations
for which you have data and fetches the values.
Returns the set of steps and the values.
"""
tensor = trial.tensor(tname)
steps = tensor.steps()
vals = [tensor.value(s) for s in steps]
return steps, vals
def plot_collection(trial, collection_name, regex='.*', figsize=(8, 6)):
"""
Takes a `trial` and a collection name, and
plots all tensors that match the given regex.
"""
fig, ax = plt.subplots(figsize=figsize)
sns.despine()
tensors = trial.collection(collection_name).tensor_names
for tensor_name in sorted(tensors):
if re.match(regex, tensor_name):
steps, data = get_data(trial, tensor_name)
ax.plot(steps, data, label=tensor_name)
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
ax.set_xlabel('Iteration')
plot_collection(trial, "metrics")
```
### Feature importances
You can also visualize the feature priorities as determined by
[xgboost.get_score()](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.get_score).
If you instructed the Estimator to log the `feature_importance` collection, all five importance types supported by `xgboost.get_score()` will be available in the collection.
```
def plot_feature_importance(trial, importance_type="weight"):
SUPPORTED_IMPORTANCE_TYPES = ["weight", "gain", "cover", "total_gain", "total_cover"]
if importance_type not in SUPPORTED_IMPORTANCE_TYPES:
raise ValueError(f"{importance_type} is not one of the supported importance types.")
plot_collection(
trial,
"feature_importance",
regex=f"feature_importance/{importance_type}/.*")
plot_feature_importance(trial)
plot_feature_importance(trial, importance_type="cover")
```
### SHAP
[SHAP](https://github.com/slundberg/shap) (SHapley Additive exPlanations) is
another approach to explain the output of machine learning models.
SHAP values represent a feature's contribution to a change in the model output.
You instructed the Estimator to log the average SHAP values in this example, so the SHAP values (as calculated by [xgboost.predict(pred_contribs=True)](https://xgboost.readthedocs.io/en/latest/python/python_api.html#xgboost.Booster.predict)) will be available in the `average_shap` collection.
```
plot_collection(trial,"average_shap")
```
|
github_jupyter
|
## Dependencies
```
import os
import cv2
import shutil
import random
import warnings
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tensorflow import set_random_seed
from sklearn.utils import class_weight
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, cohen_kappa_score
from keras import backend as K
from keras.models import Model
from keras.utils import to_categorical
from keras import optimizers, applications
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import EarlyStopping, ReduceLROnPlateau, Callback, LearningRateScheduler
from keras.layers import Dense, Dropout, GlobalAveragePooling2D, Input
# Set seeds to make the experiment more reproducible.
def seed_everything(seed=0):
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
set_random_seed(seed)
seed = 0
seed_everything(seed)
%matplotlib inline
sns.set(style="whitegrid")
warnings.filterwarnings("ignore")
```
## Load data
```
hold_out_set = pd.read_csv('../input/aptos-data-split/hold-out.csv')
X_train = hold_out_set[hold_out_set['set'] == 'train']
X_val = hold_out_set[hold_out_set['set'] == 'validation']
test = pd.read_csv('../input/aptos2019-blindness-detection/test.csv')
print('Number of train samples: ', X_train.shape[0])
print('Number of validation samples: ', X_val.shape[0])
print('Number of test samples: ', test.shape[0])
# Preprocess data
X_train["id_code"] = X_train["id_code"].apply(lambda x: x + ".png")
X_val["id_code"] = X_val["id_code"].apply(lambda x: x + ".png")
test["id_code"] = test["id_code"].apply(lambda x: x + ".png")
X_train['diagnosis'] = X_train['diagnosis'].astype('str')
X_val['diagnosis'] = X_val['diagnosis'].astype('str')
display(X_train.head())
```
# Model parameters
```
# Model parameters
N_CLASSES = X_train['diagnosis'].nunique()
BATCH_SIZE = 16
EPOCHS = 40
WARMUP_EPOCHS = 5
LEARNING_RATE = 1e-4
WARMUP_LEARNING_RATE = 1e-3
HEIGHT = 320
WIDTH = 320
CHANNELS = 3
ES_PATIENCE = 5
RLROP_PATIENCE = 3
DECAY_DROP = 0.5
def kappa(y_true, y_pred, n_classes=5):
y_trues = K.cast(K.argmax(y_true), K.floatx())
y_preds = K.cast(K.argmax(y_pred), K.floatx())
n_samples = K.cast(K.shape(y_true)[0], K.floatx())
distance = K.sum(K.abs(y_trues - y_preds))
max_distance = n_classes - 1
kappa_score = 1 - ((distance**2) / (n_samples * (max_distance**2)))
return kappa_score
def step_decay(epoch):
lrate = 30e-5
if epoch > 3:
lrate = 15e-5
if epoch > 7:
lrate = 7.5e-5
if epoch > 11:
lrate = 3e-5
if epoch > 15:
lrate = 1e-5
return lrate
def focal_loss(y_true, y_pred):
gamma = 2.0
epsilon = K.epsilon()
pt = y_pred * y_true + (1-y_pred) * (1-y_true)
pt = K.clip(pt, epsilon, 1-epsilon)
CE = -K.log(pt)
FL = K.pow(1-pt, gamma) * CE
loss = K.sum(FL, axis=1)
return loss
```
# Pre-procecess images
```
train_base_path = '../input/aptos2019-blindness-detection/train_images/'
test_base_path = '../input/aptos2019-blindness-detection/test_images/'
train_dest_path = 'base_dir/train_images/'
validation_dest_path = 'base_dir/validation_images/'
test_dest_path = 'base_dir/test_images/'
# Making sure directories don't exist
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
# Creating train, validation and test directories
os.makedirs(train_dest_path)
os.makedirs(validation_dest_path)
os.makedirs(test_dest_path)
def crop_image(img, tol=7):
if img.ndim ==2:
mask = img>tol
return img[np.ix_(mask.any(1),mask.any(0))]
elif img.ndim==3:
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
mask = gray_img>tol
check_shape = img[:,:,0][np.ix_(mask.any(1),mask.any(0))].shape[0]
if (check_shape == 0): # image is too dark so that we crop out everything,
return img # return original image
else:
img1=img[:,:,0][np.ix_(mask.any(1),mask.any(0))]
img2=img[:,:,1][np.ix_(mask.any(1),mask.any(0))]
img3=img[:,:,2][np.ix_(mask.any(1),mask.any(0))]
img = np.stack([img1,img2,img3],axis=-1)
return img
def circle_crop(img):
img = crop_image(img)
height, width, depth = img.shape
largest_side = np.max((height, width))
img = cv2.resize(img, (largest_side, largest_side))
height, width, depth = img.shape
x = width//2
y = height//2
r = np.amin((x, y))
circle_img = np.zeros((height, width), np.uint8)
cv2.circle(circle_img, (x, y), int(r), 1, thickness=-1)
img = cv2.bitwise_and(img, img, mask=circle_img)
img = crop_image(img)
return img
def preprocess_image(base_path, save_path, image_id, HEIGHT, WIDTH, sigmaX=10):
image = cv2.imread(base_path + image_id)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = circle_crop(image)
image = cv2.resize(image, (HEIGHT, WIDTH))
image = cv2.addWeighted(image, 4, cv2.GaussianBlur(image, (0,0), sigmaX), -4 , 128)
cv2.imwrite(save_path + image_id, image)
# Pre-process train set
for i, image_id in enumerate(X_train['id_code']):
preprocess_image(train_base_path, train_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process validation set
for i, image_id in enumerate(X_val['id_code']):
preprocess_image(train_base_path, validation_dest_path, image_id, HEIGHT, WIDTH)
# Pre-process test set
for i, image_id in enumerate(test['id_code']):
preprocess_image(test_base_path, test_dest_path, image_id, HEIGHT, WIDTH)
```
# Data generator
```
datagen=ImageDataGenerator(rescale=1./255,
rotation_range=360,
horizontal_flip=True,
vertical_flip=True)
train_generator=datagen.flow_from_dataframe(
dataframe=X_train,
directory=train_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="categorical",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
valid_generator=datagen.flow_from_dataframe(
dataframe=X_val,
directory=validation_dest_path,
x_col="id_code",
y_col="diagnosis",
class_mode="categorical",
batch_size=BATCH_SIZE,
target_size=(HEIGHT, WIDTH),
seed=seed)
test_generator=datagen.flow_from_dataframe(
dataframe=test,
directory=test_dest_path,
x_col="id_code",
batch_size=1,
class_mode=None,
shuffle=False,
target_size=(HEIGHT, WIDTH),
seed=seed)
```
# Model
```
def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.DenseNet169(weights=None,
include_top=False,
input_tensor=input_tensor)
base_model.load_weights('../input/keras-notop/densenet169_weights_tf_dim_ordering_tf_kernels_notop.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(2048, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='softmax', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
```
# Train top layers
```
model = create_model(input_shape=(HEIGHT, WIDTH, CHANNELS), n_out=N_CLASSES)
for layer in model.layers:
layer.trainable = False
for i in range(-5, 0):
model.layers[i].trainable = True
class_weights = class_weight.compute_class_weight('balanced', np.unique(X_train['diagnosis'].astype('int').values), X_train['diagnosis'].astype('int').values)
metric_list = ["accuracy", kappa]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=metric_list)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history_warmup = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
class_weight=class_weights,
verbose=1).history
```
# Fine-tune the complete model (1st step)
```
for layer in model.layers:
layer.trainable = True
# lrstep = LearningRateScheduler(step_decay)
es = EarlyStopping(monitor='val_loss', mode='min', patience=ES_PATIENCE, restore_best_weights=True, verbose=1)
rlrop = ReduceLROnPlateau(monitor='val_loss', mode='min', patience=RLROP_PATIENCE, factor=DECAY_DROP, min_lr=1e-6, verbose=1)
callback_list = [es, rlrop]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=metric_list)
model.summary()
history_finetunning = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=int(EPOCHS*0.8),
callbacks=callback_list,
class_weight=class_weights,
verbose=1).history
```
# Fine-tune the complete model (2nd step)
```
optimizer = optimizers.SGD(lr=LEARNING_RATE, momentum=0.9, nesterov=True)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=metric_list)
history_finetunning_2 = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=int(EPOCHS*0.2),
callbacks=callback_list,
class_weight=class_weights,
verbose=1).history
```
# Model loss graph
```
history = {'loss': history_finetunning['loss'] + history_finetunning_2['loss'],
'val_loss': history_finetunning['val_loss'] + history_finetunning_2['val_loss'],
'acc': history_finetunning['acc'] + history_finetunning_2['acc'],
'val_acc': history_finetunning['val_acc'] + history_finetunning_2['val_acc'],
'kappa': history_finetunning['kappa'] + history_finetunning_2['kappa'],
'val_kappa': history_finetunning['val_kappa'] + history_finetunning_2['val_kappa']}
sns.set_style("whitegrid")
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex='col', figsize=(20, 18))
ax1.plot(history['loss'], label='Train loss')
ax1.plot(history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history['acc'], label='Train accuracy')
ax2.plot(history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
ax3.plot(history['kappa'], label='Train kappa')
ax3.plot(history['val_kappa'], label='Validation kappa')
ax3.legend(loc='best')
ax3.set_title('Kappa')
plt.xlabel('Epochs')
sns.despine()
plt.show()
# Create empty arrays to keep the predictions and labels
lastFullTrainPred = np.empty((0, N_CLASSES))
lastFullTrainLabels = np.empty((0, N_CLASSES))
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
# Add train predictions and labels
for i in range(STEP_SIZE_TRAIN+1):
im, lbl = next(train_generator)
scores = model.predict(im, batch_size=train_generator.batch_size)
lastFullTrainPred = np.append(lastFullTrainPred, scores, axis=0)
lastFullTrainLabels = np.append(lastFullTrainLabels, lbl, axis=0)
# Add validation predictions and labels
for i in range(STEP_SIZE_VALID+1):
im, lbl = next(valid_generator)
scores = model.predict(im, batch_size=valid_generator.batch_size)
lastFullValPred = np.append(lastFullValPred, scores, axis=0)
lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
lastFullComPred = np.concatenate((lastFullTrainPred, lastFullValPred))
lastFullComLabels = np.concatenate((lastFullTrainLabels, lastFullValLabels))
train_preds = [np.argmax(pred) for pred in lastFullTrainPred]
train_labels = [np.argmax(label) for label in lastFullTrainLabels]
validation_preds = [np.argmax(pred) for pred in lastFullValPred]
validation_labels = [np.argmax(label) for label in lastFullValLabels]
complete_labels = [np.argmax(label) for label in lastFullComLabels]
```
# Threshold optimization
```
def find_best_fixed_threshold(preds, targs, do_plot=True):
best_thr_list = [0 for i in range(preds.shape[1])]
for index in range(1, preds.shape[1]):
score = []
thrs = np.arange(0, 1, 0.01)
for thr in thrs:
preds_thr = [index if x[index] > thr else np.argmax(x) for x in preds]
score.append(cohen_kappa_score(targs, preds_thr))
score = np.array(score)
pm = score.argmax()
best_thr, best_score = thrs[pm], score[pm].item()
best_thr_list[index] = best_thr
print('Label %s: thr=%.3f, Kappa=%.3f' % (index, best_thr, best_score))
if do_plot:
plt.plot(thrs, score)
plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max())
plt.text(best_thr+0.03, best_score-0.01, ('Kappa[%s]=%.3f' % (index, best_score)), fontsize=14);
plt.show()
return best_thr_list
threshold_list = find_best_fixed_threshold(lastFullComPred, complete_labels, do_plot=True)
threshold_list[0] = 0 # Fall back to label 0 when no other class clears its threshold
# Apply optimized thresholds to the train predictions
train_preds_opt = [0 for i in range(lastFullTrainPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullTrainPred):
if pred[idx] > thr:
train_preds_opt[idx2] = idx
# Apply optimized thresholds to the validation predictions
validation_preds_opt = [0 for i in range(lastFullValPred.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(lastFullValPred):
if pred[idx] > thr:
validation_preds_opt[idx2] = idx
index_order = [0, 2, 1, 4, 3]
# Apply optimized thresholds to the train predictions by class distribution
train_preds_opt2 = [0 for i in range(lastFullTrainPred.shape[0])]
for idx in index_order:
thr = threshold_list[idx]
for idx2, pred in enumerate(lastFullTrainPred):
if pred[idx] > thr:
train_preds_opt2[idx2] = idx
# Apply optimized thresholds to the validation predictions by class distribution
validation_preds_opt2 = [0 for i in range(lastFullValPred.shape[0])]
for idx in index_order:
thr = threshold_list[idx]
for idx2, pred in enumerate(lastFullValPred):
if pred[idx] > thr:
validation_preds_opt2[idx2] = idx
```
# Model Evaluation
## Confusion Matrix
### Original thresholds
```
labels = ['0 - No DR', '1 - Mild', '2 - Moderate', '3 - Severe', '4 - Proliferative DR']
def plot_confusion_matrix(train, validation, labels=labels):
train_labels, train_preds = train
validation_labels, validation_preds = validation
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(24, 7))
train_cnf_matrix = confusion_matrix(train_labels, train_preds)
validation_cnf_matrix = confusion_matrix(validation_labels, validation_preds)
train_cnf_matrix_norm = train_cnf_matrix.astype('float') / train_cnf_matrix.sum(axis=1)[:, np.newaxis]
validation_cnf_matrix_norm = validation_cnf_matrix.astype('float') / validation_cnf_matrix.sum(axis=1)[:, np.newaxis]
train_df_cm = pd.DataFrame(train_cnf_matrix_norm, index=labels, columns=labels)
validation_df_cm = pd.DataFrame(validation_cnf_matrix_norm, index=labels, columns=labels)
sns.heatmap(train_df_cm, annot=True, fmt='.2f', cmap="Blues",ax=ax1).set_title('Train')
sns.heatmap(validation_df_cm, annot=True, fmt='.2f', cmap=sns.cubehelix_palette(8),ax=ax2).set_title('Validation')
plt.show()
plot_confusion_matrix((train_labels, train_preds), (validation_labels, validation_preds))
```
### Optimized thresholds
```
plot_confusion_matrix((train_labels, train_preds_opt), (validation_labels, validation_preds_opt))
```
### Optimized thresholds by class
```
plot_confusion_matrix((train_labels, train_preds_opt2), (validation_labels, validation_preds_opt2))
```
## Quadratic Weighted Kappa
```
def evaluate_model(train, validation):
train_labels, train_preds = train
validation_labels, validation_preds = validation
print("Train Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds, train_labels, weights='quadratic'))
print("Validation Cohen Kappa score: %.3f" % cohen_kappa_score(validation_preds, validation_labels, weights='quadratic'))
print("Complete set Cohen Kappa score: %.3f" % cohen_kappa_score(train_preds+validation_preds, train_labels+validation_labels, weights='quadratic'))
print(" Original thresholds")
evaluate_model((train_preds, train_labels), (validation_preds, validation_labels))
print(" Optimized thresholds")
evaluate_model((train_preds_opt, train_labels), (validation_preds_opt, validation_labels))
print(" Optimized thresholds by class")
evaluate_model((train_preds_opt2, train_labels), (validation_preds_opt2, validation_labels))
```
## Apply model to test set and output predictions
```
def apply_tta(model, generator, steps=10):
step_size = generator.n//generator.batch_size
preds_tta = []
for i in range(steps):
generator.reset()
preds = model.predict_generator(generator, steps=step_size)
preds_tta.append(preds)
return np.mean(preds_tta, axis=0)
preds = apply_tta(model, test_generator)
predictions = np.argmax(preds, axis=1)
predictions_opt = [0 for i in range(preds.shape[0])]
for idx, thr in enumerate(threshold_list):
for idx2, pred in enumerate(preds):
if pred[idx] > thr:
predictions_opt[idx2] = idx
predictions_opt2 = [0 for i in range(preds.shape[0])]
for idx in index_order:
thr = threshold_list[idx]
for idx2, pred in enumerate(preds):
if pred[idx] > thr:
predictions_opt2[idx2] = idx
results = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions})
results['id_code'] = results['id_code'].map(lambda x: str(x)[:-4])
results_opt = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions_opt})
results_opt['id_code'] = results_opt['id_code'].map(lambda x: str(x)[:-4])
results_opt2 = pd.DataFrame({'id_code':test['id_code'], 'diagnosis':predictions_opt2})
results_opt2['id_code'] = results_opt2['id_code'].map(lambda x: str(x)[:-4])
# Cleaning created directories
if os.path.exists(train_dest_path):
shutil.rmtree(train_dest_path)
if os.path.exists(validation_dest_path):
shutil.rmtree(validation_dest_path)
if os.path.exists(test_dest_path):
shutil.rmtree(test_dest_path)
```
# Predictions class distribution
```
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharex='col', figsize=(24, 8.7))
sns.countplot(x="diagnosis", data=results, palette="GnBu_d", ax=ax1).set_title('Test')
sns.countplot(x="diagnosis", data=results_opt, palette="GnBu_d", ax=ax2).set_title('Test optimized')
sns.countplot(x="diagnosis", data=results_opt2, palette="GnBu_d", ax=ax3).set_title('Test optimized by class')
sns.despine()
plt.show()
val_kappa = cohen_kappa_score(validation_preds, validation_labels, weights='quadratic')
val_opt_kappa = cohen_kappa_score(validation_preds_opt, validation_labels, weights='quadratic')
val_opt_kappa2 = cohen_kappa_score(validation_preds_opt2, validation_labels, weights='quadratic')
results_name = 'submission.csv'
results_opt_name = 'submission_opt.csv'
results_opt2_name = 'submission_opt2.csv'
# if (val_kappa > val_opt_kappa) and (val_kappa > val_opt_kappa2):
# results_name = 'submission.csv'
# results_opt_name = 'submission_opt.csv'
# results_opt2_name = 'submission_opt2.csv'
# elif (val_opt_kappa > val_kappa) and (val_opt_kappa > val_opt_kappa2):
# results_name = 'submission_norm.csv'
# results_opt_name = 'submission.csv'
# results_opt2_name = 'submission_opt2.csv'
# else:
# results_name = 'submission_norm.csv'
# results_opt_name = 'submission_opt.csv'
# results_opt2_name = 'submission.csv'
results.to_csv(results_name, index=False)
display(results.head())
results_opt.to_csv(results_opt_name, index=False)
display(results_opt.head())
results_opt2.to_csv(results_opt2_name, index=False)
display(results_opt2.head())
```
|
github_jupyter
|
```
%matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
```
## Introduction
Machine learning literature makes heavy use of probabilistic graphical models
and bayesian statistics. In fact, state of the art (SOTA) architectures, such as
[variational autoencoders][vae-blog] (VAE) or [generative adversarial
networks][gan-blog] (GAN), are intrinsically stochastic by nature. To
wholesomely understand research in this field not only do we need a broad
knowledge of mathematics, probability, and optimization but we somehow need
intuition about how these concepts are applied to real world problems. For
example, one of the most common applications of deep learning techniques is
vision. We may want to classify images or generate new ones. Most SOTA
techniques pose these problems in a probabilistic framework. We frequently see
things like $p(\mathbf{x}|\mathbf{z})$ where $\mathbf{x}$ is an image and
$\mathbf{z}$ is a latent variable. What do we mean by the probability of an
image? What is a latent variable, and why is it necessary[^Bishop2006] to pose
the problems this way?
Short answer, it is necessary due to the inherent uncertainty of our universe.
In this case, uncertainty in image acquisition can be introduced via many
sources, such as the recording apparatus, the finite precision of our
measurements, as well as the intrinsic stochasticity of the process being
measured. Perhaps the most important source of uncertainty we will consider is
due to there being sources of variability that are themselves unobserved.
Probability theory provides us with a framework to reason in the presence of
uncertainty and information theory allows us to quantify uncertainty. As we
alluded to earlier, the field of machine learning makes heavy use of both, and
this is no coincidence.
## Representations
How do we describe a face? The word "face" is a symbol and this symbol means
different things to different people. Yet, there is enough commonality between
our interpretations that we are able to effectively communicate with one
another using the word. How is that? What are the underlying features of faces
that we all hold common? Why is a simple smiley face clip art so obviously
perceived as a face? To make it more concrete, why are two simple ellipses
decorated underneath by a short curve so clearly a face, while an eye lid,
lower lip, one ear and a nostril, not?
**Insert Image of Faces**
*Left: Most would likely agree, this is clearly a face. Middle:
With nearly all of the details removed, a mere two circles and
curve are enough to create what the author still recognizes
as a face. Right: Does this look like a face to you? An ear,
nostril, eyelid, and lip do not seem to convey a face as clearly
as the eyes and the mouth do. We will quantify this demonstration
shortly.*
Features, or representations, are built on the idea that characteristics of the
symbol "face" are not a property of any one face. Rather, they only arise from
the myriad of things we use the symbol to represent. In other words, a
particular face is not ascribed meaning by the word "face" - the word "face"
derives meaning from the many faces it represents. This suggests that facial
characteristics can be described through the statistical properties of all
faces. Loosely speaking, these underlying statistical characteristics are what
the machine learning field often calls latent variables.
## Probability of an Image
Most images are contaminated with noise that must be addressed. At the
highest level, we have noise being added to the data by the imaging device. The
next level of uncertainty comes as a consequence of discretization.
Images in reality are continuous but in the process of imaging we only measure
certain points along the face. Consider for example a military satellite
tracking a vehicle. If one wishes to predict the future location of the van,
the prediction is limited to be within one of the discrete cells that make up
its measurements. However, the true location of the van could be anywhere
within that grid cell. There is also intrinsic stochasticity at the atomic
level that we ignore. The fluctuations taking place at that scale are assumed
to be averaged out in our observations.
The unobserved sources of variability will be our primary focus. Before we
address that, let us lay down some preliminary concepts. We are going to assume
that there exists some true unknown process that determines what faces look
like. A dataset of faces can then be considered as a sample of this process at
various points throughout its life. This suggests that these snapshots are a
outputs of the underlying data generating process. Considering the many
sources of uncertainty outlined above, it is natural to describe this process
as a probability distribution. There will be many ways to interpret the data as
a probability, but we will begin by considering any one image to be the result
of a data generating distribution, $P_{data}(\mathbf{x})$. Here $\mathbf{x}$ is considered to be
an image of a face with $n$ pixels. So $P_{data}$ is a joint distribution over
each pixel of the frame with a probability density function (pdf),
$p_{data}(x_1,x_2,\dots,x_n)$.
To build intuition about what $p_{data}(\mathbf{x})$ is and how it relates to
the assumed data generating process, we will explore a simple example. Take an
image with only 2 pixels... [$x_1$,$x_2$] where both $x_1$ and $x_2$ are in
[0,1]. Each image can be considered as a two dimensional point, in
$\mathbb{R}^2$. All possible images would occupy a square in the 2 dimensional
plane. An example of what this might look like can be seen in Figure
\ref{fig:images_in_2dspace} on page \pageref{fig:images_in_2dspace}. Any one
point inside the unit square would represent an image. For example the image
associated with the point $(0.25,0.85)$ is shown below.
```
x1 = np.random.uniform(size=500)
x2 = np.random.uniform(size=500)
fig = plt.figure();
ax = fig.add_subplot(1,1,1);
ax.scatter(x1,x2, edgecolor='black', s=80);
ax.grid();
ax.set_axisbelow(True);
ax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25)
ax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('images_in_2dspace.pdf')
```
```
im = [(0.25, 0.85)]
plt.imshow(im, cmap='gray',vmin=0,vmax=1)
plt.tick_params(
axis='both',    # changes apply to both axes
which='both',   # both major and minor ticks are affected
bottom=False,   # ticks along the bottom edge are off
top=False,      # ticks along the top edge are off
left=False,
right=False
)
plt.xticks([])
plt.yticks([])
plt.xlabel('Pixel 1 = 0.25 Pixel 2 = 0.85')
plt.savefig('sample_2dspace_image.pdf')
```
Now consider the case where there is some
process correlating the two variables. This
would be similar to there being some rules behind
the structure of faces. We know that this must be
the case because if it weren't, then faces would
be created randomly and we would not see the
patterns that we do. In
this case, the pixels would be correlated in
some manner due to the mechanism driving the
construction of faces. In this simple case,
let's consider a direct correlation of the
form $x_1 = \frac{1}{2} \cos(2\pi x_2)+\frac{1}{2}+\epsilon$
where $\epsilon$ is a noise term coming from
a low variability normal distribution
$\epsilon \sim N(0,\frac{1}{10})$. We see
in Figure \ref{fig:structured_images_in_2dspace}
on page \pageref{fig:structured_images_in_2dspace}
that in this case, the images plotted
in two dimensions resulting from this
relationship form a distinct pattern.
```
x1 = lambda x2: 0.5*np.cos(2*np.pi*x2)+0.5
x2 = np.linspace(0,1,200)
eps = np.random.normal(scale=0.1, size=200)
fig = plt.figure();
ax = fig.add_subplot(1,1,1);
ax.scatter(x2,x1(x2)+eps, edgecolor='black', s=80);
ax.grid();
ax.set_axisbelow(True);
ax.set_xlim(-0.25,1.25); ax.set_ylim(-0.25,1.25); ax.set_aspect('equal')
ax.set_xlabel('Pixel 2'); ax.set_ylabel('Pixel 1'); plt.savefig('structured_images_in_2dspace.pdf')
```
We will refer to the structure suggested by
the two dimensional points as the 'manifold'.
This is a common practice when analyzing images.
A 28 by 28 dimensional image will be a point in
784 dimensional space. If we are examining
images with structure, various images of the
number 2 for example, then it turns out that
these images will form a manifold in 784
dimensional space. In most cases, as is the
case in our contrived example, this manifold
exists in a lower dimensional space than that
of the images themselves. The goal is to 'learn'
this manifold. In our simple case we can describe
the manifold as a function of only 1 variable
$$f(t) = \left\langle t,\ \frac{1}{2} \cos(2\pi t)+\frac{1}{2} \right\rangle$$
This is what we would call the underlying data
generating process. In practice we usually
describe the manifold in terms of a probability
distribution. We will refer to the data
generating distribution in our example as
$p_{test}(x_1, x_2)$. Why did we choose a
probability to describe the manifold created
by the data generating process? How might this
probability be interpreted?
Learning the actual distribution turns out to
be a difficult task. Here we will use a
common non parametric technique for describing
distributions, the histogram. Looking at a
histogram of the images, or two dimensional points,
will give us insight into the structure of the
distribution from which they came. Notice here
though that the histogram merely describes the
distribution, we do not know what it is.
```
from matplotlib.colors import LogNorm
x2 = np.random.uniform(size=100000)
eps = np.random.normal(scale=0.1, size=100000)
hist2d = plt.hist2d(x2,x1(x2)+eps, bins=50, norm=LogNorm())
plt.xlim(0.0,1.0); plt.ylim(-0.3,1.3); plt.gca().set_aspect('equal')
plt.xlabel('Pixel 2'); plt.ylabel('Pixel 1')
plt.colorbar();
plt.savefig('histogram_of_structured_images.pdf')
```
As our intuition might have suggested, the data
generating distribution looks very similar to
the structure suggested by the two dimensional
images plotted above. There is high probability
very near the actual curve
$x_1 = \frac{1}{2} \cos(2\pi x_2)+\frac{1}{2}$
and low probability as we move away. We imposed
the uncertainty via the Gaussian noise term
$\epsilon$. However, in real data the
uncertainty can be due to the myriad sources
outlined above. In these cases a complex
probability distribution isn't an arbitrary
choice for representing the data, it becomes
necessary.
Hopefully we're now beginning to understand how
to interpret $p_{test}(x_1, x_2)$. One might say
$p_{test}$ measures how likely a certain
configuration of $x_1$ and $x_2$ is to have
arisen from the data generating process $f(t)$.
Therefore if one can learn the data generating
distribution, then they have a descriptive
measure of the true underlying data generating
process. This intuition extends to the
$p_{data}(x)$ for faces that was presented
above. A sample from the LFW dataset is shown in
Figure \ref{fig:Agnelo_Queiroz_0001} on page
\pageref{fig:Agnelo_Queiroz_0001}.
|
github_jupyter
|
# Astronomy 8824 - Numerical and Statistical Methods in Astrophysics
## Statistical Methods Topic I. High Level Background
These notes are for the course Astronomy 8824: Numerical and Statistical Methods in Astrophysics. They are based on notes from David Weinberg with modifications and additions by Paul Martini.
David's original notes are available from his website: http://www.astronomy.ohio-state.edu/~dhw/A8824/index.html
#### Background reading:
- Statistics, Data Mining, and Machine Learning in Astronomy, Chapter 3 (see David's [Reader's Guide](http://www.astronomy.ohio-state.edu/~dhw/A8824/ivezic_guide.pdf))
```
import math
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
from scipy import optimize
# matplotlib settings
SMALL_SIZE = 14
MEDIUM_SIZE = 16
BIGGER_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE) # fontsize of the x and y labels
plt.rc('lines', linewidth=2)
plt.rc('axes', linewidth=2)
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
```
LaTeX macros hidden here --
$\newcommand{\expect}[1]{{\left\langle #1 \right\rangle}}$
$\newcommand{\intinf}{\int_{-\infty}^{\infty}}$
$\newcommand{\xbar}{\overline{x}}$
### Statistical Tasks in Astrophysics
Four common statistical tasks:
1. Parameter estimation
2. Comparison of hypotheses
3. Absolute evaluation of a hypothesis
4. Forecasting of errors
Another task, slightly less common: Prediction of values from a model fit to some set of data, when the parameters of the model are uncertain.
### Simple Example: Data points with error bars
**Parameter estimation:** What are slope and amplitude of a power-law fit?
What are the uncertainties in the parameters?
When you fit a power-law model to data, you _assume_ that power-law description is valid.
**Hypothesis comparison:** Is a double power-law better than a single power-law?
Hypothesis comparisons are trickier when the number of parameters is different, since one must decide whether the fit to the data is _sufficiently_ better given the extra freedom in the more complex model.
A simpler comparison would be single power-law vs. two constant plateaus with a break at a specified location, both with two parameters.
**Absolute evaluation:** Are the data consistent with a power-law?
Absolute assessments of this sort are generally much more problematic than hypothesis comparisons.
**Forecasting of errors:** How many more measurements, or what reduction of uncertainties in the measurements, would allow single and double power-law models to be clearly distinguished?
Need to specify goals, and assumptions about the data. This is a common need for observing proposals, grant proposals, satellite proposals etc.
### Complicated example: CMB power spectrum with errors.
**Parameter estimation:** In a "vanilla" $\Lambda$CDM model, what are the best values of $\Omega_m$, $\Omega_b$, $h$, $n$, and $\tau$?
One often wants to combine CMB with other data to break degeneracies and get better constraints.
**Hypothesis comparisons:** Are data consistent with $\Omega_m=1$? Do they favor inclusion of space curvature, or gravity waves?
This typically involves comparison of models with different numbers of parameters.
**Absolute assessment:** Can the restricted, "vanilla" $\Lambda$CDM model be rejected?
**Forecasting:** What constraints or tests could be achieved with a new experiment?
This kind of analysis played a key role in the design and approval of WMAP, Planck, DESI, and other major cosmological surveys.
There is presently a lot of work along these lines for future cosmological surveys and CMB experiments.
### PDF, Mean, and Variance
If $p(x)$ is the **probability distribution function** (pdf) of a **random variable** $x$, then $p(x) dx$ is the probability that $x$ lies in a small interval $dx$.
The **expectation value** of a random variable $x$ is $\expect{x} = \intinf xp(x)dx = \mu$. The expectation value of $x$ is equal to the (arithmetic) mean. It is sometimes also written $\mu = E(x)$.
The expectation value of a function $y(x)$ is $\expect{y(x)} = \intinf y(x) p(x) dx.$
The variance is $V(x)=\expect{(x-\mu)^2} \equiv \sigma^2$.
The standard deviation is $\sigma = \sqrt{\sigma^2}$. This is also called the dispersion.
#### Useful variance relation
$$
V(x)=\expect{(x-\mu)^2} = \int (x - \mu)^2 p(x) dx
$$
$$
= \int (x^2 - 2\mu x + \mu^2) p(x) dx = \int x^2 p(x) dx - 2 \mu \int x p(x) dx + \mu^2 \int p(x) dx
$$
$$
= \expect{x^2} - 2 \expect{x}^2 + \expect{x}^2
$$
This reduces to the useful result that $V(x) = \expect{x^2} - \expect{x}^2$.
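A quick numerical check of this identity (our own sketch; the gamma distribution and sample size are arbitrary choices):
```
# Check V(x) = <x^2> - <x>^2 with Monte Carlo draws from a skewed distribution
rng = np.random.default_rng(42)
x = rng.gamma(shape=2.0, scale=1.5, size=100000)

var_direct = np.mean((x - x.mean())**2)       # <(x - mu)^2>, using the sample mean
var_identity = np.mean(x**2) - np.mean(x)**2  # <x^2> - <x>^2

print(var_direct, var_identity)   # the two agree to sampling precision (true value 4.5)
```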
#### Sum of the variances
For _independent_ random variables $y_1$, $y_2$, ... $y_N$ (drawn from the same distribution or different distributions), the variance of the sum is the sum of the variances:
$$
V(y_1+y_2+...y_N) = \sum_{i=1,N} V(y_i).
$$
This can be proved by induction.
If random variables $x$ and $y$ are independent, then $p(x,y) = p(x)p(y)$ and
$$
{\rm Cov}(x,y) \equiv \expect{(x-\mu_x)(y-\mu_y)}=0.
$$
The second statement can be proved from the first.
#### Demonstration
$$
Var(y_1 + y_2) = \expect{(y_1 + y_2)^2} - \expect{y_1+y_2}^2
$$
$$
= \expect{y_1^2 + 2 y_1 y_2 + y_2^2} - \expect{y_1+y_2}^2
$$
Then looking at just the first term:
$$
\expect{y_1^2 + 2 y_1 y_2 + y_2^2} = \int y_1^2 p(y_1) p(y_2) dy_1 dy_2 + 2 \int y_1 y_2 p(y_1) p(y_2) dy_1 dy_2 + \int y_2^2 p(y_1) p(y_2) dy_1 dy_2
$$
Note that the integral $\int p(y_1) dy_1 = 1$ by definition, so we can simplify the above to:
$$
= \expect{y_1^2} + 2 \expect{y_1 y_2} + \expect{y_2^2}
$$
Now looking at the second term:
$$
\expect{y_1+y_2}^2 = \left[ \int (y_1 + y_2) p(y_1) p(y_2) dy_1 dy_2 \right]^2
$$
$$
= \expect{y_1}^2 + 2 \expect{y_1} \expect{y_2} + \expect{y_2}^2
$$
Now combining these two:
$$
Var(y_1 + y_2) = \expect{y_1^2} + 2 \expect{y_1 y_2} + \expect{y_2^2} - \expect{y_1}^2 - 2 \expect{y_1} \expect{y_2} - \expect{y_2}^2
$$
$$
= \expect{y_1^2} + \expect{y_2^2} - \expect{y_1}^2 - \expect{y_2}^2
$$
Which is equivalent to:
$$
Var(y_1 + y_2) = Var(y_1) + Var(y_2)
$$
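The same relation is easy to verify numerically for independent draws (a sketch; the two distributions are arbitrary choices):
```
# Variance of a sum of independent random variables equals the sum of the variances
rng = np.random.default_rng(0)
y1 = rng.normal(loc=1.0, scale=2.0, size=200000)   # V(y1) = 4
y2 = rng.uniform(low=0.0, high=3.0, size=200000)   # V(y2) = 9/12 = 0.75

print(np.var(y1 + y2))           # ~ 4.75
print(np.var(y1) + np.var(y2))   # ~ 4.75
```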
#### Linearity of Expectation
This is often invoked more generally as a statement about the _Linearity of Expectation_.
$$
\expect{x + y} = \int (x + y) p(x) p(y) dx dy = \int x p(x) p(y) dx dy + \int y p(x) p(y) dx dy = \expect{x} + \expect{y}
$$
### Covariance
Covariance is a measure of the _joint probability_ of 2 random variables. It describes how they change together.
It is commonly written as:
$$
Cov(y_1, y_2) = \expect{ (y_1 - \expect{y_1} ) (y_2 - \expect{y_2}) } = \expect{ (y_1 - \mu_1) (y_2 - \mu_2) }
$$
This can also be written as:
$$
Cov(y_1, y_2) = \expect{y_1 y_2 - \expect{y_1} y_2 - y_1 \expect{y_2} + \expect{y_1} \expect{y_2} }
$$
using the linearity of expectation
$$
= \expect{y_1 y_2} - \expect{y_1}\expect{y_2} - \expect{y_1}\expect{y_2} + \expect{y_1} \expect{y_2}
$$
or
$$
Cov(y_1, y_2) = \expect{y_1 y_2} - \expect{y_1} \expect{y_2}
$$
Note that if $y_1$ and $y_2$ are independent variables,
$$
\expect{y_1 y_2} = \int y_1 y_2 p(y_1) p(y_2) dy_1 dy_2 = \int y_1 p(y_1) dy_1 \int y_2 p(y_2) dy_2 = \expect{y_1} \expect{y_2}
$$
and therefore $Cov(y_1, y_2) = 0$.
### Covariance Example
```
np.random.seed(1216)
sig_x = 2
sig_y = 1
sig_xy = 0
mean = np.array([0, 0], dtype=float)
cov = np.array( [[sig_x, sig_xy], [sig_xy, sig_y]], dtype=float)
x = np.random.multivariate_normal(mean, cov, size=1000)
fig, axarr = plt.subplots(1, 2, figsize=(14,7))
axarr[0].plot(x.T[0], x.T[1], 'k.')
axarr[0].set_xlabel(r"$x_1$")
axarr[0].set_ylabel(r"$x_2$")
axarr[0].set_xlim(-5, 5)
axarr[0].set_ylim(-5, 5)
axarr[0].text(-4, 4, r"$\sigma_{xy} = 0.0$")
sig_x = 2
sig_y = 1
sig_xy = 0.5
mean = np.array([0, 0], dtype=float)
cov = np.array( [[sig_x, sig_xy], [sig_xy, sig_y]], dtype=float)
x = np.random.multivariate_normal(mean, cov, size=1000)
axarr[1].plot(x.T[0], x.T[1], 'k.')
axarr[1].set_xlim(-5, 5)
axarr[1].set_ylim(-5, 5)
axarr[1].plot( [x[0], x[-1]], [0, 0], 'k:')
axarr[1].set_xlabel("$x_1$")
axarr[1].text(-4, 4, r"$\sigma_{xy} = 0.5$")
```
### Estimators
An estimator is a mathematical function of data that estimates a quantity of interest. An important distinction to keep in mind is the one between "population statistics" (describing the underlying distribution) and "sample statistics" (computed from the measured sample).
Ideally one wants an estimator to be
- _unbiased_ -- even with a small amount of data, the expectation value of the estimator is equal to the quantity being estimated
- _efficient_ -- makes good use of the data, giving a low variance about the true value of the quantity
- _robust_ -- isn't easily thrown off by data that violate your assumptions about the pdf, e.g., by non-Gaussian tails of the error distribution
- _consistent_ -- in the limit of lots of data, it converges to the true value
These four desiderata sometimes pull in different directions.
Suppose we have $N$ independent data points (the sample) drawn from an unknown distribution $p(x)$ (the population).
#### The mean estimator
The obvious estimator for the mean of the distribution is the sample mean, $\xbar={1\over N}\sum x_i$. The expectation value for the sample mean is:
$$
\expect{\xbar} = \expect{\frac{1}{N} \sum x_i} =
\frac{1}{N} \sum \expect{x_i} = \mu.
$$
Thus, the sample mean is an _unbiased_ estimator of $\mu$.
#### Variance of the mean estimator
The variance of this estimator is
$$
\expect{(\xbar-\mu)^2} = V\left(\frac{1}{N} \sum x_i\right) =
{1 \over N^2} V\left(\sum x_i\right) =
{1 \over N^2} \sum V(x_i) =
{1 \over N^2} \times N\sigma^2 = {\sigma^2 \over N},
$$
where $\sigma^2$ is the variance of the underlying distribution.
We have used the fact that $\expect{\xbar}=\mu$, and we have used the assumed independence of the $x_i$ to go from the variance of a sum to a sum of variances.
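A quick simulation of the $\sigma^2/N$ scaling (a sketch; the values of $\sigma$, $N$, and the number of trials are arbitrary):
```
# Scatter of the sample mean over many independent samples of size N
rng = np.random.default_rng(1)
sigma, N, ntrials = 2.0, 25, 10000
samples = rng.normal(loc=0.0, scale=sigma, size=(ntrials, N))
xbar = samples.mean(axis=1)

print(np.var(xbar))      # empirical variance of the sample mean
print(sigma**2 / N)      # predicted sigma^2 / N = 0.16
```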
#### Other mean estimators
An alternative estimator for the mean is the value of the third sample member, $x_3$.
Since $\expect{x_3} = \mu$, this estimator is unbiased, but $V(x_3) = \sigma^2$, so this estimate is noisier than the sample mean by $\sqrt{N}$.
A more reasonable estimator is the sample _median_, though this is a biased estimator if $p(x)$ is asymmetric about the mean.
If $p(x)$ is Gaussian, then the variance of the sample median is ${\pi \over 2}{\sigma^2 \over N}$, so it is a less _efficient_ estimator than the sample mean.
However, if $p(x)$ has long non-Gaussian tails, then the median may be a much _more_ efficient estimator of the true mean (i.e., giving a more accurate answer for a fixed number of data points), since it is not sensitive to rare large or small values.
Estimators that are insensitive to the extremes of a distribution are often called _robust_ estimators.
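A simulation comparing the efficiency and robustness of the sample mean and sample median (our own sketch; the 5% contamination by a broader Gaussian is an arbitrary way to mimic non-Gaussian tails):
```
# Mean vs. median as estimators of the true mean, for Gaussian and heavy-tailed data
rng = np.random.default_rng(2)
N, ntrials = 100, 20000

gauss = rng.normal(0.0, 1.0, size=(ntrials, N))
# ratio of variances: the median is less efficient for Gaussian data, ~ pi/2 = 1.57
print(np.var(np.median(gauss, axis=1)) / np.var(gauss.mean(axis=1)))

# contaminate 5% of the points with a sigma=10 Gaussian (heavy tails)
outliers = rng.normal(0.0, 10.0, size=(ntrials, N))
mask = rng.uniform(size=(ntrials, N)) < 0.05
heavy = np.where(mask, outliers, gauss)
print(np.var(heavy.mean(axis=1)), np.var(np.median(heavy, axis=1)))  # the median now wins
```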
#### Variance estimator
The obvious estimator for the variance of the distribution is the sample variance
$$
s^2 = \frac{1}{N} \sum (x_i-\xbar)^2 = \frac{1}{N} \sum x_i^2 - \xbar^2.
$$
However, a short derivation shows that the sample variance is biased low:
$$
\expect{s^2} = {N-1 \over N}\sigma^2,
$$
This is because we had to use the sample mean rather than the true mean, which on average drives down the variance.
An unbiased estimator is therefore
$$
\hat{\sigma}^2 = {1\over N-1} \sum (x_i-\xbar)^2.
$$
If you compute the mean of a sample, or of data values in a bin, the estimated _standard deviation of the mean_ is
$$
\hat{\sigma}_\mu = \left[{1 \over N(N-1)}\sum (x_i-\xbar)^2\right]^{1/2}.
$$
Note that this is smaller by $N^{-1/2}$ than the estimate of the dispersion within the bin. You should always be clear which quantity (dispersion or standard deviation of the mean) you are plotting.
If $p(x)$ is Gaussian, then the distribution of $\xbar/\sigma$ is a Gaussian of width $N^{-1/2}$. However, the distribution of $\xbar/\hat{\sigma}$ is broader (a Student's $t$ distribution).
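A short simulation of the bias of the $1/N$ sample variance (a sketch; $N$ is chosen small so the $(N-1)/N$ factor is easy to see):
```
# Bias of the sample variance: 1/N vs 1/(N-1) normalization, true sigma^2 = 1
rng = np.random.default_rng(3)
N, ntrials = 5, 100000
x = rng.normal(0.0, 1.0, size=(ntrials, N))

s2_biased = np.var(x, axis=1, ddof=0)     # divides by N
s2_unbiased = np.var(x, axis=1, ddof=1)   # divides by N-1

print(s2_biased.mean())    # ~ (N-1)/N = 0.8
print(s2_unbiased.mean())  # ~ 1.0
```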
### Snap-judging Error Bars
What is wrong with this plot?
```
Npts = 20
x = np.linspace(0, 5, Npts)
m = 2
b = 3
y = m*x + b
sig_y = np.random.normal(0, 1, Npts)
fx = y + sig_y
err_y = 3*np.ones(len(x)) # + 2.*np.ones(len(x))
plt.figure(figsize=(10,5))
plt.errorbar(x, fx, yerr=err_y, fmt='bo', capsize=4, label="Data")
plt.plot(x, y, 'k:', label="Relation")
plt.ylabel("Y")
plt.xlabel("X")
plt.legend(loc='upper left')
```
### Bayesian vs. Frequentist Statistics
Suppose we have measured the mean mass of a sample of G stars, by some method, and say: at the 68\% confidence level the mean mass of G stars is $a \pm b$. What does this statement mean?
Bayesian answer: There is some true mean mass $\alpha$ of G stars, and there is a 68\% probability that $a-b \leq \alpha \leq a+b$.
More pedantically: The hypothesis that the true mean mass $\alpha$ of G stars lies in the range $a-b$ to $a+b$ has a 68\% probability of being true.
The **probability of the hypothesis is a real-numbered expression of the degree of belief we should have in the hypothesis**, and it obeys the axioms of probability theory.
In "classical" or "frequentist" statistics, a probability is a statement about the frequency of outcomes in many repeated trials. With this restricted definition, **one can't refer to the probability
of a hypothesis -- it is either true or false**. One can refer to the probability of data if a hypothesis is true, where probability means the fraction of time the data would have come out the way it did in many repeated trials.
Frequentist answer: The statement means something like: if $\alpha = a$, we would have expected to obtain a sample mean in the range $a\pm b$ 68\% of the time.
##### This is the fundamental conceptual difference between Bayesian and frequentist statistics.
**Bayesian:** Evaluate the probability of a hypothesis in light of data (and prior information). Parameter values or probability of truth of a hypothesis are random variables, _data are not_ (though they are drawn from a pdf).
**Frequentist:** Evaluate the probability of obtaining the data --- more precisely, the fraction of times a given _statistic_ (such as the sample mean) applied to the data would come out the way it did in many repeated trials --- given the hypothesis, or parameter values. A probability is a statement about the frequency of outcomes in many repeated trials. Data are random variables, parameter values or truth of hypotheses are not.
#### Summary of the differences
| Bayesian | Frequentist |
| :-: | :-: |
| Evaluate the probability of a hypothesis, given the data | Evaluate the probability of obtaining the data |
| Parameters and probability of truth are random variables | Data are random variables |
| Data are not random variables | Parameters and probability of truth are not random variables |
| Need to specify alternatives to evaluate hypotheses | Statistical tests implicitly account for alternatives |
David's opinion: The Bayesian formulation corresponds better to the way scientists actually think about probability, hypotheses, and data. It provides a better conceptual basis for figuring out what to do in a case where a
standard recipe does not neatly apply. But frequentist methods sometimes seem easier to apply, and they clearly capture _some_ of our intuition about probability.
Bottom line: One should be a Bayesian in principle, but maybe not always
in practice.
### Probability Axioms and Bayes' Theorem
Probabilities are real numbers $0 \leq p \leq 1$ obeying the axioms
$$
p(A|C) + p(\overline{A}|C) = 1.
$$
$$
p(AB|C) = p(A|BC)p(B|C).
$$
$\overline{A}$ means "not $A$"
$AB$ means "$A$ and $B$" and is thus equivalent to $BA$.
From this equivalence we see that
$$
p(AB|C) = p(A|BC)p(B|C)=p(BA|C)=p(B|AC)p(A|C).
$$
From the 2nd and 4th entries above, we arrive at **Bayes' Theorem**
$$
p(A|BC) = p(A|C) {p(B|AC) \over p(B|C)}.
$$
### Bayesian Inference
In application to scientific inference, this theorem is usually written
$$
p(H|DI) = p(H|I) {p(D|HI) \over p(D|I)},
$$
where
$H$ = hypothesis, which might be a statement about a parameter value, e.g., the population mean lies in the range $x \rightarrow x+dx$.
$D$ = data
$I$ = background information, which may be minimally informative or highly
informative.
$p(H|I)$ = **prior probability**, i.e., before data are considered
$p(D|HI)$ = **likelihood** of data given $H$ and $I$
$p(D|I)$ = **global likelihood**
$p(H|DI)$ = **posterior probability**, the probability of the hypothesis
after consideration of the data
Bayes' Theorem tells us how to update our estimate of the probability of a hypothesis in light of new data.
It can be applied sequentially, with the posterior probability from one experiment becoming the prior for the next, as more data become available.
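As a concrete sketch (a toy example of ours, not from the notes): estimating the mean $\mu$ of Gaussian data with known $\sigma=1$ on a parameter grid, updating the posterior one data point at a time. The flat prior and the simulated data are arbitrary choices.
```
# Sequential Bayesian update for the mean mu of N(mu, 1) data, on a grid of mu values
rng = np.random.default_rng(4)
data = rng.normal(loc=0.7, scale=1.0, size=5)   # "measurements" with true mean 0.7

mu_grid = np.linspace(-3, 3, 601)
posterior = np.ones_like(mu_grid)               # flat prior p(H|I)
posterior /= np.trapz(posterior, mu_grid)

for x in data:
    likelihood = np.exp(-0.5 * (x - mu_grid)**2)   # p(D|HI) for sigma = 1
    posterior = posterior * likelihood             # prior times likelihood
    posterior /= np.trapz(posterior, mu_grid)      # normalize; p(D|I) cancels out
    print(mu_grid[np.argmax(posterior)])           # posterior mode after each data point
```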
Calculation of likelihood $P(D|HI)$ is sometimes straightforward, sometimes difficult. The background information
$I$ may specify assumptions like a Gaussian error distribution, independence of data points.
Important aspect of Bayesian approach: only the actual data enter, not hypothetical data that could have been taken.
_All the evidence of the data is contained in the likelihood._
### Global Likelihood and Absolute Assessment
The global likelihood of the data, $P(D|I)$ is the sum (or integral) over "all" hypotheses. This can be a slippery concept.
Often $P(D|I)$ doesn't matter: in comparing hypotheses or parameter values, it cancels out.
When needed, it can often be found by requiring that $p(H|DI)$ integrate (or sum) to one, as it must if it is a true probability.
The Bayesian approach forces specification of alternatives to evaluate hypotheses.
Frequentist assessment tends to do this implicitly via the choice of statistical test.
### Criticism of Bayesian approach
The incorporation of priors makes Bayesian methods seem subjective, and it is the main source of criticism of the Bayesian approach.
If the data are compelling and the prior is broad, then the prior doesn't have much effect on the posterior. But if the data are weak, or the prior is narrow, then it can have a big effect.
Sometimes there are well defined ways of assigning an "uninformative" prior, but sometimes there is genuine ambiguity.
Bayesian methods sometimes seem like a lot of work to get to a straightforward answer.
In particular, we sometimes want to carry out an "absolute" hypothesis test without having to enumerate all alternative hypotheses.
### Criticism of frequentist approach
The frequentist approach doesn't correspond as well to scientific intuition. We want to talk about the probability of hypotheses or parameter values.
The choice of which statistical test to apply is often arbitrary. There is not a clear way to go from the result of a test to an actual scientific inference about parameter values or validity of a hypothesis.
Bayesians argue (and I agree) that frequentist methods obtain the appearance of objectivity only by sweeping priors under the rug, making assumptions implicit rather than explicit.
Frequentist approach relies on hypothetical data as well as actual data obtained. Choice of hypothetical data sets is often ambiguous, e.g., in the "stopping" problem.
Sometimes we _do_ have good prior information. It is straightforward to incorporate this in a Bayesian approach, while it is not in the frequentist approach.
Frequentist methods are poorly equipped to handle "nuisance parameters," which in the Bayesian approach are easily handled by marginalization.
For example, the marginal distribution of a parameter $x$
$$
p(x) = \int p(x, a, b, c) \, da\,db\,dc
$$
can only exist if $x$ is a random variable.
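A minimal numerical sketch of marginalization (our own example): the joint posterior of $(\mu,\sigma)$ for Gaussian data on a 2D grid, with the nuisance parameter $\sigma$ integrated out to leave the marginal distribution of $\mu$. Flat priors are assumed for simplicity.
```
# Marginalize a grid posterior p(mu, sigma | D) over the nuisance parameter sigma
rng = np.random.default_rng(5)
d = rng.normal(loc=1.0, scale=2.0, size=20)

mu = np.linspace(-2, 4, 301)
sigma = np.linspace(0.5, 5, 200)
M, S = np.meshgrid(mu, sigma, indexing='ij')

# Gaussian log-likelihood: -N ln(sigma) - sum (d_i - mu)^2 / (2 sigma^2)
loglike = -len(d) * np.log(S) - 0.5 * np.sum((d[None, None, :] - M[..., None])**2, axis=-1) / S**2
post = np.exp(loglike - loglike.max())   # flat priors in mu and sigma

p_mu = np.trapz(post, sigma, axis=1)     # integrate out sigma
p_mu /= np.trapz(p_mu, mu)
print(mu[np.argmax(p_mu)])               # marginal posterior mode for mu
```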
|
github_jupyter
|
# <p style="text-align: center;"> Self Driving Car in OpenAI Gym using Imitation Learning and Reinforcement Learning</p>

# <p style="text-align: center;"> 1.0 Abstract </p> <a id='abstract'></a>
We all know self-driving cars are one of the hottest areas of research and business for the tech giants. What seemed like science fiction a few years ago now seems more like something that is soon to become part and parcel of life. The reason I say “soon to be” is that even though companies like Tesla, Nissan, and Cadillac do have self-driving assistance software, they still require a human to keep an eye on the road and take control when needed. However, it is fascinating to see how far we have come in terms of innovation and how fast technology is advancing. So much so that now, with the help of basic deep learning and neural network magic, we can build our own pipeline for autonomous driving.
Our idea to try and build our very own self driving car emerged from here. In order to understand the basics of the process , we did this project in two parts.
- Self Driving Car using Supervised Learning
- Self Driving Car using Reinforcement Learning
**PS:** To clarify the structure, we have split this project across separate notebooks, and each individual notebook contains the whole code and documentation for its part.
### Readme Structure
### 1. Basics of CNN :-
The main agenda of this notebook is as follow:-
> - To understand the convolution operation
> - To understand the pooling operation
> - Remembering the vocabulary used in convolutional neural networks (padding, stride, filter, etc.)
> - Building a convolutional neural network for multi-class classification in images
>- Basics of Imitation Learning
This notebook covers the basics of convolutional operations and the network as a whole. It was an integral part of our project and will serve as a guide for any beginner trying to understand CNNs.
### 2. Self Driving Car using Supervised Learning :-
In this notebook, we applied a supervised learning algorithm (convolutional networks) to control the direction of a car in a 2D simulation. The notebook captures the following:-
> - How a convolutional network works
> - How to create the dataset and use it for training our network
> - How to use gym to retrieve the output of our neural network in order to control the simulation.
The general idea that we used is that of the supervised classifier. We are going to train a convolutional neural network to classify images in the game, according to three labels: left, right and straight ahead. We will then convert these commands into instructions for the simulator, which will execute them.
### 3. Basics of Deep Q-Learning:-
The main agenda of this notebook is as follow:-
> - Q-Learning
> - Why ‘Deep’ Q-Learning?
> - Introduction to Deep Q-Learning
> - Challenges of Deep Reinforcement Learning as compared to Deep Learning
> - Experience Replay
> - Target Network
This notebook includes the basics of deep Q-learning. It was an integral part of our project and will serve as a guide for any beginner trying to understand Q-learning.
### 4. Self Driving Car using Reinforcement Learning :-
In this notebook, a python based car racing environment is trained using a deep reinforcement learning algorithm to perform efficient self driving on a racing track. The notebook captures the following.
> - Development of a deep Q learning algorithm which is then used to train an autonomous driver agent.
> - Different configurations in the deep Q learning algorithm parameters and in the neural network architecture are then tested and compared in order to obtain the best racing car average score over a period of 100 races. This score is given by the gym environment and can be seen on the bottom left corner.
According to OpenAI Gym, this environment is considered solved when the agent successfully reaches an average score of 900 on the last 100 runs. In this project, this goal was surpassed having obtained an average score of 905 over the last 100 runs. Therefore, we successfully solved the environment.
# <p style="text-align: center;"> Index </p>
- # 1 [Abstract](#abstract)
- # 2 [Basics of CNN](./Umbrella_Academy_INFO7390_Project/INFO7390_Notebooks/Basics_of_Convolutional_Neural_Network.ipynb)
- # 3 [Self Driving Car using Supervised Learning](./Umbrella_Academy_INFO7390_Project/INFO7390_Notebooks/Self_Driving_Car_Imitation_Learning.ipynb)
- # 4 [Basics of Deep Q learning](./Umbrella_Academy_INFO7390_Project/INFO7390_Notebooks/Basics_of_Deep_Q_Learning.ipynb)
- # 5 [Self Driving Car using Reinforcement Learning](./Umbrella_Academy_INFO7390_Project/INFO7390_Notebooks/RL_Self_Driving_Car.ipynb)
- # 6 [Conclusion](#Conclusion)

# Setting up the Environment <a id='Environment'></a>
Before we start with the setup of our environment, we need to install a few packages which will make our game and neural network work.
### 1) Gym facility
Install OpenAI Gym on the machine
Follow the instructions at https://github.com/openai/gym#installation for an extensive guide.
**Summary of instructions:**
- Install Python 3.5+
- Clone the gym repo: git clone https://github.com/openai/gym.git
- cd gym
- Gym installation, with the box2d environments: pip install -e '.[box2d]'
Follow the following steps to play the Car Racing Game
- cd gym/envs/box2d
- python car_racing.py
### 2) Pytorch
Pytorch is the deep learning framework that we will be using. It makes it possible to build neural networks very simply.
Follow the instructions on http://pytorch.org/ for a deep guide.
**Summary of instructions:**
- Install Python 3.5+
- It is recommended to manage PyTorch with Anaconda. Please install Anaconda
- Install PyTorch following instructions at https://pytorch.org/get-started/locally/

For example this is the setup for my Computer
> pip install torch==1.7.0+cpu torchvision==0.8.1+cpu torchaudio===0.7.0 -f https://download.pytorch.org/whl/torch_stable.html
## The Environment
For this tutorial, we will use the gym library developed by OpenAI. It provides environments (simple games) to develop reinforcement learning algorithms.
The environment we will be using is CarRacing-v0 ( https://gym.openai.com/envs/CarRacing-v0/ ). It is about driving a car on a circuit, the objective being to move forward while staying on the track, which contains many turns. The input to the algorithm (the state provided by the environment) is only the image displayed by the environment: we see the car, and the terrain around it.

The idea is to drive the car by analyzing this image.
We are going to use this library in a roundabout way: it is designed for reinforcement learning, where the objective is in principle to use the rewards provided by the environment to learn the optimal strategy without user action. Here we will not be using these rewards.
In addition, we will be doing end-to-end learning, which means that the neural network directly gives us the commands to drive the car. This is unlike a road detection module whose output is then analyzed by another program (most real autonomous driving systems are built that way). Here, the neural network takes the terrain image as input and issues a command to be executed (turn left, turn right, continue straight ahead), without any intermediate program.
To use the environment, you need to import it like this:
>import gym
>env = gym.make('CarRacing-v0').env
You can then access several useful functions:
- **env.reset() :** Allows you to restart the environment
- **env.step(action) :** Allows you to perform the action `action`. This function returns a tuple `state`, `reward`, `done`, `info` containing the state of the game after the action and the reward obtained; `done` indicates if the game is finished, and `info` contains debug data.
- **env.render() :** Displays the game window.
Here, the state `state` that will be returned by `env.step(action)` is the image displayed on the screen (the pixel matrix). It is this data that we will use to steer our car.
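As a rough sketch of how these pieces fit together (assuming gym with the Box2D extras is installed; the random action is just a placeholder for the command our neural network will eventually produce):
```
import gym

env = gym.make('CarRacing-v0').env
state = env.reset()                       # initial pixel matrix

for _ in range(100):
    env.render()                          # display the game window
    action = env.action_space.sample()    # placeholder: replace with the network's command
    state, reward, done, info = env.step(action)
    if done:
        state = env.reset()

env.close()
```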
# <p style="text-align: center;"> Conclusion<p><a id='Conclusion'></a>
#### 1. Video Simulation of self driving car by supervised learning (Imitation Learning) :-
<video controls src="main_videos/IL_Result.mp4" width="500" height="340"/>
#### 2. Video Simulation of self driving car by Reinforcement learning (Deep Q Learning) :-
<video controls src="main_videos/RL_SelfDriving.mp4" height="340"/>
3. Our network recognizes the shapes to keep the car on the desired path. It's a sort of classifier that just indicates whether the car is in the right position, too far to the right or too far to the left. We then send this command to the simulator. All of this is done in real time.
> Behavioural Cloning though has a few disadvantages, and we can see them here in this notebook.
- We need to manually accelerate and decelerate, and we can only accelerate till a certain limit, because beyond that, the car will spin out of control and go outside in the patch of grass.
- Since while training we never leave the track, the car has no way of coming back to the road after it has left the track and is into the grass.
- Here we only have a training set of 3000 and a validation set of 600. We tried increasing these sizes by a magnitude of 10 (30,000 and 6,000), but because of the substantial increase in the size of the dataset, the error made while generating it also shot up, which resulted in a very poor dataset for our neural net.
- Also, because we were well within the tracks, the car has no data on cases in which it goes out by accident.
- A possible remedy for this is preprocessing the data in such a way that the dataset has images of car coming in, but not going out.
For seeing how this works refer to :- [Self Driving Car using Supervised Learning](./Umbrella_Academy_INFO7390_Project/INFO7390_Notebooks/Self_Driving_Car_Imitation_Learning.ipynb)
# <p style="text-align: center;"> Contribution<p><a id='Contribution'></a>
- Code by self : 75%
- Code from external Sources : 25%
# <p style="text-align: center;"> License<p><a id='License'></a>
Copyright (c) 2020 Rushabh Nisher, Manali Sharma
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
github_jupyter
|
# Earthquakes
In this notebook we'll try and model the intensity of earthquakes, basically replicating one of the examples in [this](http://user.it.uu.se/~thosc112/dahlin2014-lic.pdf) paper. To that end, let's first grab the data we need from USGS. We then filter the data to only include earthquakes of a magnitude 7.0, on the Richter scale, or higher.
```
from requests import get
from datetime import datetime
from json import loads
import pandas as pd
url = "https://earthquake.usgs.gov/fdsnws/event/1/query.geojson?minsig=600"
resp = get(url, params={"starttime": datetime(1900, 1, 1), "endtime": datetime(2021, 1, 1)})
json = resp.json()
data = pd.DataFrame.from_dict((i["properties"] for i in json["features"]), orient="columns")
data.set_index("time", inplace=True)
data.index = pd.to_datetime(data.index, unit="ms")
data = data.where(data["mag"] >= 7.0).sort_index()
by_year = data.groupby(data.index.year)["mag"].count()
by_year.plot(figsize=(16, 9), color="gray")
```
Next, we'll setup the model for the data. We'll use the same one as Dahlin uses, i.e.
$$
\begin{cases}
d \log {\lambda_t} = \kappa (\mu - \log{\lambda_t})dt + \sigma dW_t, \\
Y_t \sim \mathcal{P} \left ( \lambda_t \right),
\end{cases}
$$
where $\mathcal{P}(x)$ denotes a Poisson distribution with rate $x$.
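Before fitting anything, it can help to simulate the model once to see what kind of data it implies: a log-intensity following an Ornstein-Uhlenbeck process, with yearly counts drawn from a Poisson distribution with rate $\lambda_t$. This is only an illustrative sketch with made-up parameter values, not the inferred ones.
```
import numpy as np
import matplotlib.pyplot as plt

# Euler-Maruyama simulation of d log(lambda) = kappa * (mu - log(lambda)) dt + sigma dW
rng = np.random.default_rng(0)
kappa, mu, sigma, dt, T = 0.2, np.log(15.0), 0.3, 1.0, 120

log_lam = np.empty(T)
log_lam[0] = mu
for t in range(1, T):
    log_lam[t] = log_lam[t - 1] + kappa * (mu - log_lam[t - 1]) * dt \
                 + sigma * np.sqrt(dt) * rng.normal()

counts = rng.poisson(np.exp(log_lam))      # one Poisson draw per "year"
plt.plot(counts, color="gray")
plt.plot(np.exp(log_lam), color="salmon");
```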
```
from pyfilter.timeseries import models as m, GeneralObservable, StateSpaceModel
from pyfilter.distributions import Prior
from torch.distributions import Poisson, Normal, Exponential, LogNormal
import torch
class EarthquakeObservable(GeneralObservable):
def build_density(self, x):
return Poisson(rate=x.values.exp(), validate_args=False)
priors = Prior(Exponential, rate=5.0), Prior(Normal, loc=0.0, scale=1.0), Prior(LogNormal, loc=0.0, scale=1.0)
initial_state_mean = Prior(Normal, loc=0.0, scale=1.0)
latent = m.OrnsteinUhlenbeck(*priors, initial_state_mean=initial_state_mean, dt=1.0, ndim=1)
obs = EarthquakeObservable(torch.Size([]), ())
ssm = StateSpaceModel(latent, obs)
```
Next, we'll perform the inference. For this model we'll use PMMH together with a gradient based proposal, corresponding to PMH1 of the dissertation referenced earlier.
```
from pyfilter.inference.batch.mcmc import PMMH, proposals as p
from pyfilter.filters.particle import APF
as_tensor = torch.from_numpy(by_year.values).int()
filt = APF(ssm, 500, record_states=True)
alg = PMMH(filt, 3000, num_chains=6, proposal=p.GradientBasedProposal(scale=5e-2)).cuda()
state = alg.fit(as_tensor.cuda())
```
Plot one smoothed realization.
```
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(16, 9))
smoothed = filt.smooth(state.filter_state.states).mean((1, 2)).exp().cpu().numpy()[1:]
ax.plot(by_year.index, smoothed, color="gray", label="Rate")
ax2 = ax.twinx()
by_year.plot(ax=ax2, color="salmon", alpha=0.75, label="Earthquakes")
fig.legend()
```
And finally plot the posterior distributions of the parameters.
```
from pyfilter.inference.utils import params_to_tensor
from arviz import plot_trace
parameters = state.samples.values().transpose(1, 0).cpu().numpy()
# fig, ax = plt.subplots(parameters.shape[-1], figsize=(16, 9))
plot_trace(parameters)
```
|
github_jupyter
|
# Analysis of one-year trace of gut microbiome
This notebook records the code used for analyzing data from [Gibbons _et. al._ (2017)](http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1005364).
## Load required packages
```
library(beem)
library(grid)
library(ggplot2)
library(ggsci)
library(igraph)
library(reshape2)
```
## Load functions and data
```
input.da <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/DA.counts.txt', head=F, row.names=1)
metadata.da <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/DA.metadata.txt', head=T)
## For DB, point #74 has an extremely high abundance of one species and #180 is sampled too far from the previous time point
input.db <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/DB.counts.txt', head=F, row.names=1)[,-c(74,180)]
metadata.db <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/DB.metadata.txt', head=T)[-c(74,180),]
## For M3, data from 330:332 are too far from previous time point
input.m3 <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/M3.counts.txt', head=F, row.names=1)[,1:329]
metadata.m3 <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/M3.metadata.txt', head=T)[1:329,]
input.f4 <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/F4.counts.txt', head=F, row.names=1)
metadata.f4 <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/F4.metadata.txt', head=T)
```
## Run BEEM
### Individual DA
```
counts.da <- input.da[-1,] + 0.0001 ## added pseudo value for R>3.5
colnames(counts.da) <- as.character(input.da[1,])
res.da <- EM(dat=counts.da,meta=metadata.da, dev=10, verbose=FALSE,
min_iter=50, max_iter=100, converge_thre = 1e-4,
scaling = 10000, ncpu=4, seed=0)
```
### Individual M3
```
counts.m3 <- input.m3[-1,]
colnames(counts.m3) <- as.character(input.m3[1,])
res.m3 <- EM(dat=counts.m3, meta=metadata.m3, dev=10, verbose=FALSE,
min_iter=50, max_iter=100, converge_thre = 1e-4,
scaling = 10000, ncpu=4, seed=0)
```
### Individual DB
```
counts.db <- input.db[-1,]
colnames(counts.db) <- as.character(input.db[1,])
res.db <- EM(dat=counts.db,meta=metadata.db, dev=10, verbose=FALSE,
min_iter=50, max_iter=100, converge_thre=1e-4,
scaling = 10000, ncpu=4, seed=0)
```
### Individual F4
```
counts.f4 <- input.f4[-1,]
colnames(counts.f4) <- as.character(input.f4[1,])
res.f4 <- EM(dat=counts.f4,meta=metadata.f4, dev=10, verbose=FALSE,
min_iter=50, max_iter=100, converge_thre=1e-4,
scaling = 10000, ncpu=4, seed=0)
```
## Infer parameters
```
params.da <- paramFromEM(res.da, counts.da, metadata.da, ncpu=4)
params.m3 <- paramFromEM(res.m3, counts.m3, metadata.m3, ncpu=4)
params.db <- paramFromEM(res.db, counts.db, metadata.db, ncpu=4)
params.f4 <- paramFromEM(res.f4, counts.f4, metadata.f4, ncpu=4)
```
## Functions for analysis
```
int.net <- function(counts, parms, sig=1, title){
## plot interaction network
minmax <- function(x) (x-min(x))/(max(x)-min(x))
annote <- read.table('~/BEEM/vignettes/gibbons_et_al_analysis/all_otu_mapping.txt',head=F, row.names=1)
counts.mean <- rowMeans(counts)
int <- parms[parms$parameter_type=='interaction' & parms$source_taxon!=parms$target_taxon,]
int.f <- int[int$significance>sig,2:4]
g <- graph.data.frame(int.f[,1:2])
V(g)$color <- annote[V(g)$name,]$V4
V(g)$size <- log(counts.mean[V(g)$name]) +4
E(g)$color <- ifelse(int.f$value>0,fill_cols[12],fill_cols[13])
E(g)$lty <- ifelse(int.f$value>0,1,2)
E(g)$width <- minmax(abs(int.f$value) ) * 2 + 0.5
plot(g, main=title,asp=0,edge.arrow.size=0.5,edge.curved=.15)
return(g)
}
```
## Biomass trajectory of individual DA
Note the periodic behaviour of the biomass -- the period is around 90 days (i.e. 3 months).
```
par(mfrow = c(4,1))
plot(x=metadata.da$measurementID,y=biomassFromEM(res.da), xlim=c(0, 450), type='b', pch=19, xlab='Date', ylab='Estimated biomass', log='y')
plot(x=metadata.m3$measurementID,y=biomassFromEM(res.m3), xlim=c(0, 450), type='b', pch=19, xlab='Date', ylab='Estimated biomass', log='y')
plot(x=metadata.db$measurementID,y=biomassFromEM(res.db), xlim=c(0, 450), type='b', pch=19, xlab='Date', ylab='Estimated biomass', log='y')
plot(x=metadata.f4$measurementID,y=biomassFromEM(res.f4), xlim=c(0, 450), type='b', pch=19, xlab='Date', ylab='Estimated biomass', log='y')
```
## Plot interaction network
```
fill_cols <- pal_simpsons(c("springfield"))(16)
ga <- int.net(counts.da, params.da, 1.5, 'DA')
gm <- int.net(counts.m3, params.m3, 1.5, 'M3')
gb <- int.net(counts.db, params.db, 1.5, 'DB')
f4 <- int.net(counts.f4, params.f4, 1.5, 'F4')
res.da$counts <- counts.da
res.da$metadata <- metadata.da
saveRDS(res.da, '~/BEEM/vignettes/gibbons_et_al_analysis/DA.EM.rds')
write.table(params.da, '~/BEEM/vignettes/gibbons_et_al_analysis/DA.params.txt', col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
res.m3$counts <- counts.m3
res.m3$metadata <- metadata.m3
saveRDS(res.m3, '~/BEEM/vignettes/gibbons_et_al_analysis/M3.EM.rds')
write.table(params.m3, '~/BEEM/vignettes/gibbons_et_al_analysis/M3.params.txt', col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
res.db$counts <- counts.db
res.db$metadata <- metadata.db
saveRDS(res.db, '~/BEEM/vignettes/gibbons_et_al_analysis/DB.EM.rds')
write.table(params.db, '~/BEEM/vignettes/gibbons_et_al_analysis/DB.params.txt', col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
res.f4$counts <- counts.f4
res.f4$metadata <- metadata.f4
saveRDS(res.f4, '~/BEEM/vignettes/gibbons_et_al_analysis/F4.EM.rds')
write.table(params.f4, '~/BEEM/vignettes/gibbons_et_al_analysis/F4.params.txt', col.names=TRUE, row.names=FALSE, sep='\t', quote=FALSE)
sessionInfo()
```
|
github_jupyter
|
```
import wandb
import nltk
from nltk.stem.porter import *
from torch.nn import *
from torch.optim import *
import numpy as np
import pandas as pd
import torch,torchvision
import random
from tqdm import *
from torch.utils.data import Dataset,DataLoader
stemmer = PorterStemmer()
PROJECT_NAME = 'kickstarter-NLP-v3'
device = 'cuda'
def tokenize(sentence):
return nltk.word_tokenize(sentence.lower())
tokenize('$100')
def stem(word):
return stemmer.stem(word.lower())
stem('organic')
def bag_of_words(tokenized_words,all_words):
tokenized_words = [stem(w) for w in tokenized_words]
bag = np.zeros(len(all_words))
for idx,w in enumerate(all_words):
if w in tokenized_words:
bag[idx] = 1.0
return bag
bag_of_words(['hi'],['hi','how','hi'])
data = pd.read_csv('./data.csv',encoding='latin-1')[:5000]
data
X = data['OriginalTweet']
y = data['Sentiment']
words = []
data = []
idx = 0
labels = {}
labels_r = {}
for label in y:
if label not in list(labels.keys()):
idx += 1
labels[label] = idx
labels_r[idx] = label
for X_batch,y_batch in tqdm(zip(X,y)):
X_batch = tokenize(X_batch)
new_X = []
for Xb in X_batch:
new_X.append(stem(Xb))
words.extend(new_X)
data.append([
new_X,
np.eye(labels[y_batch],len(labels))[labels[y_batch]-1]
])
words = sorted(set(words))
np.random.shuffle(words)
np.random.shuffle(data)
X = []
y = []
for X_batch,y_batch in tqdm(data):
X.append(bag_of_words(X_batch,words))
y.append(y_batch)
from sklearn.model_selection import *
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.125,shuffle=False)
X_train = torch.from_numpy(np.array(X_train)).to(device).float()
y_train = torch.from_numpy(np.array(y_train)).to(device).float()
X_test = torch.from_numpy(np.array(X_test)).to(device).float()
y_test = torch.from_numpy(np.array(y_test)).to(device).float()
def get_loss(model,X,y,criterion):
preds = model(X)
loss = criterion(preds,y)
return loss.item()
def get_accuracy(model,X,y):
preds = model(X)
correct = 0
total = 0
for pred,yb in zip(preds,y):
pred = int(torch.argmax(pred))
yb = int(torch.argmax(yb))
if pred == yb:
correct += 1
total += 1
acc = round(correct/total,3)*100
return acc
class Model(Module):
def __init__(self):
super().__init__()
self.activation = ReLU()
self.iters = 2
self.dropout = Dropout()
self.hidden = 512
self.linear1 = Linear(len(words),self.hidden)
self.linear2 = Linear(self.hidden,self.hidden)
self.linear3 = Linear(self.hidden,self.hidden)
self.linear4 = Linear(self.hidden,self.hidden)
self.linear5 = Linear(self.hidden,self.hidden)
self.output = Linear(self.hidden,len(labels))
def forward(self,X):
preds = self.linear1(X)
preds = self.activation(self.linear2(preds))
for _ in range(self.iters):
preds = self.dropout(self.activation(self.linear3(preds)))
preds = self.activation(self.linear4(preds))
preds = self.activation(self.linear5(preds))
preds = self.output(preds)
return preds
model = Model().to(device)
criterion = MSELoss()
optimizer = Adam(model.parameters(),lr=0.001)
epochs = 100
batch_size = 32
torch.save(model,'model-custom.pt')
torch.save(model,'model-custom.pth')
torch.save(model.state_dict(),'model-custom-sd.pt')
torch.save(model.state_dict(),'model-custom-sd.pth')
torch.save(words,'words.pt')
torch.save(words,'words.pth')
torch.save(data,'data.pt')
torch.save(data,'data.pth')
torch.save(labels,'labels.pt')
torch.save(labels,'labels.pth')
torch.save(idx,'idx.pt')
torch.save(idx,'idx.pth')
torch.save(y_train,'y_train.pt')
torch.save(y_test,'y_test.pth')
wandb.init(project=PROJECT_NAME,name='baseline')
for _ in tqdm(range(epochs)):
for i in range(0,len(X_train),batch_size):
X_batch = X_train[i:i+batch_size]
y_batch = y_train[i:i+batch_size]
preds = model(X_batch)
loss = criterion(preds,y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model.eval()
torch.cuda.empty_cache()
wandb.log({'Loss':(get_loss(model,X_train,y_train,criterion)+get_loss(model,X_batch,y_batch,criterion))/2})
torch.cuda.empty_cache()
wandb.log({'Val Loss':get_loss(model,X_test,y_test,criterion)})
torch.cuda.empty_cache()
wandb.log({'Acc':(get_accuracy(model,X_train,y_train)+get_accuracy(model,X_batch,y_batch))/2})
torch.cuda.empty_cache()
wandb.log({'Val Acc':get_accuracy(model,X_test,y_test)})
torch.cuda.empty_cache()
model.train()
wandb.finish()
torch.cuda.empty_cache()
torch.save(model,'model.pt')
torch.save(model,'model.pth')
torch.save(model.state_dict(),'model-sd.pt')
torch.save(model.state_dict(),'model-sd.pth')
torch.save(words,'words.pt')
torch.save(words,'words.pth')
torch.save(data,'data.pt')
torch.save(data,'data.pth')
torch.save(labels,'labels.pt')
torch.save(labels,'labels.pth')
torch.save(idx,'idx.pt')
torch.save(idx,'idx.pth')
torch.save(y_train,'y_train.pt')
torch.save(y_test,'y_test.pth')
```
|
github_jupyter
|
[](https://colab.research.google.com/github/huggingface/education-toolkit/blob/main/03_getting-started-with-transformers.ipynb)
💡 **Welcome!**
We’ve assembled a toolkit that university instructors and organizers can use to easily prepare labs, homework, or classes. The content is designed in a self-contained way such that it can easily be incorporated into the existing curriculum. This content is free and uses widely known Open Source technologies (`transformers`, `gradio`, etc).
Alternatively, you can request for someone on the Hugging Face team to run the tutorials for your class via the [ML demo.cratization tour](https://huggingface2.notion.site/ML-Demo-cratization-tour-with-66847a294abd4e9785e85663f5239652) initiative!
You can find all the tutorials and resources we’ve assembled [here](https://huggingface2.notion.site/Education-Toolkit-7b4a9a9d65ee4a6eb16178ec2a4f3599).
# Tutorial: Getting Started with Transformers
**Learning goals:** The goal of this tutorial is to learn how:
1. Transformer neural networks can be used to tackle a wide range of tasks in natural language processing and beyond.
2. Transfer learning allows one to adapt Transformers to specific tasks.
3. The `pipeline()` function from the `transformers` library can be used to run inference with models from the [Hugging Face Hub](https://huggingface.co/models).
This tutorial is based on the first chapter of our O'Reilly book [_Natural Language Processing with Transformers_](https://transformersbook.com/) - check it out if you want to dive deeper into the topic!
**Duration**: 30-45 minutes
**Prerequisites:** Knowledge of Python and basic familiarity with machine learning
**Author**: [Lewis Tunstall](https://twitter.com/_lewtun) (feel free to ping me with any questions about this tutorial)
All of these steps can be done for free! All you need is an Internet browser and a place where you can write Python 👩💻
## 0. Why Transformers?
Deep learning is currently undergoing a period of rapid progress across a wide variety of domains, including:
* 📖 Natural language processing
* 👀 Computer vision
* 🔊 Audio
* 🧬 Biology
* and many more!
The main driver of these breakthroughs is the **Transformer** -- a novel **neural network** developed by Google researchers in 2017. In short, if you’re into deep learning, you need Transformers!
Here's a few examples of what Transformers can do:
* 💻 They can **generate code** as in products like [GitHub Copilot](https://copilot.github.com/), which is based on OpenAI's family of [GPT models](https://huggingface.co/gpt2?text=My+name+is+Clara+and+I+am).
* ❓ They can be used to **improve search engines**, like [Google did](https://www.blog.google/products/search/search-language-understanding-bert/) with a Transformer called [BERT](https://huggingface.co/bert-base-uncased).
* 🗣️ They can **process speech in multiple languages** to perform speech recognition, speech translation, and language identification. For example, Facebook's [XLS-R model](https://huggingface.co/spaces/facebook/XLS-R-2B-22-16) can automatically transcribe audio in one language to another!
Training these models **from scratch** involves **a lot of resources**: you need large amounts of compute, data, and days to train for 😱.
Fortunately, you don't need to do this in most cases! Thanks to a technique known as **transfer learning**, it is possible to adapt a model that has been trained from scratch (usually called a **pretrained model**) to a variety of downstream tasks. This process is called **fine-tuning** and can typically be carried out with a single GPU and a dataset of the size that you're likely to find in your university or company.
The models that we'll be looking at in this tutorial are all examples of fine-tuned models, and you can learn more about the transfer learning process in the video below:
```
from IPython.display import YouTubeVideo
YouTubeVideo('BqqfQnyjmgg')
```
Now, Transformers are the coolest kids in town, but how can we use them? If only there was a library that could help us ... oh wait, there is! The [Hugging Face Transformers library](https://github.com/huggingface/transformers) provides a unified API across dozens of Transformer architectures, as well as the means to train models and run inference with them. So to get started, let's install the library with the following command:
```
%%capture
%pip install transformers[sentencepiece]
```
Now that we've installed the library, let's take a look at some applications!
## 1. Pipelines for Transformers
The fastest way to learn what Transformers can do is via the `pipeline()` function. This function loads a model from the Hugging Face Hub and takes care of all the preprocessing and postprocessing steps that are needed to convert inputs into predictions:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/pipeline.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=800>
In the next few sections we'll see how these steps are combined for different applications. If you want to learn more about what is happening under the hood, then check out the video below:
```
YouTubeVideo('1pedAIvTWXk')
```
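As a rough sketch of the three stages the pipeline wraps for you (preprocessing, model forward pass, postprocessing), here is the text-classification case done by hand with the default sentiment model mentioned below. Treat it as illustrative rather than a drop-in replacement for `pipeline()`:
```
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

inputs = tokenizer("I love this tutorial!", return_tensors="pt")  # preprocessing
with torch.no_grad():
    logits = model(**inputs).logits                               # model forward pass
probs = torch.softmax(logits, dim=-1)                             # postprocessing
print({model.config.id2label[i]: round(p.item(), 3) for i, p in enumerate(probs[0])})
```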
## 2. Text classification
Let's start with one of the most common tasks in NLP: text classification. We need a snippet of text for our models to analyze, so let's use the following (fictious!) customer feedback about a certain online order:
```
text = """Dear Amazon, last week I ordered an Optimus Prime action figure \
from your online store in Germany. Unfortunately, when I opened the package, \
I discovered to my horror that I had been sent an action figure of Megatron \
instead! As a lifelong enemy of the Decepticons, I hope you can understand my \
dilemma. To resolve the issue, I demand an exchange of Megatron for the \
Optimus Prime figure I ordered. Enclosed are copies of my records concerning \
this purchase. I expect to hear from you soon. Sincerely, Bumblebee."""
```
While we're at it, let's create a simple wrapper so that we can pretty print out texts:
```
import textwrap
wrapper = textwrap.TextWrapper(width=80, break_long_words=False, break_on_hyphens=False)
print(wrapper.fill(text))
```
Now suppose that we'd like to predict the _sentiment_ of this text, i.e. whether the feedback is positive or negative. This is a special type of text classification that is often used in industry to aggregate customer feedback across products or services. The example below shows how a Transformer like BERT converts the inputs into atomic chunks called **tokens** which are then fed through the network to produce a single prediction:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/clf_arch.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=600>
Loading a Transformer model for this task is quite simple. We just need to specify the task in the `pipeline()` function as follows:
```
from transformers import pipeline
sentiment_pipeline = pipeline('text-classification')
```
When you run this code, you'll see a message about which Hub model is being used by default. In this case, the `pipeline()` function loads the `distilbert-base-uncased-finetuned-sst-2-english` model, which is a small BERT variant trained on [SST-2](https://paperswithcode.com/sota/sentiment-analysis-on-sst-2-binary) which is a sentiment analysis dataset.
💡 The first time you execute the code, the model will be automatically downloaded from the Hub and cached for later use!
Now we are ready to run our example through pipeline and look at some predictions:
```
sentiment_pipeline(text)
```
The model predicts negative sentiment with high confidence, which makes sense given that we have a disgruntled customer. You can also see that the pipeline returns a list of Python dictionaries with the predictions. We can also pass several texts at the same time, in which case we get one dictionary in the list for each text.
⚡ **Your turn!** Feed a list of texts with different types of sentiment to the `sentiment_pipeline` object. Do the predictions always make sense?
## 3. Named entity recognition
Let's now do something a little more sophisticated. Instead of just finding the overall sentiment, let's see if we can extract **entities** such as organizations, locations, or individuals from the text. This task is called named entity recognition, or NER for short. Instead of predicting just a class for the whole text **a class is predicted for each token**, as shown in the example below:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/ner_arch.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=600>
Again, we just load a pipeline for NER without specifying a model. This will load a default BERT model that has been trained on the [CoNLL-2003](https://huggingface.co/datasets/conll2003) dataset:
```
ner_pipeline = pipeline('ner')
```
When we pass our text through the model, we now get a long list of Python dictionaries, where each dictionary corresponds to one detected entity. Since multiple tokens can correspond to a single entity, we can apply an aggregation strategy that merges entities if the same class appears in consecutive tokens:
```
entities = ner_pipeline(text, aggregation_strategy="simple")
print(entities)
```
This isn't very easy to read, so let's clean up the outputs a bit:
```
for entity in entities:
print(f"{entity['word']}: {entity['entity_group']} ({entity['score']:.2f})")
```
That's much better! It seems that the model found most of the named entities but was confused about "Megatron" and "Decepticons", which are characters in the Transformers franchise. This is no surprise since the original dataset probably did not contain many Transformers characters. For this reason it makes sense to further fine-tune a model on your own dataset!
Now that we've seen an example of text and token classification using Transformers, let's look at an interesting application called **question answering**.
## 4. Question answering
In this task, the model is given a **question** and a **context** and needs to find the answer to the question within the context. This problem can be rephrased as a classification problem: For each token the model needs to predict whether it is the start or the end of the answer. In the end we can extract the answer by looking at the span between the token with the highest start probability and highest end probability:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/qa_arch.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=600>
You can imagine that this requires quite a bit of pre- and post-processing logic. Good thing that the pipeline takes care of all that! As usual, we load the model by specifying the task in the `pipeline()` function:
```
qa_pipeline = pipeline("question-answering")
```
This default model is trained on the famous [SQuAD dataset](https://huggingface.co/datasets/squad). Let's see if we can ask it what the customer wants:
```
question = "What does the customer want?"
outputs = qa_pipeline(question=question, context=text)
outputs
```
Awesome, that sounds about right!
## 5. Text summarization
Let's see if we can go beyond these natural language understanding tasks (NLU) where BERT excels and delve into the generative domain. Note that generation is much more computationally demanding since we usually generate one token at a time and need to run this several times. An example for how this process works is shown below:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/gen_steps.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=600>
A popular task involving generation is summarization. Let's see if we can use a transformer to generate a summary for us:
```
summarization_pipeline = pipeline("summarization")
```
This model was trained on the [CNN/Dailymail dataset](https://huggingface.co/datasets/cnn_dailymail) to summarize news articles.
```
outputs = summarization_pipeline(text, max_length=45, clean_up_tokenization_spaces=True)
print(wrapper.fill(outputs[0]['summary_text']))
```
That's not too bad! We can see the model was able to get the main gist of the customer feedback and even identified the author as "Bumblebee".
## 6. Translation
But what if there is no model in the language of my data? You can still try to translate the text. The [Helsinki NLP team](https://huggingface.co/models?pipeline_tag=translation&sort=downloads&search=Helsinkie-NLP) has provided over 1,000 language pair models for translation 🤯. Here we load one that translates English to German:
```
translator = pipeline("translation_en_to_de", model="Helsinki-NLP/opus-mt-en-de")
```
Let's translate our text to German:
```
outputs = translator(text, clean_up_tokenization_spaces=True, min_length=100)
print(wrapper.fill(outputs[0]['translation_text']))
```
We can see that the text is clearly not perfectly translated, but the core meaning stays the same. Another cool application of translation models is data augmentation via backtranslation!
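As a small sketch of that idea (the `Helsinki-NLP/opus-mt-de-en` model used here for the reverse direction is an assumption on our part):
```
# Backtranslation sketch: English -> German -> English gives a paraphrased version of the text
back_translator = pipeline("translation", model="Helsinki-NLP/opus-mt-de-en")

german = translator(text, clean_up_tokenization_spaces=True)[0]['translation_text']
augmented = back_translator(german, clean_up_tokenization_spaces=True)[0]['translation_text']
print(wrapper.fill(augmented))
```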
## 7. Zero-shot classification
As a last example let's have a look at a cool application showing the versatility of transformers: zero-shot classification. In zero-shot classification the model receives a text and a list of candidate labels and determines which labels are compatible with the text. Instead of having fixed classes this allows for flexible classification without any labelled data! Usually this is a good first baseline!
```
zero_shot_classifier = pipeline("zero-shot-classification",
model="vicgalle/xlm-roberta-large-xnli-anli")
```
Let's have a look at an example:
```
text = 'Dieser Tutorial ist großartig! Ich hoffe, dass jemand von Hugging Face meine Universität besuchen wird :)'
classes = ['Treffen', 'Arbeit', 'Digital', 'Reisen']
zero_shot_classifier(text, classes, multi_label=True)
```
This seems to have worked really well on this short example. Naturally, for longer and more domain specific examples this approach might suffer.
## 8. Going beyond text
As mentioned at the start of this tutorial, Transformers can also be used for domains other than NLP! For these domains, there are many more pipelines that you can experiment with. Look at the following list for an overview:
```
from transformers import pipelines
for task in pipelines.SUPPORTED_TASKS:
print(task)
```
Let's have a look at an application involving images!
### Computer vision
Recently, transformer models have also entered computer vision. Check out the DETR model on the [Hub](https://huggingface.co/facebook/detr-resnet-101-dc5):
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/object_detection.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=400>
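The snippet below is only a hedged sketch (not from the original notebook) of how such a checkpoint could be used through the generic `object-detection` pipeline; `'path/to/image.jpg'` is a placeholder for a real image file, and DETR additionally needs the `timm` and `Pillow` packages installed.

```
# Sketch only: object detection with DETR via the pipeline API.
# 'path/to/image.jpg' is a placeholder; replace it with any local image file or URL.
object_detector = pipeline("object-detection", model="facebook/detr-resnet-101-dc5")
detections = object_detector("path/to/image.jpg")
for d in detections:
    print(d['label'], round(d['score'], 3), d['box'])
```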
### Audio
Another promising area is audio processing. In speech-to-text especially, there have been some promising advancements recently. See for example the [wav2vec2 model](https://huggingface.co/facebook/wav2vec2-base-960h):
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/speech2text.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=400>
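Here is a hedged sketch (not in the original notebook) of how such a model could be used via the `automatic-speech-recognition` pipeline; `'path/to/audio.wav'` is a placeholder for a real audio file, and `ffmpeg` is typically needed for decoding.

```
# Sketch only: transcribe an audio file with wav2vec2.
# 'path/to/audio.wav' is a placeholder; real audio is needed to run this.
asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
transcription = asr("path/to/audio.wav")
print(transcription['text'])
```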
### Table QA
Finally, a lot of real-world data is still in the form of tables. Being able to query tables is very useful, and with [TAPAS](https://huggingface.co/google/tapas-large-finetuned-wtq) you can do tabular question-answering:
<img src="https://github.com/huggingface/workshops/blob/main/nlp-zurich/images/tapas.png?raw=1" alt="Alt text that describes the graphic" title="Title text" width=400>
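Below is a hedged sketch (not part of the original notebook) of querying a small pandas table with the `table-question-answering` pipeline; the toy table and question are made up for illustration, and TAPAS may additionally require the `torch-scatter` package.

```
import pandas as pd

# Sketch only: tabular question answering with TAPAS on a toy table.
# TAPAS expects all table cells to be strings.
table = pd.DataFrame({
    "Repository": ["transformers", "datasets", "tokenizers"],
    "Stars": ["36542", "4512", "3934"],
})
table_qa = pipeline("table-question-answering", model="google/tapas-large-finetuned-wtq")
outputs = table_qa(table=table, query="How many stars does the transformers repository have?")
print(outputs['answer'])
```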
## 9. Where to next?
Hopefully this tutorial has given you a taste of what Transformers can do and you're now excited to learn more! Here are a few resources you can use to dive deeper into the topic and the Hugging Face ecosystem:
🤗 **A Tour through the Hugging Face Hub**
In this tutorial, you get to:
- Explore the over 30,000 models shared in the Hub.
- Learn efficient ways to find the right model and datasets for your own task.
- Learn how to contribute and work collaboratively in your ML workflows.
***Duration: 20-40 minutes***
👉 [click here to access the tutorial](https://www.notion.so/Workshop-A-Tour-through-the-Hugging-Face-Hub-2098e4bae9ba4288857e85c87ff1c851)
✨ **Build and Host Machine Learning Demos with Gradio & Hugging Face**
In this tutorial, you get to:
- Explore ML demos created by the community.
- Build a quick demo for your machine learning model in Python using the `gradio` library
- Host the demos for free with Hugging Face Spaces
- Add your demo to the Hugging Face org for your class or conference
***Duration: 20-40 minutes***
👉 [click here to access the tutorial](https://colab.research.google.com/github/huggingface/education-toolkit/blob/main/02_ml-demos-with-gradio.ipynb)
🎓 **The Hugging Face Course**
This course teaches you about applying Transformers to various tasks in natural language processing and beyond. Along the way, you'll learn how to use the Hugging Face ecosystem — 🤗 Transformers, 🤗 Datasets, 🤗 Tokenizers, and 🤗 Accelerate — as well as the Hugging Face Hub. It's completely free too!
```
YouTubeVideo('00GKzGyWFEs')
```
|
github_jupyter
|
<table style="border: none" align="center">
<tr style="border: none">
<th style="border: none"><font face="verdana" size="4" color="black"><b> Demonstrate adversarial training using ART </b></font></font></th>
</tr>
</table>
In this notebook we demonstrate adversarial training using ART on the MNIST dataset.
## Contents
1. [Load prereqs and data](#prereqs)
2. [Train and evaluate a baseline classifier](#classifier)
3. [Adversarially train a robust classifier](#adv_training)
4. [Evaluate the robust classifier](#evaluation)
<a id="prereqs"></a>
## 1. Load prereqs and data
```
import warnings
warnings.filterwarnings('ignore')
from keras.models import load_model
from art.config import ART_DATA_PATH
from art.utils import load_dataset, get_file
from art.estimators.classification import KerasClassifier
from art.attacks.evasion import FastGradientMethod
from art.attacks.evasion import BasicIterativeMethod
from art.defences.trainer import AdversarialTrainer
import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
(x_train, y_train), (x_test, y_test), min_, max_ = load_dataset('mnist')
```
<a id="classifier"></a>
## 2. Train and evaluate a baseline classifier
Load the classifier model:
```
path = get_file('mnist_cnn_original.h5', extract=False, path=ART_DATA_PATH,
url='https://www.dropbox.com/s/p2nyzne9chcerid/mnist_cnn_original.h5?dl=1')
classifier_model = load_model(path)
classifier = KerasClassifier(clip_values=(min_, max_), model=classifier_model, use_logits=False)
classifier_model.summary()
```
Evaluate the classifier performance on the first 100 original test samples:
```
x_test_pred = np.argmax(classifier.predict(x_test[:100]), axis=1)
nb_correct_pred = np.sum(x_test_pred == np.argmax(y_test[:100], axis=1))
print("Original test data (first 100 images):")
print("Correctly classified: {}".format(nb_correct_pred))
print("Incorrectly classified: {}".format(100-nb_correct_pred))
```
Generate some adversarial samples:
```
attacker = FastGradientMethod(classifier, eps=0.5)
x_test_adv = attacker.generate(x_test[:100])
```
And evaluate performance on those:
```
x_test_adv_pred = np.argmax(classifier.predict(x_test_adv), axis=1)
nb_correct_adv_pred = np.sum(x_test_adv_pred == np.argmax(y_test[:100], axis=1))
print("Adversarial test data (first 100 images):")
print("Correctly classified: {}".format(nb_correct_adv_pred))
print("Incorrectly classified: {}".format(100-nb_correct_adv_pred))
```
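To get an intuition for what these perturbations look like, here is a small optional sketch (not in the original notebook) that displays one clean digit next to its adversarial counterpart, using the matplotlib import from above and the predictions just computed.

```
# Optional sketch: compare a clean test digit with its adversarial version.
fig, axes = plt.subplots(1, 2, figsize=(6, 3))
axes[0].imshow(x_test[0].squeeze(), cmap='gray')
axes[0].set_title("Original (pred: {})".format(x_test_pred[0]))
axes[1].imshow(x_test_adv[0].squeeze(), cmap='gray')
axes[1].set_title("Adversarial (pred: {})".format(x_test_adv_pred[0]))
for ax in axes:
    ax.axis('off')
plt.show()
```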
<a id="adv_training"></a>
## 3. Adversarially train a robust classifier
```
path = get_file('mnist_cnn_robust.h5', extract=False, path=ART_DATA_PATH,
url='https://www.dropbox.com/s/yutsncaniiy5uy8/mnist_cnn_robust.h5?dl=1')
robust_classifier_model = load_model(path)
robust_classifier = KerasClassifier(clip_values=(min_, max_), model=robust_classifier_model, use_logits=False)
```
Note: the robust classifier has the same architecture as above, except the first dense layer has **1024** instead of **128** units. (This was recommended by Madry et al. (2017), *Towards Deep Learning Models Resistant to Adversarial Attacks*.)
```
robust_classifier_model.summary()
```
Also as recommended by Madry et al., we use BIM/PGD attacks during adversarial training:
```
attacks = BasicIterativeMethod(robust_classifier, eps=0.3, eps_step=0.01, max_iter=40)
```
Perform adversarial training:
```
# We had performed this before, starting with a randomly initialized model.
# Adversarial training takes about 80 minutes on an NVIDIA V100.
# The resulting model is the one loaded from mnist_cnn_robust.h5 above.
# Here is the command we had used for the Adversarial Training
# trainer = AdversarialTrainer(robust_classifier, attacks, ratio=1.0)
# trainer.fit(x_train, y_train, nb_epochs=83, batch_size=50)
```
<a id="evaluation"></a>
## 4. Evaluate the robust classifier
Evaluate the robust classifier's performance on the original test data:
```
x_test_robust_pred = np.argmax(robust_classifier.predict(x_test[:100]), axis=1)
nb_correct_robust_pred = np.sum(x_test_robust_pred == np.argmax(y_test[:100], axis=1))
print("Original test data (first 100 images):")
print("Correctly classified: {}".format(nb_correct_robust_pred))
print("Incorrectly classified: {}".format(100-nb_correct_robust_pred))
```
Evaluate the robust classifier's performance on the adversarial test data (**white-box** setting):
```
attacker_robust = FastGradientMethod(robust_classifier, eps=0.5)
x_test_adv_robust = attacker_robust.generate(x_test[:100])
x_test_adv_robust_pred = np.argmax(robust_classifier.predict(x_test_adv_robust), axis=1)
nb_correct_adv_robust_pred = np.sum(x_test_adv_robust_pred == np.argmax(y_test[:100], axis=1))
print("Adversarial test data (first 100 images):")
print("Correctly classified: {}".format(nb_correct_adv_robust_pred))
print("Incorrectly classified: {}".format(100-nb_correct_adv_robust_pred))
```
Compare the performance of the original and the robust classifier over a range of `eps` values:
```
eps_range = [0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
nb_correct_original = []
nb_correct_robust = []
for eps in eps_range:
attacker.set_params(**{'eps': eps})
attacker_robust.set_params(**{'eps': eps})
x_test_adv = attacker.generate(x_test[:100])
x_test_adv_robust = attacker_robust.generate(x_test[:100])
x_test_adv_pred = np.argmax(classifier.predict(x_test_adv), axis=1)
nb_correct_original += [np.sum(x_test_adv_pred == np.argmax(y_test[:100], axis=1))]
x_test_adv_robust_pred = np.argmax(robust_classifier.predict(x_test_adv_robust), axis=1)
nb_correct_robust += [np.sum(x_test_adv_robust_pred == np.argmax(y_test[:100], axis=1))]
eps_range = [0] + eps_range
nb_correct_original = [nb_correct_pred] + nb_correct_original
nb_correct_robust = [nb_correct_robust_pred] + nb_correct_robust
fig, ax = plt.subplots()
ax.plot(np.array(eps_range), np.array(nb_correct_original), 'b--', label='Original classifier')
ax.plot(np.array(eps_range), np.array(nb_correct_robust), 'r--', label='Robust classifier')
legend = ax.legend(loc='upper center', shadow=True, fontsize='large')
legend.get_frame().set_facecolor('#00FFCC')
plt.xlabel('Attack strength (eps)')
plt.ylabel('Correct predictions')
plt.show()
```
|
github_jupyter
|
```
import numpy as np
import tensorflow as tf
import pyreadr
import pandas as pd
import keras
from keras.layers import Dense,Dropout,BatchNormalization
from keras.models import Sequential,Model
from keras.callbacks import ModelCheckpoint,EarlyStopping,ReduceLROnPlateau
from keras.optimizers import Adam
from keras.regularizers import l1
from sklearn.preprocessing import StandardScaler
from keras.models import load_model
from sklearn.covariance import MinCovDet,EmpiricalCovariance
from matplotlib.pyplot import hist
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score
%matplotlib inline
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
# config.log_device_placement = True # to log device placement (on which device the operation ran)
sess = tf.Session(config=config)
set_session(sess) # set this TensorFlow session as the default session for Keras
# Set seeds for random number generators for reproducible results
seed = 0
np.random.seed(seed)
tf.set_random_seed(seed)
# Load training data
data = pyreadr.read_r("/home/shanmukh/Documents/IICT/tep-fault-detection/dataset/TEP_FaultFree_Training.RData")
df = data['fault_free_training']
training_data = df.drop(["faultNumber","simulationRun","sample"],axis=1)
# Standard Normalization
# 0 mean
# 1 std
scaler = StandardScaler()
scaler.fit(training_data)
training_data = scaler.transform(training_data)
model = load_model("/home/shanmukh/Documents/IICT/tep-fault-detection/models/weights-55-0.09.hdf5")
encoder = Model(inputs=model.input,outputs=model.get_layer('latent_space').output)
# model.summary()
# encoder.summary()
# Get outputs
predictions = model.predict(training_data,batch_size=512)
latent = encoder.predict(training_data,batch_size=512)
# Set percentile thresholds
percentile_treshold = 95
# SPE statistic
spe = np.sum((training_data - predictions)**2,axis=1)
cutoff_spe = np.percentile(spe,percentile_treshold)
np.savetxt("spe_train.dat",spe)
_ = hist(spe,bins=100)
print (cutoff_spe)
# Mahalanobis distance
cov = EmpiricalCovariance().fit(latent)
md = cov.mahalanobis(latent)
cutoff_md = np.percentile(md,percentile_treshold)
_ = hist(md,bins=100)
np.savetxt("T2_train.dat",md)
print (cutoff_md)
# Unified Index
ui = spe/cutoff_spe + md/cutoff_md
cutoff_ui = np.percentile(ui,percentile_treshold)
_ = hist(ui,bins=100)
print (cutoff_ui)
np.savetxt("Unified_index_train.dat",ui)
# Hotelling's T^2 Statistic
# covariance = cov.covariance_
# # pseudo inverse
# inv = np.linalg.pinv(covariance)
# t2 = [np.matmul(np.matmul(np.matrix(i),np.matrix(inv)),np.matrix(i).T) for i in latent]
# t2 = np.array(t2).squeeze()
# Load and normalize Testing Data
test_files = []
for i in range(22):
test_files.append('d'+format(i, '02d')+"_te.dat")
path_to_test = "/home/shanmukh/Documents/IICT/tep-fault-detection/dataset/TE_process/"
test_data = []
test_data_normalized = []
for i in test_files:
test_data.append(np.loadtxt(path_to_test+i))
test_data_normalized.append(scaler.transform(test_data[-1]))
truth = np.ones(shape = (800,))
# Metrics
spe_all = []
md_all = []
ui_all = []
missed_detection_rates = []
x = np.array(list(range(960)))
temp = 0
for i in test_data_normalized:
predictions_test = model.predict(i,batch_size=480)
latent_test = encoder.predict(i,batch_size=480)
spe_test = np.sum((i - predictions_test)**2,axis=1)
md_test = cov.mahalanobis(latent_test)
ui_test = spe_test/cutoff_spe + md_test/cutoff_md
spe_y = np.zeros_like(spe_test)
spe_y[spe_test>cutoff_spe] = 1
md_y = np.zeros_like(md_test)
md_y[md_test>cutoff_md] = 1
ui_y = np.zeros_like(ui_test)
ui_y[ui_test>cutoff_ui] = 1
np.savetxt("indices/spe_"+test_files[temp],spe_test)
np.savetxt("indices/T2_"+test_files[temp],md_test)
np.savetxt("indices/Unified_"+test_files[temp],ui_test)
# plt.plot(x,spe_test)
print (temp,",",1-accuracy_score(spe_y[160:],truth),",",1-accuracy_score(md_y[160:],truth),",",1-accuracy_score(ui_y[160:],truth))
missed_detection_rates.append(1-accuracy_score(ui_y[160:],truth))
temp+=1
np.mean(missed_detection_rates[1:])
x
```
|
github_jupyter
|
# The pyabf Cookbook: Using `ABF.memtest`
This page demonstrates how to access the abf membrane test data. For theoretical details about membrane properties, how to measure them, and how to computationally create and analyze membrane test data see the [membrane test theory and simulation](memtest-simulation.ipynb) page.
For more resources, see the pyABF project website: http://www.GitHub.com/swharden/pyABF
### Common variables:
* $ I_{h} $ - average clamp current at the holding voltage (a.k.a. holding current)
* $ C_{m} $ - membrane capacitance
* $ R_{a} $ - access resistance (synonymous with series resistance)
* $ R_{m} $ - membrane resistance (the true property of the cell membrane)
* $ \tau $ - (tau) the time constant of the decay curve of a current transient in response to a voltage step
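For orientation, these quantities are related by the standard membrane-test relations sketched below. This is a simplification, not pyABF's exact implementation (see the linked theory page for that); here $ \Delta V $ is the size of the voltage step, $ I_{peak} $ the peak of the capacitive current transient, and $ I_{ss} $ the steady-state current during the step.

$$
R_a \approx \frac{\Delta V}{I_{peak}}, \qquad
R_m \approx \frac{\Delta V}{I_{ss}} - R_a, \qquad
\tau = C_m \left(\frac{1}{R_a} + \frac{1}{R_m}\right)^{-1}
\;\Rightarrow\;
C_m = \tau \left(\frac{1}{R_a} + \frac{1}{R_m}\right)
$$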
### Prepare the Environment:
```
# prepare the environment
import numpy as np
np.set_printoptions(precision=3)
import matplotlib.pyplot as plt
plt.style.use('seaborn')
%matplotlib inline
```
### Load the ABF Class
```
import sys
sys.path.insert(0, '../src/')
import pyabf
pyabf.info()
```
### Import a Recording
_Membrane tests can be analyzed from any episodic voltage clamp recording with a hyperpolarizing current step at the start of every sweep_
```
abf=pyabf.ABF("../data/16d05007_vc_tags.abf")
print("This ABF has %d sweeps"%abf.sweepCount)
plt.plot(abf.dataX,abf.dataY)
abf.plotDecorate()
```
### Calculate $I_{h}$, $R_{m}$, $R_{a}$, $C_{m}$, and $\tau$ for Every Sweep
```
abf.memtestAnalyzeAll()
```
### Display Memtest Averages
```
print("Ih:", abf.memtest.Ih.average, abf.memtest.Ih.units)
print("Ra:", abf.memtest.Ra.average, abf.memtest.Ra.units)
print("Rm:", abf.memtest.Rm.average, abf.memtest.Rm.units)
print("Cm:", abf.memtest.Cm.average, abf.memtest.Cm.units)
print("Tau:", abf.memtest.Tau.average, abf.memtest.Tau.units)
```
### Display Memtest Values per Sweep
```
print(abf.memtest.Ih)
```
### Plot Memtest Information
```
plt.figure(figsize=(6,8))
ax1=plt.subplot(411)
plt.plot(abf.sweepTimesMin,abf.memtest.Ih)
plt.title(abf.memtest.Ih.desc, fontweight='bold')
plt.ylabel(abf.memtest.Ih.label)
plt.subplot(412,sharex=ax1)
plt.plot(abf.sweepTimesMin,abf.memtest.Ra)
plt.title(abf.memtest.Ra.desc, fontweight='bold')
plt.ylabel(abf.memtest.Ra.label)
plt.subplot(413,sharex=ax1)
plt.plot(abf.sweepTimesMin,abf.memtest.Rm)
plt.title(abf.memtest.Rm.desc, fontweight='bold')
plt.ylabel(abf.memtest.Rm.label)
plt.subplot(414,sharex=ax1)
plt.plot(abf.sweepTimesMin,abf.memtest.Cm)
plt.title(abf.memtest.Cm.desc, fontweight='bold')
plt.ylabel(abf.memtest.Cm.label)
plt.xlabel("Experiment Duration (minutes)")
plt.margins(0,.1)
plt.tight_layout()
```
|
github_jupyter
|
# Title of the work
```
import pickle
import logging
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from matplotlib import rcParams
rcParams['font.size'] = 14
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'svg'
# logging.getLogger('tensorflow').setLevel(logging.INFO)
print('Tensorflow version:', tf.__version__)
```
## Definitions
```
number_components = [x for x in range(1, 9)]
encoder_layers = [
[40],
[100, 40],
[400, 100, 40],
]
lr = 0.01
# lr = 0.001
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
# optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr)
# dataset_filter = 'all' # done
dataset_filter = 'normal' # doing now
seed = 42
np.random.seed(seed)
number_epochs = 600
test_size = 0.5 # proportion of the number of samples used for testing, i.e., (1-test_size) used for training
figure_format = 'svg'
folder = '/nobackup/carda/datasets/ml-simulation-optical/2019-ecoc-demo'
```
## Importing dataset
```
with open(folder + '/compiled-dataset.h5', 'rb') as file:
final_dataframe, scaled_dataframe, class_columns, class_names = pickle.load(file)
input_dim = final_dataframe.shape[1] - 3 # the last three columns are classes
```
## Auxiliary functions
```
def build_model(data_dim, layers, optimizer='sgd', loss='mse', metrics=['mse', 'msle']):
model = tf.keras.Sequential(name='encoder_' + '-'.join(str(x) for x in layers))
model.add(tf.keras.layers.Dense(layers[0], input_shape=(data_dim,), name='input_and_0'))
for i in range(1, len(layers)-1):
model.add(tf.keras.layers.Dense(layers[i], name=f'encoder_{i}'))
print('enc:', layers[i], i)
# model.add(tf.keras.layers.Dense(layers[len(layers)-1], name=f'encoder_{len(layers)-1}', activation='tanh'))
for i in range(len(layers)-1, -1, -1):
model.add(tf.keras.layers.Dense(layers[i], name=f'decoder_{i}'))
print('dec:', layers[i], i)
# model.add(DenseTied(model.layers[i], name=f'decoder_{i}'))
model.add(tf.keras.layers.Dense(data_dim, name=f'output'))
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
return model
```
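Before the full sweep over architectures below, a quick sanity check can help verify that the helper builds the intended symmetric encoder/decoder stack. This is just a sketch: the layer sizes `[100, 40, 8]` are illustrative and the resulting model is not used further.

```
# Sketch only: instantiate the helper with illustrative layer sizes and inspect the stack.
demo_model = build_model(input_dim, [100, 40, 8])
demo_model.summary()
```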
## Building training and testing datasets
```
if dataset_filter == 'normal':
normal_conditions = scaled_dataframe[(scaled_dataframe['attack'] == 0)].values
else:
normal_conditions = scaled_dataframe.values
x_train, x_test, y_train, y_test = train_test_split(normal_conditions[:, :input_dim], normal_conditions[:, -1], test_size=test_size, random_state=seed)
```
## Training the autoencoders
```
histories = []
for layer in encoder_layers:
for n_components in number_components:
final_layer = layer + [n_components]
print(final_layer)
model = build_model(input_dim, final_layer, optimizer=optimizer)
model.summary()
# saving a graphical representation
tf.keras.utils.plot_model(model, to_file=f'./models/{dataset_filter}_{optimizer._name}_{lr}_{model.name}-model.png', show_shapes=True, show_layer_names=False)
history = model.fit(x_train, x_train, epochs=number_epochs, batch_size=64, verbose=0, validation_data=(x_test, x_test))
model.save(f'./models/{dataset_filter}_{optimizer._name}_{lr}_{model.name}-model.h5')
histories.append(history.history)
metrics = [x for x in histories[0].keys() if 'val' not in x]
for i, metric in enumerate(metrics):
plt.figure(figsize=(12, 4.5))
plt.subplot(1, 2, 1)
plt.title(f'Optm: {optimizer._name} / lr: {lr}')
for j, layer in enumerate(encoder_layers):
for n_components in number_components:
layers = layer + [n_components]
ls = '-'
if len(layers) == 2:
ls = '-'
elif len(layers) == 3:
ls = ':'
elif len(layers) == 4:
ls = '--'
plt.semilogy(histories[j][metric], label='-'.join(str(x) for x in layers), linestyle=ls)
plt.xlabel('Epoch')
plt.ylabel(metric)
plt.subplot(1, 2, 2)
for j, layer in enumerate(encoder_layers):
for n_components in number_components:
layers = layer + [n_components]
ls = '-'
if len(layers) == 2:
ls = '-'
elif len(layers) == 3:
ls = ':'
elif len(layers) == 4:
ls = '--'
diff = np.array(histories[j]['val_' + metric]) - np.array(histories[j][metric])
print(j, np.sum(diff), np.mean(diff))
plt.semilogy(histories[j]['val_' + metric], label='-'.join(str(x) for x in layers), linestyle=ls)
plt.xlabel('Epoch')
plt.ylabel('val ' + metric)
# plt.xlim([-5, 50])
plt.legend(ncol=2)
plt.tight_layout()
plt.savefig(f'./figures/{dataset_filter}_{optimizer._name}_{lr}_{"-".join(str(x) for x in layers)}-accuracy-{metric}.{figure_format}')
plt.show()
with open(f'./models/{dataset_filter}_histories.h5', 'wb') as file:
pickle.dump({'histories': histories}, file)
print('done')
```
|
github_jupyter
|
Ordinal Regression
--
Ordinal regression aims at fitting a model to some data $(X, Y)$, where $Y$ is an ordinal variable. To do so, we use a `VGP` model with a specific likelihood (`gpflow.likelihoods.Ordinal`).
```
import gpflow
import numpy as np
import matplotlib
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (12, 6)
plt = matplotlib.pyplot
#make a one dimensional ordinal regression problem
# This function generates a set of inputs X,
# quantitative output f (latent) and ordinal values Y
def generate_data(num_data):
# First generate random inputs
X = np.random.rand(num_data, 1)
# Now generate values of a latent GP
kern = gpflow.kernels.RBF(1, lengthscales=0.1)
K = kern.compute_K_symm(X)
f = np.random.multivariate_normal(mean=np.zeros(num_data), cov=K).reshape(-1, 1)
# Finally convert f values into ordinal values Y
Y = np.round((f + f.min())*3)
Y = Y - Y.min()
Y = np.asarray(Y, np.float64)
return X, f, Y
np.random.seed(1)
num_data = 20
X, f, Y = generate_data(num_data)
plt.figure(figsize=(11, 6))
plt.plot(X, f, '.')
plt.ylabel('latent function value')
plt.twinx()
plt.plot(X, Y, 'kx', mew=1.5)
plt.ylabel('observed data value')
# construct ordinal likelihood - bin_edges is the same as unique(Y) but centered
bin_edges = np.array(np.arange(np.unique(Y).size + 1), dtype=float)
bin_edges = bin_edges - bin_edges.mean()
likelihood=gpflow.likelihoods.Ordinal(bin_edges)
# build a model with this likelihood
m = gpflow.models.VGP(X, Y,
kern=gpflow.kernels.Matern32(1),
likelihood=likelihood)
# fit the model
gpflow.train.ScipyOptimizer().minimize(m)
# here we'll plot the expected value of Y +- 2 std deviations, as if the distribution were Gaussian
plt.figure(figsize=(11, 6))
Xtest = np.linspace(m.X.read_value().min(), m.X.read_value().max(), 100).reshape(-1, 1)
mu, var = m.predict_y(Xtest)
line, = plt.plot(Xtest, mu, lw=2)
col=line.get_color()
plt.plot(Xtest, mu+2*np.sqrt(var), '--', lw=2, color=col)
plt.plot(Xtest, mu-2*np.sqrt(var), '--', lw=2, color=col)
plt.plot(m.X.read_value(), m.Y.read_value(), 'kx', mew=2)
# to see the predictive density, try predicting every possible discrete value for Y.
def pred_density(m):
Xtest = np.linspace(m.X.read_value().min(), m.X.read_value().max(), 100).reshape(-1, 1)
ys = np.arange(m.Y.read_value().max()+1)
densities = []
for y in ys:
Ytest = np.ones_like(Xtest) * y
# Predict the log density
densities.append(m.predict_density(Xtest, Ytest))
return np.hstack(densities).T
fig = plt.figure(figsize=(14, 6))
plt.imshow(np.exp(pred_density(m)), interpolation='nearest',
extent=[m.X.read_value().min(), m.X.read_value().max(), -0.5, m.Y.read_value().max()+0.5],
origin='lower', aspect='auto', cmap=plt.cm.viridis)
plt.colorbar()
plt.plot(X, Y, 'kx', mew=2, scalex=False, scaley=False)
# Predictive density for a single input x=0.5
x_new = 0.5
ys = np.arange(np.max(m.Y.value+1)).reshape([-1, 1])
x_new_vec = x_new*np.ones_like(ys)
# for predict_density x and y need to have the same number of rows
dens_new = np.exp(m.predict_density(x_new_vec, ys))
fig = plt.figure(figsize=(8, 4))
plt.bar(x=ys.flatten(), height=dens_new.flatten())
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/noorhaq/Google_Colab/blob/master/Welcome_To_Colaboratory.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
<p><img alt="Colaboratory logo" height="45px" src="/img/colab_favicon.ico" align="left" hspace="10px" vspace="0px"></p>
<h1>What is Colaboratory?</h1>
Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with
- Zero configuration required
- Free access to GPUs
- Easy sharing
Whether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below!
## **Getting started**
The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.
For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
```
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
```
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.
Variables that you define in one cell can later be used in other cells:
```
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
```
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.com#create=true).
Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org).
## Data science
With Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
```
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
```
You can import your own data into Colab notebooks from your Google Drive account, including from spreadsheets, as well as from Github and many other sources. To learn more about importing data, and how Colab can be used for data science, see the links below under [Working with Data](#working-with-data).
## Machine learning
With Colab you can import an image dataset, train an image classifier on it, and evaluate the model, all in just [a few lines of code](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb). Colab notebooks execute code on Google's cloud servers, meaning you can leverage the power of Google hardware, including [GPUs and TPUs](#using-accelerated-hardware), regardless of the power of your machine. All you need is a browser.
Colab is used extensively in the machine learning community with applications including:
- Getting started with TensorFlow
- Developing and training neural networks
- Experimenting with TPUs
- Disseminating AI research
- Creating tutorials
To see sample Colab notebooks that demonstrate machine learning applications, see the [machine learning examples](#machine-learning-examples) below.
## More Resources
### Working with Notebooks in Colab
- [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)
- [Guide to Markdown](/notebooks/markdown_guide.ipynb)
- [Importing libraries and installing dependencies](/notebooks/snippets/importing_libraries.ipynb)
- [Saving and loading notebooks in GitHub](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)
- [Interactive forms](/notebooks/forms.ipynb)
- [Interactive widgets](/notebooks/widgets.ipynb)
- <img src="/img/new.png" height="20px" align="left" hspace="4px" alt="New"></img>
[TensorFlow 2 in Colab](/notebooks/tensorflow_version.ipynb)
<a name="working-with-data"></a>
### Working with Data
- [Loading data: Drive, Sheets, and Google Cloud Storage](/notebooks/io.ipynb)
- [Charts: visualizing data](/notebooks/charts.ipynb)
- [Getting started with BigQuery](/notebooks/bigquery.ipynb)
### Machine Learning Crash Course
These are a few of the notebooks from Google's online Machine Learning course. See the [full course website](https://developers.google.com/machine-learning/crash-course/) for more.
- [Intro to Pandas](/notebooks/mlcc/intro_to_pandas.ipynb)
- [Tensorflow concepts](/notebooks/mlcc/tensorflow_programming_concepts.ipynb)
- [First steps with TensorFlow](/notebooks/mlcc/first_steps_with_tensor_flow.ipynb)
- [Intro to neural nets](/notebooks/mlcc/intro_to_neural_nets.ipynb)
- [Intro to sparse data and embeddings](/notebooks/mlcc/intro_to_sparse_data_and_embeddings.ipynb)
<a name="using-accelerated-hardware"></a>
### Using Accelerated Hardware
- [TensorFlow with GPUs](/notebooks/gpu.ipynb)
- [TensorFlow with TPUs](/notebooks/tpu.ipynb)
<a name="machine-learning-examples"></a>
## Machine Learning Examples
To see end-to-end examples of the interactive machine learning analyses that Colaboratory makes possible, check out these tutorials using models from [TensorFlow Hub](https://tfhub.dev).
A few featured examples:
- [Retraining an Image Classifier](https://tensorflow.org/hub/tutorials/tf2_image_retraining): Build a Keras model on top of a pre-trained image classifier to distinguish flowers.
- [Text Classification](https://tensorflow.org/hub/tutorials/tf2_text_classification): Classify IMDB movie reviews as either *positive* or *negative*.
- [Style Transfer](https://tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization): Use deep learning to transfer style between images.
- [Multilingual Universal Sentence Encoder Q&A](https://tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa): Use a machine learning model to answer questions from the SQuAD dataset.
- [Video Interpolation](https://tensorflow.org/hub/tutorials/tweening_conv3d): Predict what happened in a video between the first and the last frame.
|
github_jupyter
|
<div class="contentcontainer med left" style="margin-left: -50px;">
<dl class="dl-horizontal">
<dt>Title</dt> <dd> QuadMesh Element</dd>
<dt>Dependencies</dt> <dd>Matplotlib</dd>
<dt>Backends</dt> <dd><a href='./QuadMesh.ipynb'>Matplotlib</a></dd> <dd><a href='../bokeh/QuadMesh.ipynb'>Bokeh</a></dd>
</dl>
</div>
```
import numpy as np
import holoviews as hv
hv.extension('matplotlib')
```
A ``QuadMesh`` represents a 2D rectangular grid expressed as x- and y-coordinates defined by 1D or 2D arrays. Unlike the Image type, a QuadMesh may be regularly or irregularly spaced and contain either bin edges or bin centers. If bin edges are supplied, the shape of the x/y-coordinate arrays should be one greater than the shape of the value array.
The default interface expects data to be specified in the form:
QuadMesh((X, Y, Z))
where ``X`` and ``Y`` may be 1D or 2D arrays of the shape ``N(+1)`` and ``M(+1)`` respectively or ``N(+1)xM(+1)`` and the ``Z`` value array should be of shape NxM. Other gridded formats such as xarray are also supported if installed.
The grid orientation follows the standard matrix convention: An array ``Z`` with shape (nrows, ncolumns) is plotted with the column number as ``X`` and the row number as ``Y``. See the [Gridded Datasets](../../../user_guide/08-Gridded_Datasets.ipynb) user guide for all the other accepted data formats.
Here is a simple ``QuadMesh`` with logarithmic sampling along the 'x' dimensions:
```
n = 8 # Number of bins in each direction
xs = np.logspace(1, 3, n)
ys = np.linspace(1, 10, n)
zs = np.arange((n-1)**2).reshape(n-1, n-1)
print('Shape of x-coordinates:', xs.shape)
print('Shape of y-coordinates:', ys.shape)
print('Shape of value array:', zs.shape)
hv.QuadMesh((xs, ys, zs))
```
The coordinate system of a ``QuadMesh`` is defined by the bin edges, therefore any index falling into a binned region will return the appropriate value. As the bin edges have continuous values, you can use non-linear axes such as log axes:
```
%%opts QuadMesh [xticks=[10, 100,1000]] QuadMesh.LogScale [logx=True]
hv.QuadMesh((xs, ys, zs), group='LinearScale') + hv.QuadMesh((xs, ys, zs), group='LogScale')
```
Unlike ``Image`` objects, slices must be inclusive of the bin edges but otherwise the slicing semantics are the same. The reason for this difference is that ``QuadMesh`` is really a two-dimensional histogram and when slicing, you only want to see the bins that fall within the specified slice ranges.
In the next example, we specify a slice along the x- and y-axis to extract the lower corner and we set the z-dimension range to maintain the full color range of the colormap:
```
qmesh = hv.QuadMesh((xs, ys, zs))
qmesh[20:400, :8].redim.range(z=qmesh.range('z'))
```
To use an interactive hover tool to inspect the sample values, you can use ``QuadMesh`` with the hover tool in the [Bokeh backend](../bokeh/QuadMesh.ipynb).
For full documentation and the available style and plot options, use ``hv.help(hv.QuadMesh)``.
## Irregular meshes
In addition to axis aligned meshes like those we worked with above, a ``QuadMesh`` may also be used to represent irregular or unstructured meshes. In this example we will create an irregular mesh consisting of 2D X, Y and Z arrays defining the position and value of each simplex in the mesh:
```
n=20
coords = np.linspace(-1.5,1.5,n)
X,Y = np.meshgrid(coords, coords);
Qx = np.cos(Y) - np.cos(X)
Qy = np.sin(Y) + np.sin(X)
Z = np.sqrt(X**2 + Y**2)
print('Shape of x-coordinates:', Qx.shape)
print('Shape of y-coordinates:', Qy.shape)
print('Shape of value array:', Z.shape)
qmesh = hv.QuadMesh((Qx, Qy, Z))
qmesh
```
To illustrate irregular meshes a bit further we will randomly jitter the mesh coordinates along both dimensions, demonstrating that ``QuadMesh`` may be used to represent completely arbitrary meshes. It may also be used to represent overlapping meshes, however the behavior during slicing and other operations may not be well defined in such cases.
```
np.random.seed(13)
xs, ys = np.meshgrid(np.linspace(-20, 20, 10), np.linspace(0, 30, 8))
xs += xs/10 + np.random.rand(*xs.shape)*4
ys += ys/10 + np.random.rand(*ys.shape)*4
zs = np.arange(80).reshape(8, 10)
hv.QuadMesh((xs, ys, zs))
```
|
github_jupyter
|
# `ricecooker` exercises
This mini-tutorial will walk you through the steps of running a simple chef script `ExercisesChef` that creates two exercises nodes, and four exercises questions.
### Running the notebooks
To follow along and run the code in this notebook, you'll need to clone the `ricecooker` repository, create a virtual environment, install `ricecooker` using `pip install ricecooker`, install Jupyter notebook using `pip install jupyter`, then start the Jupyter notebook server by running `jupyter notebook`. You will then be able to run all the code sections in this notebook and poke around.
### Creating a Sushi Chef class
```
from ricecooker.chefs import SushiChef
from ricecooker.classes.nodes import TopicNode, ExerciseNode
from ricecooker.classes.questions import SingleSelectQuestion, MultipleSelectQuestion, InputQuestion, PerseusQuestion
from ricecooker.classes.licenses import get_license
from le_utils.constants import licenses
from le_utils.constants import exercises
from le_utils.constants.languages import getlang
class ExercisesChef(SushiChef):
channel_info = {
'CHANNEL_TITLE': 'Sample Exercises',
'CHANNEL_SOURCE_DOMAIN': '<yourdomain.org>', # where you got the content
'CHANNEL_SOURCE_ID': '<unique id for channel>', # channel's unique id CHANGE ME
'CHANNEL_LANGUAGE': 'en', # le_utils language code
'CHANNEL_DESCRIPTION': 'A test channel with different types of exercise questions', # (optional)
'CHANNEL_THUMBNAIL': None, # (optional)
}
def construct_channel(self, **kwargs):
channel = self.get_channel(**kwargs)
topic = TopicNode(title="Math Exercises", source_id="folder-id")
channel.add_child(topic)
exercise_node = ExerciseNode(
source_id='<some unique id>',
title='Basic questions',
author='LE content team',
description='Showcase of the simple question type supported by Ricecooker and Studio',
language=getlang('en').code,
license=get_license(licenses.PUBLIC_DOMAIN),
thumbnail=None,
exercise_data={
'mastery_model': exercises.M_OF_N, # \
'm': 2, # learners must get 2/3 questions correct to complete exercise
'n': 3, # /
'randomize': True, # show questions in random order
},
questions=[
MultipleSelectQuestion(
id='sampleEX_Q1',
                    question = "Which of the following numbers are even?",
correct_answers = ["2", "4",],
all_answers = ["1", "2", "3", "4", "5"],
hints=['Even numbers are divisible by 2.'],
),
SingleSelectQuestion(
id='sampleEX_Q2',
question = "What is 2 times 3?",
correct_answer = "6",
all_answers = ["2", "3", "5", "6"],
hints=['Multiplication of $a$ by $b$ is like computing the area of a rectangle with length $a$ and width $b$.'],
),
InputQuestion(
id='sampleEX_Q3',
question = "Name one of the *factors* of 10.",
answers = ["1", "2", "5", "10"],
hints=['The factors of a number are the divisors of the number that leave a whole remainder.'],
)
]
)
topic.add_child(exercise_node)
# LOAD JSON DATA (as string) FOR PERSEUS QUESTIONS
RAW_PERSEUS_JSON_STR = open('../../examples/exercises/chefdata/perseus_graph_question.json', 'r').read()
# or
# import requests
# RAW_PERSEUS_JSON_STR = requests.get('https://raw.githubusercontent.com/learningequality/sample-channels/master/contentnodes/exercise/perseus_graph_question.json').text
exercise_node2 = ExerciseNode(
source_id='<another unique id>',
title='An exercise containing a perseus question',
author='LE content team',
            description='An example exercise with a Perseus question',
language=getlang('en').code,
license=get_license(licenses.CC_BY, copyright_holder='Copyright holder name'),
thumbnail=None,
exercise_data={
'mastery_model': exercises.M_OF_N,
'm': 1,
'n': 1,
},
questions=[
PerseusQuestion(
id='ex2bQ4',
raw_data=RAW_PERSEUS_JSON_STR,
source_url='https://github.com/learningequality/sample-channels/blob/master/contentnodes/exercise/perseus_graph_question.json'
),
]
)
topic.add_child(exercise_node2)
return channel
```
### Running the chef
Run your chef by creating an instance of the chef class and calling its `run` method:
```
chef = ExercisesChef()
args = {
'command': 'dryrun', # use 'uploadchannel' for real run
'verbose': True,
'token': 'YOURTOKENHERE9139139f3a23232'
}
options = {}
chef.run(args, options)
```
Congratulations, you put some math exercises on the internet!
**Note**: you will need to change the value of `CHANNEL_SOURCE_ID` before you try running this script with `{'command': 'uploadchannel', ...}`.
The combination of source domain and source id are used to compute the `channel_id`
for the Kolibri channel you're creating. If you keep the lines above unchanged,
you'll get an error because you don't have edit rights on that channel.
|
github_jupyter
|
```
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import cx_Oracle
import os
import pandas as pd
os.environ['TNS_ADMIN'] = '/home/opc/adj_esportsdb'
!pip install dataprep
!pip install dask
!pip install pandas_profiling
## install packages
!pip install -q scikit-learn
!pip install -U setuptools wheel
!pip install -U "mxnet<2.0.0"
!pip install autogluon
import cx_Oracle
import yaml
import os
from pathlib import Path
home = str(Path.home())
def process_yaml():
with open("../config.yaml") as file:
return yaml.safe_load(file)
class OracleJSONDatabaseConnection:
def __init__(self, data=process_yaml()):
# wallet location (default is HOME/wallets/wallet_X)
os.environ['TNS_ADMIN'] = '{}/{}'.format(home, process_yaml()['WALLET_DIR'])
print(os.environ['TNS_ADMIN'])
self.pool = cx_Oracle.SessionPool(data['db']['username'], data['db']['password'], data['db']['dsn'],
min=1, max=4, increment=1, threaded=True,
getmode=cx_Oracle.SPOOL_ATTRVAL_WAIT
)
print('Connection successful.')
def close_pool(self):
self.pool.close()
print('Connection pool closed.')
def insert(self, collection_name, json_object_to_insert):
connection = self.pool.acquire()
connection.autocommit = True
soda = connection.getSodaDatabase()
x_collection = soda.createCollection(collection_name)
try:
x_collection.insertOne(json_object_to_insert)
print('[DBG] INSERT {} OK'.format(json_object_to_insert))
except cx_Oracle.IntegrityError:
print('[DBG] INSERT {} ERR'.format(json_object_to_insert))
return 0
self.pool.release(connection)
return 1
def delete(self, collection_name, on_column, on_value):
connection = self.pool.acquire()
soda = connection.getSodaDatabase()
x_collection = soda.createCollection(collection_name)
qbe = {on_column: on_value}
x_collection.find().filter(qbe).remove()
self.pool.release(connection)
def get_connection(self):
return self.pool.acquire()
def close_connection(self, conn_object):
self.pool.release(conn_object)
def get_collection_names(self):
connection = self.pool.acquire()
returning_object = connection.getSodaDatabase().getCollectionNames(startName=None, limit=0)
self.pool.release(connection)
return returning_object
def open_collection(self, collection_name):
connection = self.pool.acquire()
returning_object = self.pool.acquire().getSodaDatabase().openCollection(collection_name)
self.pool.release(connection)
return returning_object
def test_class():
object = OracleJSONDatabaseConnection()
print(object.pool)
object.close_pool()
print(os.environ['TNS_ADMIN'])
db = OracleJSONDatabaseConnection()
print(db.get_collection_names())
data = db.open_collection('predictor_liveclient')
all_data = list()
for doc in data.find().getCursor():
content = doc.getContent()
all_data.append(content)
print('Data length: {}'.format(len(all_data)))
df = pd.read_json(json.dumps(all_data), orient='records')
df.head(5)
df.describe()
from pandas_profiling import ProfileReport
report = ProfileReport(df)
report #uncomment to display all.
from autogluon.tabular import TabularPredictor, TabularDataset
df = TabularDataset(df)
# drop columns we don't want (constant values + identifier)
df = df.drop(columns=['bonusArmorPenetrationPercent', 'bonusMagicPenetrationPercent',
'identifier', 'cooldownReduction', 'armorPenetrationFlat'])
train = df.sample(frac=0.8,random_state=200) #random state is a seed value
test = df.drop(train.index)
train.head(5)
label = 'winner'
save_path = './autogluon_trained_models_liveclient_classifier' # specifies folder to store trained models
predictor = TabularPredictor(label=label, path=save_path).fit(train)
y_test = test[label] # values to predict
test_data_nolabel = test.drop(columns=[label]) # delete label column to prove we're not cheating (the identifier column was already dropped above)
test_data_nolabel.head(5)
predictor = TabularPredictor.load(save_path)
y_pred = predictor.predict(test_data_nolabel)
print("Predictions: \n", y_pred)
perf = predictor.evaluate_predictions(y_true=y_test, y_pred=y_pred, auxiliary_metrics=True)
predictor.leaderboard(test, silent=False)
predictor.feature_importance(test)
```
|
github_jupyter
|
# PENSA Tutorial Using GPCRmd Trajectories
Here we show some common functions included in PENSA, using trajectories of a G protein-coupled receptor (GPCR). We retrieve the molecular dynamics trajectories for this tutorial from [GPCRmd](https://submission.gpcrmd.org/home/), an online platform for collection and curation of GPCR simulations. It is described in more detail [here](https://www.nature.com/articles/s41592-020-0884-y).
<p align="center">
<img src="https://pbs.twimg.com/media/Ej8-VJ5WkAAbgJc?format=jpg&name=large" width="500">
</p>
The example system is the mu-opioid receptor (mOR), once in its apo form and once bound to the ligand [BU72](https://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=9363). The structure of this GPCR (G protein-coupled receptor) is reported by [*Huang et al (2015)*](https://www.nature.com/articles/nature14886).
We are going to compare the structural ensembles of the receptor in these two conditions.
This tutorial assumes that you can download the trajectories (see below). If you can't, you can use any other system you have available and adapt the file names and residue selections accordingly.
We only need to import the module "os" and all functions from PENSA itself which in turn loads all the modules it needs.
```
import os
from pensa import *
```
## Download
PENSA has a predefined function to download GPCRmd trajectories.
```
# Define where to save the GPCRmd files
root_dir = './mor-data'
# Define which files to download
md_files = ['11427_dyn_151.psf','11426_dyn_151.pdb', # MOR-apo
'11423_trj_151.xtc','11424_trj_151.xtc','11425_trj_151.xtc',
'11580_dyn_169.psf','11579_dyn_169.pdb', # MOR-BU72
'11576_trj_169.xtc','11577_trj_169.xtc','11578_trj_169.xtc']
# Download all the files that do not exist yet
for file in md_files:
if not os.path.exists(os.path.join(root_dir,file)):
download_from_gpcrmd(file,root_dir)
```
## Preprocessing
To work with the protein coordinates, we first need to extract them from the simulation, i.e., remove the solvent, lipids etc. This is the hardest part but you usually only have to do it once and can then play with your data. Preprocessing can handle many common trajectory formats (as it is based on MDAnalysis) but the internal featurization (based on PyEMMA) is a bit more restrictive, so we will always write xtc trajectories. For large trajectories, you might want to use the scripts provided in the PENSA repository, e.g., to run them on the computing cluster and then download the processed data. Once you know how PENSA works, you can write your own scripts.
In the following, we define the necessary files. For each simulation, we need a reference topology file (here a .psf file), a PDB file, and the trajectory.
Make sure to adapt the root directory so that it points to wherever you have mounted Oak. If you cannot access the Sherlock cluster at Stanford, use any other simulation that you would like to compare.
To run this tutorial on another system, you'll have to adapt the file paths and names in the following box and, in case you need them, the residue selections in the folder ```selections```. We explain how they work further below. Note that for some PENSA functions it is sufficient that the derived features are the same while for others (especially those that involve trajectory manipulation), all atoms need to be the same. In our particular example, we exclude hydrogen atoms because residue Asp114 is protonated in the BU72 simulation but not in the apo simulation.
```
root_dir = './mor-data'
# Simulation A
ref_file_a = root_dir+'/11427_dyn_151.psf'
pdb_file_a = root_dir+'/11426_dyn_151.pdb'
trj_file_a = [root_dir+'/11423_trj_151.xtc',
root_dir+'/11424_trj_151.xtc',
root_dir+'/11425_trj_151.xtc']
# Simulation B
ref_file_b = root_dir+'/11580_dyn_169.psf'
pdb_file_b = root_dir+'/11579_dyn_169.pdb'
trj_file_b = [root_dir+'/11576_trj_169.xtc',
root_dir+'/11577_trj_169.xtc',
root_dir+'/11578_trj_169.xtc']
# Base for the selection string for each simulation
sel_base_a = "(not name H*) and protein"
sel_base_b = "(not name H*) and protein"
# Names of the output files
out_name_a = "traj/condition-a"
out_name_b = "traj/condition-b"
out_name_combined="traj/combined"
```
For this tutorial, we will save the processed trajectories in the subfolder ```traj```. We also create subfolders for other results that we will generate.
```
for subdir in ['traj','plots','vispdb','pca','clusters','results']:
if not os.path.exists(subdir):
os.makedirs(subdir)
```
We have to ensure that from both simulations, we use the exact same parts of the receptor for the analysis. Often, this will be easy and you just provide a simple selection string for the corresponding segment. For more complicated cases, we can use the function ```load_selection()``` to generate a complete residue list from a plain text file. This file should provide in each line the first and the last residue to be considered for a part of the protein.
In the first case, we will extract all protein residues, assuming (correctly) that the same ones are present in both simulations.
```
# Extract the coordinates of the receptor from the trajectory
extract_coordinates(ref_file_a, pdb_file_a, trj_file_a, out_name_a+"_receptor", sel_base_a)
extract_coordinates(ref_file_b, pdb_file_b, trj_file_b, out_name_b+"_receptor", sel_base_b)
```
In many cases, you probably have several runs of the same simulation that you want to combine to one structural ensemble. This is why the trajectory argument takes a list as arguments, e.g.
extract_coordinates(system.psf, system.pdb, ['run1.nc','run2.nc','run3.nc'],
'rho_receptor', 'protein', start_frame=1000)
With the option ```start_frame```, you can exclude the equilibrium phase already at this stage. Be aware that in combined simulations, there is no straightforward way to exclude it later as it would require bookkeeping about how long each simulation was etc.
For some analysis types, we only want to use the part of the receptor that is inside the membrane. In this way, very flexible loops outside the membrane cannot distort the analysis result. We can manually construct a selection string in MDAnalysis format or load the selections from a file. We call this file ```mor_tm.txt``` and generate it on the fly so we can demonstrate the loader function. We use selections based on the definitions of transmembrane helices in the [GPCRdb](https://gpcrdb.org/protein/oprm_human/).
```
! echo "76 98\n105 133\n138 173\n182 208\n226 264\n270 308\n315 354" > mor_tm.txt
! cat mor_tm.txt
# Load the selection and generate the strings
sel_string_a = load_selection("mor_tm.txt", sel_base_a+" and ")
print('Selection A:\n', sel_string_a, '\n')
sel_string_b = load_selection("mor_tm.txt", sel_base_b+" and ")
print('Selection B:\n', sel_string_b, '\n')
# Extract the coordinates of the transmembrane region from the trajectory
extract_coordinates(ref_file_a, pdb_file_a, [trj_file_a], out_name_a+"_tm", sel_string_a)
extract_coordinates(ref_file_b, pdb_file_b, [trj_file_b], out_name_b+"_tm", sel_string_b)
```
### Generalization
If you want to combine data from different simulation conditions, you can use the ```_combined``` version of the extraction function: ```extract_coordinates_combined()```. It takes lists as arguments for the topology files, too. To use the same selection, "multiply" a list of one string, as demonstrated below. For this to work, the two selections need to have the exactly same atoms.
```
extract_coordinates_combined([ref_file_a]*3 + [ref_file_b]*3,
trj_file_a + trj_file_b,
[sel_string_a]*3 + [sel_string_b]*3,
'traj/combined_tm.xtc',
start_frame=400)
```
## Featurization
The analysis is not performed on the coordinates directly but on features derived from these coordinates.
PENSA uses the featurization provided by PyEMMA, so far including:
- backbone torsions: ```'bb-torsions'```,
- backbone C-alpha distances: ```'bb-distances'```, and
- sidechain torsions: ```'sc-torsions'```.
You can combine these with any other function implemented in PyEMMA, even if it is not included in PENSA.
In case the equilibration phase has not been already excluded during preprocessing, we can exclude it here by setting the start frame to a value greater than 0.
```
feature_start_frame = 400
```
The function ```get_structure_features``` loads the names of the features and their values separately
```
sim_a_rec = get_structure_features("traj/condition-a_receptor.gro",
"traj/condition-a_receptor.xtc",
feature_start_frame)
sim_a_rec_feat, sim_a_rec_data = sim_a_rec
sim_b_rec = get_structure_features("traj/condition-b_receptor.gro",
"traj/condition-b_receptor.xtc",
feature_start_frame)
sim_b_rec_feat, sim_b_rec_data = sim_b_rec
```
Having a look at the shape of the loaded data, we see that the first dimension is the number of frames. The second dimension is the number of features. It must be the same for both simulations.
```
for k in sim_a_rec_data.keys():
print(k, sim_a_rec_data[k].shape)
for k in sim_b_rec_data.keys():
print(k, sim_b_rec_data[k].shape)
```
Now do the same only for the transmembrane region.
```
sim_a_tmr = get_structure_features("traj/condition-a_tm.gro",
"traj/condition-a_tm.xtc",
feature_start_frame)
sim_b_tmr = get_structure_features("traj/condition-b_tm.gro",
"traj/condition-b_tm.xtc",
feature_start_frame)
sim_a_tmr_feat, sim_a_tmr_data = sim_a_tmr
sim_b_tmr_feat, sim_b_tmr_data = sim_b_tmr
for k in sim_a_rec_data.keys():
print(k, sim_a_rec_data[k].shape)
for k in sim_b_rec_data.keys():
print(k, sim_b_rec_data[k].shape)
```
## Comparison of Structural Ensembles
Here we compare the two ensembles using measures for the relative entropy.
You can as well calculate the Kolmogorov-Smirnov metric and the corresponding p value using the function ```kolmogorov_smirnov_analysis()```.
Another possibility is to compare only the means and standard deviations of the distributions using ```mean_difference_analysis()```.
### Backbone Torsions
We start with the backbone torsions, which we can select via ```'bb-torsions'```. To do the same analysis on sidechain torsions, replace ```'bb-torsions'``` with ```'sc-torsions'```.
```
# Relative Entropy analysis with torsions
relen = relative_entropy_analysis(sim_a_rec_feat['bb-torsions'],
sim_b_rec_feat['bb-torsions'],
sim_a_rec_data['bb-torsions'],
sim_b_rec_data['bb-torsions'],
bin_num=10, verbose=False)
names_bbtors, jsd_bbtors, kld_ab_bbtors, kld_ba_bbtors = relen
```
The above function also returns the Kullback-Leibler divergences of A with respect to B and vice versa.
To find out where the ensembles differ the most, let's print out the most different features and the corresponding value.
```
# Print the features with the 12 highest values
sf = sort_features(names_bbtors, jsd_bbtors)
for f in sf[:12]: print(f[0], f[1])
```
To get an overview of how strongly the ensembles differ in which region, we can plot the maximum deviation of the features related to a certain residue.
```
# Plot the maximum Jensen-Shannon distance per residue as "B factor" in a PDB file
ref_filename = "traj/condition-a_receptor.gro"
out_filename = "receptor_bbtors-deviations_tremd"
vis = residue_visualization(names_bbtors, jsd_bbtors, ref_filename,
"plots/"+out_filename+"_jsd.pdf",
"vispdb/"+out_filename+"_jsd.pdb",
y_label='max. JS dist. of BB torsions')
# Save the corresponding data
np.savetxt('results/'+out_filename+'_relen.csv',
np.array(relen).T, fmt='%s', delimiter=',',
header='Name, JSD(A,B), KLD(A,B), KLD(B,A)')
np.savetxt('results/'+out_filename+'_jsd.csv',
np.array(vis).T, fmt='%s', delimiter=',',
header='Residue, max. JSD(A,B)')
```
### Backbone C-alpha Distances
Another common representation for the overall structure of a protein are the distances between the C-alpha atoms. We can perform the same analysis on them.
```
# Relative entropy analysis for C-alpha distances
relen = relative_entropy_analysis(sim_a_rec_feat['bb-distances'],
sim_b_rec_feat['bb-distances'],
sim_a_rec_data['bb-distances'],
sim_b_rec_data['bb-distances'],
bin_num=10, verbose=False)
names_bbdist, jsd_bbdist, kld_ab_bbdist, kld_ba_bbdist = relen
# Print the features with the 12 highest values
sf = sort_features(names_bbdist, jsd_bbdist)
for f in sf[:12]: print(f[0], f[1])
```
To visualize distances, we need a two-dimensional representation with the residues on each axis.
We color each field with the value of the Jensen-Shannon distance (but could as well use Kullback-Leibler divergence, Kolmogorov-Smirnov statistic etc. instead).
```
# Visualize the deviations in a matrix plot
matrix = distances_visualization(names_bbdist, jsd_bbdist,
"plots/receptor_jsd-bbdist.pdf",
vmin = 0.0, vmax = 1.0,
cbar_label='JSD')
```
## Principal Component Analysis
Here we show how to calculate the principal components in the space of backbone torsions. It is also common to calculate principal components in the space of backbone distances. For the latter, again just change ```'bb-torsions'``` to ```'bb-distances'```. As mentioned above, we only consider the transmembrane region here, so flexible loops outside the membrane do not distort the more important slow motions in the receptor core.
#### Combined PCA
In the spirit of comparing two simulations, we calculate the principal components of their joint ensemble of structures.
```
# Combine the data of the different simulations
combined_data_tors = np.concatenate([sim_a_tmr_data['bb-torsions'],sim_b_tmr_data['bb-torsions']],0)
```
We can now calculate the principal components of this combined dataset. The corresponding function returns a PyEMMA PCA object, so you can combine it with all functionality in PyEMMA to perform more advanced or specialized analysis.
```
pca_combined = calculate_pca(combined_data_tors)
```
To find out how relevant each PC is, let's have a look at their eigenvalues.
```
pca_eigenvalues_plot(pca_combined, num=12, plot_file='plots/combined_tmr_eigenvalues.pdf')
```
Let us now have a look at the most relevant features of the first three principal components.
Here, we define a feature as important if its correlation with the respective PC is above a threshold of 0.4.
The function also plots the correlation analysis for each PC.
```
pca_features(pca_combined,sim_a_tmr_feat['bb-torsions'], 3, 0.4)
```
Now we can compare how the frames of each ensemble are distributed along the principal components.
```
compare_projections(sim_a_tmr_data['bb-torsions'],
sim_b_tmr_data['bb-torsions'],
pca_combined,
label_a='A',
label_b='B')
```
To get a better idea of what the principal components look like, let's visualize them.
For that purpose, let us sort the structures from the trajectories along the principal components instead of along simulation time.
We can then look at the resulting PC trajectories with a molecular visualization program like VMD.
The trajectory to be sorted does not have to be the same subsystem from which we calculated the PCA. Here, we are going to write frames with the entire receptor, sorted by the PCs of the transmembrane region.
```
_ = sort_trajs_along_common_pc(sim_a_tmr_data['bb-torsions'],
sim_b_tmr_data['bb-torsions'],
feature_start_frame,
"traj/condition-a_receptor.gro",
"traj/condition-b_receptor.gro",
"traj/condition-a_receptor.xtc",
"traj/condition-b_receptor.xtc",
"pca/receptor_by_tmr",
num_pc=3)
```
The above function deals with the special case of two input trajectories. We also provide functions for a single trajectory (see below). You can use these to calculate the PCA for any number of combined simulations and then sort the single or combined simulations accordingly.
#### Single simulation
Here are the major steps of a PCA demonstrated for a single simulation.
```
sim_a_tmr_data['bb-torsions'].shape
pca_a = calculate_pca(sim_a_tmr_data['bb-torsions'])
pca_features(pca_a, sim_a_tmr_feat['bb-torsions'], 3, 0.4)
_, __ = sort_traj_along_pc(sim_a_tmr_data['bb-torsions'],
pca_a, feature_start_frame,
"traj/condition-a_receptor.gro",
"traj/condition-a_receptor.xtc",
"pca/condition-a_receptor_by_tmr", num_pc=3)
```
## Clustering
To identify important states of an ensemble, we can use clustering algorithms. Here we show how to cluster a combined ensemble from two simulations (in this example into three clusters, matching `num_clusters=3` below) using k-means clustering. The plot shows how many frames from each simulation were sorted into which cluster.
```
cc = obtain_combined_clusters(sim_a_tmr_data['bb-torsions'],sim_b_tmr_data['bb-torsions'],
label_a='A', label_b='B', start_frame=0,
algorithm='kmeans', max_iter=100, num_clusters=3, min_dist=12,
saveas='plots/combined_clust_bbtors.pdf')
cidx, cond, oidx, wss, centroids = cc
np.savetxt('results/combined-cluster-indices.csv',
np.array([cidx, cond, oidx], dtype=int).T,
delimiter=',', fmt='%i',
header='Cluster, Condition, Index within condition')
```
We can sort the frames from each ensemble into these clusters, writing them as separate trajectory files. As with principal components, we can look at them using VMD.
```
name = "condition-a_tm"
write_cluster_traj(cidx[cond==0], "traj/"+name+".gro","traj/"+name+".xtc",
"clusters/"+"combined_clust_bbtors_"+name, feature_start_frame )
name = "condition-b_tm"
write_cluster_traj(cidx[cond==1], "traj/"+name+".gro","traj/"+name+".xtc",
"clusters/"+"combined_clust_bbtors_"+name, feature_start_frame )
```
A common method to determine the optimal number of clusters is the elbow plot. We plot the within-sum-of-squares (WSS), averaged over a few repetitions, for an increasing number of clusters, and then look for the "elbow" in the resulting curve. Unfortunately, sometimes there is no clear elbow.
```
wss_avg, wss_std = wss_over_number_of_combined_clusters(sim_a_tmr_data['bb-torsions'],
sim_b_tmr_data['bb-torsions'],
label_a='A', label_b='B',
start_frame=feature_start_frame,
algorithm='kmeans',
max_iter=100, num_repeats = 5,
max_num_clusters = 12,
plot_file = None)
```
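With `plot_file = None`, nothing is written to disk, so let's plot the elbow curve ourselves. This is just a sketch: it assumes matplotlib is available and that the returned arrays contain one entry per tested number of clusters, starting at one cluster (adjust the x values if the convention differs).
```
# Sketch: plot the elbow curve from the returned averages and standard deviations
import matplotlib.pyplot as plt
n_clusters = np.arange(1, len(wss_avg) + 1)  # assumed convention: 1, 2, ..., max_num_clusters
plt.errorbar(n_clusters, wss_avg, yerr=wss_std, fmt='o-')
plt.xlabel('number of clusters')
plt.ylabel('within-sum-of-squares (WSS)')
plt.show()
```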
Of course, we can also cluster a single simulation.
```
_ci, _wss, _centroids = obtain_clusters( sim_a_tmr_data['bb-torsions'], num_clusters=5 )
name = "condition-a_tm"
write_cluster_traj( _ci, "traj/"+name+".gro","traj/"+name+".xtc",
"clusters/"+"clust_bbtors_"+name, feature_start_frame )
wss_avg, wss_std = wss_over_number_of_clusters(sim_a_tmr_data['bb-torsions'],
algorithm='kmeans',
max_iter=100, num_repeats = 5,
max_num_clusters = 12,
plot_file = None)
```
# Pandas cheat sheet
This notebook has some common data manipulations you might do while working in the popular Python data analysis library [`pandas`](https://pandas.pydata.org/). It assumes you're already set up to analyze data in pandas using Python 3.
(If you're _not_ set up, [here's IRE's guide](https://docs.google.com/document/d/1cYmpfZEZ8r-09Q6Go917cKVcQk_d0P61gm0q8DAdIdg/edit#) to setting up Python. [Hit me up](mailto:[email protected]) if you get stuck.)
### Topics
- [Importing pandas](#Importing-pandas)
- [Creating a dataframe from a CSV](#Creating-a-dataframe-from-a-CSV)
- [Checking out the data](#Checking-out-the-data)
- [Selecting columns of data](#Selecting-columns-of-data)
- [Getting unique values in a column](#Getting-unique-values-in-a-column)
- [Running basic summary stats](#Running-basic-summary-stats)
- [Sorting your data](#Sorting-your-data)
- [Filtering rows of data](#Filtering-rows-of-data)
- [Filtering text columns with string methods](#Filtering-text-columns-with-string-methods)
- [Filtering against multiple values](#Filtering-against-multiple-values)
- [Exclusion filtering](#Exclusion-filtering)
- [Adding a calculated column](#Adding-a-calculated-column)
- [Filtering for nulls](#Filtering-for-nulls)
- [Grouping and aggregating data](#Grouping-and-aggregating-data)
- [Pivot tables](#Pivot-tables)
- [Applying a function across rows](#Applying-a-function-across-rows)
- [Joining data](#Joining-data)
### Importing pandas
Before we can use pandas, we need to import it. The most common way to do this is:
```
import pandas as pd
```
### Creating a dataframe from a CSV
To begin with, let's import a CSV of Major League Baseball player salaries on opening day. The file, which is in the same directory as this notebook, is called `mlb.csv`.
Pandas has a `read_csv()` method that we can use to get this data into a [dataframe](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html) (it has methods to read other file types, too). At minimum, you need to tell this method where the file lives:
```
mlb = pd.read_csv('mlb.csv')
```
### Checking out the data
When you first load up your data, you'll want to get a sense of what's in there. A pandas dataframe has several useful things to help you get a quick read of your data:
- `.head()`: Shows you the first 5 records in the data frame (optionally, if you want to see a different number of records, you can pass in a number)
- `.tail()`: Same as `head()`, but it pulls records from the end of the dataframe
- `.sample(n)` will give you a sample of *n* rows of the data -- just pass in a number
- `.info()` will give you a count of non-null values in each column -- useful for seeing if any columns have null values
- `.describe()` will compute summary stats for numeric columns
- `.columns` will list the column names
- `.dtypes` will list the data types of each column
- `.shape` will give you a pair of numbers: _(number of rows, number of columns)_
```
mlb.head()
mlb.tail()
mlb.sample(5)
mlb.info()
mlb.describe()
mlb.columns
mlb.dtypes
mlb.shape
```
To get the number of records in a dataframe, you can access the first item in the `shape` pair, or you can just use the Python function `len()`:
```
len(mlb)
```
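Equivalently, you could take the first element of the `shape` pair:
```
mlb.shape[0]
```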
### Selecting columns of data
If you need to select just one column of data, you can use "dot notation" (`mlb.SALARY`) as long as your column name doesn't have spaces and it isn't the name of a dataframe method (e.g., `product`). Otherwise, you can use "bracket notation" (`mlb['SALARY']`).
Selecting one column will return a [`Series`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.Series.html).
If you want to select multiple columns of data, use bracket notation and pass in a _list_ of columns that you want to select. In Python, a list is a collection of items enclosed in square brackets, separated by commas: `['SALARY', 'NAME']`.
Selecting multiple columns will return a [`DataFrame`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.html).
```
# select one column of data
teams = mlb.TEAM
# bracket notation would do the same thing -- note the quotes around the column name
# teams = mlb['TEAM']
teams.head()
type(teams)
# select multiple columns of data
salaries_and_names = mlb[['SALARY', 'NAME']]
salaries_and_names.head()
type(salaries_and_names)
```
### Getting unique values in a column
As you evaluate your data, you'll often want to get a list of unique values in a column (for cleaning, filtering, grouping, etc.).
To do this, you can use the Series method `unique()`. If you wanted to get a list of baseball positions, you could do:
```
mlb.POS.unique()
```
If useful, you could also sort the results alphabetically with the Python [`sorted()`](https://docs.python.org/3/library/functions.html#sorted) function:
```
sorted(mlb.POS.unique())
```
Sometimes you just need the _number_ of unique values in a column. To do this, you can use the pandas method `nunique()`:
```
mlb.POS.nunique()
```
(You can also run `nunique()` on an entire dataframe:)
```
mlb.nunique()
```
If you want to count up the number of times a value appears in a column of data -- the equivalent of doing a pivot table in Excel and aggregating by count -- you can use the Series method [`value_counts()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.Series.value_counts.html).
To get a list of MLB teams and the number of times each one appears in our salary data -- in other words, the roster count for each team -- we could do:
```
mlb.TEAM.value_counts()
```
### Running basic summary stats
Some of this already surfaced with `describe()`, but in some cases you'll want to compute these stats manually:
- `sum()`
- `mean()`
- `median()`
- `max()`
- `min()`
You can run these on a Series (e.g., a column of data), or on an entire DataFrame.
```
mlb.SALARY.sum()
mlb.SALARY.mean()
mlb.SALARY.median()
mlb.SALARY.max()
mlb.SALARY.min()
# entire dataframe
mlb.mean()
```
### Sorting your data
You can use the [`sort_values()`](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.sort_values.html) method to sort a dataframe by one or more columns. The default is to sort the values ascending; if you want your results sorted descending, specify `ascending=False`.
Let's sort our dataframe by `SALARY` descending:
```
mlb.sort_values('SALARY', ascending=False).head()
```
To sort by multiple columns, pass a list of columns to the `sort_values()` method -- the sorting will happen in the order you specify in the list. You'll also need to pass a list to the `ascending` keyword argument, otherwise both will sort ascending.
Let's sort our dataframe first by `TEAM` ascending, then by `SALARY` descending:
```
mlb.sort_values(['TEAM', 'SALARY'], ascending=[True, False]).head()
```
### Filtering rows of data
To filter your data by some criteria, you'd pass your filtering condition(s) to a dataframe using bracket notation.
You can use Python's [comparison operators](https://docs.python.org/3/reference/expressions.html#comparisons) in your filters, which include:
- `>` greater than
- `<` less than
- `>=` greater than or equal to
- `<=` less than or equal to
- `==` equal to
- `!=` not equal to
Example: You want to filter your data to keep records where the `TEAM` value is 'ARI':
```
diamondbacks = mlb[mlb.TEAM == 'ARI']
diamondbacks.head()
```
We could filter to get all records where the `TEAM` value is _not_ 'ARI':
```
non_diamondbacks = mlb[mlb.TEAM != 'ARI']
non_diamondbacks.head()
```
We could filter our data to just grab the players that make at least $1 million:
```
million_a_year = mlb[mlb.SALARY >= 1000000]
million_a_year.head()
```
### Filtering against multiple values
You can use the `isin()` method to test a value against multiple matches -- just hand it a _list_ of values to check against.
Example: Let's say we wanted to filter to get just players in Texas (in other words, just the Texas Rangers and the Houston Astros):
```
tx = mlb[mlb.TEAM.isin(['TEX', 'HOU'])]
tx.head()
```
### Exclusion filtering
Sometimes it's easier to specify what records you _don't_ want returned. To flip the meaning of a filter condition, prepend a tilde `~`.
For instance, if we wanted to get all players who are _not_ from Texas, we'd use the same filter condition we just used to get the TX players but add a tilde at the beginning:
```
not_tx = mlb[~mlb.TEAM.isin(['TEX', 'HOU'])]
not_tx.head()
```
### Filtering text columns with string methods
You can access the text values in a column with `.str`, and you can use any of Python's native string functions to manipulate them.
For our purposes, though, the pandas [`str.contains()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.Series.str.contains.html) method is useful for filtering data by matching text patterns.
If we wanted to get every player with 'John' in their name, we could do something like this:
```
johns = mlb[mlb.NAME.str.contains('John', case=False)]
johns.head()
```
Note the `case=False` keyword argument -- we're telling pandas to match case-insensitive. And if the pattern you're trying to match is more complex, the method is set up to support [regular expressions](https://docs.python.org/3/howto/regex.html) by default.
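For instance, because regular expressions are supported, you could match 'Jon' as well as 'John' in a single pass. A quick sketch (`j_names` is just an illustrative name):
```
# 'joh?n' matches both 'john' and 'jon' -- the 'h' is optional
j_names = mlb[mlb.NAME.str.contains(r'joh?n', case=False)]
j_names.head()
```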
### Multiple filters
Sometimes you have multiple filters to apply to your data. Lots of the time, it makes sense to break the filters out into separate statements.
For instance, if you wanted to get all Texas players who make at least $1 million, you might do this:
```
tx = mlb[mlb.TEAM.isin(['TEX', 'HOU'])]
# note that I'm filtering the dataframe I just created, not the original `mlb` dataframe
tx_million_a_year = tx[tx.SALARY >= 1000000]
tx_million_a_year.head()
```
But sometimes you want to chain your filters together into one statement. Use `|` for "or" and `&` for "and" rather than Python's built-in `or` and `and` keywords, and wrap each condition in grouping parentheses.
The same filter in one statement:
```
tx_million_a_year = mlb[(mlb.TEAM.isin(['TEX', 'HOU'])) & (mlb.SALARY >= 1000000)]
tx_million_a_year.head()
```
Do what works for you and makes sense in context, but I find the first version a little easier to read.
### Adding a calculated column
To add a new column to a dataframe, use bracket notation to supply the name of the new column (in single or double quotes, as long as they match), then set it equal to a value -- maybe a calculation derived from other data in your dataframe.
For example, let's create a new column, `contract_total`, that multiplies the annual salary by the number of contract years:
```
mlb['contract_total'] = mlb['SALARY'] * mlb['YEARS']
mlb.head()
```
### Filtering for nulls
You can use the `isnull()` method to get records that are null, or `notnull()` to get records that aren't. The most common use I've seen for these methods is during filtering to see how many records you're missing (and, therefore, how that affects your analysis).
The MLB data is complete, so to demonstrate this, let's load up a new data set: A cut of the [National Inventory of Dams](https://ire.org/nicar/database-library/databases/national-inventory-of-dams/) database, courtesy of the NICAR data library. (We'll need to specify the `encoding` on this CSV because it's not UTF-8.)
```
dams = pd.read_csv('dams.csv',
encoding='latin-1')
dams.head()
```
Maybe we're interested in looking at the year the dam was completed (the `Year_Comp` column). Running `.info()` on the dataframe shows that we're missing some values:
```
dams.info()
```
We can filter for `isnull()` to take a closer look:
```
no_year_comp = dams[dams.Year_Comp.isnull()]
no_year_comp.head()
```
How many are we missing? That will help us determine whether the analysis would be valid:
```
# calculate the percentage of records with no Year_Comp value
# (part / whole) * 100
(len(no_year_comp) / len(dams)) * 100
```
So this piece of our analysis would exclude one-third of our records -- something you'd need to explain to your audience, if indeed your reporting showed that the results of your analysis would still be meaningful.
To get records where the `Year_Comp` is not null, we'd use `notnull()`:
```
has_year_comp = dams[dams.Year_Comp.notnull()]
has_year_comp.head()
```
What years remain? Let's use `value_counts()` to find out:
```
has_year_comp.Year_Comp.value_counts()
```
To sort by year rather than by count, we could tack on a `sort_index()`:
```
has_year_comp.Year_Comp.value_counts().sort_index()
```
### Grouping and aggregating data
You can use the `groupby()` method to group and aggregate data in pandas, similar to what you'd get by running a pivot table in Excel or a `GROUP BY` query in SQL. We'll also provide the aggregate function to use.
Let's group our baseball salary data by team to see which teams have the biggest payrolls -- in other words, we want to use `sum()` as our aggregate function:
```
grouped_mlb = mlb.groupby('TEAM').sum()
grouped_mlb.head()
```
If you don't specify what columns you want, it will run `sum()` on every numeric column. Typically I select just the grouping column and the column I'm running the aggregation on:
```
grouped_mlb = mlb[['TEAM', 'SALARY']].groupby('TEAM').sum()
grouped_mlb.head()
```
... and we can sort descending, with `head()` to get the top payrolls:
```
grouped_mlb.sort_values('SALARY', ascending=False).head(10)
```
You can use different aggregate functions, too. Let's say we wanted to get the top median salaries by team:
```
mlb[['TEAM', 'SALARY']].groupby('TEAM').median().sort_values('SALARY', ascending=False).head(10)
```
You can group by multiple columns by passing a list. Here, we'll select our columns of interest and group by `TEAM`, then by `POS`, using `sum()` as our aggregate function:
```
mlb[['TEAM', 'POS', 'SALARY']].groupby(['TEAM', 'POS']).sum()
```
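If you need several aggregate functions at once, you can also pass a list to `agg()` -- a quick sketch using standard pandas (not covered elsewhere in this notebook):
```
# sum, mean and median salary per team in one go
mlb[['TEAM', 'SALARY']].groupby('TEAM').agg(['sum', 'mean', 'median']).head()
```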
### Pivot tables
Sometimes you need a full-blown pivot table, and [pandas has a function to make one](https://pandas.pydata.org/pandas-docs/stable/generated/pandas.pivot_table.html).
For this example, we'll look at some foreign trade data -- specifically, eel product imports from 2010 to mid-2017:
```
eels = pd.read_csv('eels.csv')
eels.head()
```
Let's run a pivot table where the grouping column is `country`, the values are the sum of `kilos`, and the columns are the year:
```
pivoted_sums = pd.pivot_table(eels,
index='country',
columns='year',
values='kilos',
aggfunc=sum)
pivoted_sums.head()
```
Let's sort by the `2017` value. While we're at it, let's fill in null values (`NaN`) with zeroes using the [`fillna()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.fillna.html) method.
```
pivoted_sums.sort_values(2017, ascending=False).fillna(0)
```
### Applying a function across rows
Often, you'll want to calculate a value for every row, but the calculation won't be that simple -- so you'll write a separate function that accepts one row of data, does some calculations and returns a value. We'll use the [`apply()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.apply.html) method to accomplish this.
For this example, we're going to load up a CSV of gators killed by hunters in Florida:
```
gators = pd.read_csv('gators.csv')
gators.head()
```
We want to find the longest gator in our data, of course, but there's a problem: right now, the carcass size value is being stored as text: `{} ft. {} in.`. The pattern is predictable, though, and we can use some Python to turn those values into consistent numbers -- inches -- that we can then sort on. Here's our function:
```
def get_inches(row):
'''Accepts a row from our dataframe, calculates carcass length in inches and returns that value'''
# get the value in the 'Carcass Size' column
carcass_size = row['Carcass Size']
# split the text on 'ft.'
# the result is a list
size_split = carcass_size.split('ft.')
# strip whitespace from the first item ([0]) in the resulting list -- the feet --
# and coerce it to an integer with the Python `int()` function
feet = int(size_split[0].strip())
# in the second item ([1]) in the resulting list -- the inches -- replace 'in.' with nothing,
# strip whitespace and coerce to an integer
inches = int(size_split[1].replace('in.', '').strip())
# add the feet times 12 plus the inches and return that value
return inches + (feet * 12)
```
Now we're going to create a new column, `length_in` and use the [`apply()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.apply.html) method to apply our function to every row. The `axis=1` keyword argument means that we're applying our function row-wise, not column-wise.
```
gators['length_in'] = gators.apply(get_inches, axis=1)
gators.sort_values('length_in', ascending=False).head()
```
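As an aside, for a pattern this regular you could also skip `apply()` and use pandas string methods on whole columns at once. A sketch (assuming every value matches the `{} ft. {} in.` pattern; `length_in_alt` is just an illustrative column name):
```
# extract feet and inches with a regular expression, then do vectorized math
parts = gators['Carcass Size'].str.extract(r'(\d+)\s*ft\.\s*(\d+)\s*in\.').astype(int)
gators['length_in_alt'] = parts[0] * 12 + parts[1]
gators[['Carcass Size', 'length_in', 'length_in_alt']].head()
```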
### Joining data
You can use [`merge()`](https://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.DataFrame.merge.html) to join data in pandas.
In this simple example, we're going to take a CSV of country population data in which each country is represented by an [ISO 3166-1 numeric country code](https://en.wikipedia.org/wiki/ISO_3166-1_numeric) and join it to a CSV that's basically a lookup table with the ISO codes and the names of the countries to which they refer.
Some of the country codes have leading zeroes, so we're going to use the `dtype` keyword when we import each CSV to specify that the `'code'` column in each dataset should be treated as a string (text), not a number.
```
pop_csv = pd.read_csv('country-population.csv', dtype={'code': str})
pop_csv.head()
code_csv = pd.read_csv('country-codes.csv', dtype={'code': str})
code_csv.head()
```
Now we'll use `merge()` to join them.
The `on` keyword argument tells the method what column to join on. If the names of the columns were different, you'd use `left_on` and `right_on`, with the "left" dataframe being the first one you hand to the `merge()` function.
The `how` keyword argument tells the method what type of join to use -- the default is `'inner'`.
```
joined_data = pd.merge(pop_csv,
code_csv,
on='code',
how='left')
joined_data.head()
```
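One quick way to check how well the join worked is the `indicator` argument to `merge()`, which adds a `_merge` column flagging whether each row matched (a sketch; `check` is just an illustrative name):
```
check = pd.merge(pop_csv, code_csv, on='code', how='left', indicator=True)
check['_merge'].value_counts()
```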
# IPython Magic Commands
Here we'll begin discussing some of the enhancements that IPython adds on top of the normal Python syntax.
These are known in IPython as *magic commands*, and are prefixed by the ``%`` character.
These magic commands are designed to succinctly solve various common problems in standard data analysis.
Magic commands come in two flavors: *line magics*, which are denoted by a single ``%`` prefix and operate on a single line of input, and *cell magics*, which are denoted by a double ``%%`` prefix and operate on multiple lines of input.
We'll demonstrate and discuss a few brief examples here, and come back to more focused discussion of several useful magic commands later in the chapter.
## Running External Code: ``%run``
As you begin developing more extensive code, you will likely find yourself working both in IPython for interactive exploration and in a text editor to store code that you want to reuse.
Rather than running this code in a new window, it can be convenient to run it within your IPython session.
This can be done with the ``%run`` magic.
For example, let's create a ``myscript.py`` file with the following contents (note that we are using the ``%%bash`` magic to write bash code in the notebook):
```
%%bash
echo """
'''square functions'''
def square(x):
'''square a number'''
return x ** 2
for N in range(1, 4):
print(N, 'squared is', square(N))""" > myscript.py
```
We can see the content of this file either from the Files tab on the left bar or using a terminal command such as `cat`:
```
%%bash
cat myscript.py
```
You can execute this from your IPython session as follows:
```
%run myscript.py
```
Note that after you've run this script, any functions defined within it are available for use in your IPython session:
```
square(5)
square??
```
There are several options to fine-tune how your code is run; you can see the documentation in the normal way, by typing **``%run?``** in the IPython interpreter.
## Timing Code Execution: ``%timeit``
Another example of a useful magic function is ``%timeit``, which will automatically determine the execution time of the single-line Python statement that follows it.
For example, we may want to check the performance of a list comprehension:
```
%timeit L = [n ** 2 for n in range(1000)]
```
The benefit of ``%timeit`` is that for short commands it will automatically perform multiple runs in order to attain more robust results.
For multi-line statements, adding a second ``%`` sign will turn this into a cell magic that can handle multiple lines of input.
For example, here's the equivalent construction with a ``for``-loop:
```
%%timeit
L = []
for n in range(1000):
L.append(n ** 2)
```
We can immediately see that list comprehensions are about 20% faster than the equivalent ``for``-loop construction in this case.
## Help on Magic Functions: ``?``, ``%magic``, and ``%lsmagic``
Like normal Python functions, IPython magic functions have docstrings, and this useful
documentation can be accessed in the standard manner.
So, for example, to read the documentation of the ``%timeit`` magic simply type this:
```
%timeit?
```
Documentation for other functions can be accessed similarly.
To access a general description of available magic functions, including some examples, you can type this:
```
%magic
```
For a quick and simple list of all available magic functions, type this:
```
%lsmagic
```

Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
# Azure ML Hardware Accelerated Models Quickstart
This tutorial will show you how to deploy an image recognition service based on the ResNet 50 classifier using the Azure Machine Learning Accelerated Models service. Get more information about our service from our [documentation](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-accelerate-with-fpgas), [API reference](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel?view=azure-ml-py), or [forum](https://aka.ms/aml-forum).
We will use an accelerated ResNet50 featurizer running on an FPGA. Our Accelerated Models Service handles translating deep neural networks (DNN) into an FPGA program.
For more information about using other models besides Resnet50, see the [README](./README.md).
The steps covered in this notebook are:
1. [Set up environment](#set-up-environment)
* [Construct model](#construct-model)
* Image Preprocessing
* Featurizer (Resnet50)
* Classifier
* Save Model
* [Register Model](#register-model)
* [Convert into Accelerated Model](#convert-model)
* [Create Image](#create-image)
* [Deploy](#deploy-image)
* [Test service](#test-service)
* [Clean-up](#clean-up)
<a id="set-up-environment"></a>
## 1. Set up environment
```
import os
import tensorflow as tf
```
### Retrieve Workspace
If you haven't created a Workspace, please follow [this notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) to do so. If you have, run the codeblock below to retrieve it.
```
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
```
<a id="construct-model"></a>
## 2. Construct model
There are three parts to the model we are deploying: pre-processing, featurizer with ResNet50, and classifier with ImageNet dataset. Then we will save this complete Tensorflow model graph locally before registering it to your Azure ML Workspace.
### 2.a. Image preprocessing
We'd like our service to accept JPEG images as input. However the input to ResNet50 is a tensor. So we need code that decodes JPEG images and does the preprocessing required by ResNet50. The Accelerated AI service can execute TensorFlow graphs as part of the service and we'll use that ability to do the image preprocessing. This code defines a TensorFlow graph that preprocesses an array of JPEG images (as strings) and produces a tensor that is ready to be featurized by ResNet50.
**Note:** Expect to see TF deprecation warnings until we port our SDK over to use Tensorflow 2.0.
```
# Input images as a two-dimensional tensor containing an arbitrary number of images represented as strings
import azureml.accel.models.utils as utils
tf.reset_default_graph()
in_images = tf.placeholder(tf.string)
image_tensors = utils.preprocess_array(in_images)
print(image_tensors.shape)
```
### 2.b. Featurizer
We use ResNet50 as a featurizer. In this step we initialize the model. This downloads a TensorFlow checkpoint of the quantized ResNet50.
```
from azureml.accel.models import QuantizedResnet50
save_path = os.path.expanduser('~/models')
model_graph = QuantizedResnet50(save_path, is_frozen = True)
feature_tensor = model_graph.import_graph_def(image_tensors)
print(model_graph.version)
print(feature_tensor.name)
print(feature_tensor.shape)
```
### 2.c. Classifier
The model we downloaded includes a classifier which takes the output of the ResNet50 and identifies an image. This classifier is trained on the ImageNet dataset. We are going to use this classifier for our service. The next [notebook](./accelerated-models-training.ipynb) shows how to train a classifier for a different data set. The input to the classifier is a tensor matching the output of our ResNet50 featurizer.
```
classifier_output = model_graph.get_default_classifier(feature_tensor)
print(classifier_output)
```
### 2.d. Save Model
Now that we loaded all three parts of the tensorflow graph (preprocessor, resnet50 featurizer, and the classifier), we can save the graph and associated variables to a directory which we can register as an Azure ML Model.
```
# model_name must be lowercase
model_name = "resnet50"
model_save_path = os.path.join(save_path, model_name)
print("Saving model in {}".format(model_save_path))
with tf.Session() as sess:
model_graph.restore_weights(sess)
tf.saved_model.simple_save(sess, model_save_path,
inputs={'images': in_images},
outputs={'output_alias': classifier_output})
```
### 2.e. Important! Save names of input and output tensors
The input and output tensors that were created during the preprocessing and classifier steps are also going to be used when **converting the model** to an Accelerated Model that can run on FPGAs and when **making an inference request**. It is very important to save this information! You can see our defaults for all the models in the [README](./README.md).
By default for Resnet50, these are the values you should see when running the cell below:
* input_tensors = "Placeholder:0"
* output_tensors = "classifier/resnet_v1_50/predictions/Softmax:0"
```
input_tensors = in_images.name
output_tensors = classifier_output.name
print(input_tensors)
print(output_tensors)
```
<a id="register-model"></a>
## 3. Register Model
You can add tags and descriptions to your models. Using tags, you can track useful information such as the name and version of the machine learning library used to train the model. Note that tags must be alphanumeric.
```
from azureml.core.model import Model
registered_model = Model.register(workspace = ws,
model_path = model_save_path,
model_name = model_name)
print("Successfully registered: ", registered_model.name, registered_model.description, registered_model.version, sep = '\t')
```
<a id="convert-model"></a>
## 4. Convert Model
For conversion you need to provide names of input and output tensors. This information can be found from the model_graph you saved in step 2.e. above.
**Note**: Conversion may take a while; for an FPGA model it typically takes about 1-3 minutes, depending on the model type.
```
from azureml.accel import AccelOnnxConverter
convert_request = AccelOnnxConverter.convert_tf_model(ws, registered_model, input_tensors, output_tensors)
if convert_request.wait_for_completion(show_output = False):
# If the above call succeeded, get the converted model
converted_model = convert_request.result
print("\nSuccessfully converted: ", converted_model.name, converted_model.url, converted_model.version,
converted_model.id, converted_model.created_time, '\n')
else:
print("Model conversion failed. Showing output.")
convert_request.wait_for_completion(show_output = True)
```
<a id="create-image"></a>
## 5. Package the model into an Image
You can add tags and descriptions to the image. Also, for FPGA models an image can only contain a **single** model.
**Note**: The following command can take a few minutes.
```
from azureml.core.image import Image
from azureml.accel import AccelContainerImage
image_config = AccelContainerImage.image_configuration()
# Image name must be lowercase
image_name = "{}-image".format(model_name)
image = Image.create(name = image_name,
models = [converted_model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = False)
```
<a id="deploy-image"></a>
## 6. Deploy
Once you have an Azure ML Accelerated Image in your Workspace, you can deploy it to one of two destinations: a Databox Edge machine or an AKS cluster.
### 6.a. Databox Edge Machine using IoT Hub
See the sample [here](https://github.com/Azure-Samples/aml-real-time-ai/) for using the Azure IoT CLI extension for deploying your Docker image to your Databox Edge Machine.
### 6.b. Azure Kubernetes Service (AKS) using Azure ML Service
We are going to create an AKS cluster with FPGA-enabled machines, then deploy our service to it. For more information, see [AKS official docs](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-deploy-and-where#aks).
#### Create AKS ComputeTarget
```
from azureml.core.compute import AksCompute, ComputeTarget
# Uses the specific FPGA enabled VM (sku: Standard_PB6s)
# Standard_PB6s are available in: eastus, westus2, westeurope, southeastasia
prov_config = AksCompute.provisioning_configuration(vm_size = "Standard_PB6s",
agent_count = 1,
location = "eastus")
aks_name = 'my-aks-pb6'
# Create the cluster
aks_target = ComputeTarget.create(workspace = ws,
name = aks_name,
provisioning_configuration = prov_config)
```
Provisioning an AKS cluster might take a while (around 15 minutes), and we want to wait until it's successfully provisioned before we can deploy a service to it. If you interrupt this cell, provisioning of the cluster will continue. You can also check the status in your Workspace under Compute.
```
%%time
aks_target.wait_for_completion(show_output = True)
print(aks_target.provisioning_state)
print(aks_target.provisioning_errors)
```
#### Deploy AccelContainerImage to AKS ComputeTarget
```
%%time
from azureml.core.webservice import Webservice, AksWebservice
# Set the web service configuration (for creating a test service, we don't want autoscale enabled)
# Authentication is enabled by default, but for testing we specify False
aks_config = AksWebservice.deploy_configuration(autoscale_enabled=False,
num_replicas=1,
auth_enabled = False)
aks_service_name ='my-aks-service-1'
aks_service = Webservice.deploy_from_image(workspace = ws,
name = aks_service_name,
image = image,
deployment_config = aks_config,
deployment_target = aks_target)
aks_service.wait_for_deployment(show_output = True)
```
<a id="test-service"></a>
## 7. Test the service
### 7.a. Create Client
The image supports gRPC and the TensorFlow Serving "predict" API. We will create a PredictionClient from the Webservice object that can call into the docker image to get predictions. If you do not have the Webservice object, you can also create [PredictionClient](https://docs.microsoft.com/en-us/python/api/azureml-accel-models/azureml.accel.predictionclient?view=azure-ml-py) directly.
**Note:** If you chose to use auth_enabled=True when creating your AksWebservice, see documentation [here](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.core.webservice(class)?view=azure-ml-py#get-keys--) on how to retrieve your keys and use either key as an argument to PredictionClient(...,access_token=key).
**WARNING:** If you are running on Azure Notebooks free compute, you will not be able to make outgoing calls to your service. Try locating your client on a different machine to consume it.
```
# Using the grpc client in AzureML Accelerated Models SDK
from azureml.accel import client_from_service
# Initialize AzureML Accelerated Models client
client = client_from_service(aks_service)
```
You can adapt the client [code](https://github.com/Azure/aml-real-time-ai/blob/master/pythonlib/amlrealtimeai/client.py) to meet your needs. There is also an example C# [client](https://github.com/Azure/aml-real-time-ai/blob/master/sample-clients/csharp).
The service provides an API that is compatible with TensorFlow Serving. There are instructions to download a sample client [here](https://www.tensorflow.org/serving/setup).
### 7.b. Serve the model
To understand the results, we need a mapping to the human-readable ImageNet classes:
```
import requests
classes_entries = requests.get("https://raw.githubusercontent.com/Lasagne/Recipes/master/examples/resnet50/imagenet_classes.txt").text.splitlines()
# Score image with input and output tensor names
results = client.score_file(path="./snowleopardgaze.jpg",
input_name=input_tensors,
outputs=output_tensors)
# map results [class_id] => [confidence]
results = enumerate(results)
# sort results by confidence
sorted_results = sorted(results, key=lambda x: x[1], reverse=True)
# print top 5 results
for top in sorted_results[:5]:
print(classes_entries[top[0]], 'confidence:', top[1])
```
<a id="clean-up"></a>
## 8. Clean-up
Run the cell below to delete your webservice, image, and model (must be done in that order). In the [next notebook](./accelerated-models-training.ipynb) you will learn how to train a classifier on a new dataset using transfer learning and fine-tune the weights.
```
aks_service.delete()
aks_target.delete()
image.delete()
registered_model.delete()
converted_model.delete()
```
```
import graphlab
```
# Load some text data - from wikipedia, page on people
```
people = graphlab.SFrame('people_wiki.gl/')
people.head()
len(people)
```
# Explore the dataset and check out the text it contains
```
obama = people[people['name'] == 'Barack Obama']
obama
obama['text']
clooney = people[people['name'] == 'George Clooney']
```
# Get the word counts for Obama article
```
obama['word_count'] = graphlab.text_analytics.count_words(obama['text'])
print obama['word_count']
```
## Sort the word count for the Obama article
```
obama_word_count_table = obama[['word_count']].stack('word_count', new_column_name = ['word', 'count'])
obama_word_count_table
obama_word_count_table.sort('count', ascending=False)
```
# Compute TF-IDF for the corpus
```
people['word_count'] = graphlab.text_analytics.count_words(people['text'])
people.head()
tfidf = graphlab.text_analytics.tf_idf(people['word_count'])
tfidf.head()
people['tfidf'] = tfidf
```
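For reference, TF-IDF weights a word's count in a document by how rare that word is across the corpus. Here is a minimal plain-Python sketch of one common definition (GraphLab Create's exact weighting may differ in detail):
```
import math

def tf_idf(word, doc_counts, corpus_counts):
    """doc_counts: word -> count dict for one document;
       corpus_counts: list of such dicts, one per document."""
    tf = doc_counts.get(word, 0)                               # term frequency in the document
    df = sum(1 for counts in corpus_counts if word in counts)  # document frequency in the corpus
    return tf * math.log(len(corpus_counts) / (1.0 + df))      # rare words get boosted
```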
## Examine the TF-IDF for the Obama article
```
obama = people[people['name'] == 'Barack Obama']
obama[['tfidf']].stack('tfidf', new_column_name=['word', 'tfidf']).sort('tfidf', ascending=False)
```
# Manually compute distances between a few people
```
clinton = people[people['name'] == 'Bill Clinton']
beckham = people[people['name'] == 'David Beckham']
```
## Is Obama closer to Clinton than to Beckham?
```
graphlab.distances.cosine(obama['tfidf'][0], clinton['tfidf'][0])
graphlab.distances.cosine(obama['tfidf'][0], beckham['tfidf'][0])
```
# Build a nearest neighbor model for document retrieval
```
knn_model = graphlab.nearest_neighbors.create(people, features=['tfidf'],label='name')
```
# Applying the nearest-neighbors model for retrieval
## Who is closest to Obama?
```
knn_model.query(obama)
```
## Other examples of document retrieval
```
swift = people[people['name'] == 'Taylor Swift']
knn_model.query(swift)
arnold = people[people['name'] == 'Arnold Schwarzenegger']
knn_model.query(arnold)
```
# Ex4
## Compare top words according to word counts to TF-IDF
```
john = people[people['name'] == 'Elton John']
john_word_count_table = john[['word_count']].stack('word_count', new_column_name = ['word', 'count']).sort('count', ascending=False)
john_word_count_table.head()
john[['tfidf']].stack('tfidf', new_column_name=['word', 'tfidf']).sort('tfidf', ascending=False)
```
## Measuring distance
```
victoria = people[people['name'] == 'Victoria Beckham']
paul = people[people['name'] == 'Paul McCartney']
graphlab.distances.cosine(john['tfidf'][0], victoria['tfidf'][0])
graphlab.distances.cosine(john['tfidf'][0], paul['tfidf'][0])
```
## Building nearest neighbors models with different input features and setting the distance metric
```
word_count_model = graphlab.nearest_neighbors.create(people, features=['word_count'],label='name',distance='cosine')
tfidf_model = graphlab.nearest_neighbors.create(people, features=['tfidf'],label='name',distance='cosine')
word_count_model.query(john)
tfidf_model.query(john)
word_count_model.query(victoria)
tfidf_model.query(victoria)
```
<h1> 2c. Loading large datasets progressively with the tf.data.Dataset </h1>
In this notebook, we continue reading the same small dataset, but refactor our ML pipeline in two small, but significant, ways:
<ol>
<li> Refactor the input to read data from disk progressively.
<li> Refactor the feature creation so that it is not one-to-one with inputs.
</ol>
<br/>
The Pandas function in the previous notebook first read the whole data into memory -- on a large dataset, this won't be an option.
```
import datalab.bigquery as bq
import tensorflow as tf
import numpy as np
import shutil
print(tf.__version__)
```
<h2> 1. Refactor the input </h2>
Read data created in Lab1a, but this time make it more general, so that we can later handle large datasets. We use the Dataset API for this. It ensures that, as data gets delivered to the model in mini-batches, it is loaded from disk only when needed.
```
CSV_COLUMNS = ['fare_amount', 'pickuplon','pickuplat','dropofflon','dropofflat','passengers', 'key']
DEFAULTS = [[0.0], [-74.0], [40.0], [-74.0], [40.7], [1.0], ['nokey']]
def read_dataset(filename, mode, batch_size = 512):
def decode_csv(row):
columns = tf.decode_csv(row, record_defaults = DEFAULTS)
features = dict(zip(CSV_COLUMNS, columns))
features.pop('key') # discard, not a real feature
label = features.pop('fare_amount') # remove label from features and store
return features, label
# Create list of file names that match "glob" pattern (i.e. data_file_*.csv)
filenames_dataset = tf.data.Dataset.list_files(filename)
# Read lines from text files
textlines_dataset = filenames_dataset.flat_map(tf.data.TextLineDataset)
# Parse text lines as comma-separated values (CSV)
dataset = textlines_dataset.map(decode_csv)
# Note:
# use tf.data.Dataset.flat_map to apply one to many transformations (here: filename -> text lines)
# use tf.data.Dataset.map to apply one to one transformations (here: text line -> feature list)
if mode == tf.estimator.ModeKeys.TRAIN:
num_epochs = None # loop indefinitely
dataset = dataset.shuffle(buffer_size = 10 * batch_size)
else:
num_epochs = 1 # end-of-input after this
dataset = dataset.repeat(num_epochs).batch(batch_size)
return dataset
def get_train_input_fn():
return read_dataset('./taxi-train.csv', mode = tf.estimator.ModeKeys.TRAIN)
def get_valid_input_fn():
return read_dataset('./taxi-valid.csv', mode = tf.estimator.ModeKeys.EVAL)
```
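If you want to convince yourself that the input function really delivers mini-batches, the following sketch (TF 1.x graph style, assuming the `taxi-train.csv` file from the earlier labs is present) pulls a single batch and prints its shapes:
```
# Pull one batch from the dataset and inspect it (sanity check only)
features, label = read_dataset('./taxi-train.csv', tf.estimator.ModeKeys.EVAL) \
    .make_one_shot_iterator().get_next()
with tf.Session() as sess:
    f, l = sess.run([features, label])
print({name: value.shape for name, value in f.items()})
print(l.shape)
```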
<h2> 2. Refactor the way features are created. </h2>
For now, pass these through (same as previous lab). However, refactoring this way will enable us to break the one-to-one relationship between inputs and features.
```
INPUT_COLUMNS = [
tf.feature_column.numeric_column('pickuplon'),
tf.feature_column.numeric_column('pickuplat'),
tf.feature_column.numeric_column('dropofflat'),
tf.feature_column.numeric_column('dropofflon'),
tf.feature_column.numeric_column('passengers'),
]
def add_more_features(feats):
# Nothing to add (yet!)
return feats
feature_cols = add_more_features(INPUT_COLUMNS)
```
<h2> Create and train the model </h2>
Note that we train on num_steps * batch_size examples -- with the default batch_size of 512 and the 200 steps below, that is 200 * 512 = 102,400 examples.
```
tf.logging.set_verbosity(tf.logging.INFO)
OUTDIR = 'taxi_trained'
shutil.rmtree(OUTDIR, ignore_errors = True) # start fresh each time
model = tf.estimator.LinearRegressor(
feature_columns = feature_cols, model_dir = OUTDIR)
model.train(input_fn = get_train_input_fn, steps = 200)
```
<h3> Evaluate model </h3>
As before, evaluate on the validation data. We'll do the third refactoring (to move the evaluation into the training loop) in the next lab.
```
metrics = model.evaluate(input_fn = get_valid_input_fn, steps = None)
print('RMSE on dataset = {}'.format(np.sqrt(metrics['average_loss'])))
```
## Challenge Exercise
Create a neural network that is capable of finding the volume of a cylinder given the radius of its base (r) and its height (h). Assume that the radius and height of the cylinder are both in the range 0.5 to 2.0. Unlike in the challenge exercise for b_estimator.ipynb, assume that your measurements of r, h and V are all rounded off to the nearest 0.1. Simulate the necessary training dataset. This time, you will need a lot more data to get a good predictor.
Hint (highlight to see):
<p style='color:white'>
Create random values for r and h and compute V. Then, round off r, h and V (i.e., the volume is computed from the true value of r and h; it's only your measurement that is rounded off). Your dataset will consist of the round values of r, h and V. Do this for both the training and evaluation datasets.
</p>
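If you get stuck, below is one possible way to simulate the measurements (a minimal sketch, not a reference solution; `make_cylinder_data` and the variable names are purely illustrative):
```
# Simulate rounded measurements of cylinder radius, height and volume
import numpy as np

def make_cylinder_data(n):
    r = np.random.uniform(0.5, 2.0, n)
    h = np.random.uniform(0.5, 2.0, n)
    v = np.pi * r ** 2 * h   # volume from the *true* r and h
    # only the measurements are rounded, to the nearest 0.1
    return np.round(r, 1), np.round(h, 1), np.round(v, 1)

r_train, h_train, v_train = make_cylinder_data(100000)
```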
Now modify the "noise" so that instead of just rounding off the value, there is up to a 10% error (uniformly distributed) in the measurement followed by rounding off.
Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
# Performing measurements using QCoDeS parameters and DataSet
This notebook shows some ways of performing different measurements using
QCoDeS parameters and the [DataSet](DataSet-class-walkthrough.ipynb) via a powerful ``Measurement`` context manager. Here, it is assumed that the reader has some degree of familiarity with fundamental objects and methods of QCoDeS.
## Implementing a measurement
Now, let us start with necessary imports:
```
%matplotlib inline
import numpy.random as rd
import matplotlib.pyplot as plt
import numpy as np
from time import sleep, monotonic
import qcodes as qc
from qcodes import Station, load_or_create_experiment, \
initialise_database, Measurement, load_by_run_spec, load_by_guid
from qcodes.tests.instrument_mocks import DummyInstrument, DummyInstrumentWithMeasurement
from qcodes.dataset.plotting import plot_dataset
from qcodes.dataset.descriptions.detect_shapes import detect_shape_of_measurement
qc.logger.start_all_logging()
```
In what follows, we shall define some utility functions as well as declare our dummy instruments. We then add these instruments to a ``Station`` object.
The dummy dmm is set up to generate an output that depends on the values set on the dummy dac, simulating a real experiment.
```
# preparatory mocking of physical setup
dac = DummyInstrument('dac', gates=['ch1', 'ch2'])
dmm = DummyInstrumentWithMeasurement(name='dmm', setter_instr=dac)
station = qc.Station(dmm, dac)
# now make some silly set-up and tear-down actions
def veryfirst():
print('Starting the measurement')
def numbertwo(inst1, inst2):
print('Doing stuff with the following two instruments: {}, {}'.format(inst1, inst2))
def thelast():
print('End of experiment')
```
**Note** that database and experiments may be missing.
If this is the first time you create a dataset, the underlying database file has
most likely not been created. The following cell creates the database file. Please
refer to documentation on [The Experiment Container](The-Experiment-Container.ipynb) for details.
Furthermore, datasets are associated with an experiment. By default, a dataset (or "run")
is appended to the latest existing experiment. If no experiment has been created,
we must create one. We do that by calling the `load_or_create_experiment` function.
Here we explicitly pass the loaded or created experiment to the `Measurement` object to ensure that we are always
using the `performing_meas_using_parameters_and_dataset` `Experiment` created within this tutorial. Note that the `name` keyword argument of `Measurement` can be set to any string; it later becomes the `name` of the dataset produced by running that `Measurement`.
```
initialise_database()
exp = load_or_create_experiment(
experiment_name='performing_meas_using_parameters_and_dataset',
sample_name="no sample"
)
```
And then run an experiment:
```
meas = Measurement(exp=exp, name='exponential_decay')
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,)) # now register the dependent one
meas.add_before_run(veryfirst, ()) # add a set-up action
meas.add_before_run(numbertwo, (dmm, dac)) # add another set-up action
meas.add_after_run(thelast, ()) # add a tear-down action
meas.write_period = 0.5
with meas.run() as datasaver:
for set_v in np.linspace(0, 25, 10):
dac.ch1.set(set_v)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
dataset1D = datasaver.dataset # convenient to have for data access and plotting
ax, cbax = plot_dataset(dataset1D)
```
And let's add an example of a 2D measurement. For the 2D, we'll need a new batch of parameters, notably one with two
other parameters as setpoints. We therefore define a new Measurement with new parameters.
```
meas = Measurement(exp=exp, name='2D_measurement_example')
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dac.ch2) # register the second independent parameter
meas.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2)) # now register the dependent one
# run a 2D sweep
with meas.run() as datasaver:
for v1 in np.linspace(-1, 1, 200):
for v2 in np.linspace(-1, 1, 200):
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v2.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val))
dataset2D = datasaver.dataset
ax, cbax = plot_dataset(dataset2D)
```
## Accessing and exporting the measured data
QCoDeS ``DataSet`` implements a number of methods for accessing the data of a given dataset. Here we will concentrate on the two most user friendly methods. For a more detailed walkthrough of the `DataSet` class, refer to [DataSet class walkthrough](DataSet-class-walkthrough.ipynb) notebook.
The method `get_parameter_data` returns the data as a dictionary of ``numpy`` arrays. The dictionary is indexed by the measured (dependent) parameter in the outermost level and the names of the dependent and independent parameters in the innermost level. The first parameter in the innermost level is always the dependent parameter.
```
dataset1D.get_parameter_data()
```
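The result is a nested dictionary of numpy arrays. A quick sketch of how to index into it, using the parameter names registered above:
```
data = dataset1D.get_parameter_data()
data['dmm_v1']['dmm_v1']   # the measured (dependent) values
data['dmm_v1']['dac_ch1']  # the corresponding setpoints
```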
By default `get_parameter_data` returns all data stored in the dataset. The data that is specific to one or more measured parameters can be returned by passing the parameter name(s) or by using `ParamSpec` object:
```
dataset1D.get_parameter_data('dmm_v1')
```
You can also simply fetch the data for one or more dependent parameter
```
dataset1D.get_parameter_data('dac_ch1')
```
For more details about accessing data of a given `DataSet`, see [Accessing data in DataSet notebook](Accessing-data-in-DataSet.ipynb).
The data can also be exported as one or more [Pandas](https://pandas.pydata.org/) DataFrames.
The DataFrames can be returned either as a single dataframe or as a dictionary from measured parameters to DataFrames.
If you measure all parameters as a function of the same set of parameters you probably want to export to a single dataframe.
```
dataset1D.to_pandas_dataframe()
```
However, there may be cases where the data within a dataset cannot be put into a single dataframe.
In those cases you can use the other method to export the dataset to a dictionary from name of the measured parameter to Pandas dataframes.
```
dataset1D.to_pandas_dataframe_dict()
```
When exporting a two- or higher-dimensional dataset as a Pandas DataFrame, a [MultiIndex](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) is used to index the measured parameter based on all the dependencies.
```
dataset2D.to_pandas_dataframe()[0:10]
```
If your data is on a regular grid, it may make sense to view the data as an [XArray](https://pandas.pydata.org/pandas-docs/stable/user_guide/advanced.html) Dataset. The dataset can be directly exported to an XArray Dataset.
```
dataset2D.to_xarray_dataset()
```
Note, however, that XArray is only suited for data that is on a rectangular grid with few or no missing values. If the data does not lie on a grid, all the measured data points will have an unique combination of the two dependent parameters. When exporting to XArray, NaN's will therefore replace all the missing combinations of `dac_ch1` and `dac_ch2` and the data is unlikely to be useful in this format.
For more details about using Pandas and XArray see [Working With Pandas and XArray](./Working-With-Pandas-and-XArray.ipynb)
It is also possible to export the datasets directly to various file formats; see [Exporting QCoDeS Datasets](./Exporting-data-to-other-file-formats.ipynb)
## Reloading datasets
To load existing datasets QCoDeS provides several functions. The most useful and generic function is called `load_by_run_spec`.
This function takes one or more pieces of information about a dataset and will either load the dataset, if that information identifies it uniquely, or print information about all the datasets that match the supplied information, allowing you to provide more details to uniquely identify the dataset.
Here, we will load a dataset based on the `captured_run_id` printed on the plot above.
```
dataset1D.captured_run_id
loaded_ds = load_by_run_spec(captured_run_id=dataset1D.captured_run_id)
loaded_ds.the_same_dataset_as(dataset1D)
```
As long as you are working within one database file the dataset should be uniquely identified by `captured_run_id`. However, once you mix several datasets from different database files this is likely not unique. See the following section and [Extracting runs from one DB file to another](Extracting-runs-from-one-DB-file-to-another.ipynb) for more information on how to handle this.
### DataSet GUID
Internally, each dataset is referred to by a Globally Unique Identifier (GUID) that ensures the dataset is uniquely identified even when mixing datasets from several databases with potentially identical captured_run_id, experiment and sample names.
A dataset can always be reloaded from the GUID if known.
```
print(f"Dataset GUID is: {dataset1D.guid}")
loaded_ds = load_by_guid(dataset1D.guid)
loaded_ds.the_same_dataset_as(dataset1D)
```
## Specifying shape of measurement
As the context manager allows you to store data of any shape (with the only restriction being that you supply values for both dependent and independent parameters together), it cannot know if the data is being measured on a grid. As a consequence, the Numpy array of data loaded from the dataset may not be of the shape that you expect. `plot_dataset`, `DataSet.to_pandas...` and `DataSet.to_xarray...` contain logic that can detect the shape of the data measured at load time. However, if you know the shape of the measurement that you are going to perform up front, you can choose to specify it before initializing the measurement using ``Measurement.set_shapes`` method.
`dataset.get_parameter_data` and `dataset.cache.data` automatically makes use of this information to return shaped data when loaded from the database. Note that these two methods behave slightly different when loading data on a partially completed dataset. `dataset.get_parameter_data` will only reshape the data if the number of points measured matches the number of points expected according to the metadata. `dataset.cache.data` will however return a dataset with empty placeholders (either NaN, zeros or empty strings depending on the datatypes) for missing values in a partially filled dataset.
Note that if you use the doNd functions demonstrated in [Using doNd functions in comparison to Measurement context manager for performing measurements](Using_doNd_functions_in_comparison_to_Measurement_context_manager_for_performing_measurements.ipynb) the shape information will be detected and stored automatically.
In the example below we show how the shape can be specified manually.
```
n_points_1 = 100
n_points_2 = 200
meas_with_shape = Measurement(exp=exp, name='shape_specification_example_measurement')
meas_with_shape.register_parameter(dac.ch1) # register the first independent parameter
meas_with_shape.register_parameter(dac.ch2) # register the second independent parameter
meas_with_shape.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))  # now register the dependent one
meas_with_shape.set_shapes(detect_shape_of_measurement((dmm.v2,), (n_points_1, n_points_2)))
with meas_with_shape.run() as datasaver:
for v1 in np.linspace(-1, 1, n_points_1):
for v2 in np.linspace(-1, 1, n_points_2):
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v2.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val))
dataset = datasaver.dataset # convenient to have for plotting
for name, data in dataset.get_parameter_data()['dmm_v2'].items():
print(f"{name}: data.shape={data.shape}, expected_shape=({n_points_1},{n_points_2})")
assert data.shape == (n_points_1, n_points_2)
```
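As a hedged follow-up (assuming the shaped run above completed), the in-memory cache returns the same shaped arrays; on a partially completed run the missing points would show up as placeholders instead:
```
cached = dataset.cache.data()['dmm_v2']['dmm_v2']
print(cached.shape)  # expected: (n_points_1, n_points_2)
```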
## Performing several measurements concurrently
It is possible to perform two or more measurements at the same time. This may be convenient if you need to measure several parameters as a function of the same independent parameters.
```
# setup two measurements
meas1 = Measurement(exp=exp, name='multi_measurement_1')
meas1.register_parameter(dac.ch1)
meas1.register_parameter(dac.ch2)
meas1.register_parameter(dmm.v1, setpoints=(dac.ch1, dac.ch2))
meas2 = Measurement(exp=exp, name='multi_measurement_2')
meas2.register_parameter(dac.ch1)
meas2.register_parameter(dac.ch2)
meas2.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))
with meas1.run() as datasaver1, meas2.run() as datasaver2:
v1points = np.concatenate((np.linspace(-2, -0.5, 10),
np.linspace(-0.51, 0.5, 200),
np.linspace(0.51, 2, 10)))
v2points = np.concatenate((np.linspace(-2, -0.25, 10),
np.linspace(-0.26, 0.5, 200),
np.linspace(0.51, 2, 10)))
for v1 in v1points:
for v2 in v2points:
dac.ch1(v1)
dac.ch2(v2)
val1 = dmm.v1.get()
datasaver1.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v1, val1))
val2 = dmm.v2.get()
datasaver2.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val2))
ax, cbax = plot_dataset(datasaver1.dataset)
ax, cbax = plot_dataset(datasaver2.dataset)
```
## Interrupting measurements early
There may be cases where you do not want to complete a measurement. QCoDeS is designed to allow the user
to interrupt a measurement with a standard KeyboardInterrupt. A KeyboardInterrupt can be raised either with the Ctrl-C keyboard shortcut or using the interrupt button in Jupyter / Spyder, which typically takes the form of a square stop button. QCoDeS delays KeyboardInterrupts around critical parts of the code so that the measurement is only stopped when it is safe to do so.
## QCoDeS Array and MultiParameter
The ``Measurement`` object supports automatic handling of ``Array`` and ``MultiParameters``. When registering these parameters,
the individual components are unpacked and added to the dataset as if they were separate parameters. Let's consider a ``MultiParameter`` with array components as the most general case.
First, let's use a dummy instrument that produces data as ``Array`` and ``MultiParameters``.
```
from qcodes.tests.instrument_mocks import DummyChannelInstrument
mydummy = DummyChannelInstrument('MyDummy')
```
This instrument produces two ``Array``s with the names, shapes and setpoints given below.
```
mydummy.A.dummy_2d_multi_parameter.names
mydummy.A.dummy_2d_multi_parameter.shapes
mydummy.A.dummy_2d_multi_parameter.setpoint_names
meas = Measurement(exp=exp)
meas.register_parameter(mydummy.A.dummy_2d_multi_parameter)
meas.parameters
```
When adding the MultiParameter to the measurement we can see that we add each of the individual components as a
separate parameter.
```
with meas.run() as datasaver:
datasaver.add_result((mydummy.A.dummy_2d_multi_parameter, mydummy.A.dummy_2d_multi_parameter()))
```
And when adding the result of a ``MultiParameter`` it is automatically unpacked into its components.
```
plot_dataset(datasaver.dataset)
datasaver.dataset.get_parameter_data('MyDummy_ChanA_that')
datasaver.dataset.to_pandas_dataframe()
datasaver.dataset.to_xarray_dataset()
```
## Avoiding verbosity of the Measurement context manager for simple measurements
For simple 1D/2D grid-type measurements, the verbose and flexible Measurement context manager construct may feel like overkill. For this case, the so-called ``doNd`` functions come to the rescue: convenient one- or two-line calls. Read more about them in [Using doNd functions](./Using_doNd_functions_in_comparison_to_Measurement_context_manager_for_performing_measurements.ipynb); a minimal sketch follows.
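A minimal sketch of what such a call looks like, assuming `do1d` is importable from your QCoDeS version (the exact import path has moved between releases):
```
from qcodes.dataset import do1d  # in older versions: qcodes.utils.dataset.doNd

# Sweep dac.ch1 from 0 V to 1 V in 20 steps, waiting 10 ms per point,
# measuring dmm.v1 at each setpoint; shapes are recorded automatically.
result = do1d(dac.ch1, 0, 1, 20, 0.01, dmm.v1, do_plot=True)
```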
## Optimizing measurement time
There are measurements that are data-heavy or time consuming, or both. QCoDeS provides some features and tools that should help in optimizing the measurement time. Some of those are:
* [Saving data in the background](./Saving_data_in_the_background.ipynb)
* Setting a more appropriate ``paramtype`` when registering parameters, see [Paramtypes explained](./Paramtypes%20explained.ipynb) (a small sketch follows this list)
* Adding results to the datasaver using one thread per instrument, see [Threaded data acquisition](./Threaded%20data%20acquisition.ipynb)
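As a hedged sketch of the ``paramtype`` point above (reusing the dummy instrument registered earlier), array-valued parameters can be stored as binary blobs rather than being unravelled into individual scalar rows:
```
meas_fast = Measurement(exp=exp, name='paramtype_example')
# Storing the whole array in one row is typically much faster than the
# default 'numeric' paramtype, which stores one row per array element.
meas_fast.register_parameter(mydummy.A.dummy_2d_multi_parameter, paramtype='array')
```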
## The power of the Measurement context manager construct
This form is so flexible that we can easily do things that were impossible with the old Loop construct.
Say that, from the plot of the 1D measurement above,
we decide that voltages below 1 V are uninteresting,
so we stop the sweep at that point; thus
we do not know in advance how many points we will measure.
```
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))  # now register the dependent one
with meas.run() as datasaver:
for set_v in np.linspace(0, 25, 100):
dac.ch1.set(set_v)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
if get_v < 1:
break
dataset = datasaver.dataset
ax, cbax = plot_dataset(dataset)
```
Or we might simply want to get as many points as possible within a fixed time,
randomly sampling the setpoint axis (the code below samples between 5 V and 10 V for about 3 s).
```
from time import monotonic, sleep
with meas.run() as datasaver:
t_start = monotonic()
while monotonic() - t_start < 3:
set_v = 10/2*(np.random.rand() + 1)
dac.ch1.set(set_v)
# some sleep to not get too many points (or to let the system settle)
sleep(0.04)
get_v = dmm.v1.get()
datasaver.add_result((dac.ch1, set_v),
(dmm.v1, get_v))
dataset = datasaver.dataset # convenient to have for plotting
axes, cbax = plot_dataset(dataset)
# we slightly tweak the plot to better visualise the highly non-standard axis spacing
axes[0].lines[0].set_marker('o')
axes[0].lines[0].set_markerfacecolor((0.6, 0.6, 0.9))
axes[0].lines[0].set_markeredgecolor((0.4, 0.6, 0.9))
axes[0].lines[0].set_color((0.8, 0.8, 0.8))
```
### Finer sampling in 2D
Looking at the plot of the 2D measurement above, we may decide to sample more finely in the central region:
```
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1) # register the first independent parameter
meas.register_parameter(dac.ch2) # register the second independent parameter
meas.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))  # now register the dependent one
with meas.run() as datasaver:
v1points = np.concatenate((np.linspace(-1, -0.5, 5),
np.linspace(-0.51, 0.5, 200),
np.linspace(0.51, 1, 5)))
v2points = np.concatenate((np.linspace(-1, -0.25, 5),
np.linspace(-0.26, 0.5, 200),
np.linspace(0.51, 1, 5)))
for v1 in v1points:
for v2 in v2points:
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v2.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val))
dataset = datasaver.dataset # convenient to have for plotting
ax, cbax = plot_dataset(dataset)
```
### Simple adaptive 2D sweep
... or even perform an adaptive sweep.
(The example below is a deliberately simple toy model,
but it nicely shows a semi-realistic measurement that the old Loop
construct could not handle.)
```
v1_points = np.linspace(-1, 1, 250)
v2_points = np.linspace(1, -1, 250)
threshold = 0.25
with meas.run() as datasaver:
# Do normal sweeping until the peak is detected
for v2ind, v2 in enumerate(v2_points):
for v1ind, v1 in enumerate(v1_points):
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v2.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val))
if val > threshold:
break
else:
continue
break
print(v1ind, v2ind, val)
print('-'*10)
# now be more clever, meandering back and forth over the peak
doneyet = False
rowdone = False
v1_step = 1
while not doneyet:
v2 = v2_points[v2ind]
v1 = v1_points[v1ind+v1_step-1]
dac.ch1(v1)
dac.ch2(v2)
val = dmm.v2.get()
datasaver.add_result((dac.ch1, v1),
(dac.ch2, v2),
(dmm.v2, val))
if val < threshold:
if rowdone:
doneyet = True
v2ind += 1
v1_step *= -1
rowdone = True
else:
v1ind += v1_step
rowdone = False
dataset = datasaver.dataset # convenient to have for plotting
ax, cbax = plot_dataset(dataset)
```
### Random sampling
We may also choose to sample completely randomly across the phase space:
```
meas2 = Measurement(exp=exp, name='random_sampling_measurement')
meas2.register_parameter(dac.ch1)
meas2.register_parameter(dac.ch2)
meas2.register_parameter(dmm.v2, setpoints=(dac.ch1, dac.ch2))
threshold = 0.25
npoints = 5000
with meas2.run() as datasaver:
for i in range(npoints):
x = 2*(np.random.rand()-.5)
y = 2*(np.random.rand()-.5)
dac.ch1(x)
dac.ch2(y)
z = dmm.v2()
datasaver.add_result((dac.ch1, x),
(dac.ch2, y),
(dmm.v2, z))
dataset = datasaver.dataset # convenient to have for plotting
ax, cbax = plot_dataset(dataset)
datasaver.dataset.to_pandas_dataframe()[0:10]
```
Unlike the data measured above, which lies on a grid, here every measured point has a unique combination of the two setpoint values. When exporting to XArray, NaNs will therefore fill in all the missing combinations of `dac_ch1` and `dac_ch2`, and the data is unlikely to be useful in this format.
```
datasaver.dataset.to_xarray_dataset()
```
### Optimiser
An example showing that the approach is flexible enough to handle completely unstructured data, such as the output of a downhill-simplex optimization. The downhill simplex is somewhat sensitive to noise, so it is important that `fatol` is set to match the expected noise level.
```
from scipy.optimize import minimize
def set_and_measure(*xk):
dac.ch1(xk[0])
dac.ch2(xk[1])
return dmm.v2.get()
noise = 0.0005
x0 = [np.random.rand(), np.random.rand()]
with meas.run() as datasaver:
def mycallback(xk):
dac.ch1(xk[0])
dac.ch2(xk[1])
datasaver.add_result((dac.ch1, xk[0]),
(dac.ch2, xk[1]),
(dmm.v2, dmm.v2.cache.get()))
res = minimize(lambda x: -set_and_measure(*x),
x0,
method='Nelder-Mead',
tol=1e-10,
callback=mycallback,
options={'fatol': noise})
dataset = datasaver.dataset # convenient to have for plotting
res
ax, cbax = plot_dataset(dataset)
```
## Subscriptions
The ``Measurement`` object can also handle subscriptions to the dataset. Subscriptions are, under the hood, triggers in the underlying SQLite database. Therefore, the subscribers are only called when data is written to the database (which happens every `write_period`).
When making a subscription, two things must be supplied: a function and a mutable state object. The function **MUST** have a call signature of `f(result_list, length, state, **kwargs)`, where ``result_list`` is a list of tuples of parameter values inserted in the dataset, ``length`` is an integer (the step number of the run), and ``state`` is the mutable state object. The function does not need to actually use these arguments, but the call signature must match this.
Let us consider two generic examples:
### Subscription example 1: simple printing
```
def print_which_step(results_list, length, state):
"""
This subscriber does not use results_list nor state; it simply
prints how many results we have added to the database
"""
print(f'The run now holds {length} rows')
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1)
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))
meas.write_period = 0.2 # We write to the database every 0.2s
meas.add_subscriber(print_which_step, state=[])
with meas.run() as datasaver:
for n in range(7):
datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))
print(f'Added points to measurement, step {n}.')
sleep(0.2)
```
### Subscription example 2: using the state
We add two subscribers now.
```
def get_list_of_first_param(results_list, length, state):
"""
Modify the state (a list) to hold all the values for
the first parameter
"""
param_vals = [parvals[0] for parvals in results_list]
state += param_vals
meas = Measurement(exp=exp)
meas.register_parameter(dac.ch1)
meas.register_parameter(dmm.v1, setpoints=(dac.ch1,))
meas.write_period = 0.2 # We write to the database every 0.2s
first_param_list = []
meas.add_subscriber(print_which_step, state=[])
meas.add_subscriber(get_list_of_first_param, state=first_param_list)
with meas.run() as datasaver:
for n in range(10):
datasaver.add_result((dac.ch1, n), (dmm.v1, n**2))
print(f'Added points to measurement, step {n}.')
print(f'First parameter value list: {first_param_list}')
sleep(0.1)
```
# Activation functions
> Activation functions. Set of act_fn.
Activation functions, forked from https://github.com/rwightman/pytorch-image-models/timm/models/layers/activations.py
Mish: A Self Regularized Non-Monotonic Neural Activation Function,
https://github.com/digantamisra98/Mish
fastai forum discussion: https://forums.fast.ai/t/meet-mish-new-activation-function-possible-successor-to-relu
Mish is included in PyTorch since version 1.9; prefer the built-in version where available.
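A quick illustration of that note (a hedged sketch, not part of the original library): with PyTorch >= 1.9 the built-in module can be used directly and should match the implementations below.
```
import torch
from torch import nn

x = torch.linspace(-3, 3, 7)
print(nn.Mish()(x))                                 # built-in since PyTorch 1.9
print(x * torch.nn.functional.softplus(x).tanh())   # same formula written out by hand
```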
```
# hide
# forked from https://github.com/rwightman/pytorch-image-models/timm/models/layers/activations.py
import torch
from torch import nn as nn
from torch.nn import functional as F
```
## Mish
```
def mish(x, inplace: bool = False):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
NOTE: I don't have a working inplace variant
"""
return x.mul(F.softplus(x).tanh())
class Mish(nn.Module):
"""Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681"""
def __init__(self, inplace: bool = False):
"""NOTE: inplace variant not working """
super(Mish, self).__init__()
def forward(self, x):
return mish(x)
```
## MishJit
```
@torch.jit.script
def mish_jit(x, _inplace: bool = False):
"""Jit version of Mish.
Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
"""
return x.mul(F.softplus(x).tanh())
class MishJit(nn.Module):
def __init__(self, inplace: bool = False):
"""Jit version of Mish.
Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681"""
super(MishJit, self).__init__()
def forward(self, x):
return mish_jit(x)
```
## MishJitMe - memory-efficient.
```
@torch.jit.script
def mish_jit_fwd(x):
# return x.mul(torch.tanh(F.softplus(x)))
return x.mul(F.softplus(x).tanh())
@torch.jit.script
def mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
A memory efficient, jit scripted variant of Mish"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return mish_jit_bwd(x, grad_output)
def mish_me(x, inplace=False):
return MishJitAutoFn.apply(x)
class MishMe(nn.Module):
""" Mish: A Self Regularized Non-Monotonic Neural Activation Function - https://arxiv.org/abs/1908.08681
A memory efficient, jit scripted variant of Mish"""
def __init__(self, inplace: bool = False):
super(MishMe, self).__init__()
def forward(self, x):
return MishJitAutoFn.apply(x)
```
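A small sanity check (added here, not in the original notebook): the hand-written backward in `MishJitAutoFn` should agree with autograd applied to the plain `mish` defined above.
```
x1 = torch.randn(16, requires_grad=True)
x2 = x1.detach().clone().requires_grad_(True)

mish(x1).sum().backward()                 # gradient via autograd
MishJitAutoFn.apply(x2).sum().backward()  # gradient via the custom backward

print(torch.allclose(x1.grad, x2.grad, atol=1e-6))  # expected: True
```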
## HardMishJit
```
@torch.jit.script
def hard_mish_jit(x, inplace: bool = False):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
return 0.5 * x * (x + 2).clamp(min=0, max=2)
class HardMishJit(nn.Module):
""" Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
def __init__(self, inplace: bool = False):
super(HardMishJit, self).__init__()
def forward(self, x):
return hard_mish_jit(x)
```
## HardMishJitMe - memory efficient.
```
@torch.jit.script
def hard_mish_jit_fwd(x):
return 0.5 * x * (x + 2).clamp(min=0, max=2)
@torch.jit.script
def hard_mish_jit_bwd(x, grad_output):
m = torch.ones_like(x) * (x >= -2.)
m = torch.where((x >= -2.) & (x <= 0.), x + 1., m)
return grad_output * m
class HardMishJitAutoFn(torch.autograd.Function):
""" A memory efficient, jit scripted variant of Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
@staticmethod
def forward(ctx, x):
ctx.save_for_backward(x)
return hard_mish_jit_fwd(x)
@staticmethod
def backward(ctx, grad_output):
x = ctx.saved_tensors[0]
return hard_mish_jit_bwd(x, grad_output)
def hard_mish_me(x, inplace: bool = False):
return HardMishJitAutoFn.apply(x)
class HardMishMe(nn.Module):
""" A memory efficient, jit scripted variant of Hard Mish
Experimental, based on notes by Mish author Diganta Misra at
https://github.com/digantamisra98/H-Mish/blob/0da20d4bc58e696b6803f2523c58d3c8a82782d0/README.md
"""
def __init__(self, inplace: bool = False):
super(HardMishMe, self).__init__()
def forward(self, x):
return HardMishJitAutoFn.apply(x)
#hide
act_fn = Mish(inplace=True)
```
# end
model_constructor
by ayasyrev
```
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell.
## Install dependencies
!pip install wget
!apt-get install sox libsndfile1 ffmpeg
!pip install unidecode
# ## Install NeMo
BRANCH = 'main'
!python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[asr]
## Install TorchAudio
!pip install torchaudio -f https://download.pytorch.org/whl/torch_stable.html
```
## Introduction
Who speaks when? Speaker diarization is the task of segmenting audio recordings by speaker label.
A diarization system consists of a Voice Activity Detection (VAD) model, which produces the time stamps of the audio regions where speech is present (ignoring the background), and a speaker embeddings model, which extracts speaker embeddings from those time-stamped segments. The embeddings are then clustered according to the number of speakers present in the recording.
In NeMo we support both **oracle VAD** and **non-oracle VAD** diarization.
In this tutorial, we first demonstrate how to perform diarization with oracle VAD time stamps (i.e. we assume the speech time stamps are already known) and a pretrained speaker verification model, which is covered in the tutorial on [Speaker Identification and Verification in NeMo](https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb).
In the VAD DIARIZATION section we then show how to perform VAD followed by diarization when ground-truth speech time stamps are not available (non-oracle VAD). We also have tutorials on [VAD training in NeMo](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Voice_Activity_Detection.ipynb) and [online/offline microphone inference](https://github.com/NVIDIA/NeMo/blob/main/tutorials/asr/Online_Offline_Microphone_VAD_Demo.ipynb), where you can customize your model and train/fine-tune on your own data.
For demonstration purposes we will use simulated audio from the [an4 dataset](http://www.speech.cs.cmu.edu/databases/an4/).
```
import os
import wget
ROOT = os.getcwd()
data_dir = os.path.join(ROOT,'data')
os.makedirs(data_dir, exist_ok=True)
an4_audio = os.path.join(data_dir,'an4_diarize_test.wav')
an4_rttm = os.path.join(data_dir,'an4_diarize_test.rttm')
if not os.path.exists(an4_audio):
an4_audio_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.wav"
an4_audio = wget.download(an4_audio_url, data_dir)
if not os.path.exists(an4_rttm):
an4_rttm_url = "https://nemo-public.s3.us-east-2.amazonaws.com/an4_diarize_test.rttm"
an4_rttm = wget.download(an4_rttm_url, data_dir)
```
Let's plot and listen to the audio and visualize the RTTM speaker labels
```
import IPython
import matplotlib.pyplot as plt
import numpy as np
import librosa
sr = 16000
signal, sr = librosa.load(an4_audio,sr=sr)
fig,ax = plt.subplots(1,1)
fig.set_figwidth(20)
fig.set_figheight(2)
plt.plot(np.arange(len(signal)),signal,'gray')
fig.suptitle('Reference merged an4 audio', fontsize=16)
plt.xlabel('time (secs)', fontsize=18)
ax.margins(x=0)
plt.ylabel('signal strength', fontsize=16);
a,_ = plt.xticks();plt.xticks(a,a/sr);
IPython.display.Audio(an4_audio)
```
We use [pyannote_metrics](https://pyannote.github.io/pyannote-metrics/) for visualization and score calculation. Hence all labels in RTTM format are eventually converted to pyannote objects; for this we provide two helper functions: `rttm_to_labels` (for NeMo's intermediate processing) and `labels_to_pyannote_object` (for scoring and visualization).
```
from nemo.collections.asr.parts.utils.speaker_utils import rttm_to_labels, labels_to_pyannote_object
```
Let's load ground truth RTTM labels and view the reference Annotation timestamps visually
```
# view the sample rttm file
!cat {an4_rttm}
labels = rttm_to_labels(an4_rttm)
reference = labels_to_pyannote_object(labels)
print(labels)
reference
```
Speaker diarization scripts commonly expect the following arguments:
1. manifest_filepath : Path to a manifest file containing JSON lines of the form: {'audio_filepath': /path/to/audio_file, 'offset': 0, 'duration': None, 'label': 'infer', 'text': '-', 'num_speakers': None, 'rttm_filepath': /path/to/rttm/file, 'uem_filepath': /path/to/uem/filepath}
2. out_dir : directory where outputs and intermediate files are stored.
3. oracle_vad : if True, speech activity labels are extracted from the RTTM files; if False, either vad.model_path or an external manifest path containing speech activity labels has to be passed.
The mandatory fields are audio_filepath, offset, duration, label and text. For the rest: pass the number of speakers if it is known, otherwise None; pass the reference RTTMs if you would like to score the system, otherwise None; a UEM file is used to score only part of your audio, so pass it if you want to evaluate on such a subset, otherwise None.
**Note**: we expect the audio file and the corresponding RTTM to have the **same base name**, and the name should be **unique**.
For example, if the audio file name is **test_an4**.wav, we expect the corresponding RTTM file name to be **test_an4**.rttm (note the matching **test_an4** base name).
Let's create a manifest with the an4 audio and RTTM available. If you have more than one file, you may also use the script `pathsfiles_to_manifest.py` to generate a manifest file from a list of audio files and, optionally, RTTM files.
```
# Create a manifest for input with below format.
# {'audio_filepath': /path/to/audio_file, 'offset': 0, 'duration':None, 'label': 'infer', 'text': '-',
# 'num_speakers': None, 'rttm_filepath': /path/to/rttm/file, 'uem_filepath'='/path/to/uem/filepath'}
import json
meta = {
'audio_filepath': an4_audio,
'offset': 0,
'duration':None,
'label': 'infer',
'text': '-',
'num_speakers': 2,
'rttm_filepath': an4_rttm,
'uem_filepath' : None
}
with open('data/input_manifest.json','w') as fp:
json.dump(meta,fp)
fp.write('\n')
!cat data/input_manifest.json
output_dir = os.path.join(ROOT, 'oracle_vad')
os.makedirs(output_dir,exist_ok=True)
```
# ORACLE-VAD DIARIZATION
Oracle-VAD diarization computes speaker embeddings from known speech time stamps rather than depending on VAD output. This step can also be used to run speaker diarization with RTTMs generated from any external VAD, not just NeMo's VAD model.
The first step is to convert the reference audio RTTM (VAD) time stamps into an oracle manifest file. This manifest file is sent to the speaker diarizer to extract embeddings.
This is just an argument in our config, and the system automatically computes the oracle manifest based on the RTTMs provided through the input manifest file.
Our config file is based on [hydra](https://hydra.cc/docs/intro/).
With a hydra config, users are asked to provide values for the variables filled with **???**; these are mandatory fields that the scripts expect for a successful run. Variables filled with **null** are optional; they can be provided if needed but are not mandatory.
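To make the **???** / **null** convention concrete, here is a tiny, self-contained OmegaConf sketch (the field names are invented for illustration and are not taken from the diarization config):
```
from omegaconf import OmegaConf

toy_cfg = OmegaConf.create({"required_field": "???", "optional_field": None})
print(OmegaConf.is_missing(toy_cfg, "required_field"))  # True: must be set before use
print(toy_cfg.optional_field)                           # None: optional, may stay unset
```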
```
from omegaconf import OmegaConf
MODEL_CONFIG = os.path.join(data_dir,'offline_diarization.yaml')
if not os.path.exists(MODEL_CONFIG):
config_url = "https://raw.githubusercontent.com/NVIDIA/NeMo/main/examples/speaker_tasks/diarization/conf/offline_diarization.yaml"
MODEL_CONFIG = wget.download(config_url,data_dir)
config = OmegaConf.load(MODEL_CONFIG)
print(OmegaConf.to_yaml(config))
```
Now we can perform speaker diarization based on time stamps taken from the ground-truth RTTMs rather than generated by a VAD model.
```
pretrained_speaker_model='ecapa_tdnn'
config.diarizer.manifest_filepath = 'data/input_manifest.json'
config.diarizer.out_dir = output_dir #Directory to store intermediate files and prediction outputs
config.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
config.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
config.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
config.diarizer.oracle_vad = True # ----> ORACLE VAD
config.diarizer.clustering.parameters.oracle_num_speakers = True
from nemo.collections.asr.models import ClusteringDiarizer
oracle_model = ClusteringDiarizer(cfg=config)
# And lets diarize
oracle_model.diarize()
```
A DER of 0 means the speaker embeddings were clustered correctly. Let's view the predictions:
```
!cat {output_dir}/pred_rttms/an4_diarize_test.rttm
pred_labels = rttm_to_labels(output_dir+'/pred_rttms/an4_diarize_test.rttm')
hypothesis = labels_to_pyannote_object(pred_labels)
hypothesis
reference
```
# VAD DIARIZATION
In this method we compute VAD time stamps with a NeMo VAD model on the input manifest file, then use these speech time stamps to extract speaker embeddings, and finally cluster them into the number of speakers.
Before we proceed, let's look at the speaker diarization config, which we depend on for VAD computation
and speaker embedding extraction.
```
print(OmegaConf.to_yaml(config))
```
As can be seen, most of the variables in the config are self-explanatory,
with VAD variables under the vad section and speaker-related variables under the speaker embeddings section.
To perform VAD-based diarization we can ignore `oracle_vad_manifest` in the `speaker_embeddings` section for now and need to fill in the rest. We also need to provide the pretrained `model_path` of the VAD and speaker embeddings .nemo models.
```
pretrained_vad = 'vad_marblenet'
pretrained_speaker_model = 'ecapa_tdnn'
```
Note that in this tutorial we use the VAD model MarbleNet-3x2 introduced and published in [ICASSP MarbleNet](https://arxiv.org/pdf/2010.13886.pdf). You might need to tune it on a dev set similar to your data if you would like to improve performance.
The speakerNet-M-Diarization model achieves a 7.3% confusion error rate on the CH109 set with oracle VAD. This model is trained on the VoxCeleb1, VoxCeleb2, Fisher and Switchboard datasets. For better performance on your specific data, fine-tune the speaker verification model on a dev set similar to your test set.
```
output_dir = os.path.join(ROOT,'outputs')
config.diarizer.manifest_filepath = 'data/input_manifest.json'
config.diarizer.out_dir = output_dir #Directory to store intermediate files and prediction outputs
config.diarizer.speaker_embeddings.model_path = pretrained_speaker_model
config.diarizer.speaker_embeddings.parameters.window_length_in_sec = 1.5
config.diarizer.speaker_embeddings.parameters.shift_length_in_sec = 0.75
config.diarizer.oracle_vad = False # compute VAD provided with model_path to vad config
config.diarizer.clustering.parameters.oracle_num_speakers=True
#Here we use our inhouse pretrained NeMo VAD
config.diarizer.vad.model_path = pretrained_vad
config.diarizer.vad.window_length_in_sec = 0.15
config.diarizer.vad.shift_length_in_sec = 0.01
config.diarizer.vad.parameters.onset = 0.8
config.diarizer.vad.parameters.offset = 0.6
config.diarizer.vad.parameters.min_duration_on = 0.1
config.diarizer.vad.parameters.min_duration_off = 0.4
```
Now that we have set all the variables we need, let's initialize the clustering model with the above config.
```
from nemo.collections.asr.models import ClusteringDiarizer
sd_model = ClusteringDiarizer(cfg=config)
```
And diarize with a single line of code:
```
sd_model.diarize()
```
As can be seen, we first performed VAD; then, with the time stamps created by VAD in `{output_dir}/vad_outputs`, we calculated speaker embeddings (`{output_dir}/speaker_outputs/embeddings/`), which are then clustered using spectral clustering.
To generate the VAD-predicted time stamps, we run VAD inference to get frame-level predictions → (optionally apply decision smoothing) → given `threshold`, write the speech segments to an RTTM-like time-stamp manifest.
We use VAD decision smoothing (87.5% overlap median) as described [here](https://github.com/NVIDIA/NeMo/blob/stable/nemo/collections/asr/parts/utils/vad_utils.py).
You can also tune the threshold on your dev set with this provided [script](https://github.com/NVIDIA/NeMo/blob/stable/scripts/voice_activity_detection/vad_tune_threshold.py).
```
# VAD predicted time stamps
# you can also use single threshold(=onset=offset) for binarization and plot here
from nemo.collections.asr.parts.utils.vad_utils import plot
plot(
an4_audio,
'outputs/vad_outputs/overlap_smoothing_output_median_0.875/an4_diarize_test.median',
an4_rttm,
per_args = config.diarizer.vad.parameters, #threshold
)
print(f"postprocessing_params: {config.diarizer.vad.parameters}")
```
Predicted outputs are written to `output_dir/pred_rttms`; let's see how the predictions compare with the reference, along with the VAD prediction:
```
!cat outputs/pred_rttms/an4_diarize_test.rttm
pred_labels = rttm_to_labels('outputs/pred_rttms/an4_diarize_test.rttm')
hypothesis = labels_to_pyannote_object(pred_labels)
hypothesis
reference
```
# Storing and Restoring models
Now we can save the whole config and model parameters in a single .nemo file and restore from it at any time.
```
oracle_model.save_to(os.path.join(output_dir,'diarize.nemo'))
```
Restore from saved model
```
del oracle_model
import nemo.collections.asr as nemo_asr
restored_model = nemo_asr.models.ClusteringDiarizer.restore_from(os.path.join(output_dir,'diarize.nemo'))
```
# ADD ON - ASR
```
IPython.display.Audio(an4_audio)
quartznet = nemo_asr.models.EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En")
for fname, transcription in zip([an4_audio], quartznet.transcribe(paths2audio_files=[an4_audio])):
print(f"Audio in {fname} was recognized as:\n{transcription}")
```
```
from baselines.ppo2.ppo2 import learn
from baselines.ppo2 import defaults
from baselines.common.vec_env import VecEnv, VecFrameStack
from baselines.common.cmd_util import make_vec_env, make_env
from baselines.common.models import register
import numpy as np  # needed below for np.expand_dims / np.array
import tensorflow as tf
@register("custom_cnn")
def custom_cnn():
def network_fn(input_shape, **conv_kwargs):
"""
Custom CNN
"""
print('input shape is {}'.format(input_shape))
x_input = tf.keras.Input(shape=input_shape, dtype=tf.uint8)
h = x_input
h = tf.cast(h, tf.float32) / 255.
h = tf.keras.layers.Conv2D(filters=32, kernel_size=8, strides=4, padding='valid',
data_format='channels_last', activation='relu')(h)
h2 = tf.keras.layers.Conv2D(filters=64, kernel_size=4, strides=2, padding='valid',
data_format='channels_last', activation='relu')(h)
h3 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, strides=1, padding='valid',
data_format='channels_last', activation='relu')(h2)
h3 = tf.keras.layers.Flatten()(h3)
h3 = tf.keras.layers.Dense(units=512, name='fc1', activation='relu')(h3)
network = tf.keras.Model(inputs=[x_input], outputs=[h3])
network.summary()
return network
return network_fn
def build_env(env_id, env_type):
if env_type in {'atari', 'retro'}:
env = make_vec_env(env_id, env_type, 1, None, gamestate=None, reward_scale=1.0)
env = VecFrameStack(env, 4)
else:
env = make_vec_env(env_id, env_type, 1, None, reward_scale=1.0, flatten_dict_observations=True)
return env
env_id = 'PongNoFrameskip-v0'
env_type = 'atari'
print("Env type = ", env_type)
env = build_env(env_id, env_type)
model = learn(network="custom_cnn", env=env, total_timesteps=1e4)
obs = env.reset()
if not isinstance(env, VecEnv):
obs = np.expand_dims(np.array(obs), axis=0)
episode_rew = 0
while True:
actions, _, state, _ = model.step(obs)
obs, reward, done, info = env.step(actions.numpy())
if not isinstance(env, VecEnv):
obs = np.expand_dims(np.array(obs), axis=0)
env.render()
print("Reward = ", reward)
episode_rew += reward
if done:
print('Episode Reward = {}'.format(episode_rew))
break
env.close()
!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=1e4 --save_path=./models/Pong_20M_ppo2 --log_path=./logs/Pong/
!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=0 --load_path=./models/Pong_20M_ppo2 --play
!wget -O pong_20M_ppo2.tar.gz https://github.com/PacktWorkshops/The-Reinforcement-Learning-Workshop/blob/master/Chapter04/pong_20M_ppo2.tar.gz?raw=true
!tar xvzf pong_20M_ppo2.tar.gz
!python -m baselines.run --alg=ppo2 --env=PongNoFrameskip-v0 --num_timesteps=0 --load_path=./pong_20M_ppo2 --play
```
This exercise will test your ability to read a data file and understand statistics about the data.
In later exercises, you will apply techniques to filter the data, build a machine learning model, and iteratively improve your model.
The course examples use data from Melbourne. To make sure you can apply these techniques on your own, you will use them on a new dataset (house prices from Iowa).
# Exercises
Run the following cell to set up code-checking, which will verify your work as you go.
```
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex2 import *
print("Setup Complete")
```
## Step 1: Loading Data
Read the Iowa data file into a Pandas DataFrame called `home_data`.
```
import pandas as pd
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
# Fill in the line below to read the file into a variable home_data
home_data = ____
# Check your answer
step_1.check()
#%%RM_IF(PROD)%%
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
# Fill in the line below to read the file into a variable home_data
home_data = 0
# Call line below with no argument to check that you've loaded the data correctly
step_1.assert_check_failed()
#%%RM_IF(PROD)%%
# Fill in the line below to read the file into a variable home_data
home_data = pd.DataFrame()
# Call line below with no argument to check that you've loaded the data correctly
step_1.assert_check_failed()
home_data = pd.read_csv(iowa_file_path)
step_1.assert_check_passed()
# Lines below will give you a hint or solution code
#_COMMENT_IF(PROD)_
step_1.hint()
#_COMMENT_IF(PROD)_
step_1.solution()
```
## Step 2: Review The Data
Use the command you learned to view summary statistics of the data. Then fill in the variables to answer the following questions.
```
# Print summary statistics in next line
____
# What is the average lot size (rounded to nearest integer)?
avg_lot_size = ____
# As of today, how old is the newest home (current year - the date in which it was built)
newest_home_age = ____
# Check your answers
step_2.check()
#step_2.hint()
#step_2.solution()
```
## Think About Your Data
The newest house in your data isn't that new. A few potential explanations for this:
1. They haven't built new houses where this data was collected.
1. The data was collected a long time ago. Houses built after the data publication wouldn't show up.
If the reason is explanation #1 above, does that affect your trust in the model you build with this data? What about if it is reason #2?
How could you dig into the data to see which explanation is more plausible?
Check out this **[discussion thread](https://www.kaggle.com/learn-forum/60581)** to see what others think or to add your ideas.
#$KEEP_GOING$
```
import numpy as np
import math
import tensorflow as tf
from tensorflow.contrib.layers import fully_connected
import time
import random
import matplotlib.pyplot as plt
import heapq
from mpl_toolkits.mplot3d import Axes3D
tf.VERSION
%matplotlib inline
```
## Finite Element Model of the Space Frame Element
```
def PlaneTrussElementLength(x1,y1,z1,x2,y2,z2):
return math.sqrt((x2-x1)*(x2-x1) + (y2-y1)*(y2-y1)+(z2-z1)*(z2-z1))
def SpaceFrameElementStiffness(E,G,A,Iy,Iz,J,x1,y1,z1,x2,y2,z2):
L = PlaneTrussElementLength(x1,y1,z1,x2,y2,z2)
w1 = E*A/L
w2 = 12*E*Iz/(L*L*L)
w3 = 6*E*Iz/(L*L)
w4 = 4*E*Iz/L
w5 = 2*E*Iz/L
w6 = 12*E*Iy/(L*L*L)
w7 = 6*E*Iy/(L*L)
w8 = 4*E*Iy/L
w9 = 2*E*Iy/L
w10 = G*J/L
kprime = np.array([[w1, 0, 0, 0, 0, 0, -w1, 0, 0, 0, 0, 0],
[0, w2, 0, 0, 0, w3, 0, -w2, 0, 0, 0, w3],
[0, 0, w6, 0, -w7, 0, 0, 0, -w6, 0, -w7, 0],
[0, 0, 0, w10, 0, 0, 0, 0, 0, -w10, 0, 0],
[0, 0, -w7, 0, w8, 0, 0, 0, w7, 0, w9, 0],
[0, w3, 0, 0, 0, w4, 0, -w3, 0, 0, 0, w5],
[-w1, 0, 0, 0, 0, 0, w1, 0, 0, 0, 0, 0],
[0, -w2, 0, 0, 0, -w3, 0, w2, 0, 0, 0, -w3],
[0, 0, -w6, 0, w7, 0, 0, 0, w6, 0, w7, 0],
[0, 0, 0, -w10, 0, 0, 0, 0, 0, w10, 0, 0],
[0, 0, -w7, 0, w9, 0, 0, 0, w7, 0, w8, 0],
[0, w3, 0, 0, 0, w5, 0, -w3, 0, 0, 0, w4]])
if x1 == x2 and y1 == y2:
if z2 > z1:
Lambda = np.array([[0, 0, 1], [0, 1, 0], [-1, 0, 0]])
else:
Lambda = np.array([[0, 0, -1], [0, 1, 0], [1, 0, 0]])
else:
CXx = (x2-x1)/L
CYx = (y2-y1)/L
CZx = (z2-z1)/L
D = math.sqrt(CXx*CXx + CYx*CYx)
CXy = -CYx/D
CYy = CXx/D
CZy = 0
CXz = -CXx*CZx/D
CYz = -CYx*CZx/D
CZz = D
Lambda = np.array([[CXx, CYx, CZx], [CXy, CYy, CZy], [CXz, CYz, CZz]])
R = np.array([np.concatenate((np.concatenate((Lambda,np.zeros((3,3)),np.zeros((3,3)),np.zeros((3,3))),axis=1),
np.concatenate((np.zeros((3,3)), Lambda, np.zeros((3,3)), np.zeros((3,3))),axis=1) ,
np.concatenate((np.zeros((3,3)), np.zeros((3,3)), Lambda, np.zeros((3,3))),axis=1),
np.concatenate((np.zeros((3,3)), np.zeros((3,3)), np.zeros((3,3)), Lambda),axis=1)))])[0]
return np.dot(np.dot(R.T,kprime),R)
def SpaceFrameAssemble(K,k,i,j):
    # The 12x12 element matrix couples the 6 DOFs of node i with the 6 DOFs
    # of node j: local DOFs 0-5 map to global DOFs 6*i..6*i+5 and local DOFs
    # 6-11 map to global DOFs 6*j..6*j+5. Scatter-add it into the global matrix.
    dof = np.r_[6*i:6*i+6, 6*j:6*j+6]
    K[np.ix_(dof, dof)] += k
    return K
def FEA_u(coord, elcon, bc_u_elim, f_after_u_elim, E=210e6,G=84e6,A=2e-2,Iy=10e-5,Iz=20e-5,J=5e-5):
coord=np.array(coord)
elcon=np.array(elcon)
K=np.zeros(shape=(6*(np.max(elcon)+1),6*(np.max(elcon)+1)))
for el in elcon:
k=SpaceFrameElementStiffness(E,G,A,Iy,Iz,J,coord[el[0]][0],coord[el[0]][1],coord[el[0]][2],\
coord[el[1]][0],coord[el[1]][1],coord[el[1]][2])
K=SpaceFrameAssemble(K,k,el[0],el[1])
K=np.delete(K,bc_u_elim,0)
K=np.delete(K,bc_u_elim,1)
d=np.dot(np.linalg.inv(K),f_after_u_elim)
u=np.zeros(shape=(6*len(coord)))
j=0
for i in range(len(u)):
if i not in bc_u_elim:
u[i]=d[j]
j+=1
if j>len(d)-1:
break
return u
```
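As a quick smoke test (added here, not part of the original notebook), a single element clamped at node 0 and loaded vertically at node 1 behaves like a cantilever beam; the numbers below reuse the default material and section properties of `FEA_u`.
```
coord = [[0, 0, 0], [1, 0, 0]]          # two nodes, 1 m apart along x
elcon = [[0, 1]]                        # one space-frame element
bc_u_elim = list(range(6))              # clamp all 6 DOFs of node 0
f = np.zeros(6)
f[1] = -500.0                           # 500 N downward (y) at node 1

u = FEA_u(coord, elcon, bc_u_elim, f)
print(u.reshape(-1, 6))                 # row 0: clamped node, row 1: tip displacements/rotations
```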
## Utils
```
def total_length(coord,elcon):
coord=np.array(coord)
elcon=np.array(elcon)
t_length=0
for i in range(len(elcon)):
l=PlaneTrussElementLength(coord[elcon[i][0]][0],\
coord[elcon[i][0]][1],\
coord[elcon[i][0]][2],\
coord[elcon[i][1]][0],\
coord[elcon[i][1]][1],\
coord[elcon[i][1]][2])
t_length+=l
return t_length
def possible_lines_dic(n,m,dx,dy):
A=[(-dx,0),(-dx,dy),(0,dy),(dx,dy),(dx,0),(dx,-dy),(0,-dy),(-dx,-dy)]
dic={}
t=0
for i in range(n):
for j in range(m):
for item in A:
x,y=j*dx,i*dy
x1,y1=x+item[0],y+item[1]
if x1>=0 and x1<=(m-1)*dx and y1>=0 and y1<=(n-1)*dy and (x1,y1,x,y) not in dic:
dic[(x,y,x1,y1)]=t
t+=1
return dic
# def possible_elcon_dic(n,m,dx,dy):
# dic={}
# t=0
# for i in range(n):
# for j in range(m):
# x,y=j*dx,i*dy
# dic[(x,y)]=t
# t+=1
# return dic
# lines_dic(5,5,1,1)
# possible_elcon_dic(5,5,1,1)
# def pad(l, content, width):
# l.extend([content] * (width - len(l)))
# return l
# def triangle(elcon):
# c=random.randint(0,np.max(elcon))
# T=[]
# for i in range(len(elcon)):
# if np.any(np.not_equal(elcon[i],elcon[c])) and elcon[c][1] in elcon[i]:
# T+=list(elcon[i])
# T=set(T)
# T=list(T)
# for j in range(len(T)):
# if ([elcon[c][0],T[j]] in elcon.tolist() or [T[j],elcon[c][0]] in elcon.tolist()) \
# and np.any(np.not_equal(np.array([elcon[c][0],T[j]]),elcon[c])):
# return [elcon[c][0],elcon[c][1],T[j]]
# def mid_point_triangle(tr_edges,coord):
# ox=(coord[tr_edges[0]][0]+coord[tr_edges[1]][0]+coord[tr_edges[2]][0])/3
# oy=(coord[tr_edges[0]][1]+coord[tr_edges[1]][1]+coord[tr_edges[2]][1])/3
# oz=0
# return list((ox,oy,oz))
# def new_connections(tr_edges,coord):
# K=tr_edges
# return [[len(coord),K[0]],[len(coord),K[1]],[len(coord),K[2]]]
# list(range(0,6))
```
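A brief usage sketch for the helpers above (added for illustration): `total_length` sums the element lengths of a small two-element frame, and `possible_lines_dic` enumerates the distinct grid edges (including diagonals) that a move can traverse.
```
toy_coord = [[0, 0, 0], [1, 0, 0], [1, 1, 0]]
toy_elcon = [[0, 1], [1, 2]]
print(total_length(toy_coord, toy_elcon))      # 2.0

edges = possible_lines_dic(3, 3, 1, 1)
print(len(edges))                              # number of unique edges on a 3x3 grid
```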
## Model
```
class Model:
def __init__(self,n=5,m=5,dx=1,dy=1, force=-500,
E=210e6, G=84e6, A=2e-2, Iy=10e-5, Iz=20e-5, J=5e-5, break_flag=False):
# n,m,dx,dy - grid parameters
self.E=E
self.G=G
self.A=A
self.Iy=Iy
self.Iz=Iz
self.J=J
self.n=n
self.m=m
self.dx=dx
self.dy=dy
self.dic_lines=possible_lines_dic(self.n,self.m,self.dx,self.dy)
self.line_list=len(self.dic_lines)*[0]
self.break_flag=break_flag
self.coord=[[2,2,0]]
self.elcon=[]
self.el_dic={(2,2):0}
self.max_el=0
self.bc_u_elim=[]
self.force=force
self.f_after_u_elim=[0,self.force,0,0,0,0]
self.old_weight=float("inf")
self.old_strength=-float("inf")
self.visit_list = [0,0,0,0] # number of checkpoints is 4
def reset(self,break_flag,force):
self.dic_lines=possible_lines_dic(self.n,self.m,self.dx,self.dy)
self.line_list=len(self.dic_lines)*[0]
self.break_flag=break_flag
self.coord=[[2,2,0]]
self.elcon=[]
self.el_dic={(2,2):0}
self.max_el=0
self.bc_u_elim=[]
self.force=force
self.f_after_u_elim=[0,self.force,0,0,0,0]
# self.old_weight=-float("inf")
# self.old_strength=-float("inf")
self.visit_list = [0,0,0,0] # number of checkpoints is 4
def FEA(self):
return FEA_u(self.coord, self.elcon, self.bc_u_elim, self.f_after_u_elim,
self.E,self.G,self.A,self.Iy,self.Iz,self.J)
def max_u(self,FEA_output_arr):
t=1
A=[]
while t<len(FEA_output_arr):
A.append(FEA_output_arr[t])
t+=6
return min(A)
def length(self):
return total_length(self.coord,self.elcon)
def move_w(self,x,y):
# x,y - current location
x_new=x-self.dx
y_new=y
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
# if (x_new,y_new)!=(0,0) and (x_new,y_new)!=((self.m-1)*self.dx,0) and \
# (x_new,y_new)!=((self.m-1)*self.dx,(self.n-1)*self.dy) and \
# (x_new,y_new)!=(0,(self.n-1)*self.dy):
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_nw(self,x,y):
# x,y - current location
x_new=x-self.dx
y_new=y+self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_n(self,x,y):
# x,y - current location
x_new=x
y_new=y+self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_ne(self,x,y):
# x,y - current location
x_new=x+self.dx
y_new=y+self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_e(self,x,y):
# x,y - current location
x_new=x+self.dx
y_new=y
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_se(self,x,y):
# x,y - current location
x_new=x+self.dx
y_new=y-self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_s(self,x,y):
# x,y - current location
x_new=x
y_new=y-self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def move_sw(self,x,y):
# x,y - current location
x_new=x-self.dx
y_new=y-self.dy
if x_new<0 or x_new>(self.m-1)*self.dx or y_new<0 or y_new>(self.n-1)*self.dy \
or 3 in self.line_list:
self.break_flag=True
else:
try:
self.line_list[self.dic_lines[(x,y,x_new,y_new)]]+=1
except KeyError:
self.line_list[self.dic_lines[(x_new,y_new, x,y)]]+=1
if (x_new,y_new) not in self.el_dic:
self.max_el+=1
self.el_dic[(x_new,y_new)]=self.max_el
self.coord.append([x_new,y_new,0])
if (x_new,y_new)!=(1,1) and (x_new,y_new)!=(self.m-2,1) and \
(x_new,y_new)!=(self.m-2,self.n-2) and \
(x_new,y_new)!=(1,self.n-2):
self.f_after_u_elim+=[0,self.force,0,0,0,0]
elif (x_new,y_new)==(1,1) or (x_new,y_new)==(self.m-2,1) or \
(x_new,y_new)==(self.m-2,self.n-2) or \
(x_new,y_new)==(1,self.n-2):
self.bc_u_elim+=list(range(self.el_dic[(x_new,y_new)]*6-6,self.el_dic[(x_new,y_new)]*6))
if (self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]) not in self.elcon and \
(self.el_dic[(x_new,y_new)],self.el_dic[(x,y)]) not in self.elcon:
self.elcon.append([self.el_dic[(x,y)], self.el_dic[(x_new,y_new)]])
if (x_new,y_new) in self.el_dic:
if (x_new,y_new)==(1,1):
self.visit_list[0]+=1
elif (x_new,y_new)==(self.m-2,1):
self.visit_list[1]+=1
elif (x_new,y_new)==(self.m-2,self.n-2):
self.visit_list[2]+=1
elif (x_new,y_new)==(1,self.n-2):
self.visit_list[3]+=1
return x_new, y_new
def action_space(self,action,x0,y0):
if action==0:
return self.move_w(x0,y0)
elif action==1:
return self.move_nw(x0,y0)
elif action==2:
return self.move_n(x0,y0)
elif action==3:
return self.move_ne(x0,y0)
elif action==4:
return self.move_e(x0,y0)
elif action==5:
return self.move_se(x0,y0)
elif action==6:
return self.move_s(x0,y0)
elif action==7:
return self.move_sw(x0,y0)
def nn_input(self,x,y):
return self.line_list+[x,y]
def reward_(self,x_new,y_new,n_steps):
reward=2*n_steps
# reward=2
if all([x>=1 for x in self.visit_list]):
reward+=10000
weight=self.length()
# self.draw("green")
FEA_output_arr=self.FEA()
max_=self.max_u(FEA_output_arr)
strength=max_
# print(weight,strength)
if weight<=self.old_weight:
reward+=50000
self.old_weight=weight
if strength>=self.old_strength:
reward+=100000000
self.old_strength=strength
# print(self.old_weight, self.old_strength)
self.break_flag=True
return reward
# elif any([x==1 for x in self.visit_list]):
# reward+=250*(self.visit_list[0]+self.visit_list[1]+self.visit_list[2]+self.visit_list[3])
# return reward
# if x_new<0+1 or x_new>(self.m-1)*self.dx-1 or y_new<0+1 or y_new>(self.n-1)*self.dy-1:
# reward-=50
# return reward
return reward
def draw(self,color):
c=self.coord
e=self.elcon
c=np.array(c)
e=np.array(e)
coord=c.reshape(np.max(e)+1,3)
fig=plt.figure(figsize=(13,5))
for item in e:
ax = fig.gca(projection='3d')
ax.plot([coord[item[0]][0],coord[item[1]][0]],\
[coord[item[0]][1],coord[item[1]][1]],\
[coord[item[0]][2],coord[item[1]][2]],
color=color)
# ax.view_init(70,300)
ax.view_init(-90,90)
# ax1 = plt.subplot(131)
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
plt.show()
```
## Neural Network Policy - Policy Gradients
```
n_inputs = 74
n_hidden = 100
n_outputs = 8
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.0001
# Build the neural network
X_ = tf.placeholder(tf.float64, shape=[None, n_inputs], name="X_")
hidden = fully_connected(X_, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
hidden1 = fully_connected(hidden, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
hidden2 = fully_connected(hidden1, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
logits = fully_connected(hidden2, n_outputs, activation_fn=None, weights_initializer=initializer)
outputs = tf.nn.softmax(logits, name="Y_proba")
# outputs = tf.exp(logits) / tf.reduce_sum(tf.exp(logits), -1)
# Select a random action based on the estimated probabilities
action = tf.random.multinomial(tf.log(outputs), num_samples=1,output_dtype=tf.int64)
y=tf.reshape(tf.one_hot(action,depth=8,dtype=tf.float64),[8,1])
xentropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=tf.transpose(logits))
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(xentropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float64, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
# with tf.Session() as sess:
# A=sess.run(tf.exp([-0.39514669, -0.9955475, 0.36458025, 0.02534027, 1.11079987, -0.25412942,
# 0.68900028, 0.42532931]) /tf.reduce_sum(tf.exp([-0.39514669, -0.9955475, 0.36458025, 0.02534027, 1.11079987, -0.25412942,
# 0.68900028, 0.42532931]), -1))
# A
def discount_rewards(rewards, discount_rate=0.99):
discounted_rewards = np.empty(len(rewards))
cumulative_rewards = 0
for step in reversed(range(len(rewards))):
cumulative_rewards = rewards[step] + cumulative_rewards * discount_rate
discounted_rewards[step] = cumulative_rewards
return discounted_rewards
def discount_and_normalize_rewards(all_rewards, discount_rate=0.99):
all_discounted_rewards = [discount_rewards(rewards) for rewards in all_rewards]
flat_rewards = np.concatenate(all_discounted_rewards)
reward_mean = flat_rewards.mean()
reward_std = flat_rewards.std()
return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
# coord=np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0])
# elcon=np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]])
# bc_u_elim=list(range(30,42))
# f_after_u_elim=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0]
# G=Geometry(coord,elcon,bc_u_elim,f_after_u_elim)
M=Model()
n_iterations = 1001 # number of training iterations
n_max_steps = 500 # max steps per episode
n_games_per_update = 10 # train the policy every 10 episodes
save_iterations = 100 # save the model every 100 training iterations
with tf.Session() as sess:
start=time.time()
init.run()
# saver.restore(sess, tf.train.latest_checkpoint("C:\\Temp\\tf_save\\policy0\\"))
# tf.get_default_graph()
for iteration in range(n_iterations):
all_rewards = [] # all sequences of raw rewards for each episode
all_gradients = [] # gradients saved at each step of each episode
for game in range(n_games_per_update):
# tf.random.set_random_seed(game)
# init.run() # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
current_rewards = [] # all raw rewards from the current episode
current_gradients = [] # all gradients from the current episode
M.reset(False,-500)
x0,y0=2,2
obs=M.nn_input(x0,y0)
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients],
feed_dict={X_: np.array(obs).reshape(1,n_inputs)})
# outputs_, gradients_val = sess.run([X_, gradients],
# feed_dict={X_: np.array(obs).reshape(1,n_inputs)})
x_new,y_new=x0,y0
# print(outputs_)
# print(x_new,y_new)
# print(action_val[0][0])
x_new,y_new=M.action_space(action_val[0][0],x_new,y_new)
obs=M.nn_input(x_new,y_new)
reward=M.reward_(x_new,y_new,step)
x0,y0 = x_new,y_new
if M.break_flag:
reward-=10000
current_rewards.append(reward)
current_gradients.append(gradients_val)
if M.break_flag:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
# At this point we have run the policy for 10 episodes, and we are
# ready for a policy update using the algorithm described earlier.
all_rewards = discount_and_normalize_rewards(all_rewards)
feed_dict = {}
for var_index, grad_placeholder in enumerate(gradient_placeholders):
# multiply the gradients by the action scores, and compute the mean
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)],axis=0)
feed_dict[grad_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
# print("Saving {} iteration".format(iteration))
print('Time taken for {} epoch {} sec\n'.format(iteration, time.time() - start))
saver.save(sess, "./bionic_ckpt/bionic0.ckpt")
# end=time.time()
```
## AI designing the bionic partition
```
def predict(G2):
with tf.Session() as sess:
saver = tf.train.import_meta_graph('./bionic_ckpt/bionic0.ckpt.meta')
saver.restore(sess, "./bionic_ckpt/bionic0.ckpt")
graph = tf.get_default_graph()
outputs = graph.get_tensor_by_name("Y_proba:0")
X_ = graph.get_tensor_by_name("X_:0")
# G2.reset(np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0]),\
# np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]]),\
# list(range(30,42)),[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0])
# G2.position = random.randint(5,np.max(G2.elcon))
x0,y0=2,2
obs=G2.nn_input(x0,y0)
for step in range(100):
action_val= sess.run([outputs],feed_dict={X_: np.array(obs).reshape(1,n_inputs)})
action_val=np.log(action_val)
print(np.argmax(action_val))
x_new,y_new=x0,y0
x_new,y_new=G2.action_space(np.argmax(action_val),x_new,y_new)
print(x_new, y_new)
if G2.break_flag:
obs=G2.nn_input(x_new,y_new)
break
obs=G2.nn_input(x_new,y_new)
x0,y0=x_new,y_new
# print(obs[-1])
# G2.position = random.randint(5,np.max(G2.elcon))
G2.draw('blue')
return obs[-1], obs[-2]
# coord=np.array([0,0,0,0,7,0,7,7,0,7,0,0,3.5,3.5,0,4,4,0,3,4,0])
# elcon=np.array([[0,1],[1,2],[2,3],[0,3],[1,6],[2,6],[2,5],[4,5],[4,6],[0,4],[3,4],[3,5],[5,6],[0,6]])
# bc_u_elim=list(range(30,42))
# f_after_u_elim=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,-500,0,0,0,0]
M=Model()
# G3.draw('green')
# pr=G3.nn_input()
# pr[-1],pr[-2]
# G3.add_node()
# G3.add_node()
# G3.add_node()
# G3.add_node()
# G3.add_node()
# G3.add_node()
# G3.add_node()
# G3.add_node()
predict(M)
M.length()
FEA_output_arr=M.FEA()
M.max_u(FEA_output_arr)
```
#### demo: training a DND LSTM on a contextual choice task
This is an implementation of the following paper:
```
Ritter, S., Wang, J. X., Kurth-Nelson, Z., Jayakumar, S. M., Blundell, C., Pascanu, R., & Botvinick, M. (2018).
Been There, Done That: Meta-Learning with Episodic Recall. arXiv [stat.ML].
Retrieved from http://arxiv.org/abs/1805.09692
```
```
'''
If you are using Google Colab, uncomment and run the following lines
to grab the dependencies from GitHub.
'''
# !git clone https://github.com/qihongl/dnd-lstm.git
# !cd dnd-lstm/src/
# import os
# os.chdir('dnd-lstm/src/')
import time
import torch
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import ContextualChoice
from model import DNDLSTM as Agent
from utils import compute_stats, to_sqnp
from model.DND import compute_similarities
from model.utils import get_reward, compute_returns, compute_a2c_loss
sns.set(style='white', context='talk', palette='colorblind')
seed_val = 0
torch.manual_seed(seed_val)
np.random.seed(seed_val)
'''init task'''
n_unique_example = 50
n_trials = 2 * n_unique_example
# n time steps of a trial
trial_length = 10
# turn off the noise after `t_noise_off` time steps
t_noise_off = 5
# input/output/hidden/memory dim
obs_dim = 32
task = ContextualChoice(
obs_dim=obs_dim, trial_length=trial_length,
t_noise_off=t_noise_off
)
'''init model'''
# set params
dim_hidden = 32
dim_output = 2
dict_len = 100
learning_rate = 5e-4
n_epochs = 20
# init agent / optimizer
agent = Agent(task.x_dim, dim_hidden, dim_output, dict_len)
optimizer = torch.optim.Adam(agent.parameters(), lr=learning_rate)
'''train'''
log_return = np.zeros(n_epochs,)
log_loss_value = np.zeros(n_epochs,)
log_loss_policy = np.zeros(n_epochs,)
log_Y = np.zeros((n_epochs, n_trials, trial_length))
log_Y_hat = np.zeros((n_epochs, n_trials, trial_length))
# loop over epoch
for i in range(n_epochs):
time_start = time.time()
# get data for this epoch
X, Y = task.sample(n_unique_example)
# flush hippocampus
agent.reset_memory()
agent.turn_on_retrieval()
# loop over the training set
for m in range(n_trials):
# prealloc
cumulative_reward = 0
probs, rewards, values = [], [], []
h_t, c_t = agent.get_init_states()
# loop over time, for one training example
for t in range(trial_length):
# only save memory at the last time point
agent.turn_off_encoding()
if t == trial_length-1 and m < n_unique_example:
agent.turn_on_encoding()
# recurrent computation at time t
output_t, _ = agent(X[m][t].view(1, 1, -1), h_t, c_t)
a_t, prob_a_t, v_t, h_t, c_t = output_t
# compute immediate reward
r_t = get_reward(a_t, Y[m][t])
# log
probs.append(prob_a_t)
rewards.append(r_t)
values.append(v_t)
# log
cumulative_reward += r_t
log_Y_hat[i, m, t] = a_t.item()
returns = compute_returns(rewards)
loss_policy, loss_value = compute_a2c_loss(probs, values, returns)
loss = loss_policy + loss_value
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log
log_Y[i] = np.squeeze(Y.numpy())
log_return[i] += cumulative_reward / n_trials
log_loss_value[i] += loss_value.item() / n_trials
log_loss_policy[i] += loss_policy.item() / n_trials
# print out some stuff
time_end = time.time()
run_time = time_end - time_start
print(
'Epoch %3d | return = %.2f | loss: val = %.2f, pol = %.2f | time = %.2f' %
(i, log_return[i], log_loss_value[i], log_loss_policy[i], run_time)
)
'''learning curve'''
f, axes = plt.subplots(1, 2, figsize=(8, 3))
axes[0].plot(log_return)
axes[0].set_ylabel('Return')
axes[0].set_xlabel('Epoch')
axes[1].plot(log_loss_value)
axes[1].set_ylabel('Value loss')
axes[1].set_xlabel('Epoch')
sns.despine()
f.tight_layout()
'''show behavior'''
corrects = log_Y_hat[-1] == log_Y[-1]
acc_mu_no_memory, acc_se_no_memory = compute_stats(
corrects[:n_unique_example])
acc_mu_has_memory, acc_se_has_memory = compute_stats(
corrects[n_unique_example:])
n_se = 2
f, ax = plt.subplots(1, 1, figsize=(7, 4))
ax.errorbar(range(trial_length), y=acc_mu_no_memory,
yerr=acc_se_no_memory * n_se, label='w/o memory')
ax.errorbar(range(trial_length), y=acc_mu_has_memory,
yerr=acc_se_has_memory * n_se, label='w/ memory')
ax.axvline(t_noise_off, label='turn off noise', color='grey', linestyle='--')
ax.set_xlabel('Time')
ax.set_ylabel('Correct rate')
ax.set_title('Choice accuracy by condition')
f.legend(frameon=False, bbox_to_anchor=(1, .6))
sns.despine()
f.tight_layout()
'''visualize keys and values'''
keys, vals = agent.get_all_mems()
n_mems = len(agent.dnd.keys)
dmat_kk, dmat_vv = np.zeros((n_mems, n_mems)), np.zeros((n_mems, n_mems))
for i in range(n_mems):
dmat_kk[i, :] = to_sqnp(compute_similarities(
keys[i], keys, agent.dnd.kernel))
dmat_vv[i, :] = to_sqnp(compute_similarities(
vals[i], vals, agent.dnd.kernel))
# plot
dmats = {'key': dmat_kk, 'value': dmat_vv}
f, axes = plt.subplots(1, 2, figsize=(12, 5))
for i, (label_i, dmat_i) in enumerate(dmats.items()):
sns.heatmap(dmat_i, cmap='viridis', square=True, ax=axes[i])
axes[i].set_xlabel(f'id, {label_i} i')
axes[i].set_ylabel(f'id, {label_i} j')
axes[i].set_title(
f'{label_i}-{label_i} similarity, metric = {agent.dnd.kernel}'
)
f.tight_layout()
'''project memory content to low dim space'''
# convert the values to a np array, #memories x mem_dim
vals_np = np.vstack([to_sqnp(vals[i]) for i in range(n_mems)])
# project to PC space
vals_centered = (vals_np - np.mean(vals_np, axis=0, keepdims=True))
U, S, _ = np.linalg.svd(vals_centered, full_matrices=False)
vals_pc = np.dot(U, np.diag(S))
# pick pcs
pc_x = 0
pc_y = 1
# plot
f, ax = plt.subplots(1, 1, figsize=(7, 5))
Y_phase2 = to_sqnp(Y[:n_unique_example, 0])
for y_val in np.unique(Y_phase2):
ax.scatter(
vals_pc[Y_phase2 == y_val, pc_x],
vals_pc[Y_phase2 == y_val, pc_y],
marker='o', alpha=.7,
)
ax.set_title(f'Each point is a memory (i.e. value)')
ax.set_xlabel(f'PC {pc_x}')
ax.set_ylabel(f'PC {pc_y}')
ax.legend(['left trial', 'right trial'], bbox_to_anchor=(.6, .3))
sns.despine(offset=20)
f.tight_layout()
```
# Simulation of BLER in RBF channel
```
import numpy as np
import pickle
from itertools import cycle, product
import dill
import matplotlib.pyplot as plt
from scipy.spatial.distance import cdist
```
Simulation Configuration
```
blkSize = 8
chDim = 4
# Input
inVecDim = 2 ** blkSize # 1-hot vector length for block
encDim = 2*chDim
SNR_range_dB = np.arange( 0.0, 40.1, 2.0 )
```
Simulation Utility functions
```
def rbf_channel(txBlk, n0):
N, dim = txBlk.shape
p1 = txBlk[:,:]
p2 = np.hstack((-txBlk[:,dim//2:],txBlk[:,:dim//2]))
h1 = np.sqrt(1.0/encDim) * np.random.randn(N)
h2 = np.sqrt(1.0/encDim) * np.random.randn(N)
outBlk = h1[:,None]*p1 + h2[:,None]*p2 + np.random.normal(scale=np.sqrt(n0), size=txBlk.shape)
return outBlk
def add_pilot(txBlk, pilotSym):
blkWithPilot = np.insert(txBlk, [0,chDim], values=pilotSym, axis=1)
return blkWithPilot
def equalizer(rxBlk, pilotSym):
N, dim = rxBlk.shape
p1 = rxBlk[:,:]
p2 = np.hstack((-rxBlk[:,dim//2:],rxBlk[:,:dim//2]))
rxPilots = rxBlk[:,[0,1+chDim]]
h1_hat = (pilotSym[1]*rxPilots[:,1]+pilotSym[0]*rxPilots[:,0])/(pilotSym[1]**2+pilotSym[0]**2)
h2_hat = (pilotSym[0]*rxPilots[:,1]-pilotSym[1]*rxPilots[:,0])/(pilotSym[1]**2+pilotSym[0]**2)
z1_hat = rxBlk[:,:dim//2]
z2_hat = rxBlk[:,dim//2:]
zR = (h1_hat[:,None]*z1_hat+h2_hat[:,None]*z2_hat) / (h1_hat[:,None]**2+h2_hat[:,None]**2)
zI = (h1_hat[:,None]*z2_hat-h2_hat[:,None]*z1_hat) / (h1_hat[:,None]**2+h2_hat[:,None]**2)
outBlk = np.hstack((zR[:,1:],zI[:,1:]))
return outBlk
```
To store results
```
results = {}
```
## QAM System
```
qam_map_unscaled = np.array(list(map(list, product([-1.0, +1.0], repeat=blkSize))))
qam_sym_pow_unscaled = np.mean(np.sum(qam_map_unscaled*qam_map_unscaled,axis=1))
print( "Unscaled QAM Block Avg. Tx Power:", qam_sym_pow_unscaled )
```
### Block Symbol Power scaled to block length
Here the whole symbol block is scaled in such a way that the average power of the whole block is equal to the block length, i.e., the power per dimension is $1$.
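Concretely, with block length $B$ (`blkSize`) and unscaled average block power $\bar{P}$, the map is rescaled as
$$\mathbf{x}_{\mathrm{norm}} = \sqrt{\frac{B}{\bar{P}}}\,\mathbf{x}, \qquad \mathbb{E}\!\left[\|\mathbf{x}_{\mathrm{norm}}\|^2\right] = B,$$
which is what the next cell computes for the QAM map.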
```
qam_map_norm = np.sqrt(blkSize/qam_sym_pow_unscaled) * qam_map_unscaled
qam_sym_pow_norm = np.mean(np.sum(qam_map_norm*qam_map_norm,axis=1))
print("Normalized to block length QAM Avg. Tx Power:", qam_sym_pow_norm)
# calculate the pilot symbol
qam_pilot_sym_norm = np.sqrt(qam_sym_pow_norm/encDim) * np.ones(2)
print("Pilot Signal :", qam_pilot_sym_norm)
```
The target SNR is assumed to apply to the whole block, so the noise power per component is calculated and the noise is added accordingly.
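In formulas, with average block power $P$ and the target SNR in dB, the next cell uses
$$N_0^{\mathrm{block}} = P \cdot 10^{-\mathrm{SNR}/10}, \qquad n_0 = \frac{N_0^{\mathrm{block}}}{2\,d_{\mathrm{ch}} + 2},$$
where $2\,d_{\mathrm{ch}}$ real components carry the data block and the remaining $2$ are the pilot components inserted by `add_pilot`.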
```
noisePower = qam_sym_pow_norm * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = qam_map_norm[txSym]
txTest = add_pilot(symBlk, qam_pilot_sym_norm)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, qam_pilot_sym_norm)
rxDecode = cdist(rxEqualized, qam_map_norm)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(qam_sym_pow_norm, *qam_pilot_sym_norm)] = np.array(err)
```
### Block Symbol Power scaled to unit power
```
qam_map_unit = np.sqrt(1.0/qam_sym_pow_unscaled) * qam_map_unscaled
qam_sym_pow_unit = np.mean(np.sum(qam_map_unit*qam_map_unit,axis=1))
print( "Normalized to block length QAM Avg. Tx Power:", qam_sym_pow_unit )
# calculate the pilot symbol
qam_pilot_sym_unit = np.sqrt(qam_sym_pow_unit/encDim) * np.ones(2)
noisePower = qam_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = qam_map_unit[txSym]
txTest = add_pilot(symBlk, qam_pilot_sym_unit)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, qam_pilot_sym_unit)
rxDecode = cdist(rxEqualized, qam_map_unit)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(qam_sym_pow_unit, *qam_pilot_sym_unit)] = np.array(err)
```
### Block Symbol Power is scaled to unit power and Pilot power is $1$ per component
In this case, the pilot power is higher, so the pilots may experience a higher SNR than the rest of the block.
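As a rough sanity check (under the unit block power scaling above): each of the $2\,d_{\mathrm{ch}} = 8$ data components carries an average power of $1/8$, while each pilot component now has power $1$, so the pilots see roughly $8\times$ ($\approx 9$ dB) more signal power per component than the data.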
```
qam_pilot_sym_1 = np.ones(2)
noisePower = qam_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = qam_map_unit[txSym]
txTest = add_pilot(symBlk, qam_pilot_sym_1)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, qam_pilot_sym_1)
rxDecode = cdist(rxEqualized, qam_map_unit)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["QAM (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(qam_sym_pow_unit, *qam_pilot_sym_1)] = np.array(err)
```
## Agrell
```
agrell_map_unscaled = []
if blkSize==2 and chDim==1:
agrell_map_unscaled = np.array([
[ -1.0, -1.0 ],
[ -1.0, 1.0 ],
[ 1.0, -1.0 ],
[ 1.0, 1.0 ]
])
elif blkSize==4 and chDim==2:
agrell_map_unscaled = np.array([
[2.148934030042627, 0.0, 0.0, 0.0],
[0.7347204676695321, 1.4142135623730951, 0.0, 0.0],
[0.7347204676695321, -1.4142135623730951, 0.0, 0.0],
[0.7347204676695321, 0.0, 1.4142135623730951, 0.0],
[0.7347204676695321, 0.0, -1.4142135623730951, 0.0],
[0.7347204676695321, 0.0, 0.0, 1.4142135623730951],
[0.7347204676695321, 0.0, 0.0, -1.4142135623730951],
[-0.6174729817844246, 1.0, 1.0, 1.0],
[-0.6174729817844246, 1.0, 1.0, -1.0],
[-0.6174729817844246, 1.0, -1.0, 1.0],
[-0.6174729817844246, 1.0, -1.0, -1.0],
[-0.6174729817844246, -1.0, 1.0, 1.0],
[-0.6174729817844246, -1.0, 1.0, -1.0],
[-0.6174729817844246, -1.0, -1.0, 1.0],
[-0.6174729817844246, -1.0, -1.0, -1.0],
[-1.6174729817844242, 0.0, 0.0, 0.0]
])
elif blkSize==8 and chDim==4:
agrell_map_unscaled = np.array([
[ -256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ -256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ -256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],
[ -128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],
[ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],
[ -128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],
[ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],
[ -128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],
[ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],
[ -128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],
[ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],
[ -128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],
[ -128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],
[ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],
[ -128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],
[ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],
[ -128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],
[ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],
[ -128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],
[ -128.0, -128.0, 136.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],
[ -128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],
[ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],
[ -128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],
[ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],
[ -128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],
[ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],
[ -128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],
[ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],
[ -128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],
[ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],
[ -128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],
[ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],
[ -128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],
[ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],
[ -128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],
[ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],
[ -128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],
[ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],
[ -128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],
[ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],
[ -128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],
[ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],
[ -128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],
[ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],
[ -128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],
[ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],
[ -128.0, 128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],
[ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],
[ -128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],
[ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],
[ -128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],
[ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],
[ -128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],
[ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],
[ -128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],
[ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],
[ -128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],
[ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],
[ -128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],
[ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],
[ -128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],
[ 0.0, -256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, -256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, -256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, -242.0, -245.0, 245.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, -245.0, -11.0 ],
[ 0.0, 0.0, -248.0, -8.0, 248.0, -242.0, 11.0, 245.0 ],
[ 0.0, 0.0, -248.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, -245.0, -11.0 ],
[ 0.0, 0.0, -248.0, 248.0, -8.0, -242.0, 11.0, 245.0 ],
[ 0.0, 0.0, -248.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -264.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -264.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, -242.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, -8.0, 270.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, -245.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, -8.0, 248.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, -245.0, 245.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, -245.0, -11.0 ],
[ 0.0, 0.0, 8.0, 248.0, 248.0, -242.0, 11.0, 245.0 ],
[ 0.0, 0.0, 8.0, 248.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, 0.0, 264.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ 0.0, 256.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],
[ 128.0, -128.0, -120.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],
[ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],
[ 128.0, -128.0, -120.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],
[ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],
[ 128.0, -128.0, -120.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],
[ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],
[ 128.0, -128.0, -120.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],
[ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],
[ 128.0, -128.0, -120.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],
[ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],
[ 128.0, -128.0, -120.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],
[ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],
[ 128.0, -128.0, -120.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],
[ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],
[ 128.0, -128.0, -120.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],
[ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],
[ 128.0, -128.0, 136.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],
[ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],
[ 128.0, -128.0, 136.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],
[ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],
[ 128.0, -128.0, 136.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],
[ 128.0, -128.0, 136.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],
[ 128.0, -128.0, 136.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],
[ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],
[ 128.0, -128.0, 136.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],
[ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],
[ 128.0, -128.0, 136.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],
[ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],
[ 128.0, -128.0, 136.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],
[ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],
[ 128.0, -128.0, 136.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],
[ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, -117.0, -139.0 ],
[ 128.0, 128.0, -120.0, -136.0, -136.0, -114.0, 139.0, 117.0 ],
[ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, -117.0, 117.0 ],
[ 128.0, 128.0, -120.0, -136.0, -136.0, 142.0, 139.0, -139.0 ],
[ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, -117.0, 117.0 ],
[ 128.0, 128.0, -120.0, -136.0, 120.0, -114.0, 139.0, -139.0 ],
[ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, -117.0, -139.0 ],
[ 128.0, 128.0, -120.0, -136.0, 120.0, 142.0, 139.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, -117.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, -136.0, -114.0, 139.0, -139.0 ],
[ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, -117.0, -139.0 ],
[ 128.0, 128.0, -120.0, 120.0, -136.0, 142.0, 139.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, -370.0, -117.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -373.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, -139.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, -117.0, 373.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, -114.0, 139.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, -117.0, 117.0 ],
[ 128.0, 128.0, -120.0, 120.0, 120.0, 142.0, 139.0, -139.0 ],
[ 128.0, 128.0, 136.0, -136.0, -136.0, -114.0, -117.0, 117.0 ],
[ 128.0, 128.0, 136.0, -136.0, -136.0, -114.0, 139.0, -139.0 ],
[ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, -117.0, -139.0 ],
[ 128.0, 128.0, 136.0, -136.0, -136.0, 142.0, 139.0, 117.0 ],
[ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, -117.0, -139.0 ],
[ 128.0, 128.0, 136.0, -136.0, 120.0, -114.0, 139.0, 117.0 ],
[ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, -117.0, 117.0 ],
[ 128.0, 128.0, 136.0, -136.0, 120.0, 142.0, 139.0, -139.0 ],
[ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, -117.0, -139.0 ],
[ 128.0, 128.0, 136.0, 120.0, -136.0, -114.0, 139.0, 117.0 ],
[ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, -117.0, 117.0 ],
[ 128.0, 128.0, 136.0, 120.0, -136.0, 142.0, 139.0, -139.0 ],
[ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, -117.0, 117.0 ],
[ 128.0, 128.0, 136.0, 120.0, 120.0, -114.0, 139.0, -139.0 ],
[ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, -117.0, -139.0 ],
[ 128.0, 128.0, 136.0, 120.0, 120.0, 142.0, 139.0, 117.0 ],
[ 256.0, -256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, -248.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, -264.0, -8.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, -264.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, -242.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, -245.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, -267.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 11.0, 245.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, 14.0, 267.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, -8.0, 270.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, -8.0, 248.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, 8.0, 248.0, -8.0, 14.0, 11.0, -11.0 ],
[ 256.0, 0.0, 264.0, -8.0, -8.0, 14.0, 11.0, -11.0 ],
[ 256.0, 256.0, 8.0, -8.0, -8.0, 14.0, 11.0, -11.0 ] ])
else:
raise NotImplementedError("Not implemented (blkSize={},chDim={})".format(blkSize,chDim))
```
### Block Symbol Power scaled to block length
```
agrell_sym_pow_unscaled = np.mean(np.sum(agrell_map_unscaled*agrell_map_unscaled,axis=1))
print( "Unscaled Agrell Avg. Tx Power:", agrell_sym_pow_unscaled )
agrell_map_norm = np.sqrt(blkSize/agrell_sym_pow_unscaled) * agrell_map_unscaled
agrell_sym_pow_norm = np.mean(np.sum(agrell_map_norm*agrell_map_norm,axis=1))
print( "Normalized Agrell Avg. Tx Power:", agrell_sym_pow_norm )
# calculate the pilot symbol
agrell_pilot_sym_norm = np.sqrt(agrell_sym_pow_norm/encDim) * np.ones(2)
noisePower = agrell_sym_pow_norm * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = agrell_map_norm[txSym]
txTest = add_pilot(symBlk, agrell_pilot_sym_norm)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, agrell_pilot_sym_norm)
rxDecode = cdist(rxEqualized, agrell_map_norm)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(agrell_sym_pow_norm, *agrell_pilot_sym_norm)] = np.array(err)
```
### Block Symbol Power scaled to unit power
```
agrell_sym_pow_unscaled = np.mean(np.sum(agrell_map_unscaled*agrell_map_unscaled,axis=1))
print( "Unscaled Agrell Avg. Tx Power:", agrell_sym_pow_unscaled )
agrell_map_unit = np.sqrt(1.0/agrell_sym_pow_unscaled) * agrell_map_unscaled
agrell_sym_pow_unit = np.mean(np.sum(agrell_map_unit*agrell_map_unit,axis=1))
print( "Normalized Agrell Avg. Tx Power:", agrell_sym_pow_unit )
# calculate the pilot symbol
agrell_pilot_sym_unit = np.sqrt(agrell_sym_pow_unit/encDim) * np.ones(2)
noisePower = agrell_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = agrell_map_unit[txSym]
txTest = add_pilot(symBlk, agrell_pilot_sym_unit)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, agrell_pilot_sym_unit)
rxDecode = cdist(rxEqualized, agrell_map_unit)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(agrell_sym_pow_unit, *agrell_pilot_sym_unit)] = np.array(err)
```
### Block Symbol Power is scaled to unit power and Pilot power is $1$ per component
```
agrell_pilot_sym_1 = np.ones(2)
noisePower = agrell_sym_pow_unit * 10.0**(-SNR_range_dB/10.0)
n0_per_comp = noisePower/(2*chDim+2)
err = []
for n0 in n0_per_comp:
thisErr = 0
thisCount = 0
while thisErr < 500 and thisCount < 100000:
txSym = np.random.randint(inVecDim, size=1000)
symBlk = agrell_map_unit[txSym]
txTest = add_pilot(symBlk, agrell_pilot_sym_1)
rxTest = rbf_channel(txTest, n0)
rxEqualized = equalizer(rxTest, agrell_pilot_sym_1)
rxDecode = cdist(rxEqualized, agrell_map_unit)
rxSym = np.argmin(rxDecode,axis=1)
thisErr += np.sum(rxSym!=txSym)
thisCount += 1000
err.append(thisErr/thisCount)
results["Agrell (Block Power = ${:.1f}$, Pilot = ({:.2f},{:.2f}))".format(agrell_sym_pow_unit, *agrell_pilot_sym_1)] = np.array(err)
```
## Plot results
```
fig = plt.figure(figsize=(12,9))
for (l,v) in results.items():
plt.semilogy(SNR_range_dB, v, label=l, linewidth=2)
plt.legend(loc="lower left", prop={'size':14})
plt.grid()
```
```
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/ai-platform-samples/blob/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/ai-platform-samples/raw/master/ai-platform-unified/notebooks/unofficial/pipelines/google-cloud-pipeline-components_automl_images.ipynb">
Open in Google Cloud Notebooks
</a>
</td>
</table>
# Vertex Pipelines: AutoML Images pipelines using google-cloud-pipeline-components
## Overview
This notebook shows how to use the components defined in [`google_cloud_pipeline_components`](https://github.com/kubeflow/pipelines/tree/master/components/google-cloud) to build an AutoML Images workflow on [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines).
### Objective
In this example, you'll learn how to use components from `google_cloud_pipeline_components` to:
- create a _Dataset_
- train an AutoML Images model
- deploy the trained model to an _endpoint_ for serving
The components are [documented here](https://google-cloud-pipeline-components.readthedocs.io/en/latest/google_cloud_pipeline_components.aiplatform.html#module-google_cloud_pipeline_components.aiplatform).
### Costs
This tutorial uses billable components of Google Cloud:
* Vertex AI Training and Serving
* Cloud Storage
Learn about pricing for [Vertex AI](https://cloud.google.com/ai-platform-unified/pricing) and [Cloud Storage](https://cloud.google.com/storage/pricing), and use the [Pricing
Calculator](https://cloud.google.com/products/calculator/)
to generate a cost estimate based on your projected usage.
### Set up your local development environment
**If you are using Colab or Google Cloud Notebooks**, your environment already meets
all the requirements to run this notebook. You can skip this step.
**Otherwise**, make sure your environment meets this notebook's requirements.
You need the following:
* The Google Cloud SDK
* Git
* Python 3
* virtualenv
* Jupyter notebook running in a virtual environment with Python 3
The Google Cloud guide to [Setting up a Python development
environment](https://cloud.google.com/python/setup) and the [Jupyter
installation guide](https://jupyter.org/install) provide detailed instructions
for meeting these requirements. The following steps provide a condensed set of
instructions:
1. [Install and initialize the Cloud SDK.](https://cloud.google.com/sdk/docs/)
1. [Install Python 3.](https://cloud.google.com/python/setup#installing_python)
1. [Install
virtualenv](https://cloud.google.com/python/setup#installing_and_using_virtualenv)
and create a virtual environment that uses Python 3. Activate the virtual environment.
1. To install Jupyter, run `pip install jupyter` on the
command-line in a terminal shell.
1. To launch Jupyter, run `jupyter notebook` on the command-line in a terminal shell.
1. Open this notebook in the Jupyter Notebook Dashboard.
### Install additional packages
```
import sys
if "google.colab" in sys.modules:
USER_FLAG = ""
else:
USER_FLAG = "--user"
!pip3 install {USER_FLAG} google-cloud-aiplatform==1.0.0 --upgrade
!pip3 install {USER_FLAG} kfp google-cloud-pipeline-components==0.1.1 --upgrade
```
### Restart the kernel
After you install the additional packages, you need to restart the notebook kernel so it can find the packages.
```
# Automatically restart kernel after installs
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
```
Check the versions of the packages you installed. The KFP SDK version should be >=1.6.
```
!python3 -c "import kfp; print('KFP SDK version: {}'.format(kfp.__version__))"
```
## Before you begin
This notebook does not require a GPU runtime.
### Set up your Google Cloud project
**The following steps are required, regardless of your notebook environment.**
1. [Select or create a Google Cloud project](https://console.cloud.google.com/cloud-resource-manager). When you first create an account, you get a $300 free credit towards your compute/storage costs.
1. [Make sure that billing is enabled for your project](https://cloud.google.com/billing/docs/how-to/modify-project).
1. [Enable the AI Platform (Unified), Cloud Storage, and Compute Engine APIs](https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com,compute_component,storage-component.googleapis.com).
1. Follow the "**Configuring your project**" instructions from the AI Platform Pipelines documentation.
1. If you are running this notebook locally, you will need to install the [Cloud SDK](https://cloud.google.com/sdk).
1. Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
**Note**: Jupyter runs lines prefixed with `!` as shell commands, and it interpolates Python variables prefixed with `$` into these commands.
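For example (the bucket name below is purely hypothetical and used only to illustrate the interpolation):
```
EXAMPLE_BUCKET = "gs://my-example-bucket"  # hypothetical value, for illustration only
# `!` runs gsutil as a shell command; `$EXAMPLE_BUCKET` is replaced with the Python variable's value.
! gsutil ls $EXAMPLE_BUCKET
```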
#### Set your project ID
**If you don't know your project ID**, you may be able to get your project ID using `gcloud`.
```
import os
PROJECT_ID = ""
# Get your Google Cloud project ID from gcloud
if not os.getenv("IS_TESTING"):
shell_output=!gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID: ", PROJECT_ID)
```
Otherwise, set your project ID here.
```
if PROJECT_ID == "" or PROJECT_ID is None:
PROJECT_ID = "python-docs-samples-tests" # @param {type:"string"}
```
#### Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append it onto the name of resources you create in this tutorial.
```
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
```
### Authenticate your Google Cloud account
**If you are using AI Platform Notebooks**, your environment is already
authenticated. Skip this step.
**If you are using Colab**, run the cell below and follow the instructions
when prompted to authenticate your account via oAuth.
**Otherwise**, follow these steps:
1. In the Cloud Console, go to the [**Create service account key**
page](https://console.cloud.google.com/apis/credentials/serviceaccountkey).
2. Click **Create service account**.
3. In the **Service account name** field, enter a name, and
click **Create**.
4. In the **Grant this service account access to project** section, click the **Role** drop-down list. Type "AI Platform"
into the filter box, and select
**AI Platform Administrator**. Type "Storage Object Admin" into the filter box, and select **Storage Object Admin**.
5. Click *Create*. A JSON file that contains your key downloads to your
local environment.
6. Enter the path to your service account key as the
`GOOGLE_APPLICATION_CREDENTIALS` variable in the cell below and run the cell.
```
import os
import sys
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
# If on AI Platform, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
```
### Create a Cloud Storage bucket as necessary
You need a Cloud Storage bucket for this example. If you don't have one that you want to use, you can make one now.
Set the name of your Cloud Storage bucket below. It must be unique across all
Cloud Storage buckets.
You can also change the `REGION` variable, which is used for operations
throughout the rest of this notebook. Make sure to [choose a region where AI Platform (Unified) services are
available](https://cloud.google.com/ai-platform-unified/docs/general/locations#available_regions). You may
not use a Multi-Regional Storage bucket for training with AI Platform (Unified) Pipelines.
```
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
REGION = "us-central1" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
```
**Only if your bucket doesn't already exist**: Run the following cell to create your Cloud Storage bucket.
```
! gsutil mb -l $REGION $BUCKET_NAME
```
Finally, validate access to your Cloud Storage bucket by examining its contents:
```
! gsutil ls -al $BUCKET_NAME
```
### Import libraries and define constants
<**TODO**: for preview, we shouldn't need the **API KEY**>.
Define some constants.
```
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
USER = "your-user-name" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(BUCKET_NAME, USER)
API_KEY = "your-api-key" # <---CHANGE THIS
PIPELINE_ROOT
```
Do some imports:
```
import kfp
from google.cloud import aiplatform
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import compiler
from kfp.v2.google.client import AIPlatformClient
```
## Define an AutoML Image classification pipeline that uses components from `google_cloud_pipeline_components`
Create a managed image dataset from a CSV file and train it using AutoML Image Training.
Define the pipeline:
```
@kfp.dsl.pipeline(name="automl-image-training-v2")
def pipeline(project: str = PROJECT_ID):
ds_op = gcc_aip.ImageDatasetCreateOp(
project=project,
display_name="flowers",
gcs_source="gs://cloud-samples-data/vision/automl_classification/flowers/all_data_v2.csv",
import_schema_uri=aiplatform.schema.dataset.ioformat.image.single_label_classification,
)
training_job_run_op = gcc_aip.AutoMLImageTrainingJobRunOp(
project=project,
display_name="train-iris-automl-mbsdk-1",
prediction_type="classification",
model_type="CLOUD",
base_model=None,
dataset=ds_op.outputs["dataset"],
model_display_name="iris-classification-model-mbsdk",
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
)
endpoint_op = gcc_aip.ModelDeployOp( # noqa: F841
project=project, model=training_job_run_op.outputs["model"]
)
```
## Compile and run the pipeline
Compile the pipeline:
```
from kfp.v2 import compiler # noqa: F811
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="image_classif_pipeline.json"
)
```
The pipeline compilation generates the `image_classif_pipeline.json` job spec file.
Next, instantiate an API client object:
```
from kfp.v2.google.client import AIPlatformClient # noqa: F811
api_client = AIPlatformClient(project_id=PROJECT_ID, region=REGION)
```
Then, you run the defined pipeline like this:
```
response = api_client.create_run_from_job_spec(
"image_classif_pipeline.json",
pipeline_root=PIPELINE_ROOT,
parameter_values={"project": PROJECT_ID},
)
```
Click on the generated link to see your run in the Cloud Console. It should look something like this as it is running:
<a href="https://storage.googleapis.com/amy-jo/images/mp/automl_image_classif.png" target="_blank"><img src="https://storage.googleapis.com/amy-jo/images/mp/automl_image_classif.png" width="40%"/></a>
## Cleaning up
To clean up all Google Cloud resources used in this project, you can [delete the Google Cloud
project](https://cloud.google.com/resource-manager/docs/creating-managing-projects#shutting_down_projects) you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
- Delete Cloud Storage objects that were created. Uncomment and run the command in the cell below **only if you are not using the `PIPELINE_ROOT` path for any other purpose**.
- Delete your deployed model: first, undeploy it from its *endpoint*, then delete the model and endpoint (see the sketch after the cell below).
```
# Warning: this command will delete ALL Cloud Storage objects under the PIPELINE_ROOT path.
# ! gsutil -m rm -r $PIPELINE_ROOT
```
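For the second step, here is a minimal sketch using the `google.cloud.aiplatform` SDK installed earlier; it is an illustration under assumptions, not part of the original tutorial. The destructive calls are commented out, mirroring the cell above, and you should identify the endpoint to remove by inspecting its display name (the model display name is the one set in the pipeline definition).
```
from google.cloud import aiplatform

aiplatform.init(project=PROJECT_ID, location=REGION)

# List endpoints; undeploy everything from the endpoint created by this pipeline, then delete it.
# Inspect `endpoint.display_name` first and uncomment the calls only for the endpoint you mean.
for endpoint in aiplatform.Endpoint.list():
    print(endpoint.display_name, endpoint.resource_name)
    # endpoint.undeploy_all()
    # endpoint.delete()

# Delete the trained AutoML model (display name taken from the pipeline definition above).
# for model in aiplatform.Model.list(filter='display_name="iris-classification-model-mbsdk"'):
#     model.delete()
```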
# Exploratory Analysis
## 1) Reading the data
```
import types
import pandas as pd
df_claim = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/Machine-Learning-Models-with-AUTO-AI/master/Data/insurance.csv')
df_claim.head()
df_data = pd.read_csv('https://raw.githubusercontent.com/IBMDeveloperUK/Machine-Learning-Models-with-AUTO-AI/master/Data/patientdata_v2.csv')
df_data.head()
above_30 = df_data[df_data["BMI"] > 33]
```
## 2) Importing visualization libraries
```
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
import matplotlib.patches as mpatches
```
## 3) Exploring data
### Statistical description:
`describe()` generates various summary statistics, excluding NaN values
```
df_claim.describe()
```
#### A) Q: Is there a relationship between BMI and claim amount?
```
sns.jointplot(x=df_claim["expenses"], y=df_claim["bmi"])
plt.show()
```
#### A) A: There is NO relationship between BMI and claim amount
#### B) Q: Is there a relationship between gender and claim amount?
```
plt.figure(figsize = (5, 5))
sns.boxplot(x = 'sex', y = 'expenses', data = df_claim)
```
#### B) A: On average, claims from males and females are similar, with a slightly larger proportion of higher amounts for males
#### C) Q: Is there a relationship between region and claim amount?
```
plt.figure(figsize = (10, 5))
sns.boxplot(x = 'region', y = 'expenses', data = df_claim)
```
#### C) A: On average, claims across regions are similar, with a slightly larger proportion of higher amounts coming from the Southeast
#### D) Q: Is there a relationship between claim amount for smokers and non-smokers?
```
plt.figure(figsize = (5, 5))
sns.boxplot(x = 'smoker', y = 'expenses', data = df_claim)
```
#### D) A: There is a strong relationship between smoking status and claim amount, with claims being much higher for smokers
#### Is the smoker group well represented?
```
sns.countplot(x='smoker', data=df_claim)
```
#### E) Q: Is there a relationship between claim amount and age?
```
sns.jointplot(x=df_claim["expenses"], y=df_claim["age"], kind='scatter')
plt.show()
```
#### E) A: Claim amounts increase with age and tend to form groups around 1.2K, up to 3K, and more than 3K
## 4) Understanding data
###### Based on the observations above, let's bring several variables together to observe the differences
#### IMPACT OF SMOKING
```
claim_pplot=df_claim[['age', 'bmi', 'children', 'smoker', 'expenses']]
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "smoker" , markers=["o", "s"], palette="Set1")
plt.show()
```
#### GENDER IMPACT
```
claim_pplot=df_claim[['age', 'bmi', 'children', 'sex', 'expenses']]
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "sex" , markers=["o", "s"], palette="Set1")
plt.show()
```
#### Summary: Gender has very little impact on the charges
#### REGION IMPACT
```
claim_pplot=df_claim[['age', 'bmi', 'children', 'region', 'expenses']]
claim_pplot.head()
sns.pairplot(claim_pplot, kind="scatter", hue = "region" , markers=["o", "s","x","+"], palette="Set1")
plt.show()
```
#### Summary: Region does have some impact on the charges; however, we can see some trends as they relate to BMI
### Summary: The charges are strongly affected by whether the claimant is a smoker. Smokers tend to have BMIs above the average of 30. Region and gender might play some role in determining the amount charged
<div class="alert alert-success">
<h1> Using the above Visualisations as reference, try creating similar charts for the second dataset </h1> <br/>
</div>
> *Tip*: If you want to run these in separate cells, activate the cell below by clicking on it, then click the + at the top of the notebook to add extra cells. Use the buttons with the upwards and downwards arrows to move cells up or down and change their order.
```
plt.figure(figsize=(15,5))
sns.barplot(x="EXERCISEMINPERWEEK",y="CHOLESTEROL",data=above_30,hue="HEARTFAILURE",)
plt.show()
```
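As a starting point for that exercise, here is one possible chart for the second dataset. This is a hedged sketch that reuses only columns already seen above (`CHOLESTEROL` and `HEARTFAILURE` in `df_data`):
```
# Sketch: mirror the smoker/expenses box plot on the patient data,
# comparing cholesterol for patients with and without heart failure.
plt.figure(figsize=(5, 5))
sns.boxplot(x='HEARTFAILURE', y='CHOLESTEROL', data=df_data)
plt.show()
```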
|
github_jupyter
|
# Support Vector Machines (SVM) with Sklearn
This notebook creates and measures a [LinearSVC with Sklearn](http://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC). LinearSVC has more flexibility in the choice of penalties and loss functions and should scale better to large numbers of samples compared to SVC.
* Method: LinearSVC
* Dataset: Iris
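As a hedged aside on that flexibility (not part of the original notebook), `LinearSVC` exposes the penalty and loss choices directly; for instance, an L1-penalised variant can be configured like this:
```
from sklearn.svm import LinearSVC

# The L1 penalty requires the primal formulation (dual=False) and the squared hinge loss.
sparse_svc = LinearSVC(penalty='l1', loss='squared_hinge', dual=False, C=1.0, random_state=42)
```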
## Imports
```
import numpy as np
from sklearn.datasets import load_iris
from sklearn.svm import LinearSVC
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
%matplotlib inline
```
## Load and Prepare the Data
```
# Load the data
data = load_iris()
# Show the information about the dataset
print(data.DESCR)
# Split the data into labels (targets) and features
label_names = data['target_names']
labels = data['target']
feature_names = data['feature_names']
features = data['data']
# View the data
print(label_names)
print(labels[0])
print("")
print(feature_names)
print(features[0])
# Create test and training sets
X_train, X_test, Y_train, Y_test = train_test_split(features,
labels,
test_size=0.33,
random_state=42)
```
## Fit a LinearSVC Model
Parameters
* C: tells the SVM optimization how much you want to avoid misclassifying each training example (a small comparison sketch follows this list)
    * If C is large: the hyperplane does a better job of getting all the training points classified correctly
    * If C is small: the optimizer will look for a larger-margin separating hyperplane, even if that hyperplane misclassifies more points
* random_state: seed of the pseudo-random number generator used when shuffling the data
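Here is a small, hedged sketch of the C trade-off described above (the two values below are arbitrary illustrations, not tuned choices):
```
# Compare a small and a large C on the iris data with 5-fold cross-validation.
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score
from sklearn.svm import LinearSVC

iris = load_iris()
for c in (0.01, 100.0):
    scores = cross_val_score(LinearSVC(C=c, random_state=42, max_iter=10000),
                             iris.data, iris.target, cv=5)
    print("C={:g}: mean CV accuracy = {:.3f}".format(c, scores.mean()))
```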
```
# Create an instance of the LinearSVC classifier
model = LinearSVC(C=1.0, random_state=42)
# Train the model
model.fit(X_train, Y_train)
model
# Show the intercepts
print("Intercepts: {}".format(model.intercept_))
```
## Create Predictions
```
# Create predictions
predictions = model.predict(X_test)
print(predictions)
# Create a plot to compare actual labels (Y_test) and the predicted labels (predictions)
fig = plt.figure(figsize=(20,10))
plt.scatter(Y_test, predictions)
plt.xlabel("Actual Label: $Y_i$")
plt.ylabel("Predicted Label: $\hat{Y}_i$")
plt.title("Actual vs. Predicted Label: $Y_i$ vs. $\hat{Y}_i$")
plt.show()
```
## Model Evaluation
### Accuracy
The accuracy score is either the fraction (default) or the count (normalize=False) of correct predictions.
```
print("Accuracy Score: %.2f" % accuracy_score(Y_test, predictions))
```
### K-Fold Cross Validation
This estimates the accuracy of an SVM model by splitting the data, fitting a model and computing the score 5 consecutive times. The result is a list of the scores from each consecutive run.
```
# Get scores for 5 folds over the data
clf = LinearSVC(C=1.0, random_state=42)
scores = cross_val_score(clf, data.data, data.target, cv=5)
print(scores)
```
### Confusion Matrix
**Confusion Matrix for Binary Label**

```
# Plot the multi-label confusion matrix
print("Labels:")
for label in label_names:
i, = np.where(label_names == label)
print("{}: {}".format(i, label))
cm = confusion_matrix(y_target=Y_test,
y_predicted=predictions,
binary=False)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.title("Confusion Matrix")
plt.show()
```
|
github_jupyter
|
```
import pandas as pd
import numpy as np
import os
import json
import altair as alt
JSON_FILE = "../results/BDNF/Recombinants/BDNF_codons_RDP_recombinationFree.fas.FEL.json"
pvalueThreshold = 0.1
def getFELData(json_file):
with open(json_file, "r") as in_d:
json_data = json.load(in_d)
return json_data["MLE"]["content"]["0"]
#end method
def getFELHeaders(json_file):
with open(json_file, "r") as in_d:
json_data = json.load(in_d)
return json_data["MLE"]["headers"]
#end method
columns = getFELHeaders(JSON_FILE)
headers = [x[0] for x in columns]
headers
data = getFELData(JSON_FILE)
```
### Selected Sites -- Tables
```
df = pd.DataFrame(getFELData(JSON_FILE), columns=headers, dtype = float)
df["omega"] = df["beta"] / df["alpha"]
df.index += 1
df["Site"] = df.index
df
df_results = df[df["p-value"] <= pvalueThreshold]
df_results
positive_sites = df_results[df_results["omega"] > 1.0]
positive_sites = positive_sites.reset_index()
positive_sites.index += 1
positive_sites.drop('index', axis=1, inplace=True)
positive_sites
negative_sites = df_results[df_results["omega"] < 1.0]
negative_sites = negative_sites.reset_index()
negative_sites.index += 1
negative_sites.drop('index', axis=1, inplace=True)
negative_sites
#df = pd.DataFrame(getFELData(JSON_FILE), columns=headers, dtype = float)
#df.index += 1
# Save the DF here.
#OUTPUT = JSON_FILE.split("/")[-1].replace(".FEL.json", ".csv")
#print("# Saving:", OUTPUT)
#df.to_csv(OUTPUT)
#df["Site"] = df.index
#df["omega"] = df["beta"] / df["alpha"]
#df["Site"] = df.index
#df
```
## Visualizations
```
#source = df[df["omega"] < 10]
source = df
line = alt.Chart(source).mark_line().encode(
x='Site',
y='omega',
).properties(
width=800,
height=600)
line
source = df
line = alt.Chart(source).mark_line().encode(
x='Site',
y=alt.Y('omega',
scale=alt.Scale(domain=(0, 10), clamp=True)),
).properties(
width=800,
height=600)
line
import numpy as np
df["log10(omega)"] = np.log10(df["omega"])
source = df
line = alt.Chart(source).mark_bar().encode(
x='Site',
y='log10(omega)',
color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True))
).properties(
width=800,
height=600)
line
import numpy as np
negative_sites["log10(omega)"] = np.log10(negative_sites["omega"])
source = negative_sites
line = alt.Chart(source).mark_bar().encode(
x='Site',
y='log10(omega)',
color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True))
).properties(
width=800,
height=600)
line
import numpy as np
source = negative_sites
line = alt.Chart(source).mark_point().encode(
x='Site',
y='omega',
color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),
size=alt.Size('p-value', scale=alt.Scale(reverse=True))
).properties(
width=800,
height=600)
line
```
## Go with this one for now
```
# Only the negative sites
source = negative_sites
line = alt.Chart(source).mark_circle().encode(
x='Site',
y='omega',
color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),
size=alt.Size('p-value', scale=alt.Scale(reverse=True))
).properties(
width=800,
height=600)
line
import numpy as np
source = negative_sites
line = alt.Chart(source).mark_point().encode(
x='Site',
y='omega',
color=alt.Color('p-value', scale=alt.Scale(scheme='reds', reverse=True)),
size=alt.Size('p-value', scale=alt.Scale(reverse=True))
).properties(
width=800,
height=600)
line
```
## Figure legend.
```
## Summary
a = len(df["omega"])
b = len(negative_sites["omega"])
c = round((b/a) * 100, 3)
print("FEL analysis of your gene of interest found " + str(b) + " of " + str(a) + " sites to be statistically significant (p-value <= " + str(pvalueThreshold) + ") for pervasive negative/purifying selection" )
print(c)
```
## Table
```
negative_sites
df_AlnMap = pd.read_csv("BDNF_AlignmentMap.csv")
df_AlnMap
mapping = []
for site in negative_sites["Site"].to_list():
if site in df_AlnMap["AlignmentSite"].to_list():
for n, item in enumerate(df_AlnMap["AlignmentSite"].to_list()):
if item == site:
mapping.append(n+1)
break
#end if
#end for
else:
mapping.append(np.nan)
#end if
#end for
negative_sites["HumanBDNF"] = mapping
negative_sites
try:
negative_sites = negative_sites.drop(['log10(omega)'], axis=1)
except:
pass
negative_sites.to_csv("BDNF_FEL_Negative_Table.csv", index=False)
```
|
github_jupyter
|
```
import os
import json
import random

# django.setup() must run before any model imports; DJANGO_SETTINGS_MODULE is
# assumed to be configured by the environment this notebook runs in.
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
import django
django.setup()

from django.db import transaction
from django.contrib.auth.models import User
from django_efilling.models import Instrument, InstrumentQuestion, InstrumentQuestionChoice
from django_efilling.models import (ESSAY, SINGLE_CHOICE, MULTIPLE_CHOICE, IMAGE_CHOICE, Respondent)
```
# Create Users
```
with transaction.atomic():
user1 = User.objects.create_user('john', '[email protected]', 'johnpassword')
user2 = User.objects.create_user('albert', '[email protected]', 'albertpassword')
user3 = User.objects.create_user('sonia', '[email protected]', 'soniapassword')
user4 = User.objects.create_user('james', '[email protected]', 'jamespassword')
user5 = User.objects.create_user('chika', '[email protected]', 'chikapassword')
user6 = User.objects.create_user('robert', '[email protected]', 'robertpassword')
user7 = User.objects.create_user('hendra', '[email protected]', 'hendrapassword')
user8 = User.objects.create_user('yoshi', '[email protected]', 'yoshipassword')
user9 = User.objects.create_user('bianca', '[email protected]', 'biancapassword')
user10 = User.objects.create_user('devia', '[email protected]', 'deviapassword')
user11 = User.objects.create_user('yudi', '[email protected]', 'yudipassword')
user12 = User.objects.create_user('meriam', '[email protected]', 'meriampassword')
user13 = User.objects.create_user('hania', '[email protected]', 'haniapassword')
user14 = User.objects.create_user('bimo', '[email protected]', 'bimopassword')
user15 = User.objects.create_user('gino', '[email protected]', 'ginopassword')
user16 = User.objects.create_user('juju', '[email protected]', 'jujupassword')
user17 = User.objects.create_user('romeo', '[email protected]', 'romeopassword')
user18 = User.objects.create_user('hansen', '[email protected]', 'hansenpassword')
user19 = User.objects.create_user('romi', '[email protected]', 'romipassword')
user20 = User.objects.create_user('gina', '[email protected]', 'ginapassword')
user21 = User.objects.create_user('huria', '[email protected]', 'huriapassword')
user22 = User.objects.create_user('jaka', '[email protected]', 'jakapassword')
user23 = User.objects.create_user('rendra', '[email protected]', 'rendrapassword')
user24 = User.objects.create_user('kiki', '[email protected]', 'kikipassword')
user25 = User.objects.create_user('rama', '[email protected]', 'ramapassword')
user26 = User.objects.create_user('habibie', '[email protected]', 'habibiepassword')
user27 = User.objects.create_user('iankasela', '[email protected]', 'iankaselapassword')
user28 = User.objects.create_user('koko', '[email protected]', 'kokopassword')
user29 = User.objects.create_user('joko', '[email protected]', 'jokopassword')
user30 = User.objects.create_user('momo', '[email protected]', 'momopassword')
```
# Creating Questions
```
user1 = User.objects.get(username='rizkisasri')
with transaction.atomic():
# def create_instrumet(number)
# Create Instrument
instrument1 = Instrument(
name="Owesome survey #3, Test All Question Type.",
creator=user1,
)
instrument1.save()
instrument1.tags.add("tag1")
instrument1.tags.add("tag2")
# Add Question to Instrument
q1 = InstrumentQuestion(
instrument=instrument1,
order=1,
question_type=ESSAY,
text="Essay question example, is it works?",
help_text="Describe something ..",
scoring=True,
answer=1
)
q1.save()
q2 = InstrumentQuestion(
order=2,
instrument=instrument1,
question_type=SINGLE_CHOICE,
text="Single choice question example, is it works?",
help_text="Choose one ..",
scoring=True,
answer=1
)
q2.save()
q2c1 = InstrumentQuestionChoice(question=q2, order=1, label="Choice 1", value=1)
q2c2 = InstrumentQuestionChoice(question=q2, order=2, label="Choice 2", value=2)
q2c3 = InstrumentQuestionChoice(question=q2, order=3, label="Choice 3", value=3)
q2c4 = InstrumentQuestionChoice(question=q2, order=4, label="Choice 4", value=4)
InstrumentQuestionChoice.objects.bulk_create([q2c1, q2c2, q2c3, q2c4])
q3 = InstrumentQuestion(
order=3,
instrument=instrument1,
question_type=MULTIPLE_CHOICE,
text="Multiple choice question example, is it works?",
help_text="Choose one or more ..",
scoring=True,
answer="1, 2"
)
q3.save()
q3c1 = InstrumentQuestionChoice(question=q3, order=1, label="Choice 1", value=1)
q3c2 = InstrumentQuestionChoice(question=q3, order=2, label="Choice 2", value=2)
q3c3 = InstrumentQuestionChoice(question=q3, order=3, label="Choice 3", value=3)
q3c4 = InstrumentQuestionChoice(question=q3, order=4, label="Choice 4", value=4)
InstrumentQuestionChoice.objects.bulk_create([q3c1, q3c2, q3c3, q3c4])
q4 = InstrumentQuestion(
order=4,
instrument=instrument1,
question_type=IMAGE_CHOICE,
text="Image choice question example, is it works?",
help_text="Choose one ..",
scoring=True,
answer="1, 2"
)
q4.save()
q4c1 = InstrumentQuestionChoice(question=q4, order=1, label="Image 1", value=1)
q4c2 = InstrumentQuestionChoice(question=q4, order=2, label="Image 2", value=2)
q4c3 = InstrumentQuestionChoice(question=q4, order=3, label="Image 3", value=3)
q4c4 = InstrumentQuestionChoice(question=q4, order=4, label="Image 4", value=4)
InstrumentQuestionChoice.objects.bulk_create([q4c1, q4c2, q4c3, q4c4])
q5 = InstrumentQuestion(
order=3,
instrument=instrument1,
question_type=MULTIPLE_CHOICE,
text="Multiple choice 2 question example, is it works?",
help_text="Choose one or more ..",
scoring=True,
answer="1"
)
q5.save()
q5c1 = InstrumentQuestionChoice(question=q5, order=1, label="Choice 1", value=1)
q5c2 = InstrumentQuestionChoice(question=q5, order=2, label="Choice 2", value=2)
q5c3 = InstrumentQuestionChoice(question=q5, order=3, label="Choice 3", value=3)
q5c4 = InstrumentQuestionChoice(question=q5, order=4, label="Choice 4", value=4)
q5c5 = InstrumentQuestionChoice(question=q5, order=5, label="Choice 5", value=5)
InstrumentQuestionChoice.objects.bulk_create([q5c1, q5c2, q5c3, q5c4, q5c5])
user1 = User.objects.get(username='rizkisasri')
with transaction.atomic():
respondents = []
users = User.objects.all()
ip = '127.0.0.{}'
for i in range(users.count()):
respondents.append(
Respondent(
ip_address=ip.format(i),
respondent=users[i],
instrument=instrument1,
response = json.dumps({
q1.id: "Text {}".format(random.choice(["one","two","tree","four"])),
q2.id: random.choice([1,2,3,4]),
q3.id: random.choice([1,2,3,4]),
q4.id: random.choice([1,2,3,4]),
q5.id: random.choice([1,2,3,4,5]),
})
)
)
Respondent.objects.bulk_create(respondents)
from numpy import int64
import pandas as pd
from django.utils.translation import gettext_lazy as _
from django_efilling.models import InstrumentQuestionChoice, ESSAY, SINGLE_CHOICE, MULTIPLE_CHOICE, IMAGE_CHOICE
class QuestionAnalizer:
def __init__(self, question, instrument_analizer):
self.questions = instrument_analizer.questions
self.question = question
self.responses = instrument_analizer.responses
self.choices = instrument_analizer.choices
def get_responses_df(self):
df = pd.DataFrame(self.responses)
if df.empty:
df = pd.DataFrame(columns=[str(id) for id in self.get_questions_id()])
return df[str(self.question["id"])]
def get_questions_id(self):
return [ question['id'] for question in self.questions ]
def get_dataframe(self):
raise NotImplementedError("%s class should implement get_dataframe() method." % self.__class__.__name__)
def get_report(self):
return self.get_dataframe().to_dict()
def get_report_transpose(self):
return self.get_dataframe().transpose().to_dict()
class EssayQuestionAnalizer(QuestionAnalizer):
def get_dataframe(self):
response_df = self.get_responses_df().value_counts().to_frame("count").sort_index()
response_df["value"] = response_df.index
response_df["label"] = response_df["value"]
response_df.index = [x for x in range(len(response_df.index))]
response_df["answer"] = response_df["count"].sum()
response_df["percent"] = response_df["count"] / response_df["answer"] * 100
return response_df
class SingleChoiceQuestionAnalizer(QuestionAnalizer):
def get_choices_dataframe(self):
choices_df = pd.DataFrame(self.choices)
return choices_df[choices_df["question"] == self.question["id"]]
def get_dataframe(self):
response_df = self.get_responses_df().value_counts().to_frame("count")
response_df["value"] = response_df.index.astype(int64)
response_df["answer"] = response_df["count"].sum()
response_df["percent"] = response_df["count"] / response_df["answer"] * 100
# Merge with choice to get label
choice_dataframe = self.get_choices_dataframe()
choice_dataframe = choice_dataframe[["label", "value"]]
choice_dataframe['value'] = choice_dataframe['value'].astype(int64)
results = pd.merge(response_df, choice_dataframe, on="value", how="left")
return results
class MultipleChoiceQuestionAnalizer(QuestionAnalizer):
def get_choices_dataframe(self):
choices_df = pd.DataFrame(self.choices)
return choices_df[choices_df["question"] == self.question["id"]]
def get_dataframe(self):
response_df = self.get_responses_df()
new_values = []
for val in response_df.values:
if isinstance(val, (list, tuple)):
for item in val:
new_values.append(item)
else:
new_values.append(val)
response_df = pd.Series(new_values)
response_df = response_df.value_counts().to_frame("count")
response_df["value"] = response_df.index.astype(int64)
response_df["answer"] = response_df["count"].sum()
response_df["percent"] = response_df["count"] / response_df["answer"] * 100
# Merge with choice to get label
choice_dataframe = self.get_choices_dataframe()
choice_dataframe = choice_dataframe[["label", "value"]]
choice_dataframe['value'] = choice_dataframe['value'].astype(int64)
results = pd.merge(response_df, choice_dataframe, on="value", how="left")
return results
class ImageChoiceQuestionAnalizer(SingleChoiceQuestionAnalizer):
pass
QUESTION_ANALIZERS = {
ESSAY: EssayQuestionAnalizer,
SINGLE_CHOICE: SingleChoiceQuestionAnalizer,
MULTIPLE_CHOICE: MultipleChoiceQuestionAnalizer,
IMAGE_CHOICE: ImageChoiceQuestionAnalizer,
}
class InstrumentAnalizer:
@property
def total_responses(self):
return len(self.respondents)
@property
def total_questions(self):
return len(self.questions)
def __init__(self, instrument):
self.instrument = instrument
self.questions = self.get_questions()
self.choices = self.get_choices()
self.respondents = self.get_respondents()
self.responses = self.get_responses()
def get_questions(self):
questions = self.instrument.questions.filter(scoring=True)
return questions.values("id", "order", "text", "help_text", "question_type")
def get_choices(self):
question_ids = [question["id"] for question in self.questions]
choices = InstrumentQuestionChoice.objects.filter(question__in=question_ids)
return choices.values("id", "question", "label", "value")
def get_respondents(self):
qs = self.instrument.respondents.select_related("respondent", "instrument")
respondents = qs.values("response", "created_at", "ip_address", "respondent__id", "respondent__username")
return respondents
def get_responses(self):
responses = [respondent["response"] for respondent in self.respondents]
return responses
def get_table_header(self):
# Build Response Table Header for Tabulator
headers = [
{"title": _("Question {}").format(question["order"]), "field": str(question["id"])}
for question in self.questions
]
return headers
def get_question_analizer(self, question):
analizer_class = QUESTION_ANALIZERS[question["question_type"]]
return analizer_class(question, self)
def get_questions_report(self):
questions_reports = list()
for question in self.questions:
question_analizer = self.get_question_analizer(question)
question_report = question_analizer.get_report()
question_report_transpose = question_analizer.get_report_transpose()
questions_reports.append(
{
"id": question["id"],
"order": question["order"],
"text": question["text"],
"help_text": question["help_text"],
"question_type": question["question_type"],
"results": question_report,
"results_transpose": question_report_transpose,
}
)
return questions_reports
def get_report(self):
report = {
"id": self.instrument.id,
"name": self.instrument.name,
"created_at": self.instrument.created_at,
"expired_at": self.instrument.expired_at,
"description": self.instrument.description,
"unique": self.instrument.unique,
"public": self.instrument.public,
"max_respondent": self.instrument.max_respondent,
"total_response": self.total_responses,
"total_questions": self.total_questions,
"completeness": (self.total_responses / self.instrument.max_respondent) * 100,
"response_headers": self.get_table_header(),
"response_list": self.responses,
"questions_reports": self.get_questions_report(),
}
return report
instrument = Instrument.objects.get(pk='a86f5c2d-2fe5-4423-8f8c-0ee6c67cdb09')
analizer = InstrumentAnalizer(instrument)
analizer.get_report()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/csd-oss/vc-investmemt/blob/master/VC_Investment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# General preparation and GDrive conection
```
import pandas as pd
import matplotlib.pyplot as plt
```
# Loading data from [OECD](https://stats.oecd.org/Index.aspx?DataSetCode=VC_INVEST#)
```
vc_path = "https://raw.githubusercontent.com/csd-oss/vc-investmemt/master/VC_INVEST_06042020205501847.csv"
df = pd.read_csv(vc_path)
df
```
## Dropping unneeded columns
```
df = df.drop(columns=["Reference Period Code","Reference Period","Flag Codes","Flags","SUBJECT","Measure","Unit","Year","Subject","SUBJECT","Development stages"])
df
```
## Dividing the data into two dataframes
```
df_usd = df.query('MEASURE == "USD_V"')
df_gdp = df.query('MEASURE == "SH_GDP"')
```
# Playing with USD data
## Creating filters
```
filt_total_us = (df_usd['STAGES'] == "VC_T") & (df_usd['LOCATION']== "USA")
filt_seed_us = (df_usd['STAGES'] == "SEED") & (df_usd['LOCATION']== "USA")
filt_start_us = (df_usd['STAGES'] == "START") & (df_usd['LOCATION']== "USA")
filt_later_us = (df_usd['STAGES'] == "LATER") & (df_usd['LOCATION']== "USA")
```
## Plotting US VC data
```
fig, ax = plt.subplots()
plt.style.use("ggplot")
ax.plot(df_usd.loc[filt_total_us].TIME, df_usd.loc[filt_total_us].Value, label = "Total")
ax.plot(df_usd.loc[filt_seed_us].TIME, df_usd.loc[filt_seed_us].Value, label = "Seed")
ax.plot(df_usd.loc[filt_start_us].TIME, df_usd.loc[filt_start_us].Value, label = "Series A")
ax.plot(df_usd.loc[filt_later_us].TIME, df_usd.loc[filt_later_us].Value, label = "Later Stages")
ax.set_xlabel("Years")
ax.set_label("Millions US$")
ax.set_title("USA VC investment")
ax.grid(True)
ax.legend()
plt.show()
```
# Playing with GDP data
```
filt_total_us = (df_gdp['STAGES'] == "VC_T") & (df_gdp['LOCATION']== "USA")
filt_seed_us = (df_gdp['STAGES'] == "SEED") & (df_gdp['LOCATION']== "USA")
filt_start_us = (df_gdp['STAGES'] == "START") & (df_gdp['LOCATION']== "USA")
filt_later_us = (df_gdp['STAGES'] == "LATER") & (df_gdp['LOCATION']== "USA")
fig, ax = plt.subplots()
plt.style.use("ggplot")
ax.plot(df_gdp.loc[filt_total_us].TIME, df_gdp.loc[filt_total_us].Value, label = "Total")
ax.plot(df_gdp.loc[filt_seed_us].TIME, df_gdp.loc[filt_seed_us].Value, label = "Seed")
ax.plot(df_gdp.loc[filt_start_us].TIME, df_gdp.loc[filt_start_us].Value, label = "Series A")
ax.plot(df_gdp.loc[filt_later_us].TIME, df_gdp.loc[filt_later_us].Value, label = "Later Stages")
ax.set_xlabel("Years")
ax.set_label("%GDP")
ax.set_title("USA VC investment")
ax.grid(True)
ax.legend()
plt.show()
```
# Countries Yearly Sum
```
filt_total = (df_usd['STAGES'] == "VC_T") & (df_usd['TIME'] >= 2007)  # not enough data before 2007
df_usd[filt_total].groupby(['TIME'])['Value'].sum().plot()
plt.title('Total investment')
plt.ylabel('Millions USD')
plt.show()
```
# Countries Yearly mean GDP Share
```
filt_total = (df_gdp['STAGES'] == "VC_T") & (df_gdp['TIME'] >= 2007)  # not enough data before 2007
df_gdp[filt_total].groupby(['TIME'])['Value'].mean().plot()
plt.title('Total investment')
plt.ylabel('% GDP')
plt.show()
```
# 2018 Pie Chart creation
```
filt_total_2018 = (df_usd['TIME']==2018)&(df_usd['STAGES']=='VC_T')
filt_other = df_usd['Value'] > 2185.094678
pie_2018 = df_usd[filt_total_2018 & filt_other]
pie_2018.drop(columns=['STAGES','MEASURE','TIME','Unit Code','PowerCode Code','PowerCode'], inplace=True)
pie_2018
pie_2018.loc[1]=['OTH', 'Other', df_usd[filt_total_2018 & ~filt_other]['Value'].sum()]
pie_2018
expl = [0,0.1,0]
plt.figure(figsize=(40,10))
plt.pie(pie_2018['Value'], explode=expl)
plt.legend(pie_2018['Country'],fontsize='11',loc='best')
# plt.style.use('qqplot')
plt.title('2018 Total Investment',fontdict={'fontsize':'20'},loc='left')
plt.show()
```
|
github_jupyter
|
## Reviewing Calculus with Python
Consider a sequence of n numbers $x_0, x_1, \cdots x_{n-1}$. We will start our index at 0, to remain in accordance with Python/Numpy's index system. $x_0$ is the first number in the sequence, $x_1$ is the second number in the sequence, and so forth, so $x_j$ is the general $j+1$ number in the sequence. We will utilize this $j$ index in our summation notation. Suppose we were to calculate the sum of all $n$ of these numbers in the sequence. We write that this sum is equivalent to
$$\sum_{j=0}^{n-1} x_j$$
Let's parse this equation. $\Sigma$ is the summation sign, indicating that we are summing a sequence. $j$ is the index of summation that is being iterated over; $j$ is being used to subscript $x$. Then, we start our summation at 0 because the lower bound on our sum is set as $j=0$. The upper bound for our sum, or our stopping point, is written as $n-1$, which is equivalent to writing $j=n-1$ on top of the $\Sigma$ symbol. Then, $x_j$ indicates the quantity that we are summing. One way to think about this is in terms of a for-loop: this summation is equivalent to a for loop over all integers in the range from the lower bound to the upper bound, indexed into the sequence $x$. Then, the code for our previous sum is:
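The original code cell is not shown here, but a minimal version of that loop might look like this (using a small example sequence):
```
# Example sequence of n numbers; any list or array would do
x = [2.0, 4.0, 6.0, 8.0]
n = len(x)

total = 0.0
for j in range(n):      # j runs from the lower bound 0 up to the upper bound n-1
    total += x[j]
print(total)            # same value as sum(x)
```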
Then, you can run through a few more examples of summation notation.
Example1:
<img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math1.svg'>
Example2:
<img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math2.svg'>
Example3:
<img src='https://blackboard.unist.ac.kr/bbcswebdav/courses/UNION_1892_ITP1170x/math3.svg'>
Then, we can also sum over several different sequences. Consider the sequences A and B, where A consists of m values $a_0, a_1, \cdots, a_{m-1}$ and B contains n values $b_0, b_1, \cdots b_{n-1}$. Then, we can calculate the sum of the product of each value in A with each value in B (a sum over $m\times n$ products). Because the $i$ index only appears in association with A, and the $j$ index with B, we can group these summations.
$$\sum_{i=0}^{m-1}\sum_{j=0}^{n-1} a_i \cdot b_j = \left(\sum_{i=0}^{m-1} a_i\right)\left(\sum_{j=0}^{n-1} b_j\right)$$
```
import numpy as np
A = np.random.rand(5)
B = np.random.rand(7)

# The double sum of all m*n products a_i * b_j factors into (sum of A) * (sum of B)
double_sum = A.sum() * B.sum()
nested_sum = sum(a * b for a in A for b in B)   # same value, written as an explicit double sum
```
Note that the following does not hold
$$\sum_{i=0}^{m-1} a_i \cdot a_i \neq \left(\sum_{i=0}^{m-1} a_i\right)\left(\sum_{i=0}^{m-1} a_i\right)$$
because this effectively treats the index $i$ in the first term independently from the $i$ in the second term of the product. Notice that in the right side of the equation, we could have interchanged the index $i$ in the second summation with $j$, without changing any of the mathematics. The sum on the left represents the summation of $m$ terms of $a_{i}^2$, whereas the sum on the right represents the summation of $m \times m$ terms - products between all possible pairs of A's terms.
```
import numpy as np
A = np.random.rand(5)

# Left side: m terms of a_i * a_i; right side: m*m cross products between all pairs
left = np.sum(A * A)
right = A.sum() * A.sum()
```
Note that typically when someone writes $\sum_{i} x_i$, this is just the sum of all values in the sequence X, and is the same as writing $\sum_{i=0}^{n-1} x_i$, where you'll usually know the value of n so you can still compute the sum.
## Matrices
Then suppose we have an $m$ by $n$ matrix that contains all products of the values in sequence A and sequence B, such that the value of $M$ at index $(i,j)$ is $a_i \cdot b_j$.
$$M = \begin{bmatrix} a_0 b_0 & a_0 b_1 & \cdots & a_0 b_{n-1} \\ a_1 b_0 & a_1 b_1 & \cdots & a_1 b_{n-1} \\ \vdots & \vdots & \ddots & \vdots \\ a_{m-1} b_0 & a_{m-1} b_1 & \cdots & a_{m-1} b_{n-1} \end{bmatrix}$$
You can form this matrix in numpy via an "outer product":
```
import numpy as np
A = np.random.rand(5)
B = np.random.rand(5)   # equal lengths here so the m = n discussion below applies
M = np.outer(A, B)      # M[i, j] == A[i] * B[j]
```
You can use this matrix to visualize several possible uses of summations. For example, suppose $m=n$. Then, if we express the sequences A and B as column vectors, the dot product of the two vectors would be the sum of the diagonal of M, or $\sum_{k=0}^{n-1} a_k \cdot b_k$. Furthermore, we can take the sum of a row using the sum $\sum_{i=0}^{n-1} a_k \cdot b_i$ for the $k^{th}$ row of the matrix, and similarly take the sum of a column with $\sum_{i=0}^{n-1} a_i \cdot b_k$ for the $k^{th}$ column of the matrix.
```
k = 2
# sum over the kth column: equals A.sum() * B[k]
col_sum = M[:, k].sum()
# sum over the kth row: equals A[k] * B.sum()
row_sum = M[k, :].sum()
```
## Kronecker Delta
To make notation for working with summations (particularly in the matrix format) even simpler, we can use the Kronecker delta function, named after the Prussian mathematician Leopold Kronecker. We use $\delta_{i,j}$ to denote the Kronecker delta function, defined as:
\begin{equation} \delta_{i,j} = \begin{cases} 0 & \text{if } i \neq j \\ 1 & \text{if } i = j \end{cases} \end{equation}
See that a Kronecker-delta can "collapse" a sum. Let $j$ be an integer between 0 and $n-1$:
$$\sum_{i=0}^{n-1} x_i \, \delta_{i,j} = x_j$$
See also that the identity matrix, $I$, can be written as $I_{i,j}=\delta_{i,j}$.
$$I = \begin{bmatrix} 1 & 0 & \cdots & 0 \\ 0 & 1 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & 1 \end{bmatrix}$$
As an exercise, write a python function that behaves as a kronecker delta, and include it in a for-loop that is computing a sum. Verify that it does indeed collapse the sum.
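One possible solution sketch for this exercise (try it yourself before reading):
```
import numpy as np

def kronecker_delta(i, j):
    """Return 1 if i == j, otherwise 0."""
    return 1 if i == j else 0

x = np.random.rand(6)
j = 3

# sum_i x_i * delta_{i,j} keeps only the i == j term, i.e. it collapses to x_j
collapsed = sum(x[i] * kronecker_delta(i, j) for i in range(len(x)))
print(np.isclose(collapsed, x[j]))   # True
```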
## Partial Derivatives
Partial derivatives are used in multivariable functions, in which we essentially derive with respect to one of these variables and treat the remaining variables as constants. For example:
$$ f(x,y) = 6 x^2 y^3 $$ $$ \frac{\partial}{\partial x} f(x,y) = 12x y^3 $$ $$ \frac{\partial}{\partial y} f(x,y) = 18 x^2 y^2 $$
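If you want to double-check partial derivatives like these symbolically, `sympy` can do it (an optional sketch, not needed for the rest of the section):
```
import sympy as sp

x, y = sp.symbols('x y')
f = 6 * x**2 * y**3
print(sp.diff(f, x))   # 12*x*y**3
print(sp.diff(f, y))   # 18*x**2*y**2
```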
So, what if we want to take a partial derivative of a sum?
Suppose that we have two vectors containing variables:
$$\vec{x} = [x_0, x_1, \cdots, x_{n-1}]$$
$$\vec{y} = [y_0, y_1, \cdots, y_{n-1}]$$
Then, in our previous section we saw that $\vec{x} \cdot \vec{y} = \sum_{i=0}^{n-1} x_i \cdot y_i$. What happens when we take the partial derivative of this sum with respect to the variable $x_{j}$? Let $f = \vec{x} \cdot \vec{y}$, then
$$\frac{\partial f}{\partial x_j} = \frac{\partial}{\partial x_j} \sum_{i=0}^{n-1} x_i \cdot y_i = \sum_{i=0}^{n-1} y_i \frac{\partial x_i}{\partial x_j}$$
It is critical to see that, because $x_i$ and $x_j$ are distinct variables (unless $i = j$),
\begin{equation} \frac{\partial x_i}{\partial x_j} = \begin{cases} 0 & \text{if } i \neq j \\ 1 & \text{if } i = j \end{cases} \end{equation}
and thus $\frac{\partial x_i}{\partial x_j} = \delta_{i,j}$.
Thus we can simplify our sum by collapsing it via the kronecker-delta:
$$\frac{\partial f}{\partial x_j} = \sum_{i=0}^{n-1} y_i \, \delta_{i,j} = y_j$$
Defining $\frac{\partial f}{\partial \vec{x}} = [\frac{\partial f}{\partial x_0}, \cdots , \frac{\partial f}{\partial x_j}, \cdots, \frac{\partial f}{\partial x_{n-1}}]$, we can see from our above result that
$$\frac{\partial f}{\partial \vec{x}} = [y_0, y_1, \cdots, y_{n-1}] = \vec{y}$$
Take a little bit of time to think through the above expression, and make sure you understand why it is true, writing out matrices to help your understanding. This is just one example of how we can apply some of the calculus we already know to vectors, leading to vector calculus, a pillar for linear algebra! This is especially important for deriving expressions used in back-propagation in machine learning.
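As a quick numerical sanity check of this result, a central-difference approximation of the gradient should match $\vec{y}$ (a sketch, not part of the original notes):
```
import numpy as np

n = 4
x = np.random.rand(n)
y = np.random.rand(n)
f = lambda v: np.dot(v, y)        # f = x . y

eps = 1e-6
grad = np.array([(f(x + eps * np.eye(n)[j]) - f(x - eps * np.eye(n)[j])) / (2 * eps)
                 for j in range(n)])
print(np.allclose(grad, y))       # True: d(x . y)/dx = y
```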
The end of the document.
|
github_jupyter
|
# Gaussian Mixture Model
This tutorial demonstrates how to marginalize out discrete latent variables in Pyro through the motivating example of a mixture model. We'll focus on the mechanics of parallel enumeration, keeping the model simple by training a trivial 1-D Gaussian model on a tiny 5-point dataset. See also the [enumeration tutorial](http://pyro.ai/examples/enumeration.html) for a broader introduction to parallel enumeration.
#### Table of contents
- [Overview](#Overview)
- [Training a MAP estimator](#Training-a-MAP-estimator)
- [Serving the model: predicting membership](#Serving-the-model:-predicting-membership)
- [Predicting membership using discrete inference](#Predicting-membership-using-discrete-inference)
- [Predicting membership by enumerating in the guide](#Predicting-membership-by-enumerating-in-the-guide)
- [MCMC](#MCMC)
```
import os
from collections import defaultdict
import torch
import numpy as np
import scipy.stats
from torch.distributions import constraints
from matplotlib import pyplot
%matplotlib inline
import pyro
import pyro.distributions as dist
from pyro import poutine
from pyro.infer.autoguide import AutoDelta
from pyro.optim import Adam
from pyro.infer import SVI, TraceEnum_ELBO, config_enumerate, infer_discrete
smoke_test = ('CI' in os.environ)
assert pyro.__version__.startswith('1.3.1')
pyro.enable_validation(True)
```
## Overview
Pyro's [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.TraceEnum_ELBO) can automatically marginalize out variables in both the guide and the model. When enumerating guide variables, Pyro can either enumerate sequentially (which is useful if the variables determine downstream control flow), or enumerate in parallel by allocating a new tensor dimension and using nonstandard evaluation to create a tensor of possible values at the variable's sample site. These nonstandard values are then replayed in the model. When enumerating variables in the model, the variables must be enumerated in parallel and must not appear in the guide. Mathematically, guide-side enumeration simply reduces variance in a stochastic ELBO by enumerating all values, whereas model-side enumeration avoids an application of Jensen's inequality by exactly marginalizing out a variable.
Here is our tiny dataset. It has five points.
```
data = torch.tensor([0., 1., 10., 11., 12.])
```
## Training a MAP estimator
Let's start by learning model parameters `weights`, `locs`, and `scale` given priors and data. We will learn point estimates of these using an [AutoDelta](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) guide (named after its delta distributions). Our model will learn global mixture weights, the location of each mixture component, and a shared scale that is common to both components. During inference, [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.TraceEnum_ELBO) will marginalize out the assignments of datapoints to clusters.
```
K = 2 # Fixed number of components.
@config_enumerate
def model(data):
# Global variables.
weights = pyro.sample('weights', dist.Dirichlet(0.5 * torch.ones(K)))
scale = pyro.sample('scale', dist.LogNormal(0., 2.))
with pyro.plate('components', K):
locs = pyro.sample('locs', dist.Normal(0., 10.))
with pyro.plate('data', len(data)):
# Local variables.
assignment = pyro.sample('assignment', dist.Categorical(weights))
pyro.sample('obs', dist.Normal(locs[assignment], scale), obs=data)
```
To run inference with this `(model,guide)` pair, we use Pyro's [config_enumerate()](http://docs.pyro.ai/en/dev/poutine.html#pyro.infer.enum.config_enumerate) handler to enumerate over all assignments in each iteration. Since we've wrapped the batched Categorical assignments in a [pyro.plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) independence context, this enumeration can happen in parallel: we enumerate only 2 possibilities, rather than `2**len(data) = 32`. Finally, to use the parallel version of enumeration, we inform Pyro that we're only using a single [plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) via `max_plate_nesting=1`; this lets Pyro know that we're using the rightmost dimension for the [plate](http://docs.pyro.ai/en/dev/primitives.html#pyro.plate) and that Pyro can use any other dimension for parallelization.
```
optim = pyro.optim.Adam({'lr': 0.1, 'betas': [0.8, 0.99]})
elbo = TraceEnum_ELBO(max_plate_nesting=1)
```
Before inference we'll initialize to plausible values. Mixture models are very susceptible to local modes. A common approach is to choose the best among many random initializations, where the cluster means are initialized from random subsamples of the data. Since we're using an [AutoDelta](http://docs.pyro.ai/en/dev/infer.autoguide.html#autodelta) guide, we can initialize by defining a custom ``init_loc_fn()``.
```
def init_loc_fn(site):
if site["name"] == "weights":
# Initialize weights to uniform.
return torch.ones(K) / K
if site["name"] == "scale":
return (data.var() / 2).sqrt()
if site["name"] == "locs":
return data[torch.multinomial(torch.ones(len(data)) / len(data), K)]
raise ValueError(site["name"])
def initialize(seed):
global global_guide, svi
pyro.set_rng_seed(seed)
pyro.clear_param_store()
global_guide = AutoDelta(poutine.block(model, expose=['weights', 'locs', 'scale']),
init_loc_fn=init_loc_fn)
svi = SVI(model, global_guide, optim, loss=elbo)
return svi.loss(model, global_guide, data)
# Choose the best among 100 random initializations.
loss, seed = min((initialize(seed), seed) for seed in range(100))
initialize(seed)
print('seed = {}, initial_loss = {}'.format(seed, loss))
```
During training, we'll collect both losses and gradient norms to monitor convergence. We can do this using PyTorch's `.register_hook()` method.
```
# Register hooks to monitor gradient norms.
gradient_norms = defaultdict(list)
for name, value in pyro.get_param_store().named_parameters():
value.register_hook(lambda g, name=name: gradient_norms[name].append(g.norm().item()))
losses = []
for i in range(200 if not smoke_test else 2):
loss = svi.step(data)
losses.append(loss)
print('.' if i % 100 else '\n', end='')
pyplot.figure(figsize=(10,3), dpi=100).set_facecolor('white')
pyplot.plot(losses)
pyplot.xlabel('iters')
pyplot.ylabel('loss')
pyplot.yscale('log')
pyplot.title('Convergence of SVI');
pyplot.figure(figsize=(10,4), dpi=100).set_facecolor('white')
for name, grad_norms in gradient_norms.items():
pyplot.plot(grad_norms, label=name)
pyplot.xlabel('iters')
pyplot.ylabel('gradient norm')
pyplot.yscale('log')
pyplot.legend(loc='best')
pyplot.title('Gradient norms during SVI');
```
Here are the learned parameters:
```
map_estimates = global_guide(data)
weights = map_estimates['weights']
locs = map_estimates['locs']
scale = map_estimates['scale']
print('weights = {}'.format(weights.data.numpy()))
print('locs = {}'.format(locs.data.numpy()))
print('scale = {}'.format(scale.data.numpy()))
```
The model's `weights` are as expected, with about 2/5 of the data in the first component and 3/5 in the second component. Next let's visualize the mixture model.
```
X = np.arange(-3,15,0.1)
Y1 = weights[0].item() * scipy.stats.norm.pdf((X - locs[0].item()) / scale.item())
Y2 = weights[1].item() * scipy.stats.norm.pdf((X - locs[1].item()) / scale.item())
pyplot.figure(figsize=(10, 4), dpi=100).set_facecolor('white')
pyplot.plot(X, Y1, 'r-')
pyplot.plot(X, Y2, 'b-')
pyplot.plot(X, Y1 + Y2, 'k--')
pyplot.plot(data.data.numpy(), np.zeros(len(data)), 'k*')
pyplot.title('Density of two-component mixture model')
pyplot.ylabel('probability density');
```
Finally, note that optimization with mixture models is non-convex and can often get stuck in local optima. For example, in this tutorial we observed that the mixture model gets stuck in an everything-in-one-cluster hypothesis if `scale` is initialized to be too large.
## Serving the model: predicting membership
Now that we've trained a mixture model, we might want to use the model as a classifier.
During training we marginalized out the assignment variables in the model. While this provides fast convergence, it prevents us from reading the cluster assignments from the guide. We'll discuss two options for treating the model as a classifier: first using [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.discrete.infer_discrete) (much faster) and second by training a secondary guide using enumeration inside SVI (slower but more general).
### Predicting membership using discrete inference
The fastest way to predict membership is to use the [infer_discrete](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.discrete.infer_discrete) handler, together with `trace` and `replay`. Let's start out with a MAP classifier, setting `infer_discrete`'s temperature parameter to zero. For a deeper look at effect handlers like `trace`, `replay`, and `infer_discrete`, see the [effect handler tutorial](http://pyro.ai/examples/effect_handlers.html).
```
guide_trace = poutine.trace(global_guide).get_trace(data) # record the globals
trained_model = poutine.replay(model, trace=guide_trace) # replay the globals
def classifier(data, temperature=0):
inferred_model = infer_discrete(trained_model, temperature=temperature,
first_available_dim=-2) # avoid conflict with data plate
trace = poutine.trace(inferred_model).get_trace(data)
return trace.nodes["assignment"]["value"]
print(classifier(data))
```
Indeed, we can run this classifier on new data:
```
new_data = torch.arange(-3, 15, 0.1)
assignment = classifier(new_data)
pyplot.figure(figsize=(8, 2), dpi=100).set_facecolor('white')
pyplot.plot(new_data.numpy(), assignment.numpy())
pyplot.title('MAP assignment')
pyplot.xlabel('data value')
pyplot.ylabel('class assignment');
```
To generate random posterior assignments rather than MAP assignments, we could set `temperature=1`.
```
print(classifier(data, temperature=1))
```
Since the classes are very well separated, we zoom in to the boundary between classes, around 5.75.
```
new_data = torch.arange(5.5, 6.0, 0.005)
assignment = classifier(new_data, temperature=1)
pyplot.figure(figsize=(8, 2), dpi=100).set_facecolor('white')
pyplot.plot(new_data.numpy(), assignment.numpy(), 'bx', color='C0')
pyplot.title('Random posterior assignment')
pyplot.xlabel('data value')
pyplot.ylabel('class assignment');
```
### Predicting membership by enumerating in the guide
A second way to predict class membership is to enumerate in the guide. This doesn't work well for serving classifier models, since we need to run stochastic optimization for each new input data batch, but it is more general in that it can be embedded in larger variational models.
To read cluster assignments from the guide, we'll define a new `full_guide` that fits both global parameters (as above) and local parameters (which were previously marginalized out). Since we've already learned good values for the global variables, we will block SVI from updating those by using [poutine.block](http://docs.pyro.ai/en/dev/poutine.html#pyro.poutine.block).
```
@config_enumerate
def full_guide(data):
# Global variables.
with poutine.block(hide_types=["param"]): # Keep our learned values of global parameters.
global_guide(data)
# Local variables.
with pyro.plate('data', len(data)):
assignment_probs = pyro.param('assignment_probs', torch.ones(len(data), K) / K,
constraint=constraints.unit_interval)
pyro.sample('assignment', dist.Categorical(assignment_probs))
optim = pyro.optim.Adam({'lr': 0.2, 'betas': [0.8, 0.99]})
elbo = TraceEnum_ELBO(max_plate_nesting=1)
svi = SVI(model, full_guide, optim, loss=elbo)
# Register hooks to monitor gradient norms.
gradient_norms = defaultdict(list)
svi.loss(model, full_guide, data) # Initializes param store.
for name, value in pyro.get_param_store().named_parameters():
value.register_hook(lambda g, name=name: gradient_norms[name].append(g.norm().item()))
losses = []
for i in range(200 if not smoke_test else 2):
loss = svi.step(data)
losses.append(loss)
print('.' if i % 100 else '\n', end='')
pyplot.figure(figsize=(10,3), dpi=100).set_facecolor('white')
pyplot.plot(losses)
pyplot.xlabel('iters')
pyplot.ylabel('loss')
pyplot.yscale('log')
pyplot.title('Convergence of SVI');
pyplot.figure(figsize=(10,4), dpi=100).set_facecolor('white')
for name, grad_norms in gradient_norms.items():
pyplot.plot(grad_norms, label=name)
pyplot.xlabel('iters')
pyplot.ylabel('gradient norm')
pyplot.yscale('log')
pyplot.legend(loc='best')
pyplot.title('Gradient norms during SVI');
```
We can now examine the guide's local `assignment_probs` variable.
```
assignment_probs = pyro.param('assignment_probs')
pyplot.figure(figsize=(8, 3), dpi=100).set_facecolor('white')
pyplot.plot(data.data.numpy(), assignment_probs.data.numpy()[:, 0], 'ro',
label='component with mean {:0.2g}'.format(locs[0]))
pyplot.plot(data.data.numpy(), assignment_probs.data.numpy()[:, 1], 'bo',
label='component with mean {:0.2g}'.format(locs[1]))
pyplot.title('Mixture assignment probabilities')
pyplot.xlabel('data value')
pyplot.ylabel('assignment probability')
pyplot.legend(loc='center');
```
## MCMC
Next we'll explore the full posterior over component parameters using collapsed NUTS, i.e. we'll use NUTS and marginalize out all discrete latent variables.
```
from pyro.infer.mcmc.api import MCMC
from pyro.infer.mcmc import NUTS
pyro.set_rng_seed(2)
kernel = NUTS(model)
mcmc = MCMC(kernel, num_samples=250, warmup_steps=50)
mcmc.run(data)
posterior_samples = mcmc.get_samples()
X, Y = posterior_samples["locs"].t()
pyplot.figure(figsize=(8, 8), dpi=100).set_facecolor('white')
h, xs, ys, image = pyplot.hist2d(X.numpy(), Y.numpy(), bins=[20, 20])
pyplot.contour(np.log(h + 3).T, extent=[xs.min(), xs.max(), ys.min(), ys.max()],
colors='white', alpha=0.8)
pyplot.title('Posterior density as estimated by collapsed NUTS')
pyplot.xlabel('loc of component 0')
pyplot.ylabel('loc of component 1')
pyplot.tight_layout()
```
Note that due to nonidentifiability of the mixture components the likelihood landscape has two equally likely modes, near `(11,0.5)` and `(0.5,11)`. NUTS has difficulty switching between the two modes.
```
pyplot.figure(figsize=(8, 3), dpi=100).set_facecolor('white')
pyplot.plot(X.numpy(), color='red')
pyplot.plot(Y.numpy(), color='blue')
pyplot.xlabel('NUTS step')
pyplot.ylabel('loc')
pyplot.title('Trace plot of loc parameter during NUTS inference')
pyplot.tight_layout()
```
|
github_jupyter
|
```
%matplotlib inline
```
# Cross-validation on diabetes Dataset Exercise
A tutorial exercise which uses cross-validation with linear models.
This exercise is used in the `cv_estimators_tut` part of the
`model_selection_tut` section of the `stat_learn_tut_index`.
```
from __future__ import print_function
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import LassoCV
from sklearn.linear_model import Lasso
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
diabetes = datasets.load_diabetes()
X = diabetes.data[:150]
y = diabetes.target[:150]
lasso = Lasso(random_state=0)
alphas = np.logspace(-4, -0.5, 30)
tuned_parameters = [{'alpha': alphas}]
n_folds = 3
clf = GridSearchCV(lasso, tuned_parameters, cv=n_folds, refit=False)
clf.fit(X, y)
scores = clf.cv_results_['mean_test_score']
scores_std = clf.cv_results_['std_test_score']
plt.figure().set_size_inches(8, 6)
plt.semilogx(alphas, scores)
# plot error lines showing +/- std. errors of the scores
std_error = scores_std / np.sqrt(n_folds)
plt.semilogx(alphas, scores + std_error, 'b--')
plt.semilogx(alphas, scores - std_error, 'b--')
# alpha=0.2 controls the translucency of the fill color
plt.fill_between(alphas, scores + std_error, scores - std_error, alpha=0.2)
plt.ylabel('CV score +/- std error')
plt.xlabel('alpha')
plt.axhline(np.max(scores), linestyle='--', color='.5')
plt.xlim([alphas[0], alphas[-1]])
# #############################################################################
# Bonus: how much can you trust the selection of alpha?
# To answer this question we use the LassoCV object that sets its alpha
# parameter automatically from the data by internal cross-validation (i.e. it
# performs cross-validation on the training data it receives).
# We use external cross-validation to see how much the automatically obtained
# alphas differ across different cross-validation folds.
lasso_cv = LassoCV(alphas=alphas, random_state=0)
k_fold = KFold(3)
print("Answer to the bonus question:",
"how much can you trust the selection of alpha?")
print()
print("Alpha parameters maximising the generalization score on different")
print("subsets of the data:")
for k, (train, test) in enumerate(k_fold.split(X, y)):
lasso_cv.fit(X[train], y[train])
print("[fold {0}] alpha: {1:.5f}, score: {2:.5f}".
format(k, lasso_cv.alpha_, lasso_cv.score(X[test], y[test])))
print()
print("Answer: Not very much since we obtained different alphas for different")
print("subsets of the data and moreover, the scores for these alphas differ")
print("quite substantially.")
plt.show()
```
|
github_jupyter
|
```
import plaidml.keras
plaidml.keras.install_backend()
import os
os.environ["KERAS_BACKEND"] = "plaidml.keras.backend"
# Importing useful libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional, Conv1D, Flatten, MaxPooling1D
from keras.optimizers import SGD
import math
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from keras import optimizers
import time
```
### Data Processing
```
df = pd.read_csv('../data/num_data.csv')
dataset = df
dataset.shape
def return_rmse(test,predicted):
rmse = math.sqrt(mean_squared_error(test, predicted))
return rmse
data_size = dataset.shape[0]
train_size=int(data_size * 0.6)
test_size = 100
valid_size = data_size - train_size - test_size
test_next_day = [12, 24, 48]
training_set = dataset[:train_size].iloc[:,4:16].values
valid_set = dataset[train_size:train_size+valid_size].iloc[:,4:16].values
test_set = dataset[data_size-test_size:].iloc[:,4:16].values
y = dataset.iloc[:,4].values
y = y.reshape(-1,1)
n_feature = training_set.shape[1]
y.shape
# Scaling the dataset (fit the scaler on the training set only, then reuse it for the other splits)
sc = MinMaxScaler(feature_range=(0,1))
training_set_scaled = sc.fit_transform(training_set)
valid_set_scaled = sc.transform(valid_set)
test_set_scaled = sc.transform(test_set)
sc_y = MinMaxScaler(feature_range=(0,1))
y_scaled = sc_y.fit_transform(y)
# split a multivariate sequence into samples
position_of_target = 4
def split_sequences(sequences, n_steps_in, n_steps_out):
X_, y_ = list(), list()
for i in range(len(sequences)):
# find the end of this pattern
end_ix = i + n_steps_in
out_end_ix = end_ix + n_steps_out-1
# check if we are beyond the dataset
if out_end_ix > len(sequences):
break
# gather input and output parts of the pattern
seq_x, seq_y = sequences[i:end_ix, :], sequences[end_ix-1:out_end_ix, position_of_target]
X_.append(seq_x)
y_.append(seq_y)
return np.array(X_), np.array(y_)
n_steps_in = 12
n_steps_out = 12
X_train, y_train = split_sequences(training_set_scaled, n_steps_in, n_steps_out)
X_valid, y_valid = split_sequences(valid_set_scaled, n_steps_in, n_steps_out)
X_test, y_test = split_sequences(test_set_scaled, n_steps_in, n_steps_out)
GRU_LSTM_reg = Sequential()
GRU_LSTM_reg.add(GRU(units=50, return_sequences=True, input_shape=(X_train.shape[1],n_feature), activation='tanh'))
GRU_LSTM_reg.add(LSTM(units=50, activation='tanh'))
GRU_LSTM_reg.add(Dense(units=n_steps_out))
DFS_2LSTM = Sequential()
DFS_2LSTM.add(Conv1D(filters=64, kernel_size=6, activation='tanh', input_shape=(X_train.shape[1],n_feature)))
DFS_2LSTM.add(MaxPooling1D(pool_size=4))
DFS_2LSTM.add(Dropout(0.2))
DFS_2LSTM.add(LSTM(units=50, return_sequences=True, input_shape=(X_train.shape[1],n_feature), activation='tanh'))
DFS_2LSTM.add(LSTM(units=50, activation='tanh'))
DFS_2LSTM.add(Dropout(0.190 + 0.0025 * n_steps_in))
DFS_2LSTM.add(Dense(units=n_steps_out))
# Compiling the RNNs
adam = optimizers.Adam(lr=0.01)
GRU_LSTM_reg.compile(optimizer=adam,loss='mean_squared_error')
DFS_2LSTM.compile(optimizer=adam,loss='mean_squared_error')
RnnModelDict = {'GRU_LSTM': GRU_LSTM_reg}
rmse_df = pd.DataFrame(columns=['Model', 'train_rmse', 'valid_rmse', 'train_time'])
# RnnModelDict = {'LSTM_GRU': LSTM_GRU_reg}
for model in RnnModelDict:
regressor = RnnModelDict[model]
print('training start for', model)
start = time.process_time()
regressor.fit(X_train,y_train,epochs=50,batch_size=1024)
train_time = round(time.process_time() - start, 2)
print('results for training set')
y_train_pred = regressor.predict(X_train)
# plot_predictions(y_train,y_train_pred)
train_rmse = return_rmse(y_train,y_train_pred)
print('results for valid set')
y_valid_pred = regressor.predict(X_valid)
# plot_predictions(y_valid,y_valid_pred)
valid_rmse = return_rmse(y_valid,y_valid_pred)
# print('results for test set - 24 hours')
# y_test_pred24 = regressor.predict(X_test_24)
# plot_predictions(y_test_24,y_test_pred24)
# test24_rmse = return_rmse(y_test_24,y_test_pred24)
one_df = pd.DataFrame([[model, train_rmse, valid_rmse, train_time]],
columns=['Model', 'train_rmse', 'valid_rmse', 'train_time'])
rmse_df = pd.concat([rmse_df, one_df])
# save the rmse results
# rmse_df.to_csv('../rmse_24h_plus_time.csv')
history = regressor.fit(X_train, y_train, epochs=50, batch_size=1024, validation_data=(X_valid, y_valid),
verbose=2, shuffle=False)
# plot history
plt.figure(figsize=(30, 15))
plt.plot(history.history['loss'], label='Training')
plt.plot(history.history['val_loss'], label='Validation')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Transform back and plot
y_train_origin = y[:train_size-46]
y_valid_origin = y[train_size:train_size+valid_size]
y_train_pred = regressor.predict(X_train)
y_train_pred_origin = sc_y.inverse_transform(y_train_pred)
y_valid_pred = regressor.predict(X_valid)
y_valid_pred_origin = sc_y.inverse_transform(y_valid_pred)
_y_train_pred_origin = y_train_pred_origin[:, 0:1]
_y_valid_pred_origin = y_valid_pred_origin[:, 0:1]
# NOTE: `valid_original` (the validation-period target values as a pandas Series with a
# datetime index) is assumed to be defined elsewhere; it is not created in this cell.
plt.figure(figsize=(20, 8));
plt.plot(pd.to_datetime(valid_original.index), valid_original,
         alpha=0.5, color='red', label='Actual PM2.5 Concentration')
plt.plot(pd.to_datetime(valid_original.index), y_valid_pred_origin[:,0:1],
         alpha=0.5, color='blue', label='Predicted PM2.5 Concentration')
plt.title('PM2.5 Concentration Prediction')
plt.xlabel('Time')
plt.ylabel('PM2.5 Concentration')
plt.legend()
plt.show()
sample = 500
plt.figure(figsize=(20, 8));
plt.plot(pd.to_datetime(valid_original.index[-sample:]), valid_original[-sample:],
         alpha=0.5, color='red', label='Actual PM2.5 Concentration')
plt.plot(pd.to_datetime(valid_original.index[-sample:]), y_valid_pred_origin[:,11:12][-sample:],
         alpha=0.5, color='blue', label='Predicted PM2.5 Concentration')
plt.title('PM2.5 Concentration Prediction')
plt.xlabel('Time')
plt.ylabel('PM2.5 Concentration')
plt.legend()
plt.show()
```
# Optimization Methods
Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
<img src="images/cost.jpg" style="width:650px;height:300px;">
<caption><center> <u> **Figure 1** </u>: **Minimizing the cost is like finding the lowest point in a hilly landscape**<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
**Notations**: As usual, $\frac{\partial J}{\partial a } = $ `da` for any variable `a`.
To get started, run the following code to import the libraries you will need.
```
import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
```
## 1 - Gradient Descent
A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
**Warm-up exercise**: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
"""
Update parameters using one step of gradient descent
Arguments:
parameters -- python dictionary containing your parameters to be updated:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients to update each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
learning_rate -- the learning rate, scalar.
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Update rule for each parameter
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] -= learning_rate * grads["dW" + str(l+1)]
parameters["b" + str(l+1)] -= learning_rate * grads["db" + str(l+1)]
### END CODE HERE ###
return parameters
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
```
**Expected Output**:
<table>
<tr>
<td > **W1** </td>
<td > [[ 1.63535156 -0.62320365 -0.53718766]
[-1.07799357 0.85639907 -2.29470142]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.74604067]
[-0.75184921]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.32171798 -0.25467393 1.46902454]
[-2.05617317 -0.31554548 -0.3756023 ]
[ 1.1404819 -1.09976462 -0.1612551 ]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.88020257]
[ 0.02561572]
[ 0.57539477]] </td>
</tr>
</table>
A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
- **(Batch) Gradient Descent**:
``` python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
# Forward propagation
a, caches = forward_propagation(X, parameters)
# Compute cost.
cost = compute_cost(a, Y)
# Backward propagation.
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
- **Stochastic Gradient Descent**:
```python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
for j in range(0, m):
# Forward propagation
a, caches = forward_propagation(X[:,j], parameters)
# Compute cost
cost = compute_cost(a, Y[:,j])
# Backward propagation
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 2** </u><font color='purple'> : **SGD vs GD**<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
**Note** also that implementing SGD requires 3 for-loops in total (sketched below):
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
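To make this loop structure explicit, here is a rough sketch reusing the helper names from the pseudo-code above (nothing new is defined here):
```python
for i in range(0, num_iterations):                  # loop 1: over iterations
    for j in range(0, m):                           # loop 2: over the m training examples
        a, caches = forward_propagation(X[:, j], parameters)
        cost = compute_cost(a, Y[:, j])
        grads = backward_propagation(a, caches, parameters)
        for l in range(L):                          # loop 3: over the layers
            parameters["W" + str(l + 1)] -= learning_rate * grads["dW" + str(l + 1)]
            parameters["b" + str(l + 1)] -= learning_rate * grads["db" + str(l + 1)]
```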
In practice, you'll often get faster results if you use neither the whole training set nor only a single training example to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> **Figure 3** </u>: <font color='purple'> **SGD vs Mini-Batch GD**<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
<font color='blue'>
**What you should remember**:
- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
- You have to tune a learning rate hyperparameter $\alpha$.
- With a well-tuned mini-batch size, it usually outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
## 2 - Mini-Batch Gradient descent
Let's learn how to build mini-batches from the training set (X, Y).
There are two steps:
- **Shuffle**: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y, so that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
- **Partition**: Partition the shuffled (X, Y) into mini-batches of size `mini_batch_size` (here 64). Note that the number of training examples is not always divisible by `mini_batch_size`. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full `mini_batch_size`, it will look like this:
<img src="images/kiank_partition.png" style="width:550px;height:300px;">
**Exercise**: Implement `random_mini_batches`. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
```python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
```
Note that the last mini-batch might end up smaller than `mini_batch_size=64`. Let $\lfloor s \rfloor$ represent $s$ rounded down to the nearest integer (this is `math.floor(s)` in Python). If the total number of examples is not a multiple of `mini_batch_size=64`, then there will be $\lfloor \frac{m}{mini\_batch\_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be $m - mini\_batch\_size \times \lfloor \frac{m}{mini\_batch\_size}\rfloor$. For example, with $m = 148$ there are $\lfloor \frac{148}{64} \rfloor = 2$ full mini-batches and a final mini-batch of $148 - 2 \times 64 = 20$ examples, which matches the expected shapes below.
```
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
"""
Creates a list of random minibatches from (X, Y)
Arguments:
X -- input data, of shape (input size, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
mini_batch_size -- size of the mini-batches, integer
Returns:
mini_batches -- list of synchronous (mini_batch_X, mini_batch_Y)
"""
np.random.seed(seed) # To make your "random" minibatches the same as ours
m = X.shape[1] # number of training examples
mini_batches = []
# Step 1: Shuffle (X, Y)
permutation = list(np.random.permutation(m))
shuffled_X = X[:, permutation]
shuffled_Y = Y[:, permutation].reshape((1,m))
# Step 2: Partition (shuffled_X, shuffled_Y). Minus the end case.
num_complete_minibatches = math.floor(m/mini_batch_size) # number of mini batches of size mini_batch_size in your partitionning
for k in range(0, num_complete_minibatches):
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, k * mini_batch_size : (k+1) * mini_batch_size]
mini_batch_Y = shuffled_Y[:, k * mini_batch_size : (k+1) * mini_batch_size]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
# Handling the end case (last mini-batch < mini_batch_size)
if m % mini_batch_size != 0:
### START CODE HERE ### (approx. 2 lines)
mini_batch_X = shuffled_X[:, num_complete_minibatches * mini_batch_size: ]
mini_batch_Y = shuffled_Y[:, num_complete_minibatches * mini_batch_size: ]
### END CODE HERE ###
mini_batch = (mini_batch_X, mini_batch_Y)
mini_batches.append(mini_batch)
return mini_batches
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td > **shape of the 1st mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_X** </td>
<td > (12288, 20) </td>
</tr>
<tr>
<td > **shape of the 1st mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_Y** </td>
<td > (1, 20) </td>
</tr>
<tr>
<td > **mini batch sanity check** </td>
<td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
</tr>
</table>
<font color='blue'>
**What you should remember**:
- Shuffling and Partitioning are the two steps required to build mini-batches
- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
## 3 - Momentum
Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'>**Figure 4**</u><font color='purple'>: The red arrows show the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
**Exercise**: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the `grads` dictionary, that is:
for $l =1,...,L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
**Note** that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the `for` loop.
```
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
"""
Initializes the velocity as a python dictionary with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
Returns:
v -- python dictionary containing the current velocity.
v['dW' + str(l)] = velocity of dWl
v['db' + str(l)] = velocity of dbl
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
# Initialize velocity
for l in range(L):
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = np.zeros((parameters['W' + str(l+1)].shape[0], parameters['W' + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters['b' + str(l+1)].shape[0], parameters['b' + str(l+1)].shape[1]))
### END CODE HERE ###
return v
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td > **v["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
</table>
**Exercise**: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
$$ \begin{cases}
v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \\
W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
\end{cases}\tag{3}$$
$$\begin{cases}
v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \\
b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
\end{cases}\tag{4}$$
where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the `parameters` dictionary. Note that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
"""
Update parameters using Momentum
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- python dictionary containing the current velocity:
v['dW' + str(l)] = ...
v['db' + str(l)] = ...
beta -- the momentum hyperparameter, scalar
learning_rate -- the learning rate, scalar
Returns:
parameters -- python dictionary containing your updated parameters
v -- python dictionary containing your updated velocities
"""
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update for each parameter
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
# compute velocities
v["dW" + str(l+1)] = beta * v['dW' + str(l+1)] + (1 - beta) * grads['dW' + str(l+1)]
v["db" + str(l+1)] = beta * v['db' + str(l+1)] + (1 - beta) * grads['db' + str(l+1)]
# update parameters
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v["db" + str(l+1)]
### END CODE HERE ###
return parameters, v
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
```
**Expected Output**:
<table style="width:90%">
<tr>
<td > **W1** </td>
<td > [[ 1.62544598 -0.61290114 -0.52907334]
[-1.07347112 0.86450677 -2.30085497]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.74493465]
[-0.76027113]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.31930698 -0.24990073 1.4627996 ]
[-2.05974396 -0.32173003 -0.38320915]
[ 1.13444069 -1.0998786 -0.1713109 ]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.87809283]
[ 0.04055394]
[ 0.58207317]] </td>
</tr>
<tr>
<td > **v["dW1"]** </td>
<td > [[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[-0.01228902]
[-0.09357694]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]</td>
</tr>
</table>
**Note** that:
- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum (checked briefly below).
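As a quick check of that second point, using only equations (1)-(4) above: setting $\beta = 0$ in equations (3)-(4) gives
$$ v_{dW^{[l]}} = dW^{[l]}, \qquad W^{[l]} = W^{[l]} - \alpha \, dW^{[l]}, $$
and similarly for $b^{[l]}$, which is exactly the plain gradient descent update of equations (1)-(2).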
**How do you choose $\beta$?**
- The larger the momentum $\beta$, the smoother the update, because it takes more of the past gradients into account. But if $\beta$ is too big, it can also smooth out the updates too much.
- Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
- Tuning the optimal $\beta$ for your model may require trying several values to see what works best in terms of reducing the value of the cost function $J$ (see the short numerical sketch below).
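As a quick numerical illustration (not part of the graded assignment), the snippet below applies the exponentially weighted average of equation (3) to a noisy stream of scalar "gradients" for two values of $\beta$; the larger $\beta$ yields a much smoother velocity, at the price of reacting more slowly:
```python
import numpy as np

np.random.seed(1)
grads = 1.0 + np.random.randn(200)       # noisy scalar gradients around a true value of 1.0

def velocity_trace(grads, beta):
    """Exponentially weighted average v <- beta*v + (1-beta)*g, starting from v = 0."""
    v, trace = 0.0, []
    for g in grads:
        v = beta * v + (1 - beta) * g
        trace.append(v)
    return np.array(trace)

for beta in (0.5, 0.9):
    trace = velocity_trace(grads, beta)
    # the spread of the velocity shrinks as beta grows, i.e. the updates get smoother
    print(beta, round(float(np.std(trace[20:])), 3))
```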
<font color='blue'>
**What you should remember**:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
## 4 - Adam
Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
**How does Adam work?**
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \\
v^{corrected}_{dW^{[l]}} = \frac{v_{dW^{[l]}}}{1 - (\beta_1)^t} \\
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \\
s^{corrected}_{dW^{[l]}} = \frac{s_{dW^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{dW^{[l]}}}{\sqrt{s^{corrected}_{dW^{[l]}}} + \varepsilon}
\end{cases}$$
where:
- $t$ counts the number of steps taken by Adam
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero
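A brief word on the $\frac{1}{1 - (\beta_1)^t}$ and $\frac{1}{1 - (\beta_2)^t}$ factors, which the update rule above uses but does not motivate: because $v$ and $s$ are initialized to zero, their first few values are biased toward zero. For instance, at $t = 1$,
$$ v_{dW^{[l]}} = (1 - \beta_1)\, dW^{[l]}, \qquad v^{corrected}_{dW^{[l]}} = \frac{(1 - \beta_1)\, dW^{[l]}}{1 - (\beta_1)^1} = dW^{[l]}, $$
so the bias correction exactly undoes this start-up bias, and its effect fades as $t$ grows since $(\beta_1)^t \to 0$.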
As usual, we will store all parameters in the `parameters` dictionary
**Exercise**: Initialize the Adam variables $v, s$ which keep track of the past information.
**Instruction**: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for `grads`, that is:
for $l = 1, ..., L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
```
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
"""
Initializes v and s as two python dictionaries with:
- keys: "dW1", "db1", ..., "dWL", "dbL"
- values: numpy arrays of zeros of the same shape as the corresponding gradients/parameters.
Arguments:
parameters -- python dictionary containing your parameters.
parameters["W" + str(l)] = Wl
parameters["b" + str(l)] = bl
Returns:
v -- python dictionary that will contain the exponentially weighted average of the gradient.
v["dW" + str(l)] = ...
v["db" + str(l)] = ...
s -- python dictionary that will contain the exponentially weighted average of the squared gradient.
s["dW" + str(l)] = ...
s["db" + str(l)] = ...
"""
L = len(parameters) // 2 # number of layers in the neural networks
v = {}
s = {}
# Initialize v, s. Input: "parameters". Outputs: "v, s".
for l in range(L):
### START CODE HERE ### (approx. 4 lines)
v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
s["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
s["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
### END CODE HERE ###
return v, s
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td > **v["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **s["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **s["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **s["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **s["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
</table>
**Exercise**: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) (\frac{\partial J }{\partial W^{[l]} })^2 \\
s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
\end{cases}$$
**Note** that the iterator `l` starts at 0 in the `for` loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift `l` to `l+1` when coding.
```
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
"""
Update parameters using Adam
Arguments:
parameters -- python dictionary containing your parameters:
parameters['W' + str(l)] = Wl
parameters['b' + str(l)] = bl
grads -- python dictionary containing your gradients for each parameters:
grads['dW' + str(l)] = dWl
grads['db' + str(l)] = dbl
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
learning_rate -- the learning rate, scalar.
beta1 -- Exponential decay hyperparameter for the first moment estimates
beta2 -- Exponential decay hyperparameter for the second moment estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
Returns:
parameters -- python dictionary containing your updated parameters
v -- Adam variable, moving average of the first gradient, python dictionary
s -- Adam variable, moving average of the squared gradient, python dictionary
"""
L = len(parameters) // 2 # number of layers in the neural networks
v_corrected = {} # Initializing first moment estimate, python dictionary
s_corrected = {} # Initializing second moment estimate, python dictionary
# Perform Adam update on all parameters
for l in range(L):
# Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
### START CODE HERE ### (approx. 2 lines)
v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads['dW' + str(l+1)]
v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads['db' + str(l+1)]
### END CODE HERE ###
# Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
### START CODE HERE ### (approx. 2 lines)
v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1 ** t)
v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1 ** t)
### END CODE HERE ###
# Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
### START CODE HERE ### (approx. 2 lines)
s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * (grads['dW' + str(l+1)] ** 2)
s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * (grads['db' + str(l+1)] ** 2)
### END CODE HERE ###
# Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
### START CODE HERE ### (approx. 2 lines)
s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2 ** t)
s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2 ** t)
### END CODE HERE ###
# Update parameters. Inputs: "parameters, learning_rate, v_corrected, s_corrected, epsilon". Output: "parameters".
### START CODE HERE ### (approx. 2 lines)
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v_corrected["dW" + str(l+1)] / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon)
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v_corrected["db" + str(l+1)] / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon)
### END CODE HERE ###
return parameters, v, s
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
```
**Expected Output**:
<table>
<tr>
<td > **W1** </td>
<td > [[ 1.63178673 -0.61919778 -0.53561312]
[-1.08040999 0.85796626 -2.29409733]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.75225313]
[-0.75376553]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.32648046 -0.25681174 1.46954931]
[-2.05269934 -0.31497584 -0.37661299]
[ 1.14121081 -1.09245036 -0.16498684]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.88529978]
[ 0.03477238]
[ 0.57537385]] </td>
</tr>
<tr>
<td > **v["dW1"]** </td>
<td > [[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[-0.01228902]
[-0.09357694]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.02344157]
[ 0.16598022]
[ 0.07420442]] </td>
</tr>
<tr>
<td > **s["dW1"]** </td>
<td > [[ 0.00121136 0.00131039 0.00081287]
[ 0.0002525 0.00081154 0.00046748]] </td>
</tr>
<tr>
<td > **s["db1"]** </td>
<td > [[ 1.51020075e-05]
[ 8.75664434e-04]] </td>
</tr>
<tr>
<td > **s["dW2"]** </td>
<td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
[ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
[ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td>
</tr>
<tr>
<td > **s["db2"]** </td>
<td > [[ 5.49507194e-05]
[ 2.75494327e-03]
[ 5.50629536e-04]] </td>
</tr>
</table>
You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
## 5 - Model with different optimization algorithms
Let's use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
```
train_X, train_Y = load_dataset()
```
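For reference only, and not part of the assignment helpers: a similar two-half-moons toy dataset can be generated directly with scikit-learn's `make_moons`. The sample count and noise level below are illustrative assumptions, not the values used by `load_dataset()`.
```python
from sklearn.datasets import make_moons

# Two interleaving half-moon classes; parameter values here are illustrative assumptions.
X_moons, Y_moons = make_moons(n_samples=300, noise=0.2, random_state=3)
demo_X = X_moons.T                # shape (2, number of examples), matching the model's input layout
demo_Y = Y_moons.reshape(1, -1)   # shape (1, number of examples)
print(demo_X.shape, demo_Y.shape)
```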
We have already implemented a 3-layer neural network. You will train it with:
- Mini-batch **Gradient Descent**: it will call your function:
- `update_parameters_with_gd()`
- Mini-batch **Momentum**: it will call your functions:
- `initialize_velocity()` and `update_parameters_with_momentum()`
- Mini-batch **Adam**: it will call your functions:
- `initialize_adam()` and `update_parameters_with_adam()`
```
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
"""
3-layer neural network model which can be run in different optimizer modes.
Arguments:
X -- input data, of shape (2, number of examples)
Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
layers_dims -- python list, containing the size of each layer
learning_rate -- the learning rate, scalar.
mini_batch_size -- the size of a mini batch
beta -- Momentum hyperparameter
beta1 -- Exponential decay hyperparameter for the past gradients estimates
beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
epsilon -- hyperparameter preventing division by zero in Adam updates
num_epochs -- number of epochs
print_cost -- True to print the cost every 1000 epochs
Returns:
parameters -- python dictionary containing your updated parameters
"""
L = len(layers_dims) # number of layers in the neural networks
costs = [] # to keep track of the cost
t = 0 # initializing the counter required for Adam update
seed = 10 # For grading purposes, so that your "random" minibatches are the same as ours
# Initialize parameters
parameters = initialize_parameters(layers_dims)
# Initialize the optimizer
if optimizer == "gd":
pass # no initialization required for gradient descent
elif optimizer == "momentum":
v = initialize_velocity(parameters)
elif optimizer == "adam":
v, s = initialize_adam(parameters)
# Optimization loop
for i in range(num_epochs):
# Define the random minibatches. We increment the seed to reshuffle differently the dataset after each epoch
seed = seed + 1
minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
# Forward propagation
a3, caches = forward_propagation(minibatch_X, parameters)
# Compute cost
cost = compute_cost(a3, minibatch_Y)
# Backward propagation
grads = backward_propagation(minibatch_X, minibatch_Y, caches)
# Update parameters
if optimizer == "gd":
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
elif optimizer == "momentum":
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
elif optimizer == "adam":
t = t + 1 # Adam counter
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
t, learning_rate, beta1, beta2, epsilon)
# Print the cost every 1000 epoch
if print_cost and i % 1000 == 0:
print ("Cost after epoch %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('epochs (per 100)')
plt.title("Learning rate = " + str(learning_rate))
plt.show()
return parameters
```
You will now run this 3-layer neural network with each of the 3 optimization methods.
### 5.1 - Mini-batch Gradient descent
Run the following code to see how the model does with mini-batch gradient descent.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.2 - Mini-batch gradient descent with momentum
Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.3 - Mini-batch with Adam mode
Run the following code to see how the model does with Adam.
```
# train 3-layer model
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
```
### 5.4 - Summary
<table>
<tr>
<td>
**optimization method**
</td>
<td>
**accuracy**
</td>
<td>
**cost shape**
</td>
</tr>
<tr>
<td>
Gradient descent
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
</tr>
<tr>
<td>
Momentum
</td>
<td>
79.7%
</td>
<td>
oscillations
</td>
</tr>
<tr>
<td>
Adam
</td>
<td>
94%
</td>
<td>
smoother
</td>
</tr>
</table>
Momentum usually helps, but given the small learning rate and the simplistic dataset, its impact is almost negligible. Also, the huge oscillations you see in the cost come from the fact that some minibatches are more difficult than others for the optimization algorithm.
Adam, on the other hand, clearly outperforms mini-batch gradient descent and Momentum. If you run the model for more epochs on this simple dataset, all three methods will lead to very good results. However, you've seen that Adam converges a lot faster.
Some advantages of Adam include:
- Relatively low memory requirements (though higher than gradient descent and gradient descent with momentum)
- Usually works well even with little tuning of hyperparameters (except $\alpha$)
**References**:
- Adam paper: https://arxiv.org/pdf/1412.6980.pdf
# Beacon Time Series, across the transition
Edit selector= below
Look at the beacons with the largest normalized spread.
(Steal plotMultiBeacons() from here.)
```
import math
import numpy as np
import pandas as pd
import BQhelper as bq
%matplotlib nbagg
import matplotlib.pyplot as plt
bq.project = "mlab-sandbox"
# bq.dataset = 'mattmathis'
# bq.UnitTestRunQuery()
# bq.UnitTestWriteQuery()
UnitTest=False
# Plot simple timeseries for a list of beacons
# UnitTest=True
# Modified start date
query="""
SELECT
a.TestTime,
client.IP,
a.MeanThroughputMbps,
node._instruments
# FROM `mlab-sandbox.mm_unified_testing.unified_{selector}`
FROM `measurement-lab.ndt.unified_{selector}`
WHERE client.IP in ( {clientIP} )
AND test_date >= '2018-01-01'
ORDER BY TestTime
"""
global StashData
def plotMultiBeacons(clients, columns=1, width=10, data=None, selector='downloads'):
if data is None:
clist = '"'+'", "'.join(clients)+'"'
data=bq.QueryTimestampTimeseries(query, clientIP=clist, selector=selector)
global StashData # Skip slow queries when debugging
StashData = data
rows = math.ceil(len(clients) / float(columns))
figLen = width/float(columns)*rows # assume square subplots
print('Size', figLen, width)
plt.rcParams['figure.figsize'] = [ width, figLen]
fig, axs = plt.subplots(nrows=rows, ncols=columns, squeeze=False, sharex='all')
for ax, client in zip([i for j in axs for i in j], clients):
print ('Beacon: '+client)
ax.set_title('Beacon: '+client)
cdata = data[data['IP'] == client]
ax.plot(cdata['MeanThroughputMbps'][cdata["_instruments"] == 'web100'], 'b.',
cdata['MeanThroughputMbps'][cdata["_instruments"] == 'tcpinfo'], 'r.')
fig.autofmt_xdate()
fig.show()
if UnitTest:
# %matplotlib nbagg
try:
TestData = StashData
print('Using StashData')
except:
pass
try:
TestData
except:
    print('Generating test data')
clients = [
'69.68.23.44', # Max deltaMean
'96.229.66.58', # Max deltaMax
'73.210.92.196',
]
clist = '"'+'", "'.join(clients)+'"'
TestData=bq.QueryTimestampTimeseries(query, clientIP=clist, selector='downloads')
# plt.ion()
clients=list(TestData['IP'].unique())
print (clients)
plotMultiBeacons(clients, data=TestData, columns=2, width=10)
print ('Done')
MIfastAfter=[
'24.127.189.188',
'68.61.90.228',
'68.32.195.88',
'98.209.182.228',
'68.40.138.115',
'23.116.227.182',
'98.209.29.89',
'68.36.121.102',
'2601:40d:200:a802::2',
'68.188.190.134']
plotMultiBeacons(MIfastAfter, columns=2, width=10)
```
<a href="https://colab.research.google.com/github/shakasom/MapsDataScience/blob/master/Chapter4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Making sense of humongous location datasets
## Installations
The geospatial libraries are not pre-installed in Google Colab as standard Python libraries, so we need to install them before we can use them. Luckily this is an easy process: you can use either `apt install` or `pip install`. (You could also create a conda environment, but that is a bit more involved, so pip and apt are enough in our case to get the libraries we need.) These are the libraries we need to install in this tutorial:
GDAL, Geopandas and Folium.
The installation might take about a minute.
```
%%time
!apt update --quiet
!apt upgrade --quiet
# GDAL Important library for many geopython libraries
!apt install gdal-bin python-gdal python3-gdal --quiet
# Install rtree - Geopandas requirement
!apt install python3-rtree --quiet
# Install Geopandas
!pip install git+git://github.com/geopandas/geopandas.git --quiet
# Install descartes - Geopandas requirement
!pip install descartes --quiet
# Install Folium for Geographic data visualization
!pip install folium --quiet
# Install Pysal
!pip install pysal --quiet
# Install splot --> pysal
!pip install splot --quiet
# Install mapclassify
!pip install mapclassify --quiet
import pandas as pd
import numpy as np
import geopandas as gpd
from shapely.geometry import Point
from pysal.explore import esda
from pysal.lib import weights
#import libysal as lps
from pysal.viz.splot.esda import plot_moran, plot_local_autocorrelation, lisa_cluster
import matplotlib
import matplotlib.pyplot as plt
import folium
import os
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.cluster import dbscan
from libpysal.weights.contiguity import Queen
from esda.moran import Moran
from splot.esda import moran_scatterplot
from esda.moran import Moran_Local
from splot.esda import lisa_cluster
import pysal as ps
ps.__version__
```
## Data
The dataset for this chapter is stored behind a Dropbox link. Learning how to access data on the web is a valuable skill, so we will use wget, a great utility for downloading files from the web that supports different protocols.
```
# Get the data from dropbox link
!wget https://www.dropbox.com/s/xvs0ybc402mkrn8/2019-02-avon-and-somerset-street.zip --quiet
# see the folders available
import os
os.listdir(os.getcwd())
# We have zipped data so let us unzip it
!unzip 2019-02-avon-and-somerset-street.zip
crime_somerset = pd.read_csv("2019-02-avon-and-somerset-street.csv")
crime_somerset.head()
crime_somerset.shape
crime_somerset.isnull().sum()
# Drop columns with high missing values
crime_somerset.drop(['Last outcome category','Context', 'Crime ID' ], axis=1, inplace=True)
crime_somerset.head()
crime_somerset.isnull().sum()
# Drop rows with missing values
crime_somerset.dropna(axis=0,inplace=True)
crime_somerset.isnull().sum()
crime_somerset.shape
crime_somerset.head()
```
### Convert to GeoDataFrame
```
# Function to create a Geodataframe
def create_gdf(df, lat, lon):
""" Convert pandas dataframe into a Geopandas GeoDataFrame"""
crs = {'init': 'epsg:4326'}
geometry = [Point(xy) for xy in zip(df[lon], df[lat])]
gdf = gpd.GeoDataFrame(df, crs=crs, geometry=geometry)
return gdf
crime_somerset_gdf = create_gdf(crime_somerset, 'Latitude', 'Longitude')
crime_somerset_gdf.head()
fig, ax = plt.subplots(figsize=(12,10))
crime_somerset_gdf.plot(markersize=20, ax=ax);
plt.savefig('crime_somerset_map.png', bbox_inches='tight')
```
## KMeans Clustering Location Data
```
crime_somerset_gdf.head()
```
* Split training and test dataset
```
train = crime_somerset_gdf.sample(frac=0.7, random_state=14)
test = crime_somerset_gdf.drop(train.index)
train.shape, test.shape
# Get coordinates for the train and test dataset
train_coords = train[['Latitude', 'Longitude']].values
test_coords = test[['Latitude', 'Longitude']].values
# Fit Kmeans clustering on training dataset
kmeans = KMeans(n_clusters=5)
kmeans.fit(train_coords)
# Predict on the test dataset by clustering
preds = kmeans.predict(test_coords)
# Get centers of the clusters
centers = kmeans.cluster_centers_
fig, ax = plt.subplots(figsize=(12,10))
plt.scatter(test_coords[:, 0], test_coords[:, 1], c=preds, s=30, cmap='viridis')
plt.scatter(centers[:,0], centers[:,1], c='Red', marker="s", s=50);
```
## DBSCAN
### Detecting Outliers/Noise
```
coords = crime_somerset_gdf[['Latitude', 'Longitude']]
coords[:5]
# Get labels of each cluster
_, labels = dbscan(crime_somerset_gdf[['Latitude', 'Longitude']], eps=0.1, min_samples=10)
# Create a labels dataframe with the index of the dataset
labels_df = pd.DataFrame(labels, index=crime_somerset_gdf.index, columns=['cluster'])
labels_df.head()
# Groupby Labels
labels_df.groupby('cluster').size()
# Plot the groupedby labels
sns.countplot(labels_df.cluster);
plt.show()
# Get Noise (Outliers) with label -1
noise = crime_somerset_gdf.loc[labels_df['cluster']==-1, ['Latitude', 'Longitude']]
# Get core with labels 0
core = crime_somerset_gdf.loc[labels_df['cluster']== 0, ['Latitude', 'Longitude']]
# Display scatter plot with noises as stars and core as circle points
fig, ax = plt.subplots(figsize=(12,10))
ax.scatter(noise['Latitude'], noise['Longitude'],marker= '*', s=40, c='blue' )
ax.scatter(core['Latitude'], core['Longitude'], marker= 'o', s=20, c='red')
plt.savefig('outliers.png');
plt.show();
noise
```
### Detecting Clusters
```
_, labels = dbscan(crime_somerset_gdf[['Latitude', 'Longitude']], eps=0.01, min_samples=300)
labels_df = pd.DataFrame(labels, index=crime_somerset_gdf.index, columns=['cluster'])
labels_df.groupby('cluster').size()
noise = crime_somerset_gdf.loc[labels_df['cluster']==-1, ['Latitude', 'Longitude']]
core = crime_somerset_gdf.loc[labels_df['cluster']== 0, ['Latitude', 'Longitude']]
bp1 = crime_somerset_gdf.loc[labels_df['cluster']== 1, ['Latitude', 'Longitude']]
bp2 = crime_somerset_gdf.loc[labels_df['cluster']== 2, ['Latitude', 'Longitude']]
bp3 = crime_somerset_gdf.loc[labels_df['cluster']== 3, ['Latitude', 'Longitude']]
fig, ax = plt.subplots(figsize=(12,10))
ax.scatter(noise['Latitude'], noise['Longitude'], s=10, c='gray')  # scatter has no 'markers' kwarg; use s for marker size
ax.scatter(core['Latitude'], core['Longitude'], s=100, c='red')
ax.scatter(bp1['Latitude'], bp1['Longitude'], s=50, c='yellow')
ax.scatter(bp2['Latitude'], bp2['Longitude'], s=50, c='green')
ax.scatter(bp3['Latitude'], bp3['Longitude'], s=50, c='blue')
plt.savefig('cluster_ex1.png');
plt.show()
fig, ax = plt.subplots(figsize=(15,12))
ax.scatter(noise['Latitude'], noise['Longitude'],s=1, c='gray' )
ax.scatter(core['Latitude'], core['Longitude'],marker= "*", s=10, c='red')
ax.scatter(bp1['Latitude'], bp1['Longitude'], marker = "v", s=10, c='yellow')
ax.scatter(bp2['Latitude'], bp2['Longitude'], marker= "P", s=10, c='green')
ax.scatter(bp3['Latitude'], bp3['Longitude'], marker= "d", s=10, c='blue')
ax.set_xlim(left=50.8, right=51.7)
ax.set_ylim(bottom=-3.5, top=-2.0)
plt.savefig('cluster_zoomed.png');
plt.show()
# Create a 2x2 grid of subplots and access them through the returned array
fig, axes = plt.subplots(2, 2, figsize=(12,10))
axes[0, 0].scatter(noise['Latitude'], noise['Longitude'],s=0.01, c='gray' )
axes[0, 0].title.set_text('Noise')
axes[0, 1].scatter(core['Latitude'], core['Longitude'],marker= "*", s=10, c='red')
axes[0, 1].title.set_text('Core')
axes[1, 0].scatter(bp1['Latitude'], bp1['Longitude'], marker = "v", s=50, c='yellow')
axes[1, 0].title.set_text('Border Points 1')
axes[1,1].scatter(bp2['Latitude'], bp2['Longitude'], marker= "P", s=50, c='green')
axes[1, 1].title.set_text('Border Points 2')
plt.tight_layout()
plt.show()
```
## Spatial Autocorellation
We will use polygon data for this section. Let us first get the data from the Dropbox URL.
```
!wget https://www.dropbox.com/s/k2ynddy79k2r46i/ASC_Beats_2016.zip
!unzip ASC_Beats_2016.zip
boundaries = gpd.read_file('ASC_Beats_2016.shp')
boundaries.head()
boundaries.crs, crime_somerset_gdf.crs
boundaries_4326 = boundaries.to_crs({'init': 'epsg:4326'})
fig, ax = plt.subplots(figsize=(12,10))
boundaries_4326.plot(ax=ax)
crime_somerset_gdf.plot(ax=ax, markersize=10, color='red')
plt.savefig('overlayed_map.png')
# Points in Polygon
crimes_with_boundaries = gpd.sjoin(boundaries_4326,crime_somerset_gdf, op='contains' )
crimes_with_boundaries.head()
grouped_crimes = crimes_with_boundaries.groupby('BEAT_CODE').size()
grouped_crimes.head()
df = grouped_crimes.to_frame().reset_index()
df.columns = ['BEAT_CODE', 'CrimeCount']
df.head()
final_result = boundaries.merge(df, on='BEAT_CODE')
final_result.head()
```
* Choropleth Map of the Crime Count
```
fig, ax = plt.subplots(figsize=(12,10))
final_result.plot(column='CrimeCount', scheme='Quantiles', k=5, cmap='YlGnBu', legend=True, ax=ax);
plt.tight_layout()
ax.set_axis_off()
plt.savefig('choroplethmap.png')
plt.title('Crimes Choropleth Map ')
plt.show()
```
### Global Spatial Autocorrelation
```
# Create y variable values
y = final_result['CrimeCount'].values
# Get weights (Queen contiguity) -- these must be built before computing the spatial lag
wq = Queen.from_dataframe(final_result)
wq.transform = 'r'
# Spatial lag
ylag = weights.lag_spatial(wq, y)
final_result['ylag'] = ylag
moran = Moran(y, wq)
moran.I
from splot.esda import plot_moran
plot_moran(moran, zstandard=True, figsize=(10,4))
plt.tight_layout()
plt.savefig('moronPlot.png')
plt.show()
moran.p_sim
```
## Visualizing Local Autocorrelation with splot - Hot Spots, Cold Spots and Spatial Outliers
```
# calculate Moran_Local and plot
moran_loc = Moran_Local(y, wq)  # use the Queen contiguity weights built above
fig, ax = moran_scatterplot(moran_loc)
plt.savefig('moron_local.png')
plt.show()
fig, ax = moran_scatterplot(moran_loc, p=0.05)
plt.show()
lisa_cluster(moran_loc, final_result, p=0.05, figsize = (10,8))
plt.tight_layout()
plt.savefig('lisa_clusters.png')
plt.show()
```
# END
<i>Copyright (c) Microsoft Corporation. All rights reserved.<br>
Licensed under the MIT License.</i>
<br>
# Model Comparison for NCF Using the Neural Network Intelligence Toolkit
This notebook shows how to use the **[Neural Network Intelligence](https://nni.readthedocs.io/en/latest/) toolkit (NNI)** for tuning hyperparameters for the Neural Collaborative Filtering Model.
To learn about each tuner NNI offers you can read about it [here](https://nni.readthedocs.io/en/latest/Tuner/BuiltinTuner.html).
NNI is a toolkit to help users design and tune machine learning models (e.g., hyperparameters), neural network architectures, or complex system’s parameters, in an efficient and automatic way. NNI has several appealing properties: ease of use, scalability, flexibility and efficiency. NNI can be executed in a distributed way on a local machine, a remote server, or a large scale training platform such as OpenPAI or Kubernetes.
In this notebook, we can see how NNI works with two different model types and the differences between their hyperparameter search spaces, yaml config file, and training scripts.
- [NCF Training Script](../../reco_utils/nni/ncf_training.py)
For this notebook we use a _local machine_ as the training platform (this can be any machine running the `reco_base` conda environment). In this case, NNI uses the available processors of the machine to parallelize the trials, subject to the value of `trialConcurrency` we specify in the configuration. Our runs and the results we report were obtained on a [Standard_D16_v3 virtual machine](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/sizes-general#dv3-series-1) with 16 vcpus and 64 GB memory.
### 1. Global Settings
```
import sys
import json
import os
import surprise
import papermill as pm
import pandas as pd
import shutil
import subprocess
import yaml
import pkg_resources
from tempfile import TemporaryDirectory
import tensorflow as tf
tf.get_logger().setLevel('ERROR') # only show error messages
import reco_utils
from reco_utils.common.timer import Timer
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_chrono_split
from reco_utils.evaluation.python_evaluation import rmse, precision_at_k, ndcg_at_k
from reco_utils.tuning.nni.nni_utils import (
check_experiment_status,
check_stopped,
check_metrics_written,
get_trials,
stop_nni, start_nni
)
from reco_utils.recommender.ncf.dataset import Dataset as NCFDataset
from reco_utils.recommender.ncf.ncf_singlenode import NCF
from reco_utils.tuning.nni.ncf_utils import compute_test_results, combine_metrics_dicts
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
print("NNI version: {}".format(pkg_resources.get_distribution("nni").version))
tmp_dir = TemporaryDirectory()
%load_ext autoreload
%autoreload 2
```
### 2. Prepare Dataset
1. Download data and split into training, validation and test sets
2. Store the data sets to a local directory.
```
# Parameters used by papermill
# Select Movielens data size: 100k, 1m
MOVIELENS_DATA_SIZE = '100k'
SURPRISE_READER = 'ml-100k'
TMP_DIR = tmp_dir.name
NUM_EPOCHS = 10
MAX_TRIAL_NUM = 16
DEFAULT_SEED = 42
# time (in seconds) to wait for each tuning experiment to complete
WAITING_TIME = 20
MAX_RETRIES = MAX_TRIAL_NUM*4 # it is recommended to have MAX_RETRIES>=4*MAX_TRIAL_NUM
# Note: The NCF model can incorporate
df = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=["userID", "itemID", "rating", "timestamp"]
)
df.head()
train, validation, test = python_chrono_split(df, [0.7, 0.15, 0.15])
train = train.drop(['timestamp'], axis=1)
validation = validation.drop(['timestamp'], axis=1)
test = test.drop(['timestamp'], axis=1)
LOG_DIR = os.path.join(TMP_DIR, "experiments")
os.makedirs(LOG_DIR, exist_ok=True)
DATA_DIR = os.path.join(TMP_DIR, "data")
os.makedirs(DATA_DIR, exist_ok=True)
TRAIN_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_train.pkl"
train.to_pickle(os.path.join(DATA_DIR, TRAIN_FILE_NAME))
VAL_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_val.pkl"
validation.to_pickle(os.path.join(DATA_DIR, VAL_FILE_NAME))
TEST_FILE_NAME = "movielens_" + MOVIELENS_DATA_SIZE + "_test.pkl"
test.to_pickle(os.path.join(DATA_DIR, TEST_FILE_NAME))
```
### 3. Prepare Hyperparameter Tuning
To run an experiment on NNI we require a general training script for our model of choice.
A general framework for a training script includes the following components:
1. Argument parsing for the fixed parameters (dataset location, metrics to use)
2. Data preprocessing steps specific to the model
3. Fitting the model on the train set
4. Evaluating the model on the validation set on each metric (ranking and rating)
5. Saving the metrics and the model
To utilize NNI we also require a hyperparameter search space. Only the hyperparameters we want to tune need to appear in the dictionary. NNI supports different methods of [hyperparameter sampling](https://nni.readthedocs.io/en/latest/Tutorial/SearchSpaceSpec.html).
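For illustration, a search space mixing several of these sampling types could look like the sketch below; the parameter names and ranges here are hypothetical and are not the ones actually tuned later in this notebook.
```
# Hypothetical search space illustrating several NNI sampling types
# (illustrative only; the actual search space used below is smaller)
example_search_space = {
    "n_factors": {"_type": "choice", "_value": [4, 8, 16]},            # pick one of a fixed set
    "learning_rate": {"_type": "loguniform", "_value": [1e-4, 1e-1]},  # sample on a log scale
    "batch_size": {"_type": "quniform", "_value": [128, 1024, 128]},   # uniform, rounded to multiples of 128
}
```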
The `script_params` below are the parameters of the training script that are fixed (unlike `hyper_params` which are tuned).
```
PRIMARY_METRIC = "precision_at_k"
RATING_METRICS = ["rmse"]
RANKING_METRICS = ["precision_at_k", "ndcg_at_k"]
USERCOL = "userID"
ITEMCOL = "itemID"
REMOVE_SEEN = True
K = 10
RANDOM_STATE = 42
VERBOSE = True
BIASED = True
script_params = " ".join([
"--datastore", DATA_DIR,
"--train-datapath", TRAIN_FILE_NAME,
"--validation-datapath", VAL_FILE_NAME,
"--surprise-reader", SURPRISE_READER,
"--rating-metrics", " ".join(RATING_METRICS),
"--ranking-metrics", " ".join(RANKING_METRICS),
"--usercol", USERCOL,
"--itemcol", ITEMCOL,
"--k", str(K),
"--random-state", str(RANDOM_STATE),
"--epochs", str(NUM_EPOCHS),
"--primary-metric", PRIMARY_METRIC
])
if BIASED:
script_params += " --biased"
if VERBOSE:
script_params += " --verbose"
if REMOVE_SEEN:
script_params += " --remove-seen"
```
We specify the search space for the NCF hyperparameters
```
ncf_hyper_params = {
'n_factors': {"_type": "choice", "_value": [2, 4, 8, 12]},
'learning_rate': {"_type": "uniform", "_value": [1e-3, 1e-2]},
}
with open(os.path.join(TMP_DIR, 'search_space_ncf.json'), 'w') as fp:
json.dump(ncf_hyper_params, fp)
```
This config file follows the guidelines provided in [NNI Experiment Config instructions](https://github.com/microsoft/nni/blob/master/docs/en_US/Tutorial/ExperimentConfig.md).
The options to pay attention to are:
- `searchSpacePath`, which points to the hyperparameter search space we defined above
- `tuner`, which specifies the hyperparameter tuning algorithm that will sample from our search space and optimize our model
```
config = {
"authorName": "default",
"experimentName": "tensorflow_ncf",
"trialConcurrency": 8,
"maxExecDuration": "1h",
"maxTrialNum": MAX_TRIAL_NUM,
"trainingServicePlatform": "local",
# The path to Search Space
"searchSpacePath": "search_space_ncf.json",
"useAnnotation": False,
"logDir": LOG_DIR,
"tuner": {
"builtinTunerName": "TPE",
"classArgs": {
#choice: maximize, minimize
"optimize_mode": "maximize"
}
},
# The path and the running command of trial
"trial": {
"command": f"{sys.executable} ncf_training.py {script_params}",
"codeDir": os.path.join(os.path.split(os.path.abspath(reco_utils.__file__))[0], "tuning", "nni"),
"gpuNum": 0
}
}
with open(os.path.join(TMP_DIR, "config_ncf.yml"), "w") as fp:
fp.write(yaml.dump(config, default_flow_style=False))
```
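The config above uses the TPE tuner. Switching to a different built-in tuner only requires changing the `tuner` section before writing the YAML file. Below is a minimal sketch, reusing the `config`, `TMP_DIR`, `os`, and `yaml` objects defined above and assuming NNI's `Anneal` tuner is available in your installation; it is not part of the experiment run in this notebook.
```
# Hypothetical variant of the config using a different built-in tuner.
# Only the "tuner" section changes; everything else stays the same.
config_anneal = dict(config)  # shallow copy of the config defined above
config_anneal["tuner"] = {
    "builtinTunerName": "Anneal",
    "classArgs": {"optimize_mode": "maximize"},
}
with open(os.path.join(TMP_DIR, "config_ncf_anneal.yml"), "w") as fp:
    fp.write(yaml.dump(config_anneal, default_flow_style=False))
```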
### 4. Execute NNI Trials
The conda environment comes with NNI installed, which includes the command line tool `nnictl` for controlling and getting information about NNI experiments. <br>
To start the NNI tuning trials from the command line, execute the following command: <br>
`nnictl create --config <path of config.yml>` <br>
The `start_nni` function will run the `nnictl create` command. To find the URL for an active experiment, you can run `nnictl webui url` in your terminal.
In this notebook the 16 NCF models are trained in a single experiment, in concurrent batches of 8. While NNI can run two separate experiments simultaneously by adding the `--port <port_num>` flag to `nnictl create`, the total training time will probably be about the same as running the batches sequentially, since these are CPU-bound processes.
```
stop_nni()
config_path_ncf = os.path.join(TMP_DIR, 'config_ncf.yml')
with Timer() as time_ncf:
start_nni(config_path_ncf, wait=WAITING_TIME, max_retries=MAX_RETRIES)
check_metrics_written(wait=WAITING_TIME, max_retries=MAX_RETRIES)
trials_ncf, best_metrics_ncf, best_params_ncf, best_trial_path_ncf = get_trials('maximize')
best_metrics_ncf
best_params_ncf
```
### 5. Baseline Model
Although we hope that the additional effort of utilizing an AutoML framework like NNI for hyperparameter tuning will lead to better results, we should also draw comparisons using our baseline model (our model trained with its default hyperparameters). This allows us to precisely understand what performance benefits NNI is or isn't providing.
```
data = NCFDataset(train, validation, seed=DEFAULT_SEED)
model = NCF(
n_users=data.n_users,
n_items=data.n_items,
model_type="NeuMF",
n_factors=4,
layer_sizes=[16,8,4],
n_epochs=NUM_EPOCHS,
learning_rate=1e-3,
verbose=True,
seed=DEFAULT_SEED
)
model.fit(data)
test_results = compute_test_results(model, train, validation, RATING_METRICS, RANKING_METRICS)
test_results
```
### 6. Show Results
The metrics for each model type are reported on the validation set. At this point we can compare the metrics for each model and select the one with the best score on the primary metric(s) of interest.
```
test_results['name'] = 'ncf_baseline'
best_metrics_ncf['name'] = 'ncf_tuned'
combine_metrics_dicts(test_results, best_metrics_ncf)
```
Based on the above metrics, we determine that NNI has identified a set of hyperparameters that does demonstrate an improvement on our metrics of interest. In this example, it turned out that an `n_factors` of 12 contributed to a better performance than an `n_factors` of 4. While the difference in `precision_at_k` and `ndcg_at_k` is small, NNI has helped us determine that a slightly larger embedding dimension for NCF may be useful for the movielens dataset.
```
# Stop the NNI experiment
stop_nni()
tmp_dir.cleanup()
```
### 7. Concluding Remarks
In this notebook we showed how to use the NNI framework on different models. Inspecting the training scripts and the differences between them should help you identify which components need to be modified to run another model with NNI.
In practice, an AutoML framework like NNI is just a tool to help you explore a large space of hyperparameters quickly with a prescribed level of randomization. It is recommended that, in addition to using NNI, one trains baseline models with typical hyperparameter choices (learning rates of 0.005 or 0.001, regularization rates of 0.05 or 0.01, etc.) to draw more meaningful comparisons between model performances. This may help determine whether the tuner is merely overfitting the validation set or whether there is a statistically significant improvement.
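As an illustration, such a comparison could reuse the helpers and objects already defined in this notebook (`data`, `train`, `validation`, `compute_test_results`, `combine_metrics_dicts`). The learning-rate values below are hypothetical, and this loop is not part of the experiment above; it is a minimal sketch that adds noticeable training time if run.
```
# Hypothetical baseline sweep over a few "typical" learning rates,
# evaluated with the same helpers used for the baseline model above.
baseline_results = []
for lr in [0.001, 0.005]:  # illustrative values only
    baseline = NCF(
        n_users=data.n_users,
        n_items=data.n_items,
        model_type="NeuMF",
        n_factors=4,
        layer_sizes=[16, 8, 4],
        n_epochs=NUM_EPOCHS,
        learning_rate=lr,
        verbose=False,
        seed=DEFAULT_SEED,
    )
    baseline.fit(data)
    result = compute_test_results(baseline, train, validation, RATING_METRICS, RANKING_METRICS)
    result["name"] = "ncf_lr_{}".format(lr)
    baseline_results.append(result)
combine_metrics_dicts(*baseline_results)
```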
Another thing to note is the added computational cost required to train models using an AutoML framework. In this case, it takes about 6 minutes to train each of the models on a [Standard_NC6 VM](https://docs.microsoft.com/en-us/azure/virtual-machines/nc-series). With this in mind, while NNI can easily train hundreds of models over all hyperparameters for a model, in practice it may be beneficial to choose a subset of the hyperparameters that are deemed most important and to tune those. Too small of a hyperparameter search space may restrict our exploration, but too large may also lead to random noise in the data being exploited by a specific combination of hyperparameters.
For examples of scaling larger tuning workloads on clusters of machines, see [the notebooks](./README.md) that employ the [Azure Machine Learning service](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-tune-hyperparameters).
### 8. References
Recommenders Repo References
* [NCF deep-dive notebook](../02_model/ncf_deep_dive.ipynb)
* [SVD NNI notebook (uses more tuners available)](./nni_surprise_svd.ipynb)
External References
* [NCF Paper](https://arxiv.org/abs/1708.05031)
* [NNI Docs | Neural Network Intelligence toolkit](https://github.com/Microsoft/nni)
# Load MXNet model
In this tutorial, you learn how to load an existing MXNet model and use it to run a prediction task.
## Preparation
This tutorial requires the installation of the Java Kernel. For more information on installing the Java Kernel, see the [README](https://github.com/awslabs/djl/blob/master/jupyter/README.md).
```
// %mavenRepo snapshots https://oss.sonatype.org/content/repositories/snapshots/
%maven ai.djl:api:0.8.0
%maven ai.djl:model-zoo:0.8.0
%maven ai.djl.mxnet:mxnet-engine:0.8.0
%maven ai.djl.mxnet:mxnet-model-zoo:0.8.0
%maven org.slf4j:slf4j-api:1.7.26
%maven org.slf4j:slf4j-simple:1.7.26
%maven net.java.dev.jna:jna:5.3.0
// See https://github.com/awslabs/djl/blob/master/mxnet/mxnet-engine/README.md
// for more MXNet library selection options
%maven ai.djl.mxnet:mxnet-native-auto:1.7.0-backport
import java.awt.image.*;
import java.nio.file.*;
import ai.djl.*;
import ai.djl.inference.*;
import ai.djl.ndarray.*;
import ai.djl.modality.*;
import ai.djl.modality.cv.*;
import ai.djl.modality.cv.util.*;
import ai.djl.modality.cv.transform.*;
import ai.djl.modality.cv.translator.*;
import ai.djl.translate.*;
import ai.djl.training.util.*;
import ai.djl.util.*;
```
## Step 1: Prepare your MXNet model
This tutorial assumes that you have an MXNet model trained using Python. An MXNet symbolic model usually contains the following files:
* Symbol file: {MODEL_NAME}-symbol.json - a json file that contains network information about the model
* Parameters file: {MODEL_NAME}-{EPOCH}.params - a binary file that stores the parameter weight and bias
* Synset file: synset.txt - an optional text file that stores the classification class labels
This tutorial uses a pre-trained MXNet `resnet18_v1` model.
We use `DownloadUtils` for downloading files from the internet.
```
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-symbol.json", "build/resnet/resnet18_v1-symbol.json", new ProgressBar());
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/resnet/0.0.1/resnet18_v1-0000.params.gz", "build/resnet/resnet18_v1-0000.params", new ProgressBar());
DownloadUtils.download("https://mlrepo.djl.ai/model/cv/image_classification/ai/djl/mxnet/synset.txt", "build/resnet/synset.txt", new ProgressBar());
```
## Step 2: Load your model
```
Path modelDir = Paths.get("build/resnet");
Model model = Model.newInstance("resnet");
model.load(modelDir, "resnet18_v1");
```
## Step 3: Create a `Translator`
```
Pipeline pipeline = new Pipeline();
pipeline.add(new CenterCrop()).add(new Resize(224, 224)).add(new ToTensor());
Translator<Image, Classifications> translator = ImageClassificationTranslator.builder()
.setPipeline(pipeline)
.optSynsetArtifactName("synset.txt")
.optApplySoftmax(true)
.build();
```
## Step 4: Load image for classification
```
var img = ImageFactory.getInstance().fromUrl("https://resources.djl.ai/images/kitten.jpg");
img.getWrappedImage()
```
## Step 5: Run inference
```
Predictor<Image, Classifications> predictor = model.newPredictor(translator);
Classifications classifications = predictor.predict(img);
classifications
```
## Summary
Now, you can load any MXNet symbolic model and run inference.
You might also want to check out [load_pytorch_model.ipynb](https://github.com/awslabs/djl/blob/master/jupyter/load_pytorch_model.ipynb) which demonstrates loading a local model using the ModelZoo API.