path (stringlengths 7–265) | concatenated_notebook (stringlengths 46–17M) |
---|---|
tutorials/128311_Search.ipynb | ###Markdown
128311 blind search
We demonstrate how to conduct a blind search in pre- and post-upgrade HIRES data, which are treated as separate instruments. NOTE: if the notebook is not working, you can open IPython, import rvsearch.search and rvsearch.utils, and run the search with the three commands below.
###Code
import os
import radvel
import rvsearch
from rvsearch import search, inject, utils, plots, driver
###Output
_____no_output_____
###Markdown
- Load the data and initialize the search object.
- Bin the data within 12 hours (`binsize=0.5` days), and set the minimum search period with `min_per` (here `min_per=60` days).
###Code
data = utils.read_from_csv('../../Planets/rvdata/vst128311.csv', binsize=0.5)
searcher = search.Search(data, starname='128311', min_per=60,
workers=8, mcmc=False, verbose=True, mstar=[1., 0.05])
###Output
_____no_output_____
###Markdown
Run the search on a single core. In general you'll want to allocate more CPUs for the search, but multi-threading within the Jupyter notebook environment is buggy; a sketch of a multi-CPU, MCMC-enabled configuration follows the output below. By default MCMC runs after the search is done (we disabled it above with `mcmc=False`). With our current configuration, outputs will be saved in the current working directory: a periodogram for each successive search, orbit plots, and a corner plot.
###Code
searcher.run_search()
###Output
0%| | 0/769 [00:00<?, ?it/s]
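A minimal sketch of a multi-CPU configuration with the post-search MCMC enabled; this reuses the same `search.Search` arguments shown above, the worker count of 16 is just an example, and it is best run from a plain Python or IPython session rather than the notebook:

```python
# Sketch only: same data and star as above, but with MCMC on and more workers.
data = utils.read_from_csv('../../Planets/rvdata/vst128311.csv', binsize=0.5)
searcher = search.Search(data, starname='128311', min_per=60,
                         workers=16, mcmc=True, verbose=True, mstar=[1., 0.05])
searcher.run_search()
```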
###Markdown
128311 search with known planets
###Code
data = utils.read_from_csv('../../Planets/rvdata/vst128311.csv', binsize=0.5)
post = radvel.posterior.load('post_final.pkl')
searcher = search.Search(data, post=post, starname='128311', min_per=60,
workers=8, mcmc=False, verbose=True, mstar=[1., 0.05])
searcher.run_search()
args = None
class _args(object):
    def __init__(self):
        self.num_cpus = 8
        self.num_inject = 1000
        self.overwrite = True
        self.mstar = 1.
        self.rstar = 1.
        self.teff = 1.
        self.minP = 2.
        self.maxP = 1e5
        self.minK = 0.1
        self.maxK = 1000.0
        self.minE = 0.0
        self.maxE = 1.0
        self.betaE = False
# Run injections.
args = _args()
args.search_dir = '128311' # Directory in which search is saved
args.full_grid = False
args.verbose = False
driver.injections(args)
# Plot injections and completeness.
args.type = ['recovery']
args.fmt = 'png'
args.mstar = 1.
driver.plots(args)
###Output
Creating recovery plot for 128311
Plotting inj_msini vs. inj_au
Recovery plot saved to /Users/lee/Academics/Astronomy/Planets/rvsearch/tutorials/128311/128311_recoveries.png
###Markdown
128311 blind search
We demonstrate how to conduct a blind search in pre- and post-upgrade HIRES data, which are treated as separate instruments. NOTE: if the notebook is not working, you can open IPython, import rvsearch.search and rvsearch.utils, and run the search with the three commands below.
###Code
import os
import radvel
import rvsearch
from rvsearch import search, inject, utils, plots, driver
###Output
_____no_output_____
###Markdown
- Load the data and initialize the search object.
- Bin the data within 12 hours (`binsize=0.5` days), and set the minimum search period with `min_per` (here `min_per=60` days).
###Code
data = utils.read_from_csv('../../Planets/rvdata/vst128311.csv', binsize=0.5)
searcher = search.Search(data, starname='128311', min_per=60,
workers=8, mcmc=False, verbose=True, mstar=[1., 0.05])
###Output
_____no_output_____
###Markdown
Run the search on a single core. In general you'll want to allocate more CPUs for the search, but multi-threading within the Jupyter notebook environment is buggy. By default MCMC runs after the search is done (we disabled it above with `mcmc=False`). With our current configuration, outputs will be saved in the current working directory: a periodogram for each successive search, orbit plots, and a corner plot.
###Code
searcher.run_search()
###Output
0%| | 0/769 [00:00<?, ?it/s]
###Markdown
128311 search with known planets
###Code
data = utils.read_from_csv('../../Planets/rvdata/vst128311.csv', binsize=0.5)
post = radvel.posterior.load('post_final.pkl')
searcher = search.Search(data, post=post, starname='128311', min_per=60,
workers=8, mcmc=False, verbose=True, mstar=[1., 0.05])
searcher.run_search()
args = None
class _args(object):
    def __init__(self):
        self.num_cpus = 8
        self.num_inject = 500
        self.overwrite = True
        self.mstar = 1.
        self.rstar = 1.
        self.teff = 1.
        self.minP = 2.
        self.maxP = 1e5
        self.minK = 0.1
        self.maxK = 1000.0
        self.minE = 0.0
        self.maxE = 1.0
        self.betaE = False
# Run injections.
args = _args()
args.search_dir = '128311' # Directory in which search is saved
args.full_grid = False
args.verbose = False
driver.injections(args)
# Plot injections and completeness.
args.type = ['recovery']
args.fmt = 'png'
args.mstar = 1.
driver.plots(args)
###Output
Creating recovery plot for 128311
Plotting inj_msini vs. inj_au
Recovery plot saved to /Users/lee/Academics/Astronomy/Planets/rvsearch/tutorials/128311/128311_recoveries.png
|
Experiments/Mars3DOF/Baseline/DRDV_10p.ipynb | ###Markdown
Test DRDV Policy with Engine Failure
###Code
import numpy as np
import os,sys
sys.path.append('../../../RL_lib/Agents/PPO')
sys.path.append('../../../RL_lib/Utils')
sys.path.append('../../../Mars3dof_env')
%load_ext autoreload
%load_ext autoreload
%autoreload 2
%matplotlib nbagg
import os
print(os.getcwd())
%%html
<style>
.output_wrapper, .output {
height:auto !important;
max-height:1000px; /* your desired max-height here */
}
.output_scroll {
box-shadow:none !important;
-webkit-box-shadow:none !important;
}
</style>
###Output
_____no_output_____
###Markdown
Optimize Policy
###Code
from env import Env
import env_utils as envu
from dynamics_model import Dynamics_model
from lander_model import Lander_model
from ic_gen2 import Landing_icgen
import rl_utils
from arch_policy_vf import Arch
from policy_drdv import Policy
from value_function import Value_function
import policy_nets as policy_nets
import valfunc_nets as valfunc_nets
from agent import Agent
import torch.nn as nn
from flat_constraint import Flat_constraint
from glideslope_constraint import Glideslope_constraint
from reward_terminal_mdr import Reward
logger = rl_utils.Logger()
dynamics_model = Dynamics_model()
lander_model = Lander_model(apf_tau1=20,apf_tau2=100,apf_vf1=-2,apf_vf2=-1,apf_v0=70,apf_atarg=15.)
lander_model.get_state_agent = lander_model.get_state_agent1
obs_dim = 6
act_dim = 3
recurrent_steps = 20
reward_object = Reward()
glideslope_constraint = Glideslope_constraint(gs_limit=-1.0)
shape_constraint = Flat_constraint()
env = Env(lander_model,dynamics_model,logger,
reward_object=reward_object,
glideslope_constraint=glideslope_constraint,
shape_constraint=shape_constraint,
tf_limit=100.0,print_every=10)
env.ic_gen = Landing_icgen(mass_uncertainty=0.05,
g_uncertainty=(0.05,0.05),
adjust_apf_v0=True,
downrange = (0,2000 , -70, -10),
crossrange = (-1000,1000 , -30,30),
altitude = (2300,2400,-90,-70))
env.ic_gen.show()
arch = Arch()
policy = Policy(env)
value_function = Value_function(valfunc_nets.GRU(obs_dim, recurrent_steps=recurrent_steps),
shuffle=False, batch_size=9999999, max_grad_norm=30)
agent = Agent(arch, policy, value_function, None, env, logger,
policy_episodes=30, policy_steps=3000, gamma1=0.95, gamma2=0.995, lam=0.98,
recurrent_steps=recurrent_steps, monitor=env.rl_stats)
agent.train(3)
###Output
3-dof dynamics model
lander model apf
queue fixed
Flat Constraint
###Markdown
Test Policy
###Code
policy.test_mode=True
env.test_policy_batch(agent,5000,print_every=100)
###Output
i : 100
Cumulative Stats (mean,std,max,argmax)
thrust |10361.61 |1052.91 |2106.29 |15000.00 | 63
glideslope | 2.352 | 1.904 | 0.693 |45.027 | 94
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.538 | 0.249 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.066 0.004 -0.507 | 0.137 0.116 0.240 | -0.909 -0.418 -0.928 | 0.202 0.460 -0.001
fuel |253.47 | 13.95 |223.81 |294.28
glideslope | 3.44 | 2.92 | 0.99 | 16.61
i : 200
Cumulative Stats (mean,std,max,argmax)
thrust |10363.09 |1084.10 |2106.29 |15000.00 | 63
glideslope | 2.383 | 2.861 | 0.620 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.538 | 0.261 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.057 0.006 -0.501 | 0.145 0.127 0.256 | -0.911 -0.660 -0.928 | 0.512 0.484 0.007
fuel |254.20 | 14.57 |223.81 |296.61
glideslope | 3.47 | 4.86 | 0.99 | 62.96
i : 300
Cumulative Stats (mean,std,max,argmax)
thrust |10311.94 |1075.23 |2000.00 |15000.00 | 263
glideslope | 2.459 | 3.595 | 0.614 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.536 | 0.257 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.063 0.007 -0.501 | 0.143 0.118 0.253 | -0.913 -0.660 -0.928 | 0.512 0.484 0.008
fuel |253.92 | 14.62 |223.81 |296.61
glideslope | 3.56 | 4.91 | 0.99 | 62.96
i : 400
Cumulative Stats (mean,std,max,argmax)
thrust |10339.82 |1072.25 |2000.00 |15000.00 | 263
glideslope | 2.494 | 3.406 | 0.567 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.533 | 0.252 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.059 0.010 -0.498 | 0.140 0.122 0.247 | -0.913 -0.660 -0.928 | 0.512 0.546 0.008
fuel |253.94 | 15.03 |220.14 |299.32
glideslope | 3.58 | 4.62 | 0.97 | 62.96
i : 500
Cumulative Stats (mean,std,max,argmax)
thrust |10327.73 |1063.56 |2000.00 |15000.00 | 263
glideslope | 2.403 | 3.110 | 0.553 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.537 | 0.250 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.061 0.007 -0.501 | 0.142 0.125 0.246 | -0.913 -0.660 -0.928 | 0.632 0.546 0.008
fuel |254.58 | 15.00 |220.14 |305.44
glideslope | 3.39 | 4.20 | 0.92 | 62.96
i : 600
Cumulative Stats (mean,std,max,argmax)
thrust |10321.29 |1063.90 |2000.00 |15000.00 | 263
glideslope | 2.369 | 2.959 | 0.553 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.538 | 0.248 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.062 0.004 -0.503 | 0.139 0.125 0.244 | -0.913 -0.660 -0.928 | 0.632 0.546 0.013
fuel |254.87 | 15.21 |220.14 |306.25
glideslope | 3.35 | 4.07 | 0.92 | 62.96
i : 700
Cumulative Stats (mean,std,max,argmax)
thrust |10321.93 |1066.28 |2000.00 |15000.00 | 263
glideslope | 2.373 | 2.890 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.533 | 0.249 | 0.002 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.0
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.061 0.005 -0.500 | 0.134 0.122 0.245 | -0.913 -0.660 -0.928 | 0.632 0.546 0.013
fuel |254.63 | 15.12 |220.14 |306.25
glideslope | 3.37 | 4.10 | 0.88 | 62.96
i : 800
Cumulative Stats (mean,std,max,argmax)
thrust |10317.46 |1062.48 |2000.00 |15000.00 | 263
glideslope | 2.379 | 2.920 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.532 | 0.247 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.062 0.004 -0.498 | 0.135 0.123 0.242 | -0.913 -0.660 -0.928 | 0.632 0.577 0.013
fuel |254.52 | 14.94 |220.14 |306.25
glideslope | 3.36 | 3.92 | 0.88 | 62.96
i : 900
Cumulative Stats (mean,std,max,argmax)
thrust |10328.79 |1060.82 |2000.00 |15000.00 | 263
glideslope | 2.364 | 2.828 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.531 | 0.245 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.061 0.001 -0.497 | 0.135 0.123 0.240 | -0.913 -0.660 -0.928 | 0.632 0.577 0.013
fuel |254.70 | 14.87 |220.14 |306.25
glideslope | 3.30 | 3.75 | 0.88 | 62.96
i : 1000
Cumulative Stats (mean,std,max,argmax)
thrust |10330.65 |1062.46 |2000.00 |15000.00 | 263
glideslope | 2.384 | 3.076 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.529 | 0.244 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.063 0.002 -0.496 | 0.135 0.123 0.239 | -0.913 -0.660 -0.928 | 0.636 0.577 0.013
fuel |254.85 | 14.97 |220.14 |306.25
glideslope | 3.31 | 3.83 | 0.82 | 62.96
i : 1100
Cumulative Stats (mean,std,max,argmax)
thrust |10328.68 |1063.59 |2000.00 |15000.00 | 263
glideslope | 2.376 | 3.017 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.525 | 0.242 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.062 0.000 -0.491 | 0.136 0.123 0.236 | -0.913 -0.660 -0.928 | 0.636 0.577 0.013
fuel |255.00 | 15.00 |220.14 |306.25
glideslope | 3.29 | 3.73 | 0.82 | 62.96
i : 1200
Cumulative Stats (mean,std,max,argmax)
thrust |10315.56 |1060.93 |2000.00 |15000.00 | 263
glideslope | 2.380 | 2.985 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.527 | 0.240 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 -0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.062 0.000 -0.493 | 0.138 0.123 0.235 | -0.916 -0.660 -0.928 | 0.636 0.577 0.013
fuel |255.00 | 15.09 |220.14 |306.25
glideslope | 3.34 | 4.05 | 0.82 | 65.03
i : 1300
Cumulative Stats (mean,std,max,argmax)
thrust |10318.76 |1063.13 |2000.00 |15000.00 | 263
glideslope | 2.364 | 2.898 | 0.507 |212.801 | 103
sc_margin |100.000 | 0.000 |100.000 |100.000 | 0
Final Stats (mean,std,min,max)
norm_vf | 0.527 | 0.241 | 0.001 | 1.003
norm_rf | 0.0 | 0.0 | 0.0 | 0.1
position | -0.0 -0.0 -0.0 | 0.0 0.0 0.0 | -0.0 -0.0 -0.0 | 0.0 0.0 -0.0
velocity | -0.062 0.002 -0.492 | 0.137 0.122 0.236 | -0.916 -0.660 -0.928 | 0.636 0.577 0.013
fuel |255.02 | 15.10 |220.14 |306.25
glideslope | 3.32 | 3.93 | 0.82 | 65.03
|
DeepLearningFrameworks/inference/ResNet50-TF.ipynb | ###Markdown
1. GPU
###Code
# Placeholders
checkpoint_file = 'resnet_v1_50.ckpt'
input_tensor = tf.placeholder(tf.float32, shape=(None,224,224,3), name='input_image')
# Load the model
sess = tf.Session()
arg_scope = resnet_v1.resnet_arg_scope()
with tensorflow.contrib.slim.arg_scope(arg_scope):
    # Docstring ->
    #   num_classes: Number of predicted classes for classification tasks. If None
    #   we return the features before the logit layer.
    logits, end_points = resnet_v1.resnet_v1_50(input_tensor, is_training=False)
saver = tf.train.Saver()
saver.restore(sess, checkpoint_file)
cold_start = predict_fn(logits, fake_input_data_cl, BATCH_SIZE)
%%time
# GPU: 8.26s
features = predict_fn(logits, fake_input_data_cl, BATCH_SIZE)
###Output
CPU times: user 7.85 s, sys: 1.02 s, total: 8.87 s
Wall time: 8.26 s
###Markdown
2. CPU
###Code
# HACK -> have to manually restart notebook and rerun
# Otherwise runs on GPU!!!!!
# Kill all GPUs ...
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# Placeholders
checkpoint_file = 'resnet_v1_50.ckpt'
input_tensor = tf.placeholder(tf.float32, shape=(None,224,224,3), name='input_image')
# Load the model
sess = tf.Session()
arg_scope = resnet_v1.resnet_arg_scope()
with tensorflow.contrib.slim.arg_scope(arg_scope):
    # Docstring ->
    #   num_classes: Number of predicted classes for classification tasks. If None
    #   we return the features before the logit layer.
    logits, end_points = resnet_v1.resnet_v1_50(input_tensor, is_training=False)
saver = tf.train.Saver()
saver.restore(sess, checkpoint_file)
# Create batches of fake data
fake_input_data_cl, fake_input_data_cf = give_fake_data(BATCHES_CPU)
print(fake_input_data_cl.shape, fake_input_data_cf.shape)
cold_start = predict_fn(logits, fake_input_data_cl, BATCH_SIZE)
%%time
# CPU: 23.1s
features = predict_fn(logits, fake_input_data_cl, BATCH_SIZE)
###Output
CPU times: user 1min 58s, sys: 3.84 s, total: 2min 2s
Wall time: 23.1 s
|
oecd-unemployment/.ipynb_checkpoints/Untitled-checkpoint.ipynb | ###Markdown
International Unemployment Rate Data
Download all available data from https://data.oecd.org/unemp/unemployment-rate.htm
###Code
import pandas as pd
# Country abbreviations
abbreviations = {'AUS':'Australia',
'AUT':'Austria',
'BEL':'Belgium',
'BRA':'Brazil',
'CAN':'Canada',
'CHL':'Chile',
'COL':'Colombia',
'CZE':'Czech Republic',
'CHE':'Switzerland',
'DEU':'Germany',
'DNK':'Denmark',
'EA19':'Euro Area (19 countries)',
'ESP':'Spain',
'EST':'Estonia',
'EU28':'European Union (28 countries)',
'FIN':'Finland',
'FRA':'France',
'GBR':'Great Britain',
'GRC':'Greece',
'HUN':'Hungary',
'IDN':'Indonesia',
'IRL':'Ireland',
'ISL':'Iceland',
'ISR':'Israel',
'ITA':'Italy',
'JPN':'Japan',
'KOR':'South Korea',
'LTU':'Lithuania',
'LUX':'Luxemburg',
'LVA':'Latvia',
'MEX':'Mexico',
'NLD':'Netherlands',
'NOR':'Norway',
'NZL':'New Zealand',
'OECD':'OECD',
'POL':'Poland',
'PRT':'Portugal',
'RUS':'Russia',
'SVK':'Slovak Republic',
'SVN':'Slovenia',
'SWE':'Sweden',
'TUR':'Turkey',
'USA':'United States',
'ZAF':'South Africa'
}
# Import data
data = pd.read_csv('../csv/DP_LIVE_14052019055753603.csv')
# Manage data
data = data[(data.FREQUENCY=='A') & (data.SUBJECT=='TOT')]
# Construct dataset
df = pd.DataFrame()
for code in data.LOCATION.sort_values().unique():
    values = data[data.LOCATION==code].Value
    index = pd.to_datetime(data[data.LOCATION==code].TIME.astype(str))
    temp_series = pd.Series(values)
    temp_series.index = index
    df[abbreviations[code]] = temp_series
# Rename index column
df.index.name='Date'
# Export data
df.to_csv('../csv/international_unemployment_rate_data.csv')
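# --- Optional sanity check (an added sketch, not part of the original notebook) ---
# Read the exported CSV back and inspect one example column; 'United States' is a
# column name taken from the abbreviations dictionary above.
check = pd.read_csv('../csv/international_unemployment_rate_data.csv',
                    index_col='Date', parse_dates=True)
print(check['United States'].tail())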
###Output
_____no_output_____ |
Session7/Day1/Code repositories.ipynb | ###Markdown
Code Repositories
The notebook contains problems oriented around building a basic Python code repository and making it public via [Github](http://www.github.com). Of course there are other places to put code repositories, with complexity ranging from services comparable to github to simply hosting a git server on your local machine. But this focuses on git and github as a ready-to-use example with plenty of additional resources to be found online. Note that these problems assume you are using the Anaconda Python distribution. This is particularly useful for these problems because it makes it very easy to install testing packages in virtual environments quickly and with little wasted disk space. If you are not using anaconda, you can either use an alternative virtual environment scheme (e.g. `pyenv` or `virtualenv`), or just install packages directly into your default python (and hope for the best...). For `git` interaction, this notebook also uses the `git` command line tools directly. There are a variety of GUI tools that make working with `git` more visually intuitive (e.g. [SourceTree](http://www.sourcetreeapp.com), [gitkraken](http://www.gitkraken.com), or the [github desktop client](https://desktop.github.com)), but this notebook uses the command line tools as the lowest common denominator. You are welcome to try to reproduce the steps with your client, however - feel free to ask your neighbors or instructors if you run into trouble there. As a final note, this notebook's examples assume you are using a system with a unix-like shell (e.g. macOS, Linux, or Windows with git-bash or the Linux subsystem shell).
* * *
Original by E Tollerud 2017 for LSSTC DSFP Session3 and AstroHackWeek, modified by B Sipocz
Problem 0: Using Jupyter as a shell
As an initial step before diving into code repositories, it's important to understand how you can use Jupyter as a shell. Most of the steps in this notebook require interaction with the system that's easier done with a shell or editor rather than using Python code in a notebook. While this could be done by opening up a terminal beside this notebook, to keep most of your work in the notebook itself, you can use the capabilities Jupyter + IPython offer for shell interaction.
0a: Figure out your base shell path and what's in it
The critical trick here is the ``!`` magic in IPython. Anything after a leading ``!`` in IPython gets run by the shell instead of as python code. Run the shell commands ``pwd`` and ``ls`` to see where IPython thinks you are on your system, and the contents of the directory.
*hint: Be sure to remove the "complete"s below when you've done so. IPython will interpret that as part of the shell command if you don't*
###Code
!pwd
!ls
###Output
/home/caitlin/LSSTC-DSFP/LSSTC-DSFP-Sessions/Session7/Day1
blind_test_set.h5 MachLearnAstroData.ipynb
BriefIntroToMachineLearning.ipynb newdir
Code repositories.ipynb sdss_training_set.h5
images
###Markdown
0b: Try a multi-line shell command IPython magics often support "cell" magics by having ``%%`` at the top of a cell. Use that to cd into the directory below this one ("..") and then ``ls`` inside that directory.*Hint: if you need syntax tips, run the ``magic()`` function and look for the `!` or `!!` commands*
###Code
%%sh
cd ../
ls
###Output
Day0
Day1
Day2
Day3
Day4
Day5
README.md
###Markdown
0c: Create a new directory from Jupyter
While you can do this almost as easily with `os.mkdir` in Python, for this case try to do it using shell magics instead. Make a new directory in the directory you are currently in. Use your system file browser to ensure you were successful.
###Code
!mkdir newdir
###Output
mkdir: cannot create directory ‘newdir’: File exists
###Markdown
0d: Change directory to your new directory One thing about shell commands is that they always start wherever you started your IPython instance. So doing ``cd`` as a shell command only changes things temporarily (i.e. within that shell command). IPython provides a `%cd` magic that makes this change last, though. Use this to `%cd` into the directory you just created, and then use the `pwd` shell command to ensure this cd "stuck" (You can also try doing `cd` as a **shell** command to prove to yourself that it's different from the `%cd` magic.)
###Code
%cd -0
%cd newdir
%pwd
###Output
/home/caitlin/LSSTC-DSFP/LSSTC-DSFP-Sessions/Session7/Day1
/home/caitlin/LSSTC-DSFP/LSSTC-DSFP-Sessions/Session7/Day1/newdir
###Markdown
Final note: ``%cd -0`` is a convenient shorthand to switch back to the initial directory. Problem 1: Creating a bare-bones repo and getting it on Github Here we'll create a simple (public) code repository with a minimal set of content, and publish it in github. 1a: Create a basic repository locally Start by creating the simplest possible code repository, composed of a single code file. Create a directory (or use the one from *0c*), and place a ``code.py`` file in it, with a bit of Python code of your choosing. (Bonus points for witty or sarcastic code...) You could even use non-Python code if you desired, although Problems 3 & 4 feature Python-specific bits so I wouldn't recommend it.To make the file from the notebook, the ``%%file `` magic is a convenient way to write the contents of a notebook cell to a file.
###Code
#!mkdir #complete only if you didn't do 0c, or want a different name for your code directory
%%file code.py
def do_something():
    # complete
    print("heyyyyy")  # this will make it much easier in future problems to see that something is actually happening
###Output
Overwriting code.py
###Markdown
If you want to test-run your code:
###Code
%run code.py # complete
do_something()
###Output
heyyyyy
###Markdown
1b: Convert the directory into a git repo Make that code into a git repository by doing ``git init`` in the directory you created, then ``git add`` and ``git commit``.
###Code
%cd newdir
!git init
!git add code.py
!git commit -m "initial commit of code.py"
###Output
[master e63541a] initial commit of code.py
1 file changed, 21 insertions(+)
create mode 100644 LICENSE
###Markdown
1c: Create a repository for your code in Github Go to [github's web site](http://www.github.com) in your web browser. If you do not have a github account, you'll need to create one (follow the prompts on the github site). Once you've got an account, you'll need to make sure your git client can [authenticate with github](https://help.github.com/categories/authenticating-to-github/). If you're using a GUI, you'll have to figure it out (usually it's pretty easy). On the command line you have two options: * The simplest way is to connect to github using HTTPS. This requires no initial setup, but `git` will prompt you for your github username and password every so often.* If you find that annoying, you can set up your system to use SSH to talk to github. Look for the "SSH and GPG keys" section of your settings on github's site, or if you're not sure how to work with SSH keys, check out [github's help on the subject](https://help.github.com/articles/connecting-to-github-with-ssh/). Once you've got github set up to talk to your computer, you'll need to create a new repository for the code you created. Hit the "+" in the upper-right, create a "new repository" and fill out the appropriate details (don't create a README just yet). To be consistent, it's recommended using the same name for your repository as the local directory name you used. But that is *not* a requirement, just a recommendation. Once you've created the repository, connect your local repository to github and push your changes up to github.
###Code
!git remote add ccdoughty https://github.com/ccdoughty/newdir.git
#!git push ccdoughty master -u DO THIS IN THE TERMINAL
###Output
_____no_output_____
###Markdown
The ``-u`` is a convenience that means from then on you can use just ``git push`` and ``git pull`` to send your code to and from github. 1e: Modify the code and send it back up to github Proper documentation is important. But for now make sure to add a README to your code repository. Always add a README with basic documentation. Always. Even if only you are going to use this code, trust me, your future self will be very happy you did it. You can just call it `README`, but to get it to get rendered nicely on the github repository, you can call it ``README.md`` and write it using markdown syntax, ``REAMDE.rst`` in ReST or various other similar markup languages github understands. If you don't know/care, just use ``README.md``, as that's pretty standard at this point.
###Code
%%file README.md
documentation
contains the def do_something(), which takes no arguments and just prints the word "heyyyy"
###Output
Overwriting README.md
###Markdown
Now add it to the repository via ``git`` commit, and push up to github...
###Code
!git add README.md
!git commit -m "adding the readme markdown file"
###Output
On branch master
Your branch is ahead of 'ccdoughty/master' by 2 commits.
(use "git push" to publish your local commits)
nothing to commit, working directory clean
###Markdown
1f: Choose a License
I bet you didn't expect to be reading legalese today... but it turns out this is important. If you do not explicitly license your code, in most countries (including the US and EU) it is technically **illegal** for anyone to use your code for any purpose other than just looking at it. (Un?)Fortunately, there are a lot of possible open source licenses out there. Assuming you want an open license, the best resource is the ["Choose a License" website](http://choosealicense.org). Have a look over the options there and decide which you think is appropriate for your code. Once you've chosen a License, grab a copy of the license text, and place it in your repository as a file called ``LICENSE`` (or ``LICENSE.md`` or the like). Some licenses might also suggest you place the license text or just a copyright notice in the source code as well, but that's up to you. Once you've done that, do as we've done before: push all your additions up to github. If you've done it right, github will automatically figure out your license and show it in the upper-right corner of your repo's github page.
Problem 2: Collaborating with others' repos
One very important advantage of working in repositories is that sharing the code becomes much easier: others (and your future self) can have a look at it, use it, and contribute to it. So now we'll have you try to modify your neighbors' project using github's Pull Request feature.
2a: Get (git?) your neighbor's code repo
Find someone sitting near you who has gotten through Problem 1. Ask them their github user name and the name of their repository. Once you've got the name of their repo, navigate to it on github. The URL pattern is always "https://www.github.com/username/reponame". Use the github interface to "fork" that repo, yielding a "yourusername/reponame" repository. Go to that one, take note of the URL needed to clone it (you'll need to grab it from the repo web page, either in "HTTPS" or "SSH" form, depending on your choice in 1a). Then clone that onto your local machine.
###Code
# Don't forget to do this cd or something like it... otherwise you'll clone *inside* your repo
#%cd -0
#!git clone https://github.com/mrizzo25/version_control_test_dir
%cd version_control_test_dir
###Output
/home/caitlin/LSSTC-DSFP/LSSTC-DSFP-Sessions/Session7/Day1/version_control_test_dir
###Markdown
2c: create a branch for your change
You're going to make some changes to their code, but who knows... maybe they'll spend so long reviewing it that you want to do another. So it's always best to make changes in a specific "branch" for that change, which means we need to make a git branch. A super useful site for learning more about branching and practicing scenarios is https://learngitbranching.js.org/; feel free to check it out now, and also ask questions.
###Code
!git branch testbranch
###Output
_____no_output_____
###Markdown
2c: modify the code Make some change to their code repo. Usually this would be a new feature or a bug fix or documentation clarification or the like... But it's up to you. Once you've done that, be sure to commit the change locally.
###Code
!git add code.py
!git commit -m "removed function call"
###Output
[master c12cec4] removed function call
1 file changed, 3 deletions(-)
###Markdown
and push it up (to a branch on *your* github fork).
###Code
!git push origin testbranch
###Output
Username for 'https://github.com':
###Markdown
2d: Issue a pull request Now use the github interface to create a new "pull request". Once you've pushed your new branch up, you'll see a prompt to do this automatically appear on your fork's web page. But if you don't, use the "branches" drop-down to navigate to the new branch, and then hit the "pull request" button. That should show you an interface that you can use to leave a title and description (in github markdown), and then submit the PR. Go ahead and do this. 2e: Have them review the PR Tell your neighbor that you've issued the PR. They should be able to go to *their* repo, and see that a new pull request has been created. There they'll review the PR, possibly leaving comments for you to change. If so, go to 2f, but if not, they should hit the "Merge" button, and you can jump to 2g. 2f: (If necessary) make changes and update the code If they left you some comments that require changing prior to merging, you'll need to make those changes in your local copy, commit those changes, and then push them up to your branch on your fork.
###Code
!git #complete
###Output
_____no_output_____
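A typical sequence for 2f looks like the following sketch; the file name and commit message are examples, and `testbranch` is the branch created earlier in this notebook:

```
!git add code.py
!git commit -m "Address review comments"
!git push origin testbranch
```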
###Markdown
Hopefully they are now satisfied and are willing to hit the merge button. 2g: Get the updated version Now you should get the up-to-date version from the original owner of the repo, because that way you'll have both your changes and any other changes they might have made in the meantime. To do this you'll need to connect your local copy to your *nieghbor*'s github repo (**not** your fork).
###Code
!git remote add <neighbors-username> <url-from-neighbors-github-repo> #complete
!git fetch <neighbors-username> #complete
!git branch --set-upstream-to=<neighbors-username>/master master
!git checkout master
!git pull
###Output
_____no_output_____
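As a concrete sketch with purely hypothetical names (substitute your neighbor's actual username and the URL of their github repository), the sequence above becomes:

```
!git remote add alice https://github.com/alice/demo_repo.git
!git fetch alice
!git branch --set-upstream-to=alice/master master
!git checkout master
!git pull
```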
###Markdown
Now if you look at the local repo, it should include your changes. *Suggestion:* You may want to change the "origin" remote to your username. E.g. ``git remote rename origin ``. To go further, you might even *delete* your fork's `master` branch, so that only your neighbor's `master` exists. That might save you headaches in the long run if you were to ever access this repo again in the future.
2h: Have them reciprocate
Science (Data or otherwise) and open source code are social enterprises built on shared effort, mutual respect, and trust. So ask them to issue a PR against *your* code, too. The more we can stand on each others' shoulders, the farther we will all see. *Hint: Ask them nicely. Maybe offer a cookie or something?*
Problem 3: Setting up a bare-bones Python Package
Up to this point we've been working on the simplest possible shared code: a single file with all the content. But for most substantial use cases this isn't going to cut it. After all, Python was designed around the idea of namespaces that let you hide away or show code to make writing, maintaining, and versioning code much easier. But to make use of these, we need to deploy the installation tools that Python provides. This is typically called "packaging". In this problem we will take the code you just made and build it into a proper python package that can be installed and then used anywhere. For more background and detail (and the most up-to-date recommendations) see the [Python Packaging Guide](https://packaging.python.org/current/).
3a: Set up a Python package structure for your code
First we adjust the structure of your code from Problem 1 to allow it to live in a package structure rather than as a stand-alone ``.py`` file. All you need to do is create a directory, move the ``code.py`` file into that directory, and add a file (can be empty) called ``__init__.py`` into the directory. You'll have to pick a name for the package, which is usually the same as the repo name (although that's not strictly required; a notable exception is e.g. `scikit-learn` vs `sklearn`). *Hint: don't forget to switch back to *your* code repo directory, if you are doing this immediately after Problem 2.*
###Code
!mkdir <yourpkgname>#complete
!git mv code.py <yourpkgname>#complete
#The "touch" unix command simply creates an empty file if there isn't one already.
#You could also use an editor to create an empty file if you prefer.
!touch <yourpkgname>/__init__.py#complete
###Output
_____no_output_____
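For example, with a hypothetical package name `mypkg`, the three steps above would be:

```
!mkdir mypkg
!git mv code.py mypkg/
!touch mypkg/__init__.py
```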
###Markdown
3b: Test your package You should now be able to import your package and the code inside it as though it were some installed package like `numpy`, `astropy`, `pandas`, etc.
###Code
from <yourpkgname> import code#complete
#if your code.py has a function called `do_something` as in the example above, you can now run it like:
code.do_something()
###Output
_____no_output_____
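With the hypothetical `mypkg` name from above, the test would look like:

```python
from mypkg import code
# if your code.py has a function called `do_something`, you can now run it like:
code.do_something()
```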
###Markdown
3c: Apply packaging tricks
One of the nice things about packages is that they let you hide the implementation of some part of your code in one place while exposing a "cleaner" namespace to the users of your package. To see a (trivial) example of this, let's pull a function from your ``code.py`` into the base namespace of the package. In the cell below, make the ``__init__.py`` have one line: ``from .code import do_something``. That places the ``do_something()`` function into the package's root namespace.
###Code
!cd newdir
%%file repodir/__init__.py
###Output
UsageError: Line magic function `%%file` not found.
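The cell above fails because a ``%%`` cell magic such as ``%%file`` must be on the first line of its cell. A corrected version, keeping the ``repodir`` package name used above (and assuming that directory has already been created as in 3a, with the cell run from inside the repository directory), would be:

```
%%file repodir/__init__.py
from .code import do_something
```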
###Markdown
Now the following should work.
###Code
import <yourpkgname>#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
*BUT* you will probably get an error here. That's because Python is smart about imports: once it's imported a package it won't re-import it later. Usually that saves time, but here it's a hassle. Fortunately, we can use the ``reload`` function to get around this:
###Code
from importlib import reload
reload(<yourpkgname>)#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
3d: Create a setup.py file
Ok, that's great in a pinch, but what if you want your package to be available from *other* directories? If you open a new terminal somewhere else and try to ``import `` you'll see that it will fail, because Python doesn't know where to find your package. Fortunately, Python (both the language and the larger ecosystem) provides built-in tools to install packages. These are built around creating a ``setup.py`` script that controls installation of a python package into a shared location on your machine. Essentially all Python packages are installed this way, even if it happens silently behind-the-scenes. Below is a template bare-bones setup.py file. Fill it in with the relevant details for your package.
###Code
%%file setup.py
#!/usr/bin/env python
from distutils.core import setup
setup(name='<yourpkgname>',
      version='0.1dev',
      description='<a description>',
      author='<your name>',
      author_email='<youremail>',
      packages=['<yourpkgname>'],
      ) #complete
###Output
_____no_output_____
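A filled-in version of the template, with placeholder values that are purely illustrative (swap in your own package name and details):

```python
%%file setup.py
#!/usr/bin/env python
from distutils.core import setup

setup(name='mypkg',
      version='0.1dev',
      description='A toy package for the code-repositories exercise',
      author='Jane Doe',
      author_email='jane.doe@example.com',
      packages=['mypkg'],
      )
```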
###Markdown
3e: Build the package Now you should be able to "build" the package. In complex packages this will involve more involved steps like linking against C or FORTRAN code, but for pure-python packages like yours, it simply involves filtering out some extraneous files and copying the essential pieces into a build directory.
###Code
!python setup.py build
###Output
_____no_output_____
###Markdown
To test that it built successfully, the easiest thing to do is cd into the `build/lib.X-Y-Z` directory ("X-Y-Z" here is OS and machine-specific). Then you should be able to ``import ``. It's usually best to do this as a completely independent process in python. That way you can be sure you aren't accidentally using an old import as we saw above.
###Code
%%sh
cd build/lib.X-Y-Z #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3f: Install the package Alright, now that it looks like it's all working as expected, we can install the package. Note that if we do this willy-nilly, we'll end up with lots of packages, perhaps with the wrong versions, and it's easy to get confused about what's installed (there's no reliable ``uninstall`` command...) So before installing we first create a virtual environment using Anaconda, and install into that. If you don't have anaconda or a similar virtual environment scheme, you can just do ``python setup.py install``. But just remember that this will be difficult to back out (hence the reason for Python environments in the first place!)
###Code
%%sh
conda create -n test_<yourpkgname> anaconda #complete
source activate test_<yourpkgname> #complete
python setup.py install
###Output
_____no_output_____
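With the hypothetical `mypkg` name, the environment-creation and install steps would read something like this sketch (the ``-y`` flag is added so conda does not prompt for confirmation inside the non-interactive cell):

```
%%sh
conda create -y -n test_mypkg anaconda
source activate test_mypkg
python setup.py install
```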
###Markdown
Now we can try running the package from *anywhere* (not just the source code directory), as long as we're in the same environment that we installed the package in.
###Code
%%sh
cd $HOME
source activate test_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3g: Update the package on github OK, it's now installable. You'll now want to make sure to update the github version to reflect these improvements. You'll need to add and commit all the files. You'll also want to update the README to instruct users that they should use ``python setup.py install`` to install the package.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Problem 4: Publishing your package on (fake) PyPI
Your package can now be installed by anyone who comes across it on github. But it tends to scare some people that they need to download the source code and know ``git`` to use your code. The Python Package Index (PyPI), combined with the ``pip`` tool (now standard in Python), provides a much simpler way to distribute code. Here we will publish your code to a **testing** version of PyPI.
4a: Create a PyPI account
First you'll need an account on PyPI to register new packages. Go to the [testing PyPI](https://testpypi.python.org/pypi), and register. You'll also need to supply your login details in the ``.pypirc`` file in your home directory as shown below. (If it were the real PyPI you'd want to be more secure and not have your password in plain text. But for the testing server that's not really an issue.) Note that if you've ever done something like this before and hence already have a `.pypirc` file, you might get unexpected results if you run this without moving/renaming the old version temporarily.
###Code
%%file -a ~/.pypirc
[distutils]
index-servers = pypi
[pypi]
repository = https://test.pypi.org/legacy/
username = <your user name goes here>
password = <your password goes here>
###Output
_____no_output_____
###Markdown
4b: Build a "source" version of your package Use ``distutils`` to create the source distribution of your package.*Hint: You'll want to make sure your package version is something you want to release before executing the upload command. Released versions can't be duplicates of existing versions, and shouldn't end in "dev" or "b" or the like.*"
###Code
!python setup.py sdist
###Output
_____no_output_____
###Markdown
Verify that there is a ``-.tar.gz`` file in the ``dist`` directory. It should have all of the source code necessary for your package. 4c: Upload your package to PyPI Once you have an account on PyPI (or testPyPI in our case) you can upload your distributions to PyPI using ``twine``. If this is your first time uploading a distribution for a new project, twine will handle registering the project automatically filling out the details you provided in your ``setup.py``.
###Code
!twine upload dist/<yourpackage>-<version>
###Output
_____no_output_____
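For example, if the sdist built in 4b produced ``dist/mypkg-0.1dev.tar.gz`` (a hypothetical file name), the upload command would be:

```
!twine upload dist/mypkg-0.1dev.tar.gz
```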
###Markdown
4d: Install your package with ``pip`` The ``pip`` tool is a convenient way to install packages on PyPI. Again, we use Anaconda to create a testing environment to make sure everything worked correctly.(Normally the ``-i`` wouldn't be necessary - we're using it here only because we're using the "testing" PyPI)
###Code
%%sh
conda create -n test_pypi_<yourpkgname> anaconda #complete
source activate test_pypi_<yourpkgname> #complete
pip install -i https://testpypi.python.org/pypi <yourpkgname>
%%sh
cd $HOME
source activate test_pypi_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
Code Repositories
The notebook contains problems oriented around building a basic Python code repository and making it public via [Github](http://www.github.com). Of course there are other places to put code repositories, with complexity ranging from services comparable to github to simply hosting a git server on your local machine. But this focuses on git and github as a ready-to-use example with plenty of additional resources to be found online. Note that these problems assume you are using the Anaconda Python distribution. This is particularly useful for these problems because it makes it very easy to install testing packages in virtual environments quickly and with little wasted disk space. If you are not using anaconda, you can either use an alternative virtual environment scheme (e.g. `pyenv` or `virtualenv`), or just install packages directly into your default python (and hope for the best...). For `git` interaction, this notebook also uses the `git` command line tools directly. There are a variety of GUI tools that make working with `git` more visually intuitive (e.g. [SourceTree](http://www.sourcetreeapp.com), [gitkraken](http://www.gitkraken.com), or the [github desktop client](https://desktop.github.com)), but this notebook uses the command line tools as the lowest common denominator. You are welcome to try to reproduce the steps with your client, however - feel free to ask your neighbors or instructors if you run into trouble there. As a final note, this notebook's examples assume you are using a system with a unix-like shell (e.g. macOS, Linux, or Windows with git-bash or the Linux subsystem shell).
* * *
Original by E Tollerud 2017 for LSSTC DSFP Session3 and AstroHackWeek, modified by B Sipocz
Problem 0: Using Jupyter as a shell
As an initial step before diving into code repositories, it's important to understand how you can use Jupyter as a shell. Most of the steps in this notebook require interaction with the system that's easier done with a shell or editor rather than using Python code in a notebook. While this could be done by opening up a terminal beside this notebook, to keep most of your work in the notebook itself, you can use the capabilities Jupyter + IPython offer for shell interaction.
0a: Figure out your base shell path and what's in it
The critical trick here is the ``!`` magic in IPython. Anything after a leading ``!`` in IPython gets run by the shell instead of as python code. Run the shell commands ``pwd`` and ``ls`` to see where IPython thinks you are on your system, and the contents of the directory.
*hint: Be sure to remove the "complete"s below when you've done so. IPython will interpret that as part of the shell command if you don't*
###Code
! dir
###Output
Volume in drive C has no label.
Volume Serial Number is D0E1-FA53
Directory of C:\Users\Owner\Documents\DSFP\LSSTC-DSFP-Sessions\Session7\Day1
11/06/2018 02:20 PM <DIR> .
11/06/2018 02:20 PM <DIR> ..
11/06/2018 11:18 AM <DIR> .ipynb_checkpoints
11/06/2018 09:45 AM 1'868'208 blind_test_set.h5
11/06/2018 11:11 AM 14'762 BriefIntroToMachineLearning.ipynb
11/06/2018 02:20 PM 40'276 Code repositories.ipynb
11/06/2018 11:11 AM <DIR> images
11/06/2018 01:59 PM 31'543 MachLearnAstroData.ipynb
11/05/2018 11:57 AM 1'063 README.md
11/06/2018 09:44 AM 2'993'656 sdss_training_set.h5
6 File(s) 4'949'508 bytes
4 Dir(s) 91'078'483'968 bytes free
###Markdown
0b: Try a multi-line shell command IPython magics often support "cell" magics by having ``%%`` at the top of a cell. Use that to cd into the directory below this one ("..") and then ``ls`` inside that directory.*Hint: if you need syntax tips, run the ``magic()`` function and look for the `!` or `!!` commands*
###Code
#%%sh
#magic()
! cd ..
###Output
_____no_output_____
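On a unix-like shell, the intended cell would look something like the following (the same pattern used in the completed copy of this notebook above; on plain Windows ``%%sh`` may not be available):

```
%%sh
cd ..
ls
```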
###Markdown
0c: Create a new directory from Jupyter
While you can do this almost as easily with `os.mkdir` in Python, for this case try to do it using shell magics instead. Make a new directory in the directory you are currently in. Use your system file browser to ensure you were successful.
###Code
! mkdir my_code_repo
###Output
_____no_output_____
###Markdown
0d: Change directory to your new directory One thing about shell commands is that they always start wherever you started your IPython instance. So doing ``cd`` as a shell command only changes things temporarily (i.e. within that shell command). IPython provides a `%cd` magic that makes this change last, though. Use this to `%cd` into the directory you just created, and then use the `pwd` shell command to ensure this cd "stuck" (You can also try doing `cd` as a **shell** command to prove to yourself that it's different from the `%cd` magic.)
###Code
%cd ..
###Output
C:\Users\Owner\Documents\DSFP\LSSTC-DSFP-Sessions\Session7
###Markdown
Final note: ``%cd -0`` is a convenient shorthand to switch back to the initial directory. Problem 1: Creating a bare-bones repo and getting it on Github Here we'll create a simple (public) code repository with a minimal set of content, and publish it in github. 1a: Create a basic repository locally Start by creating the simplest possible code repository, composed of a single code file. Create a directory (or use the one from *0c*), and place a ``code.py`` file in it, with a bit of Python code of your choosing. (Bonus points for witty or sarcastic code...) You could even use non-Python code if you desired, although Problems 3 & 4 feature Python-specific bits so I wouldn't recommend it.To make the file from the notebook, the ``%%file `` magic is a convenient way to write the contents of a notebook cell to a file.
###Code
!mkdir #complete only if you didn't do 0c, or want a different name for your code directory
%%file my_code_repo/code.py
def do_something():
    # complete
    print("This is my code repo")  # this will make it much easier in future problems to see that something is actually happening
###Output
Writing my_code_repo/code.py
###Markdown
If you want to test-run your code:
###Code
%run <yourdirectory>/code.py # complete
do_something()
###Output
_____no_output_____
###Markdown
1b: Convert the directory into a git repo Make that code into a git repository by doing ``git init`` in the directory you created, then ``git add`` and ``git commit``.
###Code
%cd # complete
!git init
!git add code.py
!git commit -m #complete
###Output
_____no_output_____
###Markdown
1c: Create a repository for your code in Github Go to [github's web site](http://www.github.com) in your web browser. If you do not have a github account, you'll need to create one (follow the prompts on the github site). Once you've got an account, you'll need to make sure your git client can [authenticate with github](https://help.github.com/categories/authenticating-to-github/). If you're using a GUI, you'll have to figure it out (usually it's pretty easy). On the command line you have two options: * The simplest way is to connect to github using HTTPS. This requires no initial setup, but `git` will prompt you for your github username and password every so often.* If you find that annoying, you can set up your system to use SSH to talk to github. Look for the "SSH and GPG keys" section of your settings on github's site, or if you're not sure how to work with SSH keys, check out [github's help on the subject](https://help.github.com/articles/connecting-to-github-with-ssh/). Once you've got github set up to talk to your computer, you'll need to create a new repository for the code you created. Hit the "+" in the upper-right, create a "new repository" and fill out the appropriate details (don't create a README just yet). To be consistent, it's recommended using the same name for your repository as the local directory name you used. But that is *not* a requirement, just a recommendation. Once you've created the repository, connect your local repository to github and push your changes up to github.
###Code
!git remote add <yourgithubusername> <the url github shows you on the repo web page> #complete
!git push <yourgithubusername> master -u
###Output
_____no_output_____
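As a concrete sketch with hypothetical names (substitute your own github username and the URL github shows for your new repository):

```
!git remote add myusername https://github.com/myusername/my_code_repo.git
!git push myusername master -u
```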
###Markdown
The ``-u`` is a convenience that means from then on you can use just ``git push`` and ``git pull`` to send your code to and from github. 1e: Modify the code and send it back up to github Proper documentation is important. But for now make sure to add a README to your code repository. Always add a README with basic documentation. Always. Even if only you are going to use this code, trust me, your future self will be very happy you did it. You can just call it `README`, but to get it to get rendered nicely on the github repository, you can call it ``README.md`` and write it using markdown syntax, ``REAMDE.rst`` in ReST or various other similar markup languages github understands. If you don't know/care, just use ``README.md``, as that's pretty standard at this point.
###Code
%%file README.md
# complete
###Output
_____no_output_____
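A minimal README sketch for this repository (the wording is just an example; adapt it to your own code):

```
%%file README.md
# my_code_repo

A toy repository for the code-repositories exercise.
`code.py` defines `do_something()`, which prints a short message.
```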
###Markdown
Now add it to the repository via ``git`` commit, and push up to github...
###Code
!git #complete
###Output
_____no_output_____
###Markdown
1f: Choose a License
I bet you didn't expect to be reading legalese today... but it turns out this is important. If you do not explicitly license your code, in most countries (including the US and EU) it is technically **illegal** for anyone to use your code for any purpose other than just looking at it. (Un?)Fortunately, there are a lot of possible open source licenses out there. Assuming you want an open license, the best resource is the ["Choose a License" website](http://choosealicense.org). Have a look over the options there and decide which you think is appropriate for your code. Once you've chosen a License, grab a copy of the license text, and place it in your repository as a file called ``LICENSE`` (or ``LICENSE.md`` or the like). Some licenses might also suggest you place the license text or just a copyright notice in the source code as well, but that's up to you. Once you've done that, do as we've done before: push all your additions up to github. If you've done it right, github will automatically figure out your license and show it in the upper-right corner of your repo's github page.
###Code
!git #complete
###Output
_____no_output_____
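Once you have saved the chosen license text as ``LICENSE``, one possible sequence is the following sketch (the commit message is just an example):

```
!git add LICENSE
!git commit -m "Add license"
!git push
```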
###Markdown
Problem 2: Collaborating with others' repos
One very important advantage of working in repositories is that sharing the code becomes much easier: others (and your future self) can have a look at it, use it, and contribute to it. So now we'll have you try to modify your neighbors' project using github's Pull Request feature.
2a: Get (git?) your neighbor's code repo
Find someone sitting near you who has gotten through Problem 1. Ask them their github user name and the name of their repository. Once you've got the name of their repo, navigate to it on github. The URL pattern is always "https://www.github.com/username/reponame". Use the github interface to "fork" that repo, yielding a "yourusername/reponame" repository. Go to that one, take note of the URL needed to clone it (you'll need to grab it from the repo web page, either in "HTTPS" or "SSH" form, depending on your choice in 1a). Then clone that onto your local machine.
###Code
# Don't forget to do this cd or something like it... otherwise you'll clone *inside* your repo
%cd -0
!git clone <url from github>#complete
%cd <reponame>#complete
###Output
_____no_output_____
###Markdown
2c: create a branch for your change
You're going to make some changes to their code, but who knows... maybe they'll spend so long reviewing it that you want to do another. So it's always best to make changes in a specific "branch" for that change, which means we need to make a git branch. A super useful site for learning more about branching and practicing scenarios is https://learngitbranching.js.org/; feel free to check it out now, and also ask questions.
###Code
!git branch <name-of-branch>#complete
###Output
_____no_output_____
###Markdown
2c: modify the code Make some change to their code repo. Usually this would be a new feature or a bug fix or documentation clarification or the like... But it's up to you. Once you've done that, be sure to commit the change locally.
###Code
!git add <files modified>#complete
!git commit -m ""#complete
###Output
_____no_output_____
###Markdown
and push it up (to a branch on *your* github fork).
###Code
!git push origin <name-of-branch>#complete
###Output
_____no_output_____
###Markdown
2d: Issue a pull request Now use the github interface to create a new "pull request". Once you've pushed your new branch up, you'll see a prompt to do this automatically appear on your fork's web page. But if you don't, use the "branches" drop-down to navigate to the new branch, and then hit the "pull request" button. That should show you an interface that you can use to leave a title and description (in github markdown), and then submit the PR. Go ahead and do this. 2e: Have them review the PR Tell your neighbor that you've issued the PR. They should be able to go to *their* repo, and see that a new pull request has been created. There they'll review the PR, possibly leaving comments for you to change. If so, go to 2f, but if not, they should hit the "Merge" button, and you can jump to 2g. 2f: (If necessary) make changes and update the code If they left you some comments that require changing prior to merging, you'll need to make those changes in your local copy, commit those changes, and then push them up to your branch on your fork.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Hopefully they are now satisfied and are willing to hit the merge button. 2g: Get the updated version Now you should get the up-to-date version from the original owner of the repo, because that way you'll have both your changes and any other changes they might have made in the meantime. To do this you'll need to connect your local copy to your *nieghbor*'s github repo (**not** your fork).
###Code
!git remote add <neighbors-username> <url-from-neighbors-github-repo> #complete
!git fetch <neighbors-username> #complete
!git branch --set-upstream-to=<neighbors-username>/master master
!git checkout master
!git pull
###Output
_____no_output_____
###Markdown
Now if you look at the local repo, it should include your changes. *Suggestion* You may want to change the "origin" remote to your username. E.g. ``git remote rename origin <yourusername>``. To go further, you might even *delete* your fork's `master` branch, so that only your neighbor's `master` exists. That might save you headaches in the long run if you were to ever access this repo again in the future. 2h: Have them reciprocate Science (Data or otherwise) and open source code is a social enterprise built on shared effort, mutual respect, and trust. So ask them to issue a PR against *your* code, too. The more we can stand on each others' shoulders, the farther we will all see. *Hint: Ask them nicely. Maybe offer a cookie or something?* Problem 3: Setting up a bare-bones Python Package Up to this point we've been working on the simplest possible shared code: a single file with all the content. But for most substantial use cases this isn't going to cut it. After all, Python was designed around the idea of namespaces that let you hide away or show code to make writing, maintaining, and versioning code much easier. But to make use of these, we need to deploy the installation tools that Python provides. This is typically called "packaging". In this problem we will take the code you just made and build it into a proper python package that can be installed and then used anywhere. For more background and detail (and the most up-to-date recommendations) see the [Python Packaging Guide](https://packaging.python.org/current/). 3a: Set up a Python package structure for your code First we adjust the structure of your code from Problem 1 to allow it to live in a package structure rather than as a stand-alone ``.py`` file. All you need to do is create a directory, move the ``code.py`` file into that directory, and add a file (can be empty) called ``__init__.py`` into the directory. You'll have to pick a name for the package, which is usually the same as the repo name (although that's not strictly required; a notable exception is e.g. `scikit-learn` vs `sklearn`). *Hint: don't forget to switch back to *your* code repo directory, if you are doing this immediately after Problem 2.*
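If you prefer to do the restructuring from Python rather than the shell, a rough equivalent is sketched below (`yourpkgname` is a placeholder; note that unlike ``git mv`` you would still need to ``git add`` the moved files afterwards):
###Code
# sketch of the same restructuring in pure Python (placeholder package name)
from pathlib import Path
pkg = Path("yourpkgname")
pkg.mkdir(exist_ok=True)                  # create the package directory
Path("code.py").rename(pkg / "code.py")   # move the module into it
(pkg / "__init__.py").touch()             # an (empty) __init__.py marks it as a package
###Output
_____no_output_____
###Markdown
The shell commands in the next cell do the same job and keep git aware of the move.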
###Code
!mkdir <yourpkgname>#complete
!git mv code.py <yourpkgname>#complete
#The "touch" unix command simply creates an empty file if there isn't one already.
#You could also use an editor to create an empty file if you prefer.
!touch <yourpkgname>/__init__.py#complete
###Output
_____no_output_____
###Markdown
3b: Test your package You should now be able to import your package and the code inside it as though it were some installed package like `numpy`, `astropy`, `pandas`, etc.
###Code
from <yourpkgname> import code#complete
#if your code.py has a function called `do_something` as in the example above, you can now run it like:
code.do_something()
###Output
_____no_output_____
###Markdown
3c: Apply packaging tricks One of the nice things about packages is that they let you hide the implementation of some part of your code in one place while exposing a "cleaner" namespace to the users of your package. To see a (trivial) example of this, let's pull a function from your ``code.py`` into the base namespace of the package. In the cell below, make the ``__init__.py`` have one line: ``from .code import do_something``. That places the ``do_something()`` function into the package's root namespace.
###Code
%%file <yourpkgname>/__init__.py
#complete
###Output
_____no_output_____
###Markdown
Now the following should work.
###Code
import <yourpkgname>#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
*BUT* you will probably get an error here. That's because Python is smart about imports: once it's imported a package it won't re-import it later. Usually that saves time, but here it's a hassle. Fortunately, we can use the ``reload`` function to get around this:
###Code
from importlib import reload
reload(<yourpkgname>)#complete
<yourpkgname>.do_something()#complete
###Output
_____no_output_____
###Markdown
3d: Create a setup.py file Ok, that's great in a pinch, but what if you want your package to be available from *other* directories? If you open a new terminal somewhere else and try to ``import <yourpkgname>`` you'll see that it will fail, because Python doesn't know where to find your package. Fortunately, Python (both the language and the larger ecosystem) provides built-in tools to install packages. These are built around creating a ``setup.py`` script that controls installation of a python package into a shared location on your machine. Essentially all Python packages are installed this way, even if it happens silently behind-the-scenes. Below is a template bare-bones setup.py file. Fill it in with the relevant details for your package.
###Code
%%file setup.py
#!/usr/bin/env python
# note: on newer Python versions, "from setuptools import setup" is the preferred drop-in replacement for distutils
from distutils.core import setup
setup(name='<yourpkgname>',
version='0.1dev',
description='<a description>',
author='<your name>',
author_email='<youremail>',
packages=['<yourpkgname>'],
) #complete
###Output
_____no_output_____
###Markdown
3e: Build the package Now you should be able to "build" the package. In complex packages this will involve more involved steps like linking against C or FORTRAN code, but for pure-python packages like yours, it simply involves filtering out some extraneous files and copying the essential pieces into a build directory.
###Code
!python setup.py build
###Output
_____no_output_____
###Markdown
To test that it built successfully, the easiest thing to do is cd into the `build/lib.X-Y-Z` directory ("X-Y-Z" here is OS and machine-specific). Then you should be able to ``import <yourpkgname>``. It's usually best to do this as a completely independent process in python. That way you can be sure you aren't accidentally using an old import as we saw above.
###Code
%%sh
cd build/lib.X-Y-Z #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3f: Install the package Alright, now that it looks like it's all working as expected, we can install the package. Note that if we do this willy-nilly, we'll end up with lots of packages, perhaps with the wrong versions, and it's easy to get confused about what's installed (there's no reliable ``uninstall`` command...) So before installing we first create a virtual environment using Anaconda, and install into that. If you don't have anaconda or a similar virtual environment scheme, you can just do ``python setup.py install``. But just remember that this will be difficult to back out (hence the reason for Python environments in the first place!)
###Code
%%sh
conda create -n test_<yourpkgname> anaconda #complete
source activate test_<yourpkgname> #complete
python setup.py install
###Output
_____no_output_____
###Markdown
Now we can try running the package from *anywhere* (not just the source code directory), as long as we're in the same environment that we installed the package in.
###Code
%%sh
cd $HOME
source activate test_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____
###Markdown
3g: Update the package on github OK, it's now installable. You'll now want to make sure to update the github version to reflect these improvements. You'll need to add and commit all the files. You'll also want to update the README to instruct users that they should use ``python setup.py install`` to install the package.
###Code
!git #complete
###Output
_____no_output_____
###Markdown
Problem 4: Publishing your package on (fake) PyPI Your package can now be installed by anyone who comes across it on github. But it tends to scare some people that they need to download the source code and know ``git`` to use your code. The Python Package Index (PyPI), combined with the ``pip`` tool (now standard in Python), provides a much simpler way to distribute code. Here we will publish your code to a **testing** version of PyPI. 4a: Create a PyPI account First you'll need an account on PyPI to register new packages. Go to the [testing PyPI](https://testpypi.python.org/pypi), and register. You'll also need to supply your login details in the ``.pypirc`` file in your home directory as shown below. (If it were the real PyPI you'd want to be more secure and not have your password in plain text. But for the testing server that's not really an issue.) Note that if you've ever done something like this before and hence already have a `.pypirc` file, you might get unexpected results if you run this without moving/renaming the old version temporarily.
###Code
%%file -a ~/.pypirc
[distutils]
index-servers = pypi
[pypi]
repository = https://test.pypi.org/legacy/
username = <your user name goes here>
password = <your password goes here>
###Output
_____no_output_____
###Markdown
4b: Build a "source" version of your package Use ``distutils`` to create the source distribution of your package. *Hint: You'll want to make sure your package version is something you want to release before executing the upload command. Released versions can't be duplicates of existing versions, and shouldn't end in "dev" or "b" or the like.*
###Code
!python setup.py sdist
###Output
_____no_output_____
###Markdown
Verify that there is a ``<yourpackage>-<version>.tar.gz`` file in the ``dist`` directory. It should have all of the source code necessary for your package. 4c: Upload your package to PyPI Once you have an account on PyPI (or testPyPI in our case) you can upload your distributions to PyPI using ``twine``. If this is your first time uploading a distribution for a new project, twine will handle registering the project automatically, filling out the details you provided in your ``setup.py``.
###Code
!twine upload dist/<yourpackage>-<version>
###Output
_____no_output_____
###Markdown
4d: Install your package with ``pip`` The ``pip`` tool is a convenient way to install packages from PyPI. Again, we use Anaconda to create a testing environment to make sure everything worked correctly. (Normally the ``-i`` wouldn't be necessary - we're using it here only because we're using the "testing" PyPI)
###Code
%%sh
conda create -n test_pypi_<yourpkgname> anaconda #complete
source activate test_pypi_<yourpkgname> #complete
pip install -i https://testpypi.python.org/pypi <yourpkgname>
%%sh
cd $HOME
source activate test_pypi_<yourpkgname> #complete
python -c "import <yourpkgname>;<yourpkgname>.do_something()" #complete
###Output
_____no_output_____ |
_downloads/plot_sharpen.ipynb | ###Markdown
Image sharpening. This example shows how to sharpen an image in a noiseless situation by applying the filter inverse to the blur.
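In terms of the arrays computed below (our notation, not part of the original example), the sharpening step is an unsharp-mask style correction: $$f_{\text{sharp}} = f_{\text{blur}} + \alpha\,\left(f_{\text{blur}} - G_{1} * f_{\text{blur}}\right),$$ where $G_{1}$ denotes a Gaussian filter of width 1 and $\alpha$ controls how strongly the lost detail is added back.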
###Code
import scipy.misc
from scipy import ndimage
import matplotlib.pyplot as plt
# load the example image as floats
f = scipy.misc.face(gray=True).astype(float)
# blur the image, then blur the blurred image a little more
blurred_f = ndimage.gaussian_filter(f, 3)
filter_blurred_f = ndimage.gaussian_filter(blurred_f, 1)
# add back the detail lost between the two blur levels, scaled by alpha
alpha = 30
sharpened = blurred_f + alpha * (blurred_f - filter_blurred_f)
plt.figure(figsize=(12, 4))
plt.subplot(131)
plt.imshow(f, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(132)
plt.imshow(blurred_f, cmap=plt.cm.gray)
plt.axis('off')
plt.subplot(133)
plt.imshow(sharpened, cmap=plt.cm.gray)
plt.axis('off')
plt.tight_layout()
plt.show()
###Output
_____no_output_____ |
Resources/Data_entry/pymaceuticals_starter.ipynb | ###Markdown
Observations and Insights
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
%matplotlib inline
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
study_results.head()
# Combine the data into a single dataset
data_combination = mouse_metadata.merge(study_results, on='Mouse ID', how='left')
# Display the data table for preview
data_combination.head()
# Find mice with duplicate Mouse ID / Timepoint combinations in the combined data
drop_dup_mouse_id = data_combination.loc[data_combination.duplicated(subset=['Mouse ID', 'Timepoint']), 'Mouse ID'].unique()
drop_dup_mouse_id
# Remove those mice from the combined data and from the raw study results
clean_data_combination = data_combination[data_combination['Mouse ID'].isin(drop_dup_mouse_id) == False]
clean_study_results = study_results[study_results['Mouse ID'].isin(drop_dup_mouse_id) == False]
# Show all the rows belonging to the duplicated mice, for inspection
duplicate_mice = data_combination[data_combination['Mouse ID'].isin(drop_dup_mouse_id)]
duplicate_mice.head()
data_combination.shape
#checking the number of mice
data_combination.duplicated().sum()
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
data_combination.duplicated(['Mouse ID', 'Timepoint']).sum()
# Optional: Get all the data for the duplicate mouse ID.
data_combination.duplicated(['Mouse ID'])
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean_mice = data_combination.drop_duplicates(['Mouse ID'])
clean_mice
# Checking the number of mice in the clean DataFrame.
clean_mice.duplicated(['Mouse ID']).sum()
###Output
_____no_output_____
###Markdown
Summary Statistics
###Code
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# This method is the most straightforward: create multiple series and put them all together at the end.
mean = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].mean()
median = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].median()
variance = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].var()
stdv = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].std()
sem = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].sem()
# Put the individual series together into a single summary DataFrame
summarystats = pd.DataFrame({"Mean": mean, "Median": median, "Variance": variance, "Standard Deviation": stdv, "SEM": sem})
summarystats
###Output
_____no_output_____
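###Markdown
The same table can also be produced more compactly with a single groupby and multiple aggregations; the following is an equivalent sketch (the variable name is ours):
###Code
# Same summary statistics in one groupby/agg call
summarystats_agg = data_combination.groupby('Drug Regimen')['Tumor Volume (mm3)'].agg(['mean', 'median', 'var', 'std', 'sem'])
summarystats_agg
###Output
_____no_output_____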
###Markdown
Bar and Pie Charts
###Code
# Data to appear in the bar graph to be generated
datapoint_plot = data_combination.groupby(["Drug Regimen"]).count()["Mouse ID"]
datapoint_plot
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas.
datapoint_plot.plot(kind="bar", figsize=(10,4))
#set chart title
plt.title("Total Number of Mice for each Treatment")
plt.xlabel("Drug Regimen")
plt.ylabel("Total Number")
#show chart and set layout
plt.show()
plt.tight_layout()
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.
# Set x-axis and tick locations
x_axis = np.arange(len(datapoint_plot))
tick_locations = [value for value in x_axis]
# Defining data to be generated
plt.figure(figsize=(10,4))
plt.bar(x_axis, datapoint_plot, color='g', alpha=1, align='center')
plt.xticks(tick_locations, datapoint_plot.index.values, rotation="vertical")
# Setting x and y limit
plt.xlim(-0.75, len(x_axis)-0.25)
plt.ylim(0, max(datapoint_plot)+10)
plt.title("Total Number of Mice for each Treatment")
plt.xlabel("Drug Regimen")
plt.ylabel("Total Number")
plt.legend()
# Generate a pie plot showing the distribution of female versus male mice using pandas
sex_distribution = data_combination.groupby(["Sex"]).count()["Mouse ID"]
sex_distribution
# Pie plot generation
colors = ['yellow', 'green']
explode = (0.05, 0)
panPie_plot = sex_distribution.plot.pie(y='Total Count',figsize=(8,8), colors = colors, startangle=180, explode = explode, autopct="%1.1f%%")
panPie_plot.legend(["Female", "Male"], prop={'size': 15})
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# Set the pie plot to be graphed
colors = ['lightpink', 'purple']
explode = (0.05, 0)
plt.pie(sex_distribution, explode=explode, labels=sex_distribution.index.values, colors=colors, autopct="%1.1f%%", startangle=90)
plt.legend(["Female", "Male"], prop={'size': 15})
plt.rcParams['figure.figsize'] = (8,8)
plt.title('Gender Distribution')
###Output
_____no_output_____
###Markdown
Quartiles, Outliers and Boxplots
###Code
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
mouse_timepoint = data_combination[data_combination["Drug Regimen"].isin(["Capomulin", "Ramicane", "Infubinol", "Ceftamin"])]
mouse_timepoint = mouse_timepoint.sort_values(["Timepoint"], ascending=True)
mouse_timepoint
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
both_dataframe = mouse_timepoint.merge(data_combination, on = ('Mouse ID', 'Timepoint'), how = 'left' )
both_dataframe
# Tumor volume at last timepoint
regimes_data = mouse_timepoint[["Drug Regimen", "Mouse ID", "Timepoint", "Tumor Volume (mm3)"]]
regimes_data
regimes_data.shape
# Select the rows for each drug regimen
capomulin_data = data_combination.loc[data_combination["Drug Regimen"] == "Capomulin",:]
ramicane_data = data_combination.loc[data_combination["Drug Regimen"] == "Ramicane", :]
infubinol_data = data_combination.loc[data_combination["Drug Regimen"] == "Infubinol", :]
ceftamin_data = data_combination.loc[data_combination["Drug Regimen"] == "Ceftamin", :]
capomulin_data
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
capomulin_last = capomulin_data.groupby('Mouse ID').max()['Timepoint']
capomulin_last
capomulin_vol = pd.DataFrame(capomulin_last)
capomulin_vol
capomulin_merge = capomulin_vol.merge(data_combination, on=("Mouse ID", "Timepoint"), how="left")
capomulin_merge
# Calculate the IQR and quantitatively determine if there are any potential outliers.
tuvol = capomulin_merge['Tumor Volume (mm3)']
quartiles = tuvol.quantile([0.25,0.5,0.75])
capomulin_lowerq = quartiles[0.25]
capomulin_upperq = quartiles[0.75]
capomulin_iqr = capomulin_upperq - capomulin_lowerq
print(f' IQR = {capomulin_iqr}')
print(f' Lower Quartile = {capomulin_lowerq}')
print(f' Upper Quartile = {capomulin_upperq}')
capomulin_lower_bound = capomulin_lowerq - 1.5*capomulin_iqr
capomulin_upper_bound = capomulin_upperq + 1.5*capomulin_iqr
print(f' Lower Bound: {capomulin_lower_bound}')
print(f' Upper Bound: {capomulin_upper_bound}')
capomulin_merge.describe()
print(f"Capomulin potential outliers could be values below {capomulin_lower_bound} and above {capomulin_upper_bound} could be outliers.")
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
ramicane_last = ramicane_data.groupby('Mouse ID').max()['Timepoint']
ramicane_last
ramicane_vol = pd.DataFrame(ramicane_last)
ramicane_vol
ramicane_merge = ramicane_vol.merge(data_combination, on=("Mouse ID", "Timepoint"), how="left")
ramicane_merge
# Calculate the IQR and quantitatively determine if there are any potential outliers.
tuvol_1 = ramicane_merge['Tumor Volume (mm3)']
quartiles = tuvol_1.quantile([0.25,0.5,0.75])
ramicane_lowerq = quartiles[0.25]
ramicane_upperq = quartiles[0.75]
ramicane_iqr = ramicane_upperq - ramicane_lowerq
print(f' IQR = {ramicane_iqr}')
print(f' Lower Quartile = {ramicane_lowerq}')
print(f' Upper Quartile = {ramicane_upperq}')
ramicane_lower_bound = ramicane_lowerq - 1.5*ramicane_iqr
ramicane_upper_bound = ramicane_upperq + 1.5*ramicane_iqr
print(f' Lower Bound: {ramicane_lower_bound}')
print(f' Upper Bound: {ramicane_upper_bound}')
ramicane_merge.describe()
print(f"ramicane potential outliers could be values below {ramicane_lower_bound} and above {ramicane_upper_bound} could be outliers.")
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
infubinol_last = infubinol_data.groupby('Mouse ID').max()['Timepoint']
infubinol_last
infubinol_vol = pd.DataFrame(infubinol_last)
infubinol_vol
infubinol_merge = infubinol_vol.merge(data_combination, on=("Mouse ID", "Timepoint"), how="left")
infubinol_merge
# Calculate the IQR and quantitatively determine if there are any potential outliers.
tuvol_2 = infubinol_merge['Tumor Volume (mm3)']
quartiles = tuvol_2.quantile([0.25,0.5,0.75])
infubinol_lowerq = quartiles[0.25]
infubinol_upperq = quartiles[0.75]
infubinol_iqr = infubinol_upperq - infubinol_lowerq
print(f' IQR = {infubinol_iqr}')
print(f' Lower Quartile = {infubinol_lowerq}')
print(f' Upper Quartile = {infubinol_upperq}')
infubinol_lower_bound = infubinol_lowerq - 1.5*infubinol_iqr
infubinol_upper_bound = infubinol_upperq + 1.5*infubinol_iqr
print(f' Lower Bound: {infubinol_lower_bound}')
print(f' Upper Bound: {infubinol_upper_bound}')
infubinol_merge.describe()
print(f"infubinol potential outliers could be values below {infubinol_lower_bound} and above {infubinol_upper_bound} could be outliers.")
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
ceftamin_last = ceftamin_data.groupby('Mouse ID').max()['Timepoint']
ceftamin_last
ceftamin_vol = pd.DataFrame(ceftamin_last)
ceftamin_vol
ceftamin_merge = ceftamin_vol.merge(data_combination, on=("Mouse ID", "Timepoint"), how="left")
ceftamin_merge
# Calculate the IQR and quantitatively determine if there are any potential outliers.
tuvol_3 = ceftamin_merge['Tumor Volume (mm3)']
quartiles = tuvol_3.quantile([0.25,0.5,0.75])
ceftamin_lowerq = quartiles[0.25]
ceftamin_upperq = quartiles[0.75]
ceftamin_iqr = ceftamin_upperq - ceftamin_lowerq
print(f' IQR = {ceftamin_iqr}')
print(f' Lower Quartile = {ceftamin_lowerq}')
print(f' Upper Quartile = {ceftamin_upperq}')
ceftamin_lower_bound = ceftamin_lowerq - 1.5*ceftamin_iqr
ceftamin_upper_bound = ceftamin_upperq + 1.5*ceftamin_iqr
print(f' Lower Bound: {ceftamin_lower_bound}')
print(f' Upper Bound: {ceftamin_upper_bound}')
ceftamin_merge.describe()
print(f"ceftamin potential outliers could be values below {ceftamin_lower_bound} and above {ceftamin_upper_bound} could be outliers.")
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
treatment_plot = [tuvol, tuvol_1, tuvol_2, tuvol_3]
fig1, ax1 = plt.subplots()
ax1.set_title('Final Tumor Volume by Regimen')
ax1.set_ylabel('Final Tumor Volume (mm3)')
ax1.set_xlabel('Drug Regimen')
ax1.boxplot(treatment_plot, labels=["Capomulin","Ramicane","Infubinol","Ceftamin",])
plt.savefig('boxplot')
plt.show()
###Output
_____no_output_____
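###Markdown
As a side note, the four per-regimen blocks above can be collapsed into one loop. The sketch below applies the same quartile/IQR logic to all four treatments (variable names are ours) and rebuilds the list used for the box plot:
###Code
# Sketch: compute final tumor volumes, IQR bounds, and outliers for all four regimens in one loop
treatments = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
final_volumes = []
for drug in treatments:
    drug_data = data_combination[data_combination["Drug Regimen"] == drug]
    last_timepoints = drug_data.groupby("Mouse ID")["Timepoint"].max().reset_index()
    merged = last_timepoints.merge(data_combination, on=["Mouse ID", "Timepoint"], how="left")
    volumes = merged["Tumor Volume (mm3)"]
    final_volumes.append(volumes)
    q1, q3 = volumes.quantile([0.25, 0.75])
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    outliers = volumes[(volumes < lower) | (volumes > upper)]
    print(f"{drug}: IQR={iqr:.2f}, bounds=({lower:.2f}, {upper:.2f}), outliers={len(outliers)}")
###Output
_____no_output_____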
###Markdown
Line and Scatter Plots
###Code
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
line_plot = data_combination.loc[(data_combination["Mouse ID"] == "s710")]
line_plot = line_plot.set_index("Timepoint")
line_plot
#Final plot
line_plot["Tumor Volume (mm3)"].plot(color = "darkorange")
plt.title("Tumor Volume of Mouse s710 Over Time")
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.show()
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
scatter_plot = data_combination.loc[(data_combination["Drug Regimen"] == "Capomulin")]
scatter_plot
scatter_plot_df = scatter_plot.groupby(["Mouse ID"]).mean()
scatter_plot_df
# set x and y value
weight_scatter_plot = scatter_plot_df["Weight (g)"]
volume_scatter_plot = scatter_plot_df["Tumor Volume (mm3)"]
# Plot the graph
plt.scatter(weight_scatter_plot, volume_scatter_plot, color = "darkred")
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title("Weight Versus Average Tumor Volume for Capomulin")
###Output
_____no_output_____
###Markdown
Correlation and Regression
###Code
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
print(f' The correlation coefficient between weight and tumor volume is {round(st.pearsonr(weight_scatter_plot, volume_scatter_plot)[0],2)}')
# Linear regression model
linear_representation = st.linregress(scatter_plot_df['Weight (g)'], scatter_plot_df['Tumor Volume (mm3)'])
linear_representation
# Calculate the linear regression model
slope, intercept, r_value, p_value, std_err = st.linregress(weight_scatter_plot, volume_scatter_plot)
y_value = slope * weight_scatter_plot + intercept
# Use the values computed above to plot the regression line on the scatter plot
scatter_plot = data_combination.loc[(data_combination["Drug Regimen"] == "Capomulin")]
scatter_plot_df = scatter_plot.groupby(["Mouse ID"]).mean()
weight_scatter_plot = scatter_plot_df["Weight (g)"]
volume_scatter_plot = scatter_plot_df["Tumor Volume (mm3)"]
plt.scatter(weight_scatter_plot, volume_scatter_plot, color = "brown")
plt.plot(weight_scatter_plot, y_value, color = "magenta")
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.title("Linear Regression on scatter plot for Capomulin")
plt.show()
###Output
_____no_output_____ |
phd-thesis/benchmarkings/am207-NILM-project-master/fhmm.ipynb | ###Markdown
Factorial Hidden Markov Model In this notebook, we apply the FHMM disaggregation method implemented in the NILMTK package to the REDD dataset. We then make improvements on the NILMTK implementation and compare the results. For data processing, we use the tools available in NILMTK. Factorial hidden Markov models (FHMMs) are generalizations of hidden Markov models (HMMs) in which the hidden state is factored into multiple state variables (Ghahramani and Jordan, 1997). We first describe the disaggregation problem in terms of HMMs, then describe it in the FHMM framework. In this problem, we wish to infer a time series of the hidden states of each appliance in the household. If we only wished to infer the hidden state of one appliance, for example a refrigerator, we could model it as an HMM. The hidden state would be whether the fridge is on or off. The observed state would be the aggregated energy reading for the entire home. $$z_t \rightarrow z_{t+1} \\\hspace{1mm} \downarrow \hspace{10mm} \downarrow \hspace{1mm} \\x_t \hspace{6mm} x_{t+1}$$ where $z$ refers to the hidden state and takes on a discrete value (on/off) and $x$ is the observed state and takes on a continuous value. The emission probability, the probability of making an observation $x_t$ given $z_t$, $P(x_t|z_t)$, can be modeled as a Gaussian. An FHMM is similar to an HMM, but for each observation, instead of having one hidden state, there are multiple hidden states we need to infer. This would be the case if we wanted to infer the status of multiple appliances in the home. We could model this as an HMM with a distinct state for each possible combination of states: fridge on + lights off, fridge on + lights on, etc. Alternatively, we can let the state be represented by a collection of state variables, $$z_t = \left(z^{(1)}_t, z^{(2)}_t, \ldots, z^{(M)}_t\right),$$ each of which can take on an on/off value. In the energy disaggregation problem, the observed state is the sum of the contributions of the different hidden states (Kolter and Jaakkola, 2012). The hidden states are drawn from a multinomial distribution and the transition probability factors as $$P(z_t|z_{t-1}) = \prod_{m=1}^M P(z_t^{(m)}|z_{t-1}^{(m)}).$$ The emissions are Gaussian: $$P(x_t|z_t) = \mathcal{N}\left(\sum_{m=1}^{M} \mu^{(m)}_{z_t^{(m)}},\ \Sigma\right).$$ The additive model is a special case of the general FHMM.
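To make the additive observation model above concrete, here is a tiny illustrative sketch (it is not part of the analysis below, and the appliance power levels are made-up numbers):
###Code
# Illustrative sketch only: simulate the additive FHMM observation model with two appliances.
import numpy as np
rng = np.random.RandomState(0)
T = 10
fridge_states = rng.randint(0, 2, T)       # hidden on/off sequence for appliance 1
microwave_states = rng.randint(0, 2, T)    # hidden on/off sequence for appliance 2
fridge_mu = np.array([0.0, 120.0])         # assumed mean power per state (watts)
microwave_mu = np.array([0.0, 1500.0])     # assumed mean power per state (watts)
# the observed aggregate is the sum of the active per-appliance means plus Gaussian noise
x = fridge_mu[fridge_states] + microwave_mu[microwave_states] + rng.normal(0.0, 10.0, T)
x
###Output
_____no_output_____
###Markdown
Disaggregation is the inverse problem: recovering the hidden per-appliance state sequences from the aggregate signal $x$ alone.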
###Code
from __future__ import print_function, division
import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
%matplotlib inline
rcParams['figure.figsize'] = (13, 6)
plt.style.use('ggplot')
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate import fhmm_exact
###Output
/Users/kyu/Google Drive/AM207project/nilmtk/nilmtk/node.py:2: ImportWarning: Not importing directory 'nilm_metadata': missing __init__.py
from nilm_metadata import recursively_update_dict
###Markdown
Read in data and do data processingRead in data and separate into training and testing sets using functions available in NILMTK.
###Code
train = DataSet('redd.h5')
test = DataSet('redd.h5')
# we do not use the 4th house because it does not contain a fridge
training_houses = [1,2,3]
test_houses = 5
test_elec = test.buildings[test_houses].elec
# appliances
appliances = ['fridge', 'microwave']
###Output
_____no_output_____
###Markdown
We train with only the k=3 top devices. However, we find that the top devices are not the same for each house. We decided to use fridge, sockets, and light because they are the most common appliances to occur in the top 3 and are present in both the training and test set. The appliances have different numbers for each home, presenting difficulties for using the disaggregation algorithms across different houses. We manually set the appliance numbers of the test set to deal with this problem. FHMM implementation in NILMTK The NILMTK implementation takes the approach of expanding the HMM model state space to have every combination of states of every appliance (e.g. fridge on + lights off, fridge on + lights on, etc.). In the NILMTK implementation, we use the train_across_buildings function in the FHMM class. The function takes in the training dataset, and we give it a list of houses and appliances we want to train on. The code loops through each appliance, then each building, and checks that the on/off difference is larger than a preset value. The data from all buildings for a particular appliance is modeled as an HMM with two hidden states (on/off) and Gaussian emissions. The package hmmlearn is used to fit the means for the two states. hmmlearn uses EM to fit the parameters. This is done for every appliance, so that for every appliance we have an HMM. The parameters for each appliance are then combined into an FHMM by taking every combination of the possible states of every appliance and summing the power means for each combined state. To perform disaggregation, the predict function of hmmlearn is used, which finds the most likely state sequence that corresponds to a set of observations. hmmlearn has two options for the decoder algorithm, Viterbi and MAP, with Viterbi being the default.
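To make the state-combination step concrete, here is a small sketch (not the NILMTK code itself; the power values are made up) of how two 2-state appliance models yield a 4-state factorial model:
###Code
# Illustrative sketch only: every combination of appliance states becomes one combined
# state whose mean power is the sum of the per-appliance means.
import itertools
fridge_means = {"off": 0.0, "on": 120.0}       # made-up example values (watts)
microwave_means = {"off": 0.0, "on": 1500.0}   # made-up example values (watts)
combined_means = {
    (f, m): fridge_means[f] + microwave_means[m]
    for f, m in itertools.product(fridge_means, microwave_means)
}
combined_means
###Output
_____no_output_____
###Markdown
With M appliances of K states each, the combined model has K^M states, which is why exact FHMM inference becomes expensive as more appliances are added.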
###Code
# initialized FHMM model
fhmm = fhmm_exact.FHMM()
# train model on training houses. We downsample to a period of 10s.
fhmm.train_across_buildings(train, training_houses, appliances, min_activation=0.001, sample_period=10)
# name of file to save disaggregated output to
disag_filename = 'redd-disag-fhmm_exact.h5'
output = HDFDataStore(disag_filename, 'w')
# perform disaggregation
fhmm.disaggregate_across_buildings(test, output, [test_houses], sample_period=10)
output.close()
# Read disaggreated data from the file we just save to
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[test_houses].elec
# make plots of f1 score
from nilmtk.metrics import f1_score, accuracy_score, recall_score, precision_score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
print(f1_fhmm)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");
# make plot of accuracy metric
accuracy_fhmm = accuracy_score(disag_fhmm_elec, test_elec)
print(accuracy_fhmm)
accuracy_fhmm.index = disag_fhmm_elec.get_labels(accuracy_fhmm.index)
accuracy_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('accuracy-score');
plt.title("FHMM");
# make plot of recall metric
recall_fhmm = recall_score(disag_fhmm_elec, test_elec)
print(recall_fhmm)
recall_fhmm.index = disag_fhmm_elec.get_labels(recall_fhmm.index)
recall_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('recall-score');
plt.title("FHMM");
# make plot of precision metric
precision_fhmm = precision_score(disag_fhmm_elec, test_elec)
print(precision_fhmm)
precision_fhmm.index = disag_fhmm_elec.get_labels(precision_fhmm.index)
precision_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('recall-score');
plt.title("FHMM");
# make plot of actual signal and inferred signal for fridge
a = disag_fhmm_elec['fridge'].plot()
b = test.buildings[test_houses].elec['fridge'].plot()
plt.legend([a, b], ['model', 'truth'])
plt.show()
###Output
_____no_output_____
###Markdown
Effect of number of hidden states We modify the train_across_buildings method to allow for a user-specified number of hidden states. We test 2, 3, and 4 hidden states. The third and fourth states account for levels intermediate between the on and off states, i.e. when the device is operating at a lower energy state.
###Code
# initialize FHMM model
fhmm3 = fhmm_exact.FHMM()
# train model
fhmm3.train_across_buildings(train, training_houses, appliances, min_activation=0.001, number_of_states=3, sample_period=10)
# run disaggregation and save to output
disag_filename = 'redd-disag-fhmm_3.h5'
output = HDFDataStore(disag_filename, 'w')
fhmm3.disaggregate_across_buildings(test, output, [test_houses], sample_period=10)
output.close()
# Read disaggreated data
disag_fhmm3 = DataSet(disag_filename)
disag_fhmm3_elec = disag_fhmm3.buildings[test_houses].elec
# compute f1 score
f1_fhmm3 = f1_score(disag_fhmm3_elec, test_elec)
f1_fhmm3.index = disag_fhmm3_elec.get_labels(f1_fhmm3.index)
print(f1_fhmm3)
# repeat with 4 hidden states
# initialize FHMM model
fhmm4 = fhmm_exact.FHMM()
# train model
fhmm4.train_across_buildings(train, training_houses, appliances, min_activation=0.001, number_of_states=4, sample_period=10)
# run disaggregation and save to output
disag_filename = 'redd-disag-fhmm_4.h5'
output = HDFDataStore(disag_filename, 'w')
fhmm4.disaggregate_across_buildings(test, output, [test_houses], sample_period=10)
output.close()
# Read disaggreated data
disag_fhmm4 = DataSet(disag_filename)
disag_fhmm4_elec = disag_fhmm4.buildings[test_houses].elec
# compute f1 score
f1_fhmm4 = f1_score(disag_fhmm4_elec, test_elec)
f1_fhmm4.index = disag_fhmm3_elec.get_labels(f1_fhmm4.index)
print(f1_fhmm4)
# make comparison figure
fig = plt.gcf()
fig.set_size_inches(5.5, 3.5)
bar2 = plt.bar([1,2], f1_fhmm.values, [0.2, 0.2], label='2 hidden states', color='b')
bar3 = plt.bar([1.2,2.2], f1_fhmm3.values, [0.2, 0.2], label='3 hidden states', color='r')
bar4 = plt.bar([1.4,2.4], f1_fhmm4.values, [0.2, 0.2], label='4 hidden states', color='green')
plt.ylabel('f-score')
plt.title('Effect of number of hidden states of FHMM')
plt.xticks([1.3, 2.3], ('Microwave', 'Fridge'))
plt.legend(loc=2)
plt.savefig('num_states.pdf')
###Output
_____no_output_____
###Markdown
Improved FHMM implementation: using Gaussian mixtures as emissions The emission probabilities in the NILMTK implementation are Gaussian. This means that, given a certain hidden state, the distribution of the meter reading is modeled as a Gaussian. An alternative is to model the emissions as a Gaussian mixture. This has the potential to improve the model because we are only inferring the hidden states of two appliances, but the mains reading will include appliances that the model is not trained on. So, for example, when the fridge is on, the observed signal can be multi-modal.
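As a rough sketch of the idea (our notation, not NILMTK's), a $K$-component mixture emission for a given hidden state $z_t$ has the form $$P(x_t|z_t) = \sum_{k=1}^{K} w_k\, \mathcal{N}\left(x_t;\ \mu_k, \sigma_k^2\right), \qquad \sum_{k=1}^{K} w_k = 1,$$ so the observed mains reading can be multi-modal even while the modeled appliances stay in a single state.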
###Code
from nilmtk.disaggregate import fhmm_improved
reload(fhmm_improved)
fhmm2 = fhmm_improved.FHMM()
# Note that we have given the sample period to downsample the data to 1 minute
fhmm2.train_across_buildings(train, training_houses, ['fridge', 'microwave'], sample_period=60, min_activation=0.001)
output.close()
disag_filename = 'redd-disag-fhmm_gmm2.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
fhmm2.disaggregate_across_buildings(test, output, [test_houses], sample_period=60)
output.close()
# Read disaggreated data
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[test_houses].elec
# make plots of f1 score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
print(f1_fhmm)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");
reload(fhmm_exact)
fhmm4 = fhmm_exact.FHMM()
# Note that we have given the sample period to downsample the data to 1 minute
fhmm4.train_across_buildings(train, training_houses, appliances, min_activation=0.001, number_of_states=4, sample_period=60)
disag_filename = 'redd-disag-fhmm_4.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
fhmm4.disaggregate_across_buildings(test, output, [test_houses], sample_period=60)
output.close()
# Read disaggreated data
disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[test_houses].elec
# make plots of f1 score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
print(f1_fhmm)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");
###Output
3 0.126807
18 0.542834
dtype: float64
|
notebooks/z00_Data_prep/00-mc-prep_geolink_norge_dataset.ipynb | ###Markdown
Note: download the data from https://drive.google.com/drive/folders/1EgDN57LDuvlZAwr5-eHWB5CTJ7K9HpDP Credit to this repo: https://github.com/LukasMosser/geolink_dataset Data Disclaimer: All the data serving as an input to these notebooks was generously donated by GEOLINK and is CC-by-SA 4.0. If you use their data please reference their dataset properly to give them credit for their contribution.
###Code
%reload_ext autoreload
%autoreload 2
import lasio
import matplotlib.pyplot as plt
%matplotlib inline
import os
from tqdm.auto import tqdm
import pandas as pd
import geopandas as gpd
import numpy as np
from pathlib import Path
from sklearn import preprocessing
from operator import itemgetter
###Output
_____no_output_____
###Markdown
Input and output directories
###Code
data_locations = Path(
"../../data/raw/geolink_dataset/GEOLINK North sea wells with Lithology interpretation/GEOLINK_Lithology and wells NORTH SEA"
)
data_locations_wellheads = Path("../../data/raw/geolink_dataset/norge_well_heads")
interim_locations = Path("../../data/processed/geolink_norge_dataset/")
interim_locations2 = Path("../../data/interim/geolink_norge_dataset/")
###Output
_____no_output_____
###Markdown
load and save as parquet
###Code
df_lithology = pd.read_excel(data_locations / "../Lithology code data.xlsx", header=1)[
:-1
]
df_lithology["Abbreviation"] = pd.to_numeric(df_lithology["Abbreviation"])
df_lithology.to_parquet(
interim_locations / "geolink_norge_lithology.parquet", compression="gzip"
)
df_lithology
# TODO rename well heads
df_well_tops = pd.concat(
[
pd.read_csv(data_locations_wellheads / "wellbore_exploration_all.csv"),
pd.read_csv(data_locations_wellheads / "wellbore_development_all.csv"),
pd.read_csv(data_locations_wellheads / "wellbore_other_all.csv"),
]
)
df_well_tops["wlbWellboreName_geolink"] = df_well_tops["wlbWellboreName"].str.replace(
"/", "_"
)
# add dates
date_cols = ["wlbEntryDate", "wlbCompletionDate"]
for c in date_cols:
df_well_tops[c] = pd.to_datetime(df_well_tops[c]) # .astype('str')
df_well_tops["wlbNsDecDeg"] = df_well_tops["wlbNsDecDeg"].replace(0, np.nan)
df_well_tops["wlbEwDesDeg"] = df_well_tops["wlbEwDesDeg"].replace(0, np.nan)
a = set(df_well_tops.columns)
df_well_tops = df_well_tops.dropna(axis=1, thresh=0.9 * len(df_well_tops))
b = set(df_well_tops.columns)
print("removed", a - b)
# make into geodataframe
df_well_tops = gpd.GeoDataFrame(
df_well_tops,
geometry=gpd.points_from_xy(df_well_tops.wlbEwDesDeg, df_well_tops.wlbNsDecDeg),
)
df_well_tops
###Output
removed {'wlbNpdidWellboreReclass', 'wlbField', 'fclNpdidFacilityDrilling', 'wlbPluggedDate', 'wlbPlotSymbol', 'wlbProductionFacility', 'wlbDiskosWellboreType', 'wlbNamePart5', 'wlbDiskosWellboreParent', 'wlbPluggedAbandonDate', 'wlbAgeWithHc1', 'wlbFormationWithHc2', 'fclNpdidFacilityProducing', 'wlbSeismicLocation', 'wlbReclassFromWellbore', 'prlNpdidProdLicenceTarget', 'wlbReleasedDate', 'wlbDiscovery', 'wlbLicensingActivity', 'wlbWdssQcDate', 'wlbNamePart6', 'wlbFactMapUrl', 'wlbContentPlanned', 'wlbFacilityTypeDrilling', 'wlbDrillingFacilityFixedOrMoveable', 'wlbStatus', 'wlbLicenceTargetName', 'wlbKickOffPoint', 'wlbAgeAtTd', 'wlbEntryPreDrillDate', 'dscNpdidDiscovery', 'wlbAgeWithHc3', 'wlbNpdidSiteSurvey', 'wlbPressReleaseUrl', 'wlbDateReclass', 'wlbFormationWithHc3', 'wlbDateUpdatedMax', 'wlbFinalVerticalDepth', 'prlNpdidProductionLicence', 'wlbBottomHoleTemperature', 'wlbFormationAtTd', 'wlbMultilateral', 'wlbContent', 'fldNpdidField', 'wlbAgeWithHc2', 'wlbCompPreDrillDate', 'wlbSubSea', 'wlbReentryExplorationActivity', 'wlbFormationWithHc1', 'wlbMaxInclation', 'wlbSiteSurvey', 'wlbDrillingDays', 'wlbReentry', 'wlbPurposePlanned', 'wlbDiscoveryWellbore'}
###Markdown
Las files We can now proceed to import these files as las files and get their dataframes and hopefully put them into a data format that is more suited for ML tasks.
###Code
if not (interim_locations2 / "geolink_norge_well_logs_raw.parquet").exists():
# load las files
well_dataframes = []
files = sorted(data_locations.glob("*.las"))
for f in tqdm(files):
df = lasio.read(f).df()
df["Well"] = f.stem
well_dataframes.append(df)
df_all = pd.concat(well_dataframes)
df_all["Well"] = df_all["Well"].astype("category")
# Name lithology
litho_dict = df_lithology.set_index("Abbreviation")["Lithology"].to_dict()
df_all["LITHOLOGY_GEOLINK"] = (
df_all["LITHOLOGY_GEOLINK"].replace(litho_dict).astype("category")
)
# unique index
df_all = df_all.reset_index() # .set_index(['Well', 'DEPT'])
df_all.to_parquet(
interim_locations2 / "geolink_norge_well_logs_raw.parquet", compression="gzip"
)
df_all = pd.read_parquet(interim_locations2 / "geolink_norge_well_logs_raw.parquet")
df_all
###Output
_____no_output_____
###Markdown
Clean las files
###Code
# Clean.
# must have well head
df_all_clean2 = df_all[
df_all.Well.apply(lambda s: s in set(df_well_tops["wlbWellboreName_geolink"]))
]
# must have lithology
df_all_clean2 = df_all_clean2.dropna(subset=["LITHOLOGY_GEOLINK"])
print("nans", df_all_clean2.isna().mean().sort_values())
# Keep /cols logs that are present>thresh of the time
df_all_clean1 = df_all_clean2.dropna(axis=1, thresh=0.6 * len(df_all_clean2))
print('kept {:%} cols'.format(len(df_all_clean1.columns) / len(df_all_clean2.columns)))
# print("nans", df_all_clean1.isna().mean().sort_values())
# Drop rows with any Nan's
df_all_clean = df_all_clean1.dropna(axis=0, how='any')
print('kept {:%} rows'.format(len(df_all_clean) / len(df_all_clean2)))
df_all_clean
df_all_clean.dropna().Well.value_counts()
df_all_clean[df_all_clean['LITHOLOGY_GEOLINK']=='Marlstone'].Well.value_counts()
# 15_9-12
from deep_ml_curriculum.visualization.well_log import plot_facies, plot_well
well_name="30_4-1"
logs = df_all_clean[df_all_clean2.Well==well_name]
facies = logs['LITHOLOGY_GEOLINK'].astype('category').values
plot_well(well_name,
logs,
facies)
from deep_ml_curriculum.visualization.well_log import plot_facies, plot_well
well_name="30_6-11"
logs = df_all_clean[df_all_clean2.Well==well_name]
facies = logs['LITHOLOGY_GEOLINK'].astype('category').values
plot_well(well_name,
logs,
facies)
# Split by well name
# wells_val = [
# "35_11-1",
# "35_11-10",
# "35_11-11",
# "35_11-12",
# "35_11-13",
# "35_11-15 S",
# "35_11-2",
# "35_11-5",
# "35_11-6",
# "35_11-7",
# "35_12-1",
# ]
wells_test = [
"34_10-12",
"34_10-16 R",
"34_10-17",
"34_10-19",
"34_10-21",
"34_10-23",
"34_10-33",
"34_10-35",
"34_10-5",
"34_10-7",
"34_11-1",
"34_11-2 S",
"34_11-3 T2",
]
df_all_clean_test = df_all_clean[df_all_clean.Well.apply(lambda s: s in wells_test)]
df_all_clean_train = df_all_clean[
df_all_clean.Well.apply(lambda s: (s not in wells_test))
]
# assert len(set(df_all_clean_val.Well).intersection(set(df_all_clean_train))) == 0
assert len(set(df_all_clean_test.Well).intersection(set(df_all_clean_train))) == 0
# assert len(set(df_all_clean_test.Well).intersection(set(df_all_clean_val))) == 0
len(df_all_clean_train), len(df_all_clean_test)
df_all_clean_train.to_parquet(
interim_locations / "geolink_norge_well_logs_train.parquet", compression="gzip"
)
df_all_clean_test.to_parquet(
interim_locations / "geolink_norge_well_logs_test.parquet", compression="gzip"
)
# df_all_clean_val.to_parquet(
# interim_locations / "geolink_norge_well_logs_val.parquet", compression="gzip"
# )
df_all_clean
###Output
_____no_output_____
###Markdown
Others
###Code
df_picks = pd.read_excel(
data_locations / "../NPD stratigraphic picks north sea.xlsx", header=0
)
df_picks.to_parquet(
interim_locations / "geolink_norge_picks.parquet", compression="gzip"
)
df_picks
###Output
_____no_output_____
###Markdown
Well heads part 2
###Code
# only wells we use
a = sorted(df_all.Well.unique())
df_well_tops = df_well_tops[
df_well_tops["wlbWellboreName_geolink"].apply(lambda s: s in a)
]
df_well_tops.to_file(interim_locations / "norge_well_tops.gpkg", driver="GPKG")
###Output
_____no_output_____
###Markdown
Example Load
###Code
# Test load
df_all_clean2 = pd.read_parquet(
interim_locations / "geolink_norge_well_logs_train.parquet"
) # .set_index(['Well', 'DEPT'])
df_well_tops = gpd.read_file(interim_locations / "norge_well_tops.gpkg")
df_well_tops_minimal = df_well_tops[
[
"wlbWellboreName_geolink",
"wlbCompletionYear",
"wlbKellyBushElevation",
"wlbCompletionDate",
"wlbTotalDepth",
"geometry",
]
]
df_well_tops.plot()
# Merge well tops and well logs, a selection
df_all_clean3 = pd.merge(
left=df_all_clean2.sample(1000),
right=df_well_tops_minimal,
left_on="Well",
right_on="wlbWellboreName_geolink",
how="left",
).drop(columns="wlbWellboreName_geolink")
df_all_clean3 = df_all_clean3.set_index(['Well', 'DEPT'])
df_all_clean3 = gpd.GeoDataFrame(df_all_clean3, geometry=df_all_clean3['geometry'])
df_all_clean3.plot()
# df_all_clean3
df_picks = pd.read_parquet(interim_locations / "geolink_norge_picks.parquet")
df_picks
df_all_clean = pd.read_parquet(
interim_locations / "geolink_norge_well_logs_train.parquet"
).set_index(["Well", "DEPT"])
df_all_clean
###Output
_____no_output_____
###Markdown
Example plot
###Code
df_all_clean = pd.read_parquet(
interim_locations / "geolink_norge_well_logs_train.parquet"
).set_index(["Well", "DEPT"])
df_all_clean['DEPT'] = df_all_clean.index.get_level_values(1)
df_all_clean
# logs
from deep_ml_curriculum.visualization.well_log import plot_facies, plot_well
well_name="30_4-1"
logs = df_all_clean.xs(well_name)
facies = logs['LITHOLOGY_GEOLINK'].astype('category').values
plot_well(well_name,
logs,
facies)
plt.figure(figsize=(1,8))
plot_facies(facies, plt.gca(), colorbar=False)
###Output
_____no_output_____
###Markdown
Reindex depth and convert to Xarray. This lets us include location easily without using much more space.
###Code
# Load some
df_all_clean1 = pd.read_parquet(
interim_locations / "geolink_norge_well_logs_test.parquet"
).set_index(['Well', 'DEPT'])
df_all_clean1['Depth'] = df_all_clean1.index.get_level_values(1)
df_all_clean1['split'] = 'test'
# Load some
df_all_clean2 = pd.read_parquet(
interim_locations / "geolink_norge_well_logs_train.parquet"
).set_index(['Well', 'DEPT'])
df_all_clean2['Depth'] = df_all_clean2.index.get_level_values(1)
df_all_clean2['split'] = 'train'
# # Load some
# df_all_clean3 = pd.read_parquet(
# interim_locations / "geolink_norge_well_logs_val.parquet"
# ).set_index(['Well', 'DEPT'])
# df_all_clean3['Depth'] = df_all_clean3.index.get_level_values(1)
# df_all_clean3['split'] = 'val'
df_all = pd.concat([df_all_clean1, df_all_clean2])
df_all
df_well_tops = gpd.read_file(interim_locations / "norge_well_tops.gpkg")
df_well_tops_minimal = df_well_tops[
[
"wlbWellboreName_geolink",
"wlbCompletionYear",
"wlbKellyBushElevation",
"wlbCompletionDate",
"wlbTotalDepth",
"geometry",
]
].copy()
df_well_tops_minimal['xc'] = df_well_tops_minimal.geometry.x
df_well_tops_minimal['yc'] = df_well_tops_minimal.geometry.y
df_well_tops_minimal
nidx = np.arange(400, 5500, 0.15)
def reindex(x):
"""Reindex each well to 15cm"""
if len(x)==0: return None
x = x.reset_index().set_index('DEPT')
x = x.reindex(nidx, method='nearest', limit=1).drop(columns=['Well']).sort_index()
return x
# return x.reset_index().set_index(['Well', 'DEPT'])
df_all3 = df_all.groupby(level=0).apply(reindex).dropna()
df_all3
import xarray as xr
xr_all_clean2 = df_all3.to_xarray()
xr_all_clean2
xr_wells = df_well_tops_minimal.rename(columns={'wlbWellboreName_geolink':'Well'}).set_index('Well').to_xarray()
xr_wells
xr_all = xr.merge(
[xr_all_clean2, xr_wells],
join='left')
xr_all2 = xr_all.sortby(['Well', 'DEPT'])
xr_all2
well_name="30_4-1"
logs = xr_all2.sel(Well=well_name).to_dataframe().dropna()
logs['DEPT'] = logs['Depth']
facies = logs['LITHOLOGY_GEOLINK'].astype('category').values
plot_well(well_name, logs, facies)
logs
from deep_ml_curriculum.visualization.well_log import plot_facies, plot_well
well_name="30_4-1"
logs = df_all_clean.xs(well_name)
facies = logs['LITHOLOGY_GEOLINK'].astype('category').values
plot_well(well_name,
logs,
facies)
logs
def dset_to_nc(dset, f, engine="netcdf4", compression={"zlib": True}):
if isinstance(dset, xr.DataArray):
dset = dset.to_dataset(name="data")
encoding = {k: {"zlib": True} for k in dset.data_vars}
print('saving to {}'.format(f))
dset.to_netcdf(f, engine=engine, encoding=encoding)
print('Wrote {}.nc size={} M'.format(f.stem, f.stat().st_size / 1000000.0))
dset_to_nc(dset=xr_all.drop(['geometry']),
f=interim_locations/'geolink_norge_well_logs.h5')
import os, shutil
def get_dir_size(start_path="."):
total_size = 0
for dirpath, dirnames, filenames in os.walk(start_path):
for f in filenames:
fp = os.path.join(dirpath, f)
total_size += os.path.getsize(fp)
return total_size
def dset_to_zarr(dset, f):
if isinstance(dset, xr.DataArray):
dset = dset.to_dataset(name="data")
encoding = {k: {"zlib": True} for k in dset.data_vars}
print('saving to {}'.format(f))
if f.exists():
try:
return xr.open_zarr(f)
except:
shutil.rmtree(f)
dset.to_zarr(str(f))
print('{}.zarr size={} M'.format(f.stem, get_dir_size(str(f)) / 1000000.0))
dset_to_zarr(dset=xr_all.drop(['geometry']),
f=interim_locations/'geolink_norge_well_logs.zarr')
###Output
saving to ../../data/processed/geolink_norge_dataset/geolink_norge_well_logs.zarr
geolink_norge_well_logs.zarr size=49.057065 M
###Markdown
Plot map
###Code
import matplotlib.pyplot as plt
%matplotlib inline
import os
from tqdm.auto import tqdm
import pandas as pd
import geopandas as gpd
import numpy as np
# import pandas as pd
# import xarray as xr
# xf = xr.open_zarr("../../data/processed/geolink_norge_dataset/geolink_norge_well_logs.zarr")
# df = xf.to_dataframe().swaplevel().sample(1000)
# df['LITHOLOGY_GEOLINK'] = df['LITHOLOGY_GEOLINK'].astype('category')
# df['Well'] = df.index.get_level_values(0).astype('category')
# df['DEPT'] = df.index.get_level_values(1)
# feature_cols = ['CALI', 'DTC', 'GR', 'RDEP', 'RHOB',
# 'RMED', 'xc', 'yc', 'DEPT']
# df = df.dropna(how='any', subset=feature_cols+['LITHOLOGY_GEOLINK'])
# df = df.sort_index()
# import geopandas as gpd
# gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.xc, df.yc))
# gdf = gdf.set_crs(epsg=4326).to_crs(epsg=3857)
# gdf.plot()
###Output
_____no_output_____
###Markdown
Plot contextily
###Code
from pathlib import Path
interim_locations = Path("../../data/processed/geolink_norge_dataset/")
df_well_tops = gpd.read_file(interim_locations / "norge_well_tops.gpkg").set_crs(epsg=4326).to_crs(epsg=3857)#.head(40)
# df_well_tops.plot()
import contextily as ctx
ax = df_well_tops.plot(figsize=(18, 18), edgecolor='k')
ctx.add_basemap(ax, url=ctx.providers.Esri.OceanBasemap, zoom=8)
# Plot every 5th
df_well_tops[::5].apply(lambda x:
ax.annotate(
s=x.wlbWellboreName,
xy=x.geometry.centroid.coords[0],
ha='left',
c='white',
), axis=1);
ax = df_well_tops.plot(figsize=(18, 18), edgecolor='k')
# ctx.add_basemap(ax, url=ctx.providers.Esri.OceanBasemap)
ctx.add_basemap(ax,
crs=df_well_tops.crs.to_string(),
source=ctx.providers.Stamen.Watercolor
)
# Plot every 5th
df_well_tops[::5].apply(lambda x:
ax.annotate(
s=x.wlbWellboreName,
xy=x.geometry.centroid.coords[0],
ha='left',
c='white'
), axis=1);
west, south, east, north = bbox = df_well_tops.total_bounds
img, ext = ctx.bounds2raster(west,
south,
east,
north,
"world_watercolor.tif",
source=ctx.providers.Stamen.Watercolor,
ll=True,
zoom=8
)
west, south, east, north = bbox = df_well_tops.total_bounds
img, ext = ctx.bounds2raster(west,
south,
east,
north,
"oceanesri.tif",
source=ctx.providers.Esri.OceanBasemap,
ll=True,
zoom=8
)
ctx.bounds2raster?
###Output
_____no_output_____ |
notebooks_completos/Clase2a_NumPy_intro.ipynb | ###Markdown
Class 2a: Introduction to NumPy _So far we have seen the most basic data types Python offers: integer, real, complex, boolean, list, tuple... But aren't you missing something? Indeed: __arrays__. __In this class we will dive into the NumPy package: we will see how arrays improve the efficiency of our code, and we will learn how to create them and operate on them_. What is an array? An array is a __block of memory that contains elements of the same type__. Basically:* they _remind_ us of vectors, matrices, tensors...* we can store the array under a name and access its __elements__ through their __indices__.* they help manage memory efficiently and speed up computations.---| Index | 0 | 1 | 2 | 3 | ... | n-1 | n || ---------- | :---: | :---: | :---: | :---: | :---: | :---: | :---: || Value | 2.1 | 3.6 | 7.8 | 1.5 | ... | 5.4 | 6.3 |---__What do we usually store in arrays?__* Vectors and matrices.* Experimental data: - at discrete time instants. - at different points in space.* The result of evaluating functions on the data above.* Discretizations for algorithms such as integration, differentiation, interpolation...* ... What is NumPy? NumPy is a fundamental package for scientific programming that __provides an array-type object__ to store data efficiently and a set of __functions__ to operate on and manipulate that data. To use NumPy, the first thing we need to do is import it:
###Code
import numpy as np
# to see which version we have installed:
np.__version__
###Output
_____no_output_____
###Markdown
Our first array Didn't we say Python was easy? Well then, __let's create our first arrays__:
###Code
import numpy as np
# One-dimensional array
mi_primer_array = np.array([1, 2, 3, 4])
mi_primer_array
# We can use print
print(mi_primer_array)
# Check the type of mi_primer_array
type(mi_primer_array)
# Check the data type it contains
mi_primer_array.dtype
###Output
_____no_output_____
###Markdown
One-dimensional arrays are created by passing a list as an argument to the `np.array` function. To create a two-dimensional array we will pass it a _list of lists_:
###Code
# Two-dimensional array
mi_segundo_array = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
###Output
_____no_output_____
###Markdown
We can continue on the next line using `\`, but it is not necessary when writing inside parentheses or brackets. This would be a good way to define it, according to [PEP 8 (indentation)](http://legacy.python.org/dev/peps/pep-0008/indentation):
###Code
mi_segundo_array = np.array([
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
])
###Output
_____no_output_____
###Markdown
NumPy functions and constants We said that NumPy also includes __functions__. A simple example:
###Code
# Sum
np.sum(mi_primer_array)
# Maximum
np.max(mi_primer_array)
# Sine
np.sin(mi_segundo_array)
###Output
_____no_output_____
###Markdown
And some __constants__ we may need:
###Code
np.pi, np.e
###Output
_____no_output_____
###Markdown
Characteristics of NumPy arrays The array-type object provided by NumPy (Python already has an array type that can store elements of the same type, but it does not provide all the mathematical machinery needed to perform operations quickly and efficiently) is characterized by: 1) Type homogeneity: Let's start by seeing what happens with __lists__:
###Code
lista = [ 1, 1+2j, True, 'aerodinamica', [1, 2, 3] ]
lista
###Output
_____no_output_____
###Markdown
In the case of __arrays__:
###Code
array = np.array([ 1, 1+2j, True, 'aerodinamica'])
array
###Output
_____no_output_____
###Markdown
__Everything OK? Well, no__. While in the list each element keeps its own type, in the array all elements must have the same type, and NumPy has decided that they are all going to be strings. 2) Fixed size at creation time: __Don't worry!__ the memory __allocations__ are automatic... As in the previous case, let's start with the __list__:
###Code
print(id(lista))
lista.append('fluidos')
print(lista)
print(id(lista))
print(id(array))
array = np.append(array, 'fluidos')
print(array)
print(id(array))
###Output
139998351447504
['1' '(1+2j)' 'True' 'aerodinamica' 'fluidos']
139998351448304
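###Markdown
A small added sketch (not in the original lesson): if you want to control the element type instead of letting NumPy pick one, you can pass `dtype` explicitly; and since `np.append` always returns a copy, preallocating and filling is usually the better pattern when building an array in a loop.
###Code
# Illustrative sketch: choosing the dtype explicitly instead of letting NumPy guess
as_int = np.array([1, 2, 3])                    # dtype inferred as integer
as_float = np.array([1, 2, 3], dtype=float)     # forced to float64
as_complex = np.array([1, 2, 3], dtype=complex) # forced to complex128
print(as_int.dtype, as_float.dtype, as_complex.dtype)
# np.append copies the whole array each call, so preallocate and fill instead
prealloc = np.zeros(5)
for i in range(5):
    prealloc[i] = i**2
print(prealloc)
###Output
_____no_output_____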
###Markdown
If we check the help of the `np.append` function by running `help(np.append)` in a cell, we can read: Returns ------- append : ndarray A copy of `arr` with `values` appended to `axis`. Note that `append` does not occur in-place: a new array is allocated and filled. If `axis` is None, `out` is a flattened array. 3) Efficiency So far arrays have looked rather less flexible than lists, so let's forget the last 10 minutes and always use lists... right? Not at all! Arrays manage memory much more efficiently, which improves performance. Let's now look at execution speed using the _magic function_ `%%timeit`, which, placed at the top of a cell, reports how long the cell takes to run.
###Code
lista = list(range(0,100000))
type(lista)
%%timeit
sum(lista)
array = np.arange(0, 100000)
%%timeit
np.sum(array)
###Output
10000 loops, best of 3: 98.2 µs per loop
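###Markdown
As an added aside (not part of the original benchmark), the same comparison can be made with an explicit Python loop and `time.perf_counter`; the exact numbers depend on your machine, but the loop is typically far slower.
###Code
# Rough illustrative timing of a Python loop vs. the vectorised NumPy call
import time
data = np.arange(100000)
t0 = time.perf_counter()
total = 0
for x in data:            # explicit Python loop
    total += x
t_loop = time.perf_counter() - t0
t0 = time.perf_counter()
total_np = np.sum(data)   # vectorised reduction in compiled code
t_np = time.perf_counter() - t0
print(total == total_np, t_loop, t_np)
###Output
_____no_output_____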
###Markdown
As you can see, the improvement here is about two orders of magnitude. __NumPy offers functions that run at practically compiled-language speed (Fortran, C, C++), while we write much less code at a higher level of abstraction__. With a handful of good practices, our Python code can compete in speed. For the cases where that is not possible, there are tools that let us call code written in other languages from Python, such as [f2py](http://docs.scipy.org/doc/numpy-dev/f2py/). This topic may be a bit advanced at this point, but it is quite useful; you can check this [pybonacci article](http://pybonacci.org/2013/02/22/integrar-fortran-con-python-usando-f2py/9) if you need it. Functions to create arrays Too much theory? Let's get practical. We have already seen that `np.array()` lets us create arrays from values we type in manually as lists. Later on we will learn to read files and store them in arrays. In the meantime, what else might we need? array of zeros
###Code
# One dimension
np.zeros(100)
# Two dimensions
np.zeros([10,10])
###Output
_____no_output_____
###Markdown
Note: In the 1D case both `np.zeros([5])` and `np.zeros(5)` (without the brackets) are valid, but that is not true for the nD case. "empty" array
###Code
np.empty(10)
###Output
_____no_output_____
###Markdown
Important: The empty array is created in slightly less time than the array of zeros. However, the value of its elements is arbitrary and depends on the state of the memory. If you use it, make sure you later fill every element properly, otherwise you may introduce wrong results. array of ones
###Code
np.ones([3,2])
###Output
_____no_output_____
###Markdown
Note: Other very useful functions are `np.zeros_like` and `np.ones_like`. Use the help to see what they do if you need to. identity array
###Code
np.identity(4)
###Output
_____no_output_____
###Markdown
Note: You can also try `np.eye()` and `np.diag()`. Ranges np.arange NumPy, give me __an array that goes from 0 to 5__:
###Code
a = np.arange(0, 5)
a
###Output
_____no_output_____
###Markdown
__Look carefully at the previous result__: is there something you should burn into your memory forever? __The last element is not 5 but 4__ NumPy, give me __an array that goes from 0 to 10 in steps of 3__:
###Code
np.arange(0,11,3)
###Output
_____no_output_____
###Markdown
np.linspace If you have ever had to use MATLAB, this will surely sound familiar:
###Code
np.linspace(0, 10, 21)
###Output
_____no_output_____
###Markdown
In this case the last element _is_ included. Note: You can also try `np.logspace()` reshape With `np.arange()` we can create "vectors" whose elements take consecutive or equally spaced values, as we saw above. Can we do the same with "matrices"? Yes, but not with a single function. Imagine you want to create something like this:\begin{pmatrix} 1 & 2 & 3\\ 4 & 5 & 6\\ 7 & 8 & 9\\ \end{pmatrix} * We start by creating a 1d array with the values $(1,2,3,4,5,6,7,8,9)$ using `np.arange()`.* Then we give it the shape of a 2d array with `np.reshape(array, (dim0, dim1))`.
###Code
a = np.arange(1,10)
M = np.reshape(a, [3,3])
M
# It also works as a method
N = a.reshape([3,3])
N
###Output
_____no_output_____
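###Markdown
One extra detail worth knowing (an addition to the original cell): `reshape` accepts `-1` for one dimension and infers it from the array size.
###Code
# Sketch: -1 lets reshape infer that dimension automatically
a = np.arange(1, 10)
M = a.reshape(3, -1)   # NumPy works out that the second dimension must be 3
M.shape
###Output
_____no_output_____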
###Markdown
Note: We will not go too deep into what methods are, but you should know that they belong to object-oriented programming and that in Python everything is an object. Think of them as special functions where the most important argument (the one the action is performed on) is written first, followed by a dot. For example: `.method(arguments)` Importing Python is a highly modular language: it is split into __libraries that perform specific tasks__. To use them we must import them. We can import things from the [standard library](https://docs.python.org/3.4/library/), from packages we have downloaded (or that ship with [our distribution](http://docs.continuum.io/anaconda/pkg-docs.html)), or from modules we write ourselves. There are several ways to import: import numpy Every time we want to access a NumPy function we will have to write: numpy.sin(5) numpy.linspace(0,100,50) ---Since this can become tedious, a __namespace__ is usually used; the one recommended in the official documentation, and the one we will use in this course, is: import numpy as np Now we can call functions by writing: np.sin(5) np.linspace(0,100,50) ---If that still feels like too much typing, you can do (__strongly discouraged__): from numpy import * The asterisk means _EVERYTHING_. This causes several problems: * __It will import a large number of functions and classes that you may not need__.* The names of these functions may clash with names from another module you have imported, "clobbering" them, so __ambiguities will appear__. Example: why not do from numpy import * ?
###Code
from numpy import *
a = [1,2,3,4,5]
sin(a)
from math import *
sin(a)
###Output
_____no_output_____
###Markdown
__The sine function provided by math is not the same as the one in NumPy__. Both return the sine of a number, and obviously the same result for the same number, but one accepts lists and the other does not. After the second import, NumPy's sine has been replaced by math's, and the very same statement now raises an error. This can drive you a little crazy if your code is large, or drive someone else crazy if they use your code.Enough? Now you know why you will __always__ write `np.whatever`. Important: Let's restart the kernel and import NumPy properly to continue.
###Code
import numpy as np
###Output
_____no_output_____
###Markdown
Operations Element-wise operations Now that few things about arrays escape us, let's try some operations. The behaviour is the usual one in FORTRAN and MATLAB, so there is little to add:
###Code
# create an array and add a number to it
arr = np.arange(11)
arr + 55
# multiply it by a number
arr * 2
# square it
arr ** 2
# evaluate a function on it
np.tanh(arr)
###Output
_____no_output_____
###Markdown
Practice: You can try comparing the time needed to perform the operation in one block, as we just did, versus element by element, looping over the array. __If an operation involves two arrays, it is also performed element-wise__
###Code
# create two arrays
arr1 = np.arange(0,11)
arr2 = np.arange(20,31)
# add them
arr1 + arr2
# multiply them
arr1 * arr2
###Output
_____no_output_____
###Markdown
Comparisons
###Code
# >,<
arr1 > arr2
# ==
arr1 == arr2 # careful! the arrays hold integers, not floats
###Output
_____no_output_____
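###Markdown
As a small added preview of the broadcasting mentioned in the next note: arrays of different but compatible shapes can still be combined; along each dimension the sizes must either match or one of them must be 1.
###Code
# Broadcasting sketch: a (3, 1) column combined with a length-4 row
col = np.arange(3).reshape(3, 1)   # shape (3, 1)
row = np.arange(4)                 # shape (4,)
table = col + row                  # broadcast to shape (3, 4)
print(table.shape)
table
###Output
_____no_output_____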
###Markdown
Note: By the way, what will happen if the arrays you want to operate on do not have the same shape? Care to bet? Later on you may want to look up some information on __broadcasting__. Exercises 1. Create a 3x4 array z1 full of zeros of integer type.2. Create a 3x4 array z2 full of zeros except for the first row, which will be all ones.3. Create a 3x4 array z3 full of zeros except for the last row, which will be the range between 5 and 8.4. Create a 10-element vector with ones at the odd entries and twos at the even ones.5. Create a "chessboard", with ones on the black squares and zeros on the white ones.
###Code
a = np.zeros((3, 4))
a
a[0, :] = 1
a
b = np.zeros((3, 4))
b[-1] = np.arange(5, 9)
b
v = np.ones(10)
v[::2] = 2
v
tablero = np.zeros((8, 8))
tablero[1::2, ::2] = 1
tablero[::2, 1::2] = 1
tablero
###Output
_____no_output_____
###Markdown
Extra:
###Code
%matplotlib inline
import matplotlib.pyplot as plt
plt.matshow(tablero, cmap=plt.cm.gray_r)
###Output
_____no_output_____
###Markdown
--- ___We have learned:___* The characteristics of NumPy arrays: - Type homogeneity. - Fixed size at creation time.* How to use the main functions to create arrays.* How to operate on arrays._In short:_* __Engineers and scientists $\heartsuit$ arrays.__* __Engineers and scientists need NumPy.____Next time__ we will learn how to access the elements of an array (_slicing_), how to perform some linear algebra operations (determinants, traces, eigenvalues...), and we will practise everything we have learned.__I want more!__Some links:Some links on Pybonacci:* [Cómo crear matrices en Python con NumPy](http://pybonacci.wordpress.com/2012/06/11/como-crear-matrices-en-python-con-numpy/).* [Números aleatorios en Python con NumPy y SciPy](http://pybonacci.wordpress.com/2013/01/11/numeros-aleatorios-en-python-con-numpy-y-scipy/).Some links elsewhere:* [100 numpy exercises](http://www.labri.fr/perso/nrougier/teaching/numpy.100/index.html). For now you may only be able to do the first ones, but don't worry, soon you will know more...* [NumPy and IPython SciPy 2013 Tutorial](http://conference.scipy.org/scipy2013/tutorial_detail.php?id=100).* [NumPy and SciPy documentation](http://docs.scipy.org/doc/). ---Video lecture, part of the [Curso de Python para científicos e ingenieros](http://cacheme.org/curso-online-python-cientifico-ingenieros/) recorded at the Escuela Politécnica Superior of the Universidad de Alicante.
###Code
from IPython.display import YouTubeVideo
YouTubeVideo("UltVlYCacD0", width=560, height=315, list="PLGBbVX_WvN7bMwYe7wWV5TZt1a58jTggB")
###Output
_____no_output_____
###Markdown
--- If you liked this lecture, follow us on Twitter! Follow @AeroPython --- Curso AeroPython by Juan Luis Cano Rodriguez and Alejandro Sáez Mollejo is distributed under a Creative Commons Attribution 4.0 International License. ---_The following cells contain the Notebook configuration.__To display and use the Twitter links, the notebook must be run as [trusted](http://ipython.org/ipython-doc/dev/notebook/security.html)_ File > Trusted Notebook
###Code
%%html
<a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a>
<script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
# This cell sets the notebook style
from IPython.core.display import HTML
css_file = '../static/styles/style.css'
HTML(open(css_file, "r").read())
###Output
_____no_output_____
Poor_mans_regression.ipynb | ###Markdown
###Code
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0,10,11)
true_slope, true_intercept = 2,-3
y = true_slope * x + true_intercept + 5 * (np.random.random(size=11) - .5)
plt.scatter(x,y,c='b')
plt.plot(np.linspace(0,10,11),true_slope * np.linspace(0,10,11) + true_intercept,c='r')
def guess():
# returns slope, intercept guess
return np.random.randint(-100,100),np.random.randint(-100,100)
def pred(x,m,b):
return m * x + b
def mse(y,y_hat):
length = y.shape[0]
mse = (y - y_hat).dot(y-y_hat)
return mse/length
m_o = []
b_o = []
mse_o = []
for _ in range(10000):
m, b = guess()
m_o.append(m)
b_o.append(b)
y_hat = pred(x,m,b)
mse_o.append(mse(y,y_hat))
plt.scatter(m_o,b_o,c=mse_o)
plt.colorbar()
# collect the random-search results; use a new name so the mse() function is not shadowed
mse_values = np.array(mse_o)
min_index = np.argmin(mse_values)
m_o[min_index], b_o[min_index]
b_o.index(3)
plt.scatter(m_o,mse_o)
plt.scatter(b_o,mse_o)
###Output
_____no_output_____ |
examples/integrations/databricks/labelbox_databricks_example.ipynb | ###Markdown
Notebook Setup
###Code
from labelbox import Client
import databricks.koalas as pd
import labelspark
try: API_KEY
except NameError:
API_KEY = dbutils.notebook.run("api_key", 60)
client = Client(API_KEY)
projects = client.get_projects()
for project in projects:
print(project.name, project.uid)
# can parse the directory and make a Spark table of image URLs
def create_unstructured_dataset():
print("Creating table of unstructured image data")
# Pull information from Data Lake or other storage
dataSet = client.get_dataset("ckolyi9ha7h800y7i5ppr3put") #Insert Dataset ID from Labelbox for a sample dataset
#creates a list of datarow dictionaries
df_list = [ {
"external_id": dataRow.external_id,
"row_data": dataRow.row_data
} for dataRow in dataSet.data_rows()]
# Create DataFrame
images = pd.DataFrame(df_list)
df_images = images.to_spark()
# display(df_images)
df_images.registerTempTable("unstructured_data")
# df_images = spark.createDataFrame(images)
table_exists = False
tblList = spark.catalog.listTables()
if len(tblList) == 0:
create_unstructured_dataset()
table_exists = True
for table in tblList:
if table.name == "unstructured_data":
print("Unstructured data table exists")
table_exists = True
if not table_exists: create_unstructured_dataset()
###Output
_____no_output_____
###Markdown
Load Unstructured Data
###Code
%sql
select * from unstructured_data
from labelbox import Client
client = Client(API_KEY)
###Output
_____no_output_____
###Markdown
LabelSpark expects a Spark table with two columns: the first column "external_id" and the second column "row_data". external_id is a filename, like "birds.jpg" or "my_video.mp4". row_data is the URL path to the file. Labelbox renders assets locally on your users' machines when they label, so your labeler will need permission to access that asset. Example: | external_id | row_data ||-------------|--------------------------------------|| image1.jpg | https://url_to_your_asset/image1.jpg || image2.jpg | https://url_to_your_asset/image2.jpg || image3.jpg | https://url_to_your_asset/image3.jpg |
###Code
import labelspark
unstructured_data = spark.table("unstructured_data")
dataSet_new = labelspark.create_dataset(client, unstructured_data, "Demo Dataset")
###Output
_____no_output_____
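###Markdown
If your asset list lives somewhere other than an existing table, a minimal sketch of building the two-column table described above might look like this; the file names and URLs below are placeholders, not real assets.
###Code
# Hypothetical sketch: build the external_id / row_data table LabelSpark expects.
# Replace the placeholder URLs with links your labelers can actually reach.
rows = [
    ("image1.jpg", "https://your-storage.example.com/image1.jpg"),
    ("image2.jpg", "https://your-storage.example.com/image2.jpg"),
]
df_assets = spark.createDataFrame(rows, ["external_id", "row_data"])
df_assets.registerTempTable("my_unstructured_data")
display(df_assets)
###Output
_____no_output_____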
###Markdown
You can use the labelbox SDK to build your ontology. An example is provided below. Please refer to documentation at https://docs.labelbox.com/python-sdk/en/index-en
###Code
from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option
# from labelbox import Client
# import os
ontology = OntologyBuilder()
tool_people = Tool(tool=Tool.Type.BBOX, name="People")
tool_car = Tool(tool=Tool.Type.SEGMENTATION, name="Car")
tool_umbrella = Tool(tool=Tool.Type.POLYGON, name="Umbrella")
Weather_Classification = Classification(class_type=Classification.Type.RADIO, instructions="Weather",
options=[Option(value="Clear"),
Option(value="Overcast"),
Option(value="Rain"),
Option(value="Other")])
Time_of_Day = Classification(class_type=Classification.Type.RADIO, instructions="Time of Day",
options=[Option(value="Day"),
Option(value="Night"),
Option(value="Unknown")])
ontology.add_tool(tool_people)
ontology.add_tool(tool_car)
ontology.add_tool(tool_umbrella)
ontology.add_classification(Weather_Classification)
ontology.add_classification(Time_of_Day)
project_demo2 = client.create_project(name="LabelSpark Demo Example", description = "Example description here.")
project_demo2.datasets.connect(dataSet_new)
# Setup frontends
all_frontends = list(client.get_labeling_frontends())
for frontend in all_frontends:
if frontend.name == 'Editor':
project_frontend = frontend
break
# Attach Frontends
project_demo2.labeling_frontend.connect(project_frontend)
# Attach Project and Ontology
project_demo2.setup(project_frontend, ontology.asdict())
print("Project Setup is complete.")
###Output
_____no_output_____
###Markdown
Bronze and Silver Annotation Tables Be sure to provide your Labelbox Project ID (a long string like "ckolzeshr7zsy0736w0usbxdy") to the labelspark get_annotations method to pull in your labeled dataset. bronze_table = labelspark.get_annotations(client, "ckolzeshr7zsy0736w0usbxdy", spark, sc) *These other methods transform the bronze table and do not require a project ID.* flattened_bronze_table = labelspark.flatten_bronze_table(bronze_table) silver_table = labelspark.bronze_to_silver(bronze_table)
###Code
client = Client(API_KEY) #refresh client
bronze_table = labelspark.get_annotations(client,"ckolzeshr7zsy0736w0usbxdj", spark, sc) #insert your unique project ID here
bronze_table.registerTempTable("street_photo_demo")
display(bronze_table.limit(2))
client = Client(API_KEY) #refresh client
bronze_table = spark.table("street_photo_demo")
flattened_bronze_table = labelspark.flatten_bronze_table(bronze_table)
display(flattened_bronze_table.limit(1))
client = Client(API_KEY) #refresh client
silver_table = labelspark.bronze_to_silver(bronze_table)
silver_table.registerTempTable("silver_table")
display(silver_table)
%sql
SELECT * FROM silver_table
WHERE `People.count` > 0
AND `Umbrella.count` > 0
AND `Car.count` > 0
AND Weather = "Rain"
%sql
SELECT * FROM silver_table
WHERE `People.count` > 10
def cleanup():
client = Client(API_KEY)
dataSet_new.delete()
project_demo2.delete()
cleanup()
###Output
_____no_output_____
###Markdown
Labelbox Connector for Databricks Tutorial Notebook Pre-requisites 1. This tutorial notebook requires a Labelbox API Key. Please login to your [Labelbox Account](app.labelbox.com) and generate an [API Key](https://app.labelbox.com/account/api-keys) 2. A few cells below will install the Labelbox SDK and Connector Library. This install is notebook-scoped and will not affect the rest of your cluster. 3. Please make sure you are running at least the latest LTS version of Databricks. Notebook Preview This notebook will guide you through these steps: 1. Connect to Labelbox via the SDK 2. Create a labeling dataset from a table of unstructured data in Databricks 3. Programmatically set up an ontology and labeling project in Labelbox 4. Load Bronze and Silver annotation tables from an example labeled project 5. Additional cells describe how to handle video annotations and use Labelbox Diagnostics and Catalog Additional documentation links are provided at the end of the notebook. Thanks for trying out the Databricks and Labelbox Connector! You or someone from your organization signed up for a Labelbox trial through Databricks Partner Connect. This notebook was loaded into your Shared directory to help illustrate how Labelbox and Databricks can be used together to power unstructured data workflows. Labelbox can be used to rapidly annotate a variety of unstructured data from your Data Lake ([images](https://labelbox.com/product/image), [video](https://labelbox.com/product/video), [text](https://labelbox.com/product/text), and [geospatial tiled imagery](https://docs.labelbox.com/docs/tiled-imagery-editor)) and the Labelbox Connector for Databricks makes it easy to bring the annotations back into your Lakehouse environment for AI/ML and analytical workflows. If you would like to watch a video of the workflow, check out our [Data & AI Summit Demo](https://databricks.com/session_na21/productionizing-unstructured-data-for-ai-and-analytics). Questions or comments? Reach out to us at [[email protected]](mailto:[email protected])
###Code
%pip install labelbox labelspark
###Output
_____no_output_____
###Markdown
Configure the SDKNow that Labelbox and the Databricks libraries have been installed, you will need to configure the SDK. You will need an API key that you can create through the app [here](https://app.labelbox.com/account/api-keys). You can also store the key using Databricks Secrets API. The SDK will attempt to use the env var `LABELBOX_API_KEY`
###Code
from labelbox import Client, Dataset
from labelbox.schema.ontology import OntologyBuilder, Tool, Classification, Option
import databricks.koalas as pd
import labelspark
API_KEY = ""
if not(API_KEY):
raise ValueError("Go to Labelbox to get an API key")
client = Client(API_KEY)
###Output
_____no_output_____
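###Markdown
As an added alternative to pasting the key into the notebook, you could pull it from a Databricks secret scope or from the `LABELBOX_API_KEY` environment variable the SDK also checks. The secret scope and key names below are made up for illustration.
###Code
import os

# Hypothetical secret scope/key names -- replace with your own
try:
    API_KEY = dbutils.secrets.get(scope="labelbox", key="api_key")
except Exception:
    API_KEY = os.environ.get("LABELBOX_API_KEY", "")

if not API_KEY:
    raise ValueError("Go to Labelbox to get an API key")

client = Client(API_KEY)
###Output
_____no_output_____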
###Markdown
Fetch seed dataNext we'll load a demo dataset into a Spark table so you can see how to easily load assets into Labelbox via URL. For simplicity, you can get a Dataset ID from Labelbox and we'll load those URLs into a Spark table for you (so you don't need to worry about finding data to get this demo notebook to run). Below we'll grab the "Example Nature Dataset" included in Labelbox trials.Also, Labelbox has native support for AWS, Azure, and GCP cloud storage. You can connect Labelbox to your storage via [Delegated Access](https://docs.labelbox.com/docs/iam-delegated-access) and easily load those assets for annotation. For more information, you can watch this [video](https://youtu.be/wlWo6EmPDV4).
###Code
sample_dataset = next(client.get_datasets(where=(Dataset.name == "Example Nature Dataset")))
sample_dataset.uid
# can parse the directory and make a Spark table of image URLs
SAMPLE_TABLE = "sample_unstructured_data"
tblList = spark.catalog.listTables()
if not any([table.name == SAMPLE_TABLE for table in tblList]):
df = pd.DataFrame([
{
"external_id": dr.external_id,
"row_data": dr.row_data
} for dr in sample_dataset.data_rows()
]).to_spark()
df.registerTempTable(SAMPLE_TABLE)
print(f"Registered table: {SAMPLE_TABLE}")
###Output
_____no_output_____
###Markdown
You should now have a temporary table "sample_unstructured_data" which includes the file names and URLs for some demo images. We're going to share this table with Labelbox using the Labelbox Connector for Databricks!
###Code
display(sqlContext.sql(f"select * from {SAMPLE_TABLE} LIMIT 5"))
###Output
_____no_output_____
###Markdown
Create a Labeling Project Projects are where teams create labels. A project requires a dataset of assets to be labeled and an ontology to configure the labeling interface. Step 1: Create a dataset The [Labelbox Connector for Databricks](https://pypi.org/project/labelspark/) expects a Spark table with two columns: the first column "external_id" and the second column "row_data". external_id is a filename, like "birds.jpg" or "my_video.mp4". row_data is the URL path to the file. Labelbox renders assets locally on your users' machines when they label, so your labeler will need permission to access that asset. Example: | external_id | row_data ||-------------|--------------------------------------|| image1.jpg | https://url_to_your_asset/image1.jpg || image2.jpg | https://url_to_your_asset/image2.jpg || image3.jpg | https://url_to_your_asset/image3.jpg |
###Code
unstructured_data = spark.table(SAMPLE_TABLE)
demo_dataset = labelspark.create_dataset(client, unstructured_data, "Databricks Demo Dataset")
print("Open the dataset in the App")
print(f"https://app.labelbox.com/data/{demo_dataset.uid}")
###Output
_____no_output_____
###Markdown
Step 2: Create a projectYou can use the labelbox SDK to build your ontology (we'll do that next) You can also set your project up entirely through our website at app.labelbox.com.Check out our [ontology creation documentation.](https://docs.labelbox.com/docs/configure-ontology)
###Code
# Create a new project
project_demo = client.create_project(name="Labelbox and Databricks Example")
project_demo.datasets.connect(demo_dataset) # add the dataset to the queue
ontology = OntologyBuilder()
tools = [
Tool(tool=Tool.Type.BBOX, name="Frog"),
Tool(tool=Tool.Type.BBOX, name="Flower"),
Tool(tool=Tool.Type.BBOX, name="Fruit"),
Tool(tool=Tool.Type.BBOX, name="Plant"),
Tool(tool=Tool.Type.SEGMENTATION, name="Bird"),
Tool(tool=Tool.Type.SEGMENTATION, name="Person"),
Tool(tool=Tool.Type.SEGMENTATION, name="Sleep"),
Tool(tool=Tool.Type.SEGMENTATION, name="Yak"),
Tool(tool=Tool.Type.SEGMENTATION, name="Gemstone"),
]
for tool in tools:
ontology.add_tool(tool)
conditions = ["clear", "overcast", "rain", "other"]
weather_classification = Classification(
class_type=Classification.Type.RADIO,
instructions="what is the weather?",
options=[Option(value=c) for c in conditions]
)
ontology.add_classification(weather_classification)
# Setup editor
for editor in client.get_labeling_frontends():
if editor.name == 'Editor':
project_demo.setup(editor, ontology.asdict())
print("Project Setup is complete.")
###Output
_____no_output_____
###Markdown
Step 3: Go label data
###Code
print("Open the project to start labeling")
print(f"https://app.labelbox.com/projects/{project_demo.uid}/overview")
raise ValueError("Go label some data before continuing")
###Output
_____no_output_____
###Markdown
Exporting labels/annotationsAfter creating labels in Labelbox you can export them to use in Databricks for model training and analysis.
###Code
LABEL_TABLE = "exported_labels"
labels_table = labelspark.get_annotations(client, project_demo.uid, spark, sc)
labels_table.registerTempTable(LABEL_TABLE)
display(labels_table)
###Output
_____no_output_____ |
docs/ipynb/13-tutorial-skyrmion.ipynb | ###Markdown
Skyrmion in a disk In this tutorial, we compute and relax a skyrmion in an interfacial-DMI material in a confined, disk-like geometry.
###Code
import oommfc as oc
import discretisedfield as df
import micromagneticmodel as mm
###Output
_____no_output_____
###Markdown
We define the mesh on a cuboid region through its corner points `p1` and `p2`, and the discretisation cell size `cell`.
###Code
region = df.Region(p1=(-50e-9, -50e-9, 0), p2=(50e-9, 50e-9, 10e-9))
mesh = df.Mesh(region=region, cell=(5e-9, 5e-9, 5e-9))
###Output
_____no_output_____
###Markdown
The mesh we defined is:
###Code
mesh.k3d()
###Output
_____no_output_____
###Markdown
Now, we can define the system object by first setting up the Hamiltonian:
###Code
system = mm.System(name='skyrmion')
system.energy = (mm.Exchange(A=1.6e-11)
+ mm.DMI(D=4e-3, crystalclass='Cnv')
+ mm.UniaxialAnisotropy(K=0.51e6, u=(0, 0, 1))
+ mm.Demag()
+ mm.Zeeman(H=(0, 0, 2e5)))
###Output
_____no_output_____
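###Markdown
For reference (an added note, not part of the original tutorial), the terms above correspond schematically to an energy density of the form $$w = A\,(\nabla\mathbf{m})^2 + D\left(m_z\,\nabla\cdot\mathbf{m} - (\mathbf{m}\cdot\nabla)\,m_z\right) - K\,(\mathbf{m}\cdot\mathbf{u})^2 - \mu_0 M_\text{s}\,\mathbf{m}\cdot\mathbf{H} + w_\text{demag},$$ where $\mathbf{m}$ is the unit magnetisation; the exact sign convention of the Cnv (interfacial) DMI term and the demagnetisation contribution follow the definitions used internally by OOMMF/Ubermag.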
###Markdown
The disk geometry is set up by defining the saturation magnetisation (the norm of the magnetisation field). For that, we define a function:
###Code
Ms = 1.1e6
def Ms_fun(pos):
"""Function to set magnitude of magnetisation: zero outside cylindric shape,
Ms inside cylinder.
Cylinder radius is 50nm.
"""
x, y, z = pos
if (x**2 + y**2)**0.5 < 50e-9:
return Ms
else:
return 0
###Output
_____no_output_____
###Markdown
The second function we need defines the initial magnetisation, which is going to relax into a skyrmion.
###Code
def m_init(pos):
"""Function to set initial magnetisation direction:
-z inside cylinder (r=10nm),
+z outside cylinder.
"""
x, y, z = pos
if (x**2 + y**2)**0.5 < 10e-9:
return (0, 0, -1)
else:
return (0, 0, 1)
# create system with above geometry and initial magnetisation
system.m = df.Field(mesh, dim=3, value=m_init, norm=Ms_fun)
###Output
_____no_output_____
###Markdown
The geometry is now:
###Code
system.m.norm.k3d_nonzero()
###Output
_____no_output_____
###Markdown
and the initial magnetisation is:
###Code
system.m.plane('z').mpl()
###Output
/home/marijanbeg/miniconda3/envs/ud/lib/python3.8/site-packages/matplotlib/quiver.py:686: RuntimeWarning: divide by zero encountered in double_scalars
length = a * (widthu_per_lenu / (self.scale * self.width))
/home/marijanbeg/miniconda3/envs/ud/lib/python3.8/site-packages/matplotlib/quiver.py:686: RuntimeWarning: invalid value encountered in multiply
length = a * (widthu_per_lenu / (self.scale * self.width))
###Markdown
Finally we can minimise the energy and plot the magnetisation.
###Code
# minimize the energy
md = oc.MinDriver()
md.drive(system)
# Plot relaxed configuration: vectors in z-plane
system.m.plane('z').mpl()
# Plot z-component only:
system.m.z.plane('z').mpl()
# 3d-plot of z-component
system.m.z.k3d_scalar(filter_field=system.m.norm)
###Output
_____no_output_____
###Markdown
Finally we can sample and plot the magnetisation along the line:
###Code
system.m.z.line(p1=(-49e-9, 0, 0), p2=(49e-9, 0, 0), n=20).mpl()
###Output
_____no_output_____
###Markdown
Tutorial 13: Skyrmion in a disk> Interactive online tutorial:> [](https://mybinder.org/v2/gh/ubermag/oommfc/master?filepath=docs%2Fipynb%2Findex.ipynb) In this tutorial, we compute and relax a skyrmion in an interfacial-DMI material in a confined, disk-like geometry.
###Code
import oommfc as oc
import discretisedfield as df
import micromagneticmodel as mm
###Output
_____no_output_____
###Markdown
We define the mesh on a cuboid region through its corner points `p1` and `p2`, and the discretisation cell size `cell`.
###Code
region = df.Region(p1=(-50e-9, -50e-9, 0), p2=(50e-9, 50e-9, 10e-9))
mesh = df.Mesh(region=region, cell=(5e-9, 5e-9, 5e-9))
###Output
_____no_output_____
###Markdown
The mesh we defined is:
###Code
%matplotlib inline
mesh.k3d()
###Output
_____no_output_____
###Markdown
Now, we can define the system object by first setting up the Hamiltonian:
###Code
system = mm.System(name='skyrmion')
system.energy = (mm.Exchange(A=1.6e-11)
+ mm.DMI(D=4e-3, crystalclass='Cnv')
+ mm.UniaxialAnisotropy(K=0.51e6, u=(0, 0, 1))
+ mm.Demag()
+ mm.Zeeman(H=(0, 0, 2e5)))
###Output
_____no_output_____
###Markdown
The disk geometry is set up by defining the saturation magnetisation (the norm of the magnetisation field). For that, we define a function:
###Code
Ms = 1.1e6
def Ms_fun(pos):
"""Function to set magnitude of magnetisation: zero outside cylindric shape,
Ms inside cylinder.
Cylinder radius is 50nm.
"""
x, y, z = pos
if (x**2 + y**2)**0.5 < 50e-9:
return Ms
else:
return 0
###Output
_____no_output_____
###Markdown
The second function we need defines the initial magnetisation, which is going to relax into a skyrmion.
###Code
def m_init(pos):
"""Function to set initial magnetisation direction:
-z inside cylinder (r=10nm),
+z outside cylinder.
"""
x, y, z = pos
if (x**2 + y**2)**0.5 < 10e-9:
return (0, 0, -1)
else:
return (0, 0, 1)
# create system with above geometry and initial magnetisation
system.m = df.Field(mesh, dim=3, value=m_init, norm=Ms_fun)
###Output
_____no_output_____
###Markdown
The geometry is now:
###Code
system.m.norm.k3d_nonzero()
###Output
_____no_output_____
###Markdown
and the initial magnetisation is:
###Code
system.m.plane('z').mpl()
###Output
/Users/marijanbeg/miniconda3/envs/ubermag-dev/lib/python3.8/site-packages/matplotlib/quiver.py:715: RuntimeWarning: divide by zero encountered in double_scalars
length = a * (widthu_per_lenu / (self.scale * self.width))
/Users/marijanbeg/miniconda3/envs/ubermag-dev/lib/python3.8/site-packages/matplotlib/quiver.py:715: RuntimeWarning: invalid value encountered in multiply
length = a * (widthu_per_lenu / (self.scale * self.width))
/Users/marijanbeg/miniconda3/envs/ubermag-dev/lib/python3.8/site-packages/matplotlib/quiver.py:767: RuntimeWarning: invalid value encountered in less
short = np.repeat(length < minsh, 8, axis=1)
/Users/marijanbeg/miniconda3/envs/ubermag-dev/lib/python3.8/site-packages/matplotlib/quiver.py:780: RuntimeWarning: invalid value encountered in less
tooshort = length < self.minlength
###Markdown
Finally we can minimise the energy and plot the magnetisation.
###Code
# minimize the energy
md = oc.MinDriver()
md.drive(system)
# Plot relaxed configuration: vectors in z-plane
system.m.plane('z').mpl()
# Plot z-component only:
system.m.z.plane('z').mpl()
# 3d-plot of z-component
system.m.z.k3d_scalar(filter_field=system.m.norm)
###Output
_____no_output_____
###Markdown
Finally we can sample and plot the magnetisation along the line:
###Code
system.m.z.line(p1=(-49e-9, 0, 0), p2=(49e-9, 0, 0), n=20).mpl()
###Output
_____no_output_____
###Markdown
Tutorial 13: Skyrmion in a disk> Interactive online tutorial:> [](https://mybinder.org/v2/gh/ubermag/oommfc/master?filepath=docs%2Fipynb%2Findex.ipynb) In this tutorial, we compute and relax a skyrmion in an interfacial-DMI material in a confined, disk-like geometry.
###Code
import discretisedfield as df
import micromagneticmodel as mm
import oommfc as oc
###Output
_____no_output_____
###Markdown
We define the mesh on a cuboid region through its corner points `p1` and `p2`, and the discretisation cell size `cell`.
###Code
region = df.Region(p1=(-50e-9, -50e-9, 0), p2=(50e-9, 50e-9, 10e-9))
mesh = df.Mesh(region=region, cell=(5e-9, 5e-9, 5e-9))
###Output
_____no_output_____
###Markdown
The mesh we defined is:
###Code
%matplotlib inline
mesh.k3d()
###Output
_____no_output_____
###Markdown
Now, we can define the system object by first setting up the Hamiltonian:
###Code
system = mm.System(name="skyrmion")
system.energy = (
mm.Exchange(A=1.6e-11)
+ mm.DMI(D=4e-3, crystalclass="Cnv")
+ mm.UniaxialAnisotropy(K=0.51e6, u=(0, 0, 1))
+ mm.Demag()
+ mm.Zeeman(H=(0, 0, 2e5))
)
###Output
_____no_output_____
###Markdown
The disk geometry is set up by defining the saturation magnetisation (the norm of the magnetisation field). For that, we define a function:
###Code
Ms = 1.1e6
def Ms_fun(pos):
"""Function to set magnitude of magnetisation: zero outside cylindric shape,
Ms inside cylinder.
Cylinder radius is 50nm.
"""
x, y, z = pos
if (x**2 + y**2) ** 0.5 < 50e-9:
return Ms
else:
return 0
###Output
_____no_output_____
###Markdown
The second function we need defines the initial magnetisation, which is going to relax into a skyrmion.
###Code
def m_init(pos):
"""Function to set initial magnetisation direction:
-z inside cylinder (r=10nm),
+z outside cylinder.
"""
x, y, z = pos
if (x**2 + y**2) ** 0.5 < 10e-9:
return (0, 0, -1)
else:
return (0, 0, 1)
# create system with above geometry and initial magnetisation
system.m = df.Field(mesh, dim=3, value=m_init, norm=Ms_fun)
###Output
_____no_output_____
###Markdown
The geometry is now:
###Code
system.m.norm.k3d_nonzero()
###Output
_____no_output_____
###Markdown
and the initial magnetisation is:
###Code
system.m.plane("z").mpl()
###Output
_____no_output_____
###Markdown
Finally we can minimise the energy and plot the magnetisation.
###Code
# minimize the energy
md = oc.MinDriver()
md.drive(system)
# Plot relaxed configuration: vectors in z-plane
system.m.plane("z").mpl()
# Plot z-component only:
system.m.z.plane("z").mpl()
# 3d-plot of z-component
system.m.z.k3d_voxels(filter_field=system.m.norm)
###Output
_____no_output_____
###Markdown
Finally we can sample and plot the magnetisation along the line:
###Code
system.m.z.line(p1=(-49e-9, 0, 0), p2=(49e-9, 0, 0), n=20).mpl()
###Output
_____no_output_____ |
docs/notebooks/fermions_backend.ipynb | ###Markdown
The fermionic tweezer backend It implements four optical lattice sites with the possibility of spin up and down. The first four wires are the spin-up wires and the next four wires are the spin-down wires. We have implemented: - `load` which adds a fermion to the wire. - `hop` which lets fermions hop. - `int` which describes interactions between fermions. - `phase` which is the chemical potential on the gate. - `measure` which reads out the occupation. Our own simulator code The fermions can be directly mapped to spins via a local Jordan-Wigner transformation. On site $x$: $\psi_{x,-1/2} = -\sigma^{+} \otimes \mathbf{1} \otimes \mathbf{1} \otimes \mathbf{1}$, $\psi_{x,1/2} = -\sigma^z \otimes \sigma^{+} \otimes \mathbf{1} \otimes \mathbf{1}$ and on site $y$: $\psi_{y,-1/2} = -\sigma^z \otimes \sigma^z \otimes \sigma^+ \otimes \mathbf{1}$, $\psi_{y,1/2} = -\sigma^z \otimes \sigma^z \otimes \sigma^z \otimes \sigma^{+}$. We first create the Pauli matrices and then build the fermionic operators on the extended system.
###Code
import numpy as np
from scipy.sparse.linalg import expm
def nested_kronecker_product(a):
'''Put together a large operator from a list of matrices.
Example: nested_kronecker_product([A, B, C]) returns np.kron(A, np.kron(B, C)).
Args:
a (list): A list of matrices that can be connected by Kronecker products.
Returns:
array: A matrix that operates on the combined Hilbert space.
'''
if len(a) == 2:
return np.kron(a[0],a[1])
else:
return np.kron(a[0], nested_kronecker_product(a[1:]))
def jordan_wigner_transform(j, lattice_length):
'''
Builds up the fermionic operators in a 1D chain of fermionic modes.
Args:
j (int): mode (wire) index
lattice_length (int): total number of fermionic modes in the chain
Returns:
array: the Jordan-Wigner lowering (annihilation) operator for mode j; its transpose acts as the creation operator.
'''
P = np.array([[0, 1], [0, 0]])
Z = np.array([[1, 0], [0, -1]])
I = np.eye(2)
operators = []
for k in range(j):
operators.append(Z)
operators.append(P)
for k in range(lattice_length-j-1):
operators.append(I)
return nested_kronecker_product(operators)
###Output
_____no_output_____
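###Markdown
A quick added consistency check: the operators built this way should satisfy the canonical anticommutation relations $\{\psi_i, \psi_j^\dagger\} = \delta_{ij}$.
###Code
# Illustrative check of {c_i, c_j^dagger} = delta_ij for a small chain of 4 modes
n_modes = 4
c_ops = [jordan_wigner_transform(j, n_modes) for j in range(n_modes)]
dim = 2**n_modes
for i in range(n_modes):
    for j in range(n_modes):
        anti = c_ops[i] @ c_ops[j].T.conj() + c_ops[j].T.conj() @ c_ops[i]
        expected = np.eye(dim) if i == j else np.zeros((dim, dim))
        assert np.allclose(anti, expected)
print("canonical anticommutation relations satisfied")
###Output
_____no_output_____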
###Markdown
we have basically four wires, which is the same as two lattice sites with spin $\pm 1/2$
###Code
l = 2 # length of the tweezer array
Nstates = 2 ** (2 * l)
lattice_length = 2 * l
loweringOp = []
for i in range(lattice_length):
loweringOp.append(jordan_wigner_transform(i, lattice_length))
Nstates = 2**lattice_length
emptySystem = np.zeros(Nstates)
emptySystem[0] = 1
print(emptySystem)
# load one atom into site one
psi0 = loweringOp[1].T.dot(emptySystem)
print(psi0)
###Output
[1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
###Markdown
measurement
###Code
number_operators = []
for i in range(lattice_length):
number_operators.append(loweringOp[i].T.conj().dot(loweringOp[i]))
probs = np.abs(psi0)**2
resultInd = np.random.choice(np.arange(Nstates), p=probs, size = 1)
print(resultInd)
result = np.zeros(Nstates)
result[resultInd[0]] = 1
print(result)
measurements = np.zeros(lattice_length, dtype = int)
for i in range(lattice_length):
observed = number_operators[i].dot(result)
observed = observed.dot(result)
measurements[i] = int(observed)
print(measurements)
###Output
[4]
[0. 0. 0. 0. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
[0 1 0 0]
###Markdown
construct the hopping operator
###Code
emptySystem = 1j*np.zeros(Nstates)
emptySystem[0] = 1
print(emptySystem)
# load two atoms into site one
psi0 = loweringOp[0].T.dot(emptySystem)
psi0 = loweringOp[1].T.dot(psi0)
print(psi0)
###Output
[1.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j
0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j]
[ 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j 0.+0.j
0.+0.j 0.+0.j 0.+0.j -1.+0.j 0.+0.j 0.+0.j 0.+0.j]
###Markdown
couple two neighboring sites
###Code
theta = np.pi/2;
latt_ind = [0, 1, 2, 3];
# couple spin down sites with even indices
Hhop = loweringOp[latt_ind[0]].T.dot(loweringOp[latt_ind[2]]) + loweringOp[latt_ind[2]].T.dot(loweringOp[latt_ind[0]])
# couple spin up sites with odd indices
Hhop += loweringOp[latt_ind[1]].T.dot(loweringOp[latt_ind[3]]) + loweringOp[latt_ind[3]].T.dot(loweringOp[latt_ind[1]])
Uhop = expm(-1j*theta*Hhop);
psi = np.dot(Uhop,psi0)
psi
###Output
_____no_output_____
###Markdown
and measure
###Code
measurement_indices = [0,1,2,3]
n_shots = 5
probs = np.abs(psi)**2
resultInd = np.random.choice(np.arange(Nstates), p=probs, size = n_shots)
measurements = np.zeros((n_shots, len(measurement_indices)), dtype = int)
for jj in range(n_shots):
result = np.zeros(Nstates)
result[resultInd[jj]] = 1
for ii, ind in enumerate(measurement_indices):
observed = number_operators[ind].dot(result)
observed = observed.dot(result)
measurements[jj,ii] = int(observed)
measurements
###Output
_____no_output_____
###Markdown
interactions
###Code
emptySystem = 1j*np.zeros(Nstates)
emptySystem[0] = 1
print(emptySystem)
# load two atoms into site one
psi0 = loweringOp[0].T.dot(emptySystem)
psi0 = loweringOp[1].T.dot(psi0)
print(psi0)
number_operators = []
for i in range(lattice_length):
number_operators.append(loweringOp[i].T.conj().dot(loweringOp[i]))
# interaction Hamiltonian
Hint = 0 * number_operators[0]
for ii in range(l):
spindown_ind = 2*ii;
spinup_ind = 2*ii+1;
Hint += number_operators[spindown_ind].dot(number_operators[spinup_ind])
np.array(number_operators).sum(axis=0)
###Output
_____no_output_____
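###Markdown
For illustration (a sketch added here): with both spins loaded on the first site, the doubly occupied state should carry exactly one unit of interaction energy.
###Code
# sketch: expectation value <psi0| Hint |psi0> equals 1 for the doubly occupied first site
interaction_energy = psi0.conj().dot(Hint.dot(psi0))
assert np.isclose(interaction_energy, 1.0)
###Output
_____no_output_____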
###Markdown
Why do I have six states with two atoms? Because with two atoms distributed over four modes there are $\binom{4}{2} = 6$ basis states.
###Code
Hint
###Output
_____no_output_____ |
notebooks/first-model-DavidVollendroff.ipynb | ###Markdown
Initial Data Exploration
###Code
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
The Kaggle Data
###Code
leafly_csv_url = 'https://raw.githubusercontent.com/med-cabinet-5/data-science/master/data/cannabis.csv'
###Output
_____no_output_____
###Markdown
Supplementary data referencing ailments from the app 'Kushy'
###Code
kushy_strains_csv_url = 'https://raw.githubusercontent.com/kushyapp/cannabis-dataset/master/Dataset/Strains/strains-kushy_api.2017-11-14.csv'
leafly = pd.read_csv(leafly_csv_url)
kushy = pd.read_csv(kushy_strains_csv_url)
leafly.head()
kushy.head()
kushy.isnull().sum()
###Output
_____no_output_____
###Markdown
Most of the information is in chemical analysis, which we don't care about, but we do gain ~1000 ailment observations
###Code
leafly.shape
leafly.isnull().sum()
###Output
_____no_output_____
###Markdown
In order to maintain our ability to provide standardized output we'll drop a small portion of the data
###Code
leafly = leafly.dropna(subset=['Flavor', 'Description'])
###Output
_____no_output_____
###Markdown
The only merge-able data of interest
###Code
kushy_clean = kushy[['name', 'ailment']]
kushy_clean.shape
kushy_clean.isnull().sum()
kushy_clean = kushy_clean.dropna()
kushy_clean.shape
merged = pd.merge(leafly, kushy_clean, left_on='Strain', right_on='name', how='left' )
merged.shape
merged.loc[19]['Description']
merged.loc[19]['ailment']
###Output
_____no_output_____
###Markdown
Dropping the rating and retaining only string information
###Code
merged_strings = merged.drop(['Rating'], axis=1)
merged_strings = merged_strings.fillna('')
merged_strings.isnull().sum()
merged_strings.dtypes
###Output
_____no_output_____
###Markdown
Concatenating all text such that it is easy to process for our NLP model
###Code
merged_strings['all_text'] = merged_strings.apply(lambda x: ' '.join(x), axis=1)
merged_strings['all_text'][0]
###Output
_____no_output_____
###Markdown
NLP Time
###Code
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
nlp = spacy.load("en_core_web_lg")
def lemma_producer(text):
"""
tokenizes string, returning a list of lemmas
"""
lemmas = []
processed_text = nlp(text)
for token in processed_text:
if ((token.is_stop == False) and (token.is_punct == False)) and (token.pos_!= 'PRON'):
lemmas.append(token.lemma_)
return ' '.join(lemmas)
merged_strings['lemmas'] = merged_strings['all_text'].apply(lemma_producer)
merged_strings['lemmas'][0]
tfidf = TfidfVectorizer(stop_words="english", min_df=0.025, max_df=0.98, ngram_range=(1,3))
dtm = tfidf.fit_transform(merged_strings['lemmas'])
dtm = pd.DataFrame(dtm.todense(), columns=tfidf.get_feature_names())
dtm.head()
###Output
_____no_output_____
###Markdown
Modeling
###Code
from sklearn.neighbors import NearestNeighbors
model = NearestNeighbors(n_neighbors=3, algorithm='kd_tree')
model.fit(dtm)
test_string = ['I have fibromyalgia and I want pain relief']
test_string = tfidf.transform(test_string)
test_string = test_string.todense()
predictions = model.kneighbors(test_string)
predictions # (distance, index)
predictions[1][0][0] # best match
best_match = predictions[1][0][0]
merged_strings.iloc[best_match]
recommended_strain = merged_strings.iloc[best_match]
recommended_strain.drop(['name', 'ailment', 'all_text', 'lemmas']).to_dict()
returned_values = recommended_strain.drop(['name', 'ailment', 'all_text', 'lemmas']).to_dict()
returned_values
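# --- Added sketch (not part of the original notebook): wrap the lookup above in a
# --- reusable helper, so any free-text symptom description can be queried.
def recommend_strain(user_text, n_neighbors=1):
    """Return the closest strain rows for a free-text request (hypothetical helper)."""
    vec = tfidf.transform([user_text]).todense()
    _, indices = model.kneighbors(vec, n_neighbors=n_neighbors)
    return merged_strings.iloc[indices[0]]
# example call (commented out so the cell's behaviour is unchanged):
# recommend_strain('I struggle with insomnia and anxiety')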
###Output
_____no_output_____ |
04_LoopControl.ipynb | ###Markdown
LoopControl
###Code
#export
from bfh_mt_hs2020_rl_basics.bridge import BridgeBase
from datetime import timedelta, datetime
import time
from ignite.engine import Engine
from ignite.metrics import RunningAverage
from ignite.contrib.handlers.tensorboard_logger import TensorboardLogger, OutputHandler
from ptan.ignite import EndOfEpisodeHandler, EpisodeEvents, PeriodicEvents, PeriodEvents
class TimeHandler:
TIME_PASSED_METRIC = 'time_passed'
def __init__(self):
self._started_ts = time.time()
def attach(self, engine: Engine):
engine.add_event_handler(EpisodeEvents.EPISODE_COMPLETED, self)
def __call__(self, engine: Engine):
engine.state.metrics[self.TIME_PASSED_METRIC] = time.time() - self._started_ts
class LoopControl:
def __init__(self, bridge:BridgeBase, run_name:str, bound_avg_reward:float=1000.0, logtb:bool = False):
self.bridge = bridge
self.run_name = run_name
self.engine = Engine(self.bridge.process_batch)
# This handler has several problems: it does more than one thing, and it also
# needs direct access to the agent's experience source (exp_source).
# That could be refactored.
EndOfEpisodeHandler(self.bridge.agent.exp_source, bound_avg_reward = bound_avg_reward).attach(self.engine)
TimeHandler().attach(self.engine)
RunningAverage(output_transform=lambda v: v['loss']).attach(self.engine, "avg_loss")
PeriodicEvents().attach(self.engine) # creates periodic events
@self.engine.on(EpisodeEvents.EPISODE_COMPLETED)
def episode_completed(trainer: Engine):
passed = trainer.state.metrics.get('time_passed', 0)
print("Episode %d: reward=%.0f, steps=%s, "
"elapsed=%s" % (
trainer.state.episode, trainer.state.episode_reward,
trainer.state.episode_steps,
timedelta(seconds=int(passed))))
@self.engine.on(EpisodeEvents.BOUND_REWARD_REACHED)
def game_solved(trainer: Engine):
passed = trainer.state.metrics['time_passed']
print("Game solved in %s, after %d episodes "
"and %d iterations!" % (
timedelta(seconds=int(passed)),
trainer.state.episode, trainer.state.iteration))
trainer.should_terminate = True
if logtb:
tb = self._create_tb_logger()
handler = OutputHandler(tag="episodes", metric_names=['reward', 'steps', 'avg_reward'])
tb.attach(self.engine, log_handler=handler, event_name=EpisodeEvents.EPISODE_COMPLETED)
handler = OutputHandler(tag="train", metric_names=['avg_loss'], output_transform=lambda a: a)
tb.attach(self.engine, log_handler=handler, event_name=PeriodEvents.ITERS_100_COMPLETED)
def _create_tb_logger(self) -> TensorboardLogger:
now = datetime.now().isoformat(timespec='minutes')
now = now.replace(":", "")
logdir = f"runs/{now}-{self.run_name}"
return TensorboardLogger(log_dir=logdir)
def run(self):
self.engine.run(self.bridge.batch_generator())
from bfh_mt_hs2020_rl_basics.agent import SimpleAgent
from bfh_mt_hs2020_rl_basics.bridge import SimpleBridge
from bfh_mt_hs2020_rl_basics.env import CarEnv
import torch
def basic_init_bridge() -> SimpleBridge:
env = CarEnv()
agent = SimpleAgent(env, torch.device("cpu"), gamma=0.9, buffer_size=1000)
bridge = SimpleBridge(agent, gamma=0.9)
return bridge
def basic_init():
bridge = basic_init_bridge()
LoopControl(bridge, "dummy")
basic_init()
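# --- Added sketch (hypothetical usage, not executed here): to actually train,
# --- build the bridge and run the loop, optionally logging to TensorBoard:
# bridge = basic_init_bridge()
# loop = LoopControl(bridge, run_name="car-simple", bound_avg_reward=500.0, logtb=True)
# loop.run()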
###Output
_____no_output_____ |
Week1/Week1_Task_2_image.ipynb | ###Markdown
Task Image: Dataset Link:RGB Dataset can be found at " /data/rgb-images/ " in the respective challenge's repo.DICOM Dataset can be found at " /data/dicom-images/ " in the respective challenge's repo. Description:Images need to be preprocessed before feeding them into computer vision algorithms. Common forms of image data are: 2D, RGB, DICOM format, satellite images and 4D images. 2D images are grayscale images, RGB images are 3-channeled images representing the colour value of each pixel, DICOM format is the standard for the communication and management of medical imaging information and related data, and 4D images (for example, brain MRI scans) are slices of 3D images stacked on top of each other. Objective:How to load and process various formats of images for machine learning (check out the helpful links section to get hints) Tasks:- Read the RGB images provided and store their numerical representation in a numpy array (matplotlib or PIL)- Plot RGB image '9.jpeg'- Print the dimensions of image '12.jpeg'- Convert any 5 images to grayscale and plot them- Read the DICOM images provided (dicom.read_file function)- Print the numerical representation of image '0009.DCM' (dicom_img.pixel_array attribute)- Plot any DICOM image (matplotlib.pyplot.imshow function) Further fun (will not be evaluated):- You already got familiar with complex unstructured data like RGB and DICOM images, let's apply those skills to 2D images as well. Download the famous MNIST Dataset from https://www.kaggle.com/c/digit-recognizer/data . Read those 2D images and explore the dataset. Try out edge detection using a Sobel filter without using any libraries other than numpy.- The DICOM format contains much more information than just pixel values. Explore the data further. Helpful Links:- Awesome tutorial on image processing with numpy: http://www.degeneratestate.org/posts/2016/Oct/23/image-processing-with-numpy/- Understand pydicom data structure and images - https://www.kaggle.com/avirdee/understanding-dicoms
###Code
# Import the required libraries
# Use terminal commands like "pip install numpy" to install packages
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# import PIL if and when required
import cv2
!pip install pydicom
import pydicom
im = plt.imread("9.jpeg")
im
def plti(im, h=8, **kwargs):
"""
Helper function to plot an image.
"""
y = im.shape[0]
x = im.shape[1]
w = (y/x) * h
plt.figure(figsize=(w,h))
plt.imshow(im, interpolation="none", **kwargs)
plt.axis('off')
plti(im)
im2 = plt.imread("12.jpeg")
im2.shape
def to_grayscale(im, weights = np.c_[0.2989, 0.5870, 0.1140]):
"""
Transforms a colour image to a greyscale image by
taking a weighted sum of the RGB channels, using
the given weights matrix (Rec. 601 luma coefficients by default)
"""
tile = np.tile(weights, reps=(im.shape[0],im.shape[1],1))
return np.sum(tile * im, axis=2)
img = to_grayscale(im)
plti(img, cmap='Greys')
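# --- Added sketch: convert a handful of the provided RGB images to grayscale and plot them.
# --- The file names below are assumptions; point them at the files in data/rgb-images/.
sample_files = ["9.jpeg", "12.jpeg"]  # extend to any 5 available images
for fname in sample_files:
    try:
        plti(to_grayscale(plt.imread(fname)), cmap='Greys')
    except FileNotFoundError:
        print(f"{fname} not found - skipping")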
dataset = pydicom.dcmread('./0009.DCM')
d = dataset.pixel_array.mean(axis=0)
print(d)
plt.imshow(d)
for i in range(dataset.pixel_array.shape[0]):
di = dataset.pixel_array[i,:,:]
plt.imshow(di,cmap=plt.cm.bone)
plt.show()
###Output
_____no_output_____ |
livedoor_news_multi.ipynb | ###Markdown
Overview: Here we use Google Colaboratory and Google Drive to perform multi-class classification of Japanese text with BERT. For the Japanese pretrained model we use the `BERT日本語Pretrainedモデル` published by the Kurohashi-Kawahara Lab at Kyoto University, and as the morphological analyzer we use `JUMAN`, published by the same lab. Because the pretrained model and the dataset are stored on Google Drive, at least 1.6 GB of free space is required. Overall workflow: 1. Install the morphological analyzer 1. Mount Google Drive 1. Save the dataset / BERT Japanese pretrained model to Google Drive 1. Convert the dataset into the format expected by BERT 1. Clone the BERT repository 1. Modify the programs 1. Fine-tune the pretrained model using the train data 1. Predict on the test data 1. Validate the prediction results. Installing the morphological analyzer: the `BERT日本語Pretrainedモデル` published by the Kurohashi-Kawahara Lab requires `JUMAN` as the morphological analyzer, so we install it here. For details, see [BERT日本語Pretrainedモデル - KUROHASHI-KAWAHARA LAB - Details](http://nlp.ist.i.kyoto-u.ac.jp/index.php?BERT日本語Pretrainedモデルr6199008)
###Code
!wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc2/jumanpp-2.0.0-rc2.tar.xz && \
tar xJvf jumanpp-2.0.0-rc2.tar.xz && \
rm jumanpp-2.0.0-rc2.tar.xz && \
cd jumanpp-2.0.0-rc2/ && \
mkdir bld && \
cd bld && \
cmake .. \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_INSTALL_PREFIX=/usr/local && \
make && \
sudo make install
###Output
_____no_output_____
###Markdown
Confirm the installation
###Code
!jumanpp -v
###Output
_____no_output_____
###Markdown
Change the TensorFlow version
###Code
%tensorflow_version 1.x
###Output
_____no_output_____
###Markdown
Installing pyknp: a library for running JUMAN from Python.
###Code
! pip install pyknp
###Output
_____no_output_____
###Markdown
Mounting Google Drive
###Code
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
###Markdown
Create and move to the working directory
###Code
!mkdir -p /content/drive/'My Drive'/bert/livedoor_news
cd /content/drive/'My Drive'/bert/livedoor_news
###Output
/content/drive/My Drive/bert/livedoor_news
###Markdown
Saving the dataset / BERT Japanese pretrained model to Google Drive. Downloading the livedoor news corpus.
###Code
import urllib.request
livedoor_news_url = "https://www.rondhuit.com/download/ldcc-20140209.tar.gz"
urllib.request.urlretrieve(livedoor_news_url, "ldcc-20140209.tar.gz")
###Output
_____no_output_____
###Markdown
Download and unzip the BERT Japanese pretrained model
###Code
kyoto_u_bert_url = "http://nlp.ist.i.kyoto-u.ac.jp/nl-resource/JapaneseBertPretrainedModel/Japanese_L-12_H-768_A-12_E-30_BPE.zip"
urllib.request.urlretrieve(kyoto_u_bert_url, "Japanese_L-12_H-768_A-12_E-30_BPE.zip")
!unzip Japanese_L-12_H-768_A-12_E-30_BPE.zip
###Output
_____no_output_____
###Markdown
Converting the dataset into the format expected by BERT. Creating the TSV file: this was written with reference to the article [BERT多言語モデルで日本語文章の二値分類を試す](https://qiita.com/knok/items/9e3b4505d6b8f813943d) (trying binary classification of Japanese text with the multilingual BERT model). It takes a little while until the file is saved to Google Drive.
###Code
import tarfile
import csv
import re
target_genre = [
"dokujo-tsushin",
"it-life-hack",
"kaden-channel",
"livedoor-homme",
"movie-enter",
"peachy",
"smax",
"sports-watch",
"topic-news"
]
fname_list = [[] for i in range(len(target_genre))]
tsv_fname = "all.tsv"
brackets_tail = re.compile('【[^】]*】$')
brackets_head = re.compile('^【[^】]*】')
def remove_brackets(inp):
output = re.sub(brackets_head, '',re.sub(brackets_tail, '', inp))
return output
def read_title(f):
next(f)
next(f)
title = next(f)
title = remove_brackets(title.decode('utf-8'))
return title[:-1]
with tarfile.open("ldcc-20140209.tar.gz") as tf:
for ti in tf:
if "LICENSE.txt" in ti.name:
continue
elif "CHANGES.txt" in ti.name:
continue
elif "README.txt" in ti.name:
continue
else:
for i, t in enumerate(target_genre):
if target_genre[i] in ti.name and ti.name.endswith(".txt"):
fname_list[i].append(ti.name)
continue
with open(tsv_fname, "w") as wf:
writer = csv.writer(wf, delimiter='\t')
for i, fcategory in enumerate(fname_list):
for name in fcategory:
f = tf.extractfile(name)
title = read_title(f)
row = [target_genre[i], i, '', title]
writer.writerow(row)
###Output
_____no_output_____
###Markdown
Splitting into train / dev / test data. Run this only after all.tsv has been generated on Google Drive.
###Code
import random
random.seed(100)
with open("all.tsv", 'r') as f, open("rand-all.tsv", "w") as wf:
lines = f.readlines()
random.shuffle(lines)
for line in lines:
wf.write(line)
random.seed(101)
train_fname, dev_fname, test_fname = ["train.tsv", "dev.tsv", "test.tsv"]
with open("rand-all.tsv") as f, open(train_fname, "w") as tf, open(dev_fname, "w") as df, open(test_fname, "w") as ef:
ef.write("class\tsentence\n")
for line in f:
v = random.randint(0, 9)
if v == 8:
df.write(line)
elif v == 9:
ef.write(line)
else:
tf.write(line)
###Output
_____no_output_____
###Markdown
Clone the BERT repository
###Code
!git clone https://github.com/google-research/bert.git
###Output
_____no_output_____
###Markdown
Modifying the programs: following the [Qiita article](https://qiita.com/Yuu94/items/0e5cff226bd3cc8fb08c), modify `run_classifier.py` (saved as `run_classifier_livedoor.py` in the commands below) and `tokenization.py`. A rough sketch of the kind of processor involved is shown below.
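The class name, label set, and column indices in this sketch are assumptions based on the TSV files created above, not the article's exact code.
###Code
# Hypothetical sketch of a DataProcessor subclass to add inside run_classifier.py
# (shown for reference only, not executed in this notebook). It assumes the 4-column
# TSV layout written above: genre name, label index, empty column, title.
# It also has to be registered as "livedoor": LivedoorProcessor in the processors dict.
class LivedoorProcessor(DataProcessor):
    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
    def get_test_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
    def get_labels(self):
        return [str(i) for i in range(9)]  # the nine livedoor news categories
    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if set_type == "test" and i == 0:
                continue  # skip the header row written into test.tsv
            guid = "%s-%s" % (set_type, i)
            text_a = tokenization.convert_to_unicode(line[3])
            label = tokenization.convert_to_unicode(line[1])
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
        return examples
###Output
_____no_output_____
###Markdown
Fine-tune the pretrained model using the train data.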
###Code
!python bert/run_classifier_livedoor.py \
--task_name=livedoor \
--do_train=true \
--do_eval=true \
--data_dir=./ \
--vocab_file=./Japanese_L-12_H-768_A-12_E-30_BPE/vocab.txt \
--bert_config_file=./Japanese_L-12_H-768_A-12_E-30_BPE/bert_config.json \
--init_checkpoint=./Japanese_L-12_H-768_A-12_E-30_BPE/bert_model.ckpt \
--max_seq_length=128 \
--train_batch_size=32 \
--learning_rate=2e-5 \
--num_train_epochs=3.0 \
--output_dir=./tmp/livedoor_news_output_fine \
--do_lower_case False
###Output
_____no_output_____
###Markdown
Predicting on the test data
###Code
!python bert/run_classifier_livedoor.py \
--task_name=livedoor \
--do_predict=true \
--data_dir=./ \
--vocab_file=./Japanese_L-12_H-768_A-12_E-30_BPE/vocab.txt \
--bert_config_file=./Japanese_L-12_H-768_A-12_E-30_BPE/bert_config.json \
--init_checkpoint=./tmp/livedoor_news_output_fine \
--max_seq_length=128 \
--output_dir=tmp/livedoor_news_output_predic/
###Output
_____no_output_____
###Markdown
Validating the prediction results
###Code
import csv
import numpy as np
with open("./test.tsv") as f, open("tmp/livedoor_news_output_predic/test_results.tsv") as rf:
test = csv.reader(f, delimiter = '\t')
test_result = csv.reader(rf, delimiter = '\t')
# Extract the ground-truth labels
next(test)
test_list = [int(row[1]) for row in test ]
# Extract the predicted labels
result_list = []
for result in test_result:
max_index = np.argmax(result)
result_list.append(max_index)
# Write the predicted category numbers to a CSV file
with open('tmp/livedoor_news_output_predic/test_results.csv', 'w') as of:
writer = csv.writer(of)
for row in result_list:
writer.writerow([row])
test_count = len(test_list)
result_correct_answer_list = [result for test, result in zip(test_list, result_list) if test == result]
result_correct_answer_count = len(result_correct_answer_list)
print("正解率: ", result_correct_answer_count / test_count)
###Output
_____no_output_____ |
4.databases/exercises/2.genesis.ipynb | ###Markdown
The genesis of the CSV: You are given a set of score results, obtained with an association measure called *Pointwise Mutual Information*, for the twenty most frequent bigrams in the French version of *Genesis*. Your goal is to save this information to a file in CSV format.
###Code
data = [(('Poti', 'Phéra'), 13.908017261062806),
(('cheveux', 'blancs'), 13.908017261062806),
(('plusieurs', 'couleurs'), 13.908017261062806),
(('Lachaï', 'roï'), 13.492979761783962),
(('Très', 'Haut'), 13.49297976178396),
(('bonne', 'santé'), 12.908017261062803),
(('Des', 'bénédictions'), 12.49297976178396),
(('Faites', 'ceci'), 12.270587340447515),
(('herbe', 'portant'), 12.270587340447515),
(('Soyez', 'féconds'), 12.171051666896599),
(('Beer', 'Schéba'), 12.03354814314666),
(('chefs', 'issus'), 11.908017261062803),
(('êtres', 'vivants'), 11.908017261062803),
(('rendrai', 'fécond'), 11.685624839726358),
(('seras', 'dégagé'), 11.685624839726358),
(('Paddan', 'Aram'), 11.655036519892933),
(('enfui', 'dehors'), 11.618510643867822),
(('Au', 'bout'), 11.434086072730395),
(('êtes', 'sincères'), 11.434086072730395),
(('ou', 'non'), 11.296582548980458)]
# Your code here
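# One possible solution (a sketch; the file name "genesis_bigrams.csv" is an arbitrary choice):
import csv

with open("genesis_bigrams.csv", "w", newline="", encoding="utf-8") as f:
    writer = csv.writer(f)
    writer.writerow(["word_1", "word_2", "pmi_score"])  # header row
    for (word_1, word_2), score in data:
        writer.writerow([word_1, word_2, score])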
###Output
_____no_output_____ |
Homework2_Moustaghit_Andreani_Serra.ipynb | ###Markdown
Homework 2Steam Reviews 2021Filippo Andreani Mossaab Moustaghit Eleonora Serra Research Questions
###Code
!pip install squarify
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import math
import pandas as pd
import seaborn as sns
import squarify
import datetime
pd.__version__
###Output
_____no_output_____
###Markdown
Installing Kaggle to download the dataset (don't execute this )
###Code
! pip install -q kaggle
from google.colab import files
files.upload()
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
! kaggle datasets list
!kaggle datasets download -d najzeko/steam-reviews-2021
! unzip steam-reviews-2021.zip -d steam_reviews/
###Output
unzip: cannot find or open steam-reviews-2021.zip, steam-reviews-2021.zip.zip or steam-reviews-2021.zip.ZIP.
###Markdown
Mounting the drive
###Code
from google.colab import drive
drive.mount('/content/drive/',force_remount=True)
###Output
Mounted at /content/drive/
###Markdown
Dont run the cell below
###Code
!cp -r /content/steam_reviews/steam_reviews.csv /content/drive/MyDrive/ADMHW2/steam_reviews.csv
###Output
cp: cannot stat '/content/steam_reviews/steam_reviews.csv': No such file or directory
###Markdown
Reading Dataset We chose to work only on 1000000 rows because the RAM gets saturated after that.
###Code
data= pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",nrows=1000000)
data.shape
data.describe()
###Output
_____no_output_____
###Markdown
Data Wrangling
###Code
data.head()
data.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 1000000 non-null int64
1 app_id 1000000 non-null int64
2 app_name 1000000 non-null object
3 review_id 1000000 non-null int64
4 language 1000000 non-null object
5 review 998242 non-null object
6 timestamp_created 1000000 non-null int64
7 timestamp_updated 1000000 non-null int64
8 recommended 1000000 non-null bool
9 votes_helpful 1000000 non-null int64
10 votes_funny 1000000 non-null int64
11 weighted_vote_score 1000000 non-null float64
12 comment_count 1000000 non-null int64
13 steam_purchase 1000000 non-null bool
14 received_for_free 1000000 non-null bool
15 written_during_early_access 1000000 non-null bool
16 author.steamid 1000000 non-null int64
17 author.num_games_owned 1000000 non-null int64
18 author.num_reviews 1000000 non-null int64
19 author.playtime_forever 1000000 non-null float64
20 author.playtime_last_two_weeks 1000000 non-null float64
21 author.playtime_at_review 997686 non-null float64
22 author.last_played 1000000 non-null float64
dtypes: bool(4), float64(5), int64(11), object(3)
memory usage: 148.8+ MB
###Markdown
As we can see in the first 5 rows, the date format is not well suited for analysis, and the number of reviews doesn't match the number of rows we are working on (1,000,000), which means some rows have empty reviews. For these reasons we should clean the data.
###Code
# First let's change the format of the date
def parsedate(time_as_a_unix_timestamp):
return pd.to_datetime(time_as_a_unix_timestamp, unit='s')
dataset = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",nrows=1000000,parse_dates=['timestamp_created','timestamp_updated', 'author.last_played'],date_parser=parsedate)
dataset.head()
###Output
_____no_output_____
###Markdown
We can also drop the Unamed and app_id columns which don't give us any information
###Code
dataset.drop(dataset[["app_id","Unnamed: 0"]],axis=1,inplace=True)
dataset.describe()
## Lets check for null values
dataset.isnull().sum()
###Output
_____no_output_____
###Markdown
As we can see, 1,758 reviews are missing and 2,314 values of author.playtime_at_review are missing too.
###Code
print("number of NaN values for the column review :", dataset['review'].isnull().sum())
print("number of NaN values for the column author.playtime_at_review :", dataset['author.playtime_at_review'].isnull().sum())
###Output
number of NaN values for the column review : 1758
number of NaN values for the column author.playtime_at_review : 2314
###Markdown
Normally, if the review column were numeric, we could have replaced the NaN values with the column mean, but that is not the case here. We thought about dropping those rows, but is that pertinent? We'll keep it like this for now. [RQ1] Exploratory Data Analysis (EDA) We can visualize the missing data in our dataset:
###Code
sns.heatmap(dataset.isnull(),cbar=False,cmap='viridis')
###Output
_____no_output_____
###Markdown
As we said above, the two columns with missing data are review and author.playtime_at_review. Let's count the number of games we have in the dataset.
###Code
dataset = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name'])
games_count=dataset['app_name'].value_counts().to_frame()
games_count=games_count.sort_values('app_name',ascending=False)
games_count
###Output
_____no_output_____
###Markdown
As we can see we have 315 games reviewed in the dataset.
###Code
plt.rcParams['figure.figsize']=(8,8)
plt.pie(games_count[0:10],labels=dataset.app_name.value_counts()[0:10].index,shadow=True,autopct='%.1f%%')
plt.legend()
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: MatplotlibDeprecationWarning: Non-1D inputs to pie() are currently squeeze()d, but this behavior is deprecated since 3.1 and will be removed in 3.3; pass a 1D array instead.
###Markdown
We can't plot all the games, so we have chosen to plot only the top 10; the most reviewed game is PLAYERUNKNOWN'S BATTLEGROUNDS. Let's do the same thing for the languages.
###Code
dataset = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['language'])
language_count=dataset['language'].value_counts().to_frame()
language_count.apply(lambda x: 100*x/x.sum())
###Output
_____no_output_____
###Markdown
As we can see, English is the most used language in the reviews, representing 44.3% of the dataset.
###Code
plt.rcParams['figure.figsize']=(8,8)
plt.pie(language_count[0:10],labels=dataset.language.value_counts().index[0:10],shadow=True,autopct='%.1f%%')
plt.legend()
plt.show()
###Output
/usr/local/lib/python3.7/dist-packages/ipykernel_launcher.py:2: MatplotlibDeprecationWarning: Non-1D inputs to pie() are currently squeeze()d, but this behavior is deprecated since 3.1 and will be removed in 3.3; pass a 1D array instead.
###Markdown
We couldn't plot all the languages, so we plotted only the first 10; English represents 47% of these top 10 languages. As we can see, we have many languages and this plot is not so clear, so let's look at the 10 most used languages with a treemap instead.
###Code
top_language_n = 10
top_language = dataset.loc[:,'language'].value_counts()\
[:top_language_n].sort_values(ascending=False)
squarify.plot(sizes=top_language, label=top_language.index.array,\
color=["red","green","orange","blue","grey","yellow","pink","black","white","magenta"], alpha=.7 )
plt.axis('off')
plt.show()
###Output
_____no_output_____
###Markdown
Let's see if there is any correlation between the games and the language.
###Code
dataset = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name','language'])
###Output
_____no_output_____
###Markdown
[RQ2] Let's explore the dataset by finding simple insights into the reviews.
###Code
dataset1 = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name','review'])
###Output
_____no_output_____
###Markdown
Let's plot the number of reviews for each application in descending order.
###Code
numbofreviews=dataset1.groupby('app_name',sort=True).review.count().sort_values(ascending=False)
numbofreviews.to_frame()
###Output
_____no_output_____
###Markdown
Plotting all the applications won't be clear, so we chose to plot only the top 10.
###Code
plt.rcParams['figure.figsize']=(20,12)
numbofreviews[:10].plot(kind="bar")
plt.xticks(horizontalalignment="center")
plt.title("Number of reviews per application")
plt.xlabel("Application")
plt.ylabel("number of reviews")
###Output
_____no_output_____
###Markdown
What applications have the best Weighted Vote Score?
###Code
dataset2 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','weighted_vote_score'])
sortedbywvs=dataset2.sort_values('weighted_vote_score',ascending=False)
sortedbywvs=sortedbywvs.drop_duplicates(subset='app_name')
print("The 20 best Weighted Vote Score applications are ")
for app_name in sortedbywvs[:20].app_name.unique():
print(f'{app_name} ')
###Output
The 20 best Weighted Vote Score applications are
Stardew Valley
Divinity: Original Sin 2
Subnautica
Mirror
Wallpaper Engine
Terraria
The Forest
Monster Hunter: World
The Elder Scrolls Online
Human: Fall Flat
DARK SOULS™ III
No Man's Sky
Undertale
Kenshi
DEATH STRANDING
Watch_Dogs 2
Darkest Dungeon®
Frostpunk
The Elder Scrolls V: Skyrim
The Witcher 3: Wild Hunt
###Markdown
Which applications have the most and the least recommendations?
###Code
dataset3 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','recommended'])
appgroup=dataset3.groupby('app_name').sum()
sortrecommendf=appgroup.sort_values('recommended', ascending =False).reset_index()
sortrecommendf
sortrecommendf['app_name'][0]
sortrecommendf['app_name'][314]
###Output
_____no_output_____
###Markdown
How many of these applications were purchased, and how many were given for free?
###Code
dataset3=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name','received_for_free','steam_purchase'])
dataset3.groupby('app_name').received_for_free.value_counts()
###Output
_____no_output_____
###Markdown
As we can see, most games have reviews both from copies that were purchased and from copies received for free.
###Code
group1=dataset3.loc[(dataset3['received_for_free']==True) & (dataset3['steam_purchase']==False)]
group1.count()
group2=dataset3.loc[(dataset3['received_for_free']==False) & (dataset3['steam_purchase']==True)]
group2.count()
###Output
_____no_output_____
###Markdown
So 359,041 were given for free and 16,513,412 were purchased. We have considered as purchased only the apps that were bought on Steam, using the variable 'steam_purchase'. RQ3 (note: the dataset path used here is "/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv")
###Code
def parsedate(time_as_a_unix_timestamp):
return pd.to_datetime(time_as_a_unix_timestamp, unit='s')
dataset5 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','timestamp_created'],parse_dates=['timestamp_created'],date_parser=parsedate)
###Output
_____no_output_____
###Markdown
What is the most common time that authors review an application?
###Code
dataset5['timestamp_created'] = dataset5['timestamp_created'].dt.floor('Min')
dataset5['timestamp_created'].head()
dataset5['new_date'] = [d.date() for d in dataset5['timestamp_created']]
dataset5['new_time'] = [d.time() for d in dataset5['timestamp_created']]
dataset5.head()
dataset5.drop('timestamp_created', axis=1, inplace=True)
dataset5.drop('new_date', axis=1, inplace=True)
dataset5.head()
groups=dataset5.groupby('new_time').count()
sortreview=groups.sort_values('app_name',ascending= False).reset_index()
sortreview.rename(columns={'app_name':'Num_times'})
groups.idxmax()
groups.idxmin()
###Output
_____no_output_____
###Markdown
14:50 is the most common time at which authors review an application, while 06:02 is the least common time. Create a function that receives as a parameter a list of time intervals and plots the number of reviews for each of the intervals.
###Code
dataset5 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['review_id','app_name','timestamp_created'],parse_dates=['timestamp_created'],date_parser=parsedate)
dataset5
def revperint(t_col):
t_col = [datetime.datetime.strptime(t, '%H:%M:%S') for t in t_col]
sec_1 = []
min_1 = []
ora_1 = []
number_reviews = []
for i in range(len(t_col)):
sec_1.append(t_col[i].time().second)
min_1.append(t_col[i].time().minute)
ora_1.append(t_col[i].time().hour)
for i in range(0, len(t_col), 2):
number_reviews.append(dataset5[((dataset5['timestamp_created'].dt.hour >= ora_1[i]) & (dataset5['timestamp_created'].dt.minute >= min_1[i])) & (dataset5['timestamp_created'].dt.hour <= ora_1[i+1]) & (dataset5['timestamp_created'].dt.minute <= min_1[i+1]) & (dataset5['timestamp_created'].dt.second <= sec_1[i+1])].review_id.count())
xx = ['5am', '9am', '1pm', '4pm', '9pm', '12am', '2am']
plt.bar(xx, number_reviews, color = 'green')
plt.yscale('log')
plt.yticks([2000000, 2500000, 3000000, 3500000, 4000000])
plt.title('Number of reviews per interval')
plt.xlabel('Intervals')
plt.ylabel('Number of reviews')
plt.show()
int = ['05:00:00', '08:59:59', '09:00:00', '12:59:59', '13:00:00', '15:59:59',
'16:00:00', '20:59:59', '21:00:00', '23:59:59', '00:00:00', '01:59:59', '02:00:00',
'04:59:59']
revperint(int)
###Output
_____no_output_____
###Markdown
intervals = ['06:00:00', '10:59:59', '11:00:00', '13:59:59', '14:00:00', '16:59:59', '17:00:00', '19:59:59', '20:00:00', '23:59:59', '00:00:00', '02:59:59', '03:00:00', '05:59:59']
###Code
def revperint(t_col):
t_col = [datetime.datetime.strptime(t, '%H:%M:%S') for t in t_col]
sec_1 = []
min_1 = []
ora_1 = []
number_reviews = []
for i in range(len(t_col)):
sec_1.append(t_col[i].time().second)
min_1.append(t_col[i].time().minute)
ora_1.append(t_col[i].time().hour)
for i in range(0, len(t_col), 2):
number_reviews.append(dataset5[((dataset5['timestamp_created'].dt.hour >= ora_1[i]) & (dataset5['timestamp_created'].dt.minute >= min_1[i])) & (dataset5['timestamp_created'].dt.hour <= ora_1[i+1]) & (dataset5['timestamp_created'].dt.minute <= min_1[i+1]) & (dataset5['timestamp_created'].dt.second <= sec_1[i+1])].review_id.count())
xx = ['6am', '11am', '2pm', '5pm', '8pm', '12am', '3am']
plt.bar(xx, number_reviews, color = 'green')
plt.yscale('log')
plt.yticks([2000000, 2500000, 3000000, 3500000, 4000000])
plt.title('Number of reviews per interval')
plt.xlabel('Intervals')
plt.ylabel('Number of reviews')
plt.show()
intervals = ['06:00:00', '10:59:59', '11:00:00', '13:59:59', '14:00:00', '16:59:59',
'17:00:00', '19:59:59', '20:00:00', '23:59:59', '00:00:00', '02:59:59', '03:00:00',
'05:59:59']
revperint(intervals)
###Output
_____no_output_____
###Markdown
RQ4 What are the top 3 languages used to review applications?
###Code
dataset6 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','language'])
top_threelanguage_n = 3
top_threelanguage = dataset6.loc[:,'language'].value_counts()\
[:top_threelanguage_n].sort_values(ascending=False)
top_threelanguage
###Output
_____no_output_____
###Markdown
The top three languages used to review applications are English, Simplified Chinese (schinese), and Russian.
###Code
plt.rcParams['figure.figsize']=(8,8)
top_threelanguage.plot(kind="bar")
plt.rcParams['figure.figsize']=(8,8)
plt.pie(top_threelanguage,labels=top_threelanguage.index,shadow=True,autopct='%.1f%%')
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Create a function that receives as parameters both the name of a data set and a list of languages’ names and returns a data frame filtered only with the reviews written in the provided languages.
###Code
def reviewslang(df,languagelist):
boolean_series = df.language.isin(languagelist)
df = df[boolean_series]
return df
reviewslang(dataset6,['english','russian'])
###Output
_____no_output_____
###Markdown
Use the function created in the previous literal to find what percentage of these reviews (associated with the top 3 languages) were voted as funny?
###Code
dataset6 = pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name','language','votes_funny'])
rv=(reviewslang(dataset6,['english','russian','schinese'])[dataset6.votes_funny == 1].count().app_name/dataset6.count().app_name)
"{:.0%}".format(rv)
###Output
_____no_output_____
###Markdown
Only 6% of these reviews were voted as funny. Use the function created in literal "a" to find what percentage of these reviews (associated with the top 3 languages) were voted as helpful.
###Code
dataset7=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['app_name','language','votes_helpful'])
rv1=(reviewslang(dataset7,['english','russian','schinese'])[dataset7.votes_helpful == 1].count().app_name/dataset7.count().app_name)
"{:.0%}".format(rv1)
###Output
_____no_output_____
###Markdown
12% of these reviews were voted as helpful. [RQ5] Plot the top 10 most popular reviewers and the number of reviews. Popularity considering the number of reviews (1)
###Code
dataset8=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['author.steamid','author.num_reviews'])
authornumreviewssorted=dataset8.sort_values(by='author.num_reviews',ascending=False).drop_duplicates('author.steamid')
popauthors=authornumreviewssorted[:10]
popauthors
###Output
_____no_output_____
###Markdown
Popularity considering the number of reviews registered in this dataset (2)
###Code
populars = dataset8['author.steamid'].value_counts()  # use dataset8, which contains author.steamid (dataset3 does not at this point)
populars[0:10]
###Output
_____no_output_____
###Markdown
These are the most popular authors by the number of their reviews in this dataset. What applications did the most popular author review? From (1), the most popular author's Steam ID is 76561198103272004.
###Code
dataset3=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['author.steamid','app_name'])
popauthorgames=dataset3.loc[dataset3['author.steamid'] == 76561198103272004 ]
popauthorgames
###Output
_____no_output_____
###Markdown
The most popular author reviewed one game in this dataset, which is Grand Theft Auto V. From (2), the most popular author's Steam ID is 76561198062813911.
###Code
popauthorgames1=dataset3.loc[dataset3['author.steamid'] == 76561198062813911 ]
popauthorgames1
popauthorgames1.drop_duplicates(subset=['app_name']).shape
###Output
_____no_output_____
###Markdown
So he reviewed 148 games. How many applications did he purchase, and how many did he get as free? Provide the number (count) and the percentage.
###Code
dataset3=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['author.steamid','app_name','received_for_free','steam_purchase'])
###Output
_____no_output_____
###Markdown
From (1), the most popular author's steam id is 76561198103272004.
###Code
popapp=dataset3.loc[dataset3['author.steamid'] == 76561198103272004 ]
popapp[(popapp['received_for_free'] == False ) & (popapp['steam_purchase'] == True)].count()
###Output
_____no_output_____
###Markdown
As we can see in this first case, the author purchased the one application he reviewed, which makes 100% purchased and 0% free. From (2), the most popular author's Steam ID is 76561198062813911.
###Code
popapp1=dataset3.loc[dataset3['author.steamid'] == 76561198062813911 ]
popapp1=popapp1.drop_duplicates('app_name')
popapp1[(popapp1['received_for_free'] == False ) & (popapp1['steam_purchase'] == True)].count()
popapp[(popapp['received_for_free'] == True ) & (popapp['steam_purchase'] == False)].count()
###Output
_____no_output_____
###Markdown
As we can see in this second case, the author purchased the 108 applications that he reviewed, which are ALL the applications he reviewed. This makes 100% purchased and 0% free. How many of the applications he purchased were reviewed positively, and how many negatively? How about the applications he received for free?
###Code
dataset3=pd.read_csv("/content/drive/MyDrive/AMD/Dataset/Homework2/steam_reviews/steam_reviews.csv",usecols=['author.steamid','app_name','received_for_free','steam_purchase','recommended'])
###Output
_____no_output_____
###Markdown
From (1), the most popular author's steam id is 76561198103272004.
###Code
popapp=dataset3.loc[dataset3['author.steamid'] == 76561198103272004 ]
popapp
###Output
_____no_output_____
###Markdown
For this reviewer the only app he reviewed was purchased and the review was positive because he recommended it. From (2), the most popular author's steam ID is 76561198062813911 .
###Code
popapp=dataset3.loc[dataset3['author.steamid'] == 76561198062813911 ]
popapp=popapp.drop_duplicates('app_name')
popapp[(popapp['received_for_free'] == False ) & (popapp['steam_purchase'] == True) & (popapp['recommended'] == True)].count()
popapp[(popapp['received_for_free'] == False ) & (popapp['steam_purchase'] == True) & (popapp['recommended'] == False)].count()
###Output
_____no_output_____
###Markdown
As we can see above and in the previous questions, he only reviewed purchased apps, and 106 of them were reviewed positively and 2 negatively. RQ6 The average time (days and minutes) a user lets pass before he updates a review.
###Code
def parsedate(time_as_a_unix_timestamp):
return pd.to_datetime(time_as_a_unix_timestamp, unit='s')
dataset9=pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['author.steamid','timestamp_created', 'timestamp_updated'],parse_dates=['timestamp_created','timestamp_updated' ],date_parser=parsedate)
dataset9['time_delta'] = (dataset9.timestamp_updated - dataset9.timestamp_created)
dataset9.sort_values('time_delta',ascending= False).reset_index()
###Output
_____no_output_____
###Markdown
We drop the unneeded rows and columns.
###Code
dataset9.drop('timestamp_created', axis=1, inplace=True)
dataset9.drop('timestamp_updated', axis=1, inplace=True)
dataset9.drop(dataset9[dataset9.time_delta == '0 days 00:00:00'].index, inplace=True)
dataset9.head()
dataset9.tail()
(dataset9['time_delta']).mean()
###Output
_____no_output_____
###Markdown
The mean is 321 days and 46 minutes. Plot the top 3 authors that usually update their reviews. We sort and plot the authors that update the most reviews.
###Code
groupautc=dataset9.groupby('author.steamid').count()
sortautc=groupautc.sort_values('time_delta',ascending= False).reset_index()
sortautc
plt.rcParams['figure.figsize']=(20,12)
sortautc[0:3].plot.bar(x='author.steamid',y='time_delta',rot=0)
plt.xticks(horizontalalignment="center")
plt.title("Number of updates per author")
plt.xlabel("author steam ID")
plt.ylabel("number of updates")
###Output
_____no_output_____
###Markdown
RQ7
###Code
dataset10 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','weighted_vote_score', 'votes_funny'])
dataset10.head()
###Output
_____no_output_____
###Markdown
What’s the probability that a review has a Weighted Vote Score equal to or bigger than 0.5?
###Code
#votescore
dataset10b=dataset10['weighted_vote_score'] >= 0.5
filtered_dataset10 = dataset10[dataset10b]
vs=len(filtered_dataset10)/len(dataset10)
"{:.0%}".format(vs)
###Output
_____no_output_____
###Markdown
The probability that a review has a Weighted Vote Score equal to or bigger than 0.5 is 22%. What's the probability that a review has at least one funny vote, given that the Weighted Vote Score is bigger than 0.5?
###Code
#prob vs
dataset10v=dataset10['weighted_vote_score'] > 0.5
filtered_dataset10v = dataset10[dataset10v]
vs2=len(filtered_dataset10v)/len(dataset10)
#intersection
intersection = dataset10[(dataset10['votes_funny'] >= 1) & (dataset10['weighted_vote_score'] >0.5)]
intersec=(len(intersection))/len(dataset10)
#prob cond
cond=intersec/vs2
"{:.0%}".format(cond)
###Output
_____no_output_____
###Markdown
Here we have to use the formula of conditional probability, $P(A\mid B) = P(A \cap B)/P(B)$: the probability that a review has at least one funny vote, given that the Weighted Vote Score is bigger than 0.5, is 25%. Is the event "a review has at least one vote as funny" independent of the event "a review has a Weighted Vote Score equal to or bigger than 0.5"?
###Code
#funny
dataset10f=dataset10['votes_funny'] >= 1
filtered_dataset10f = dataset10[dataset10f]
fun=len(filtered_dataset10f)/len(dataset10)
"{:.0%}".format(fun)
###Output
_____no_output_____
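###Markdown
As an added illustration (a sketch): independence would require P(funny and WVS > 0.5) = P(funny) * P(WVS > 0.5), so we can compare the joint probability with the product of the marginals directly.
###Code
# sketch: under independence the joint probability equals the product of the marginals
joint_prob = intersec              # P(at least one funny vote and WVS > 0.5), computed above
product_of_marginals = fun * vs2   # P(at least one funny vote) * P(WVS > 0.5)
gap = joint_prob - product_of_marginals  # a clearly non-zero gap signals dependence
###Output
_____no_output_____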
###Markdown
No, they are not independent, because the conditional probability is different from the unconditional probability of being voted funny; this means a review is more likely to be considered funny when it has a high Weighted Vote Score. [RQ8] Is there a significant difference in the Weighted Vote Score of reviews made in Chinese vs the ones made in Russian? Use an appropriate statistical test or technique and support your choice.
###Code
dataset11 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','weighted_vote_score', 'language'])
dataset11b=dataset11['language'] == 'schinese'
filtered_chinese = dataset11[dataset11b]
filtered_chinese.describe()
plt.boxplot(filtered_chinese['weighted_vote_score'])
dataset11c=dataset11['language'] == 'russian'
filtered_russian = dataset11[dataset11c]
filtered_russian.describe()
plt.boxplot(filtered_russian['weighted_vote_score'])
###Output
_____no_output_____
###Markdown
The distributions are not normal, so we have to use a non-parametric test.
###Code
from scipy import stats
###Output
_____no_output_____
###Markdown
We use a non-parametric test because we do not assume that the distributions are normal; we treat them as non-Gaussian.
###Code
kruskal_test = stats.kruskal(filtered_chinese['weighted_vote_score'], filtered_russian['weighted_vote_score'])
kruskal_test
alpha = 0.05
pvalue=kruskal_test[1]
if pvalue<alpha:
print('reject H0')
else:
print('fail to reject H0')
###Output
reject H0
###Markdown
The p-value is smaller than alpha (it is equal to 0), so we reject H0 (the null hypothesis). We can note that with only a subset of the rows the p-value is a little higher than 0, while when we used the entire dataset it is exactly 0. Can you find any significant relationship between the time that a user lets pass before he updates the review and the Weighted Vote Score? Use an appropriate statistical test or technique and support your choice.
###Code
def parsedate(time_as_a_unix_timestamp):
return pd.to_datetime(time_as_a_unix_timestamp, unit='s')
dataset12 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','weighted_vote_score', 'timestamp_created','timestamp_updated'])
dataset12['time_delta'] = (dataset12.timestamp_updated - dataset12.timestamp_created)
dataset12.drop('timestamp_created', axis=1, inplace=True)
dataset12.drop('timestamp_updated', axis=1, inplace=True)
dataset12.drop(dataset12[dataset12.time_delta == '0 days 00:00:00'].index, inplace=True)
corr = dataset12.corr(method='pearson')
corr
plt.figure(figsize=(8, 6))
sns.heatmap(corr, annot=True)
plt.show()
corr.style.background_gradient(cmap='coolwarm')
#we can try to do scatter plot and add a comment
###Output
_____no_output_____
###Markdown
The correlation between the Weighted Vote Score and the time that a user lets pass before he updates a review is very close to zero, so we can say that these two variables are not correlated. Is there any change in the relationship of the variables mentioned in the previous literal if you include whether an application is recommended or not in the review? Use an appropriate statistical test or technique and support your choice.
###Code
def parsedate(time_as_a_unix_timestamp):
return pd.to_datetime(time_as_a_unix_timestamp, unit='s')
dataset13 = pd.read_csv("/content/drive/MyDrive/ADMHW2/steam_reviews.csv",usecols=['app_name','weighted_vote_score', 'timestamp_created','timestamp_updated', 'recommended'])
dataset13['time_delta'] = (dataset13.timestamp_updated - dataset13.timestamp_created)
dataset13.drop('timestamp_created', axis=1, inplace=True)
dataset13.drop('timestamp_updated', axis=1, inplace=True)
dataset13.drop(dataset13[dataset13.time_delta == '0 days 00:00:00'].index, inplace=True)
corr2 = dataset13.corr(method='pearson')
corr2
plt.figure(figsize=(8, 6))
sns.heatmap(corr2, annot=True)
plt.show()
corr2.style.background_gradient(cmap='coolwarm')
###Output
_____no_output_____
###Markdown
No, there is no change in the relation between the Weighted Vote Score and the update time delta if we also include the variable 'recommended'.
What are histograms, bar plots, scatterplots and pie charts used for? A histogram is the most commonly used graph to show frequency distributions: it is used to measure or summarize the distribution of data. It displays data by grouping them into "bins" of equal width; each bin is plotted as a bar whose height corresponds to how many data points are in that bin. A bar plot is used to display the relationship between a numeric and a categorical variable. A scatterplot is used to display the relationship between two numeric variables. A pie chart is used when you have categorical data and each slice represents the count or percentage of the observations of a level/category of the variable.
What insights can you extract from a Box Plot? Box plots provide a quick visual summary of the variability of values in a dataset. They show the upper and lower quartiles, the median, the min and max values, and any outliers in the dataset. The box is "composed" of the upper quartile on its upper side and the lower quartile on its lower side, the median is drawn as a line inside the box, and the min and max are represented outside the box. Usually outliers are represented by small circles, above the max value or below the min.
Theoretical Questions TQ1
1. This algorithm recursively computes the smallest value of an array A.
2. The worst case is the largest possible running time of the algorithm, that is, when the randomly picked value 's' never meets the value needed to have k=r, generating a loop because the function won't stop calling itself. In this algorithm, the worst case has running time T(n)=O(n^2), due to the recursive part where the array is repeatedly processed at each iteration.
3. What is asymptotically the running time of the algorithm in the best case? In the best case, k is equal to r and the algorithm just returns the value of s: in this case we have only one call and its running time is T(n)=O(n).
TQ2
1. Running time of splitSwap(a, 0, n), T(n):
Function splitSwap(a, l, n):
If n <= 1: ... O(1), constant time
Return ... O(1), constant time
splitSwap(a, l, n/2) ... O(n/2)
splitSwap(a, l+n/2, n/2) ... O(n/2)
swapList(a, l, n) ... O(n)
Function swapList(a, l, n):
For i = 0 to n/2: ... O(n/2)
tmp = a[l+i] ... O(1)
a[l+i] = a[l+n/2+i] ... O(1)
a[l+n/2+i] = tmp ... O(1)
The overall time complexity of the splitSwap function will be T(n) = O(1) + O(1) + O(n/2) + O(n/2) + O(n) + O(n/2) + O(1) + O(1) + O(1). Ignoring the constant terms, T(n) = O(n/2) + O(n) + O(n/2), so T(n) = O(n).
2. What does the algorithm do: the algorithm swaps the elements of the list or array. Is it optimal: yes, the algorithm is optimal, because it divides the given list into two similar but simpler subproblems and then composes their solutions to swap the elements of the list. Mechanism of the algorithm: it first calls itself recursively, passing the array, the first index, and half of the size of the list. Then it calls itself recursively again, passing the array, an index starting from the middle of the list, and again half of the size. Finally, the last statement calls the function swapList, passing the array, the index, and the total size. The for loop in swapList runs up to the middle of the array; this loop performs the actual swapping operation on the list.
TQ3 Counter example:
W: 1 1000 20 30 40 9
V: 8 8000 8 8 8 1
Maximum capacity of the knapsack = 1000.
These are weights versus values, both sorted by the value-by-weight ratio. Consider the above weight-value pairs, sorted according to the value-by-weight ratio. If we use the 0/1 knapsack approach, we would get profit = 8000, because we will pick the second item, i.e. weight = 1000. Using the fractional knapsack approach we will get profit = 10+(888/1000)*8000 = 8001. If we select the first item, the capacity becomes 888; as a result we cannot select the second item. Therefore we ignore the second item. Now we have enough capacity to select all the remaining items, so we take them: the total profit will be 8 + 8 + 8 + 8 + 1 = 33 = W. Here we have 8000 > 200*W. So in this example the heuristic fails to provide the optimal solution. These are the counter examples in which selecting items by the higher value/weight ratio can fail to provide the optimal solution.
###Code
###Output
_____no_output_____ |
docs/source/notebooks/examples/simulating-constraint-based-models.ipynb | ###Markdown
Simulating with Constraint-Based Models
###Code
import ccapi
client = ccapi.Client()
client.auth(email = "[email protected]", password = "test")
model = ccapi.load_model("dehalococcoides", client = client)
model
metabolic = model.default_version
metabolic
metabolic.metabolites
metabolic.reactions
###Output
_____no_output_____
###Markdown
Flux Balance Analysis
###Code
solution = metabolic.analyse(type_ = "fba")
solution["data"]
###Output
_____no_output_____
###Markdown
Simulating with Constraint-Based Models
###Code
import ccapi
client = ccapi.Client()
client.auth(email = "[email protected]", password = "test")
model = ccapi.load_model("dehalococcoides", client = client)
model
metabolic = model.default_version
metabolic
metabolic.metabolites
metabolic.reactions
# metabolic.reactions.query(lambda r: not isinstance(r.upper_bound, str))
solution = metabolic.analyse(type_ = "fba")
metabolic.to_json()
solution
###Output
_____no_output_____ |
modelproject/ModelProject-sev.ipynb | ###Markdown
$$\LARGE\text{Model Project}$$ The Cournot model of oligopoly In a Cournot model of oligopoly, firms compete in quantities of a homogeneous product in a market with no collusion. The model describes the rational behaviour of the individual firm in its choice of output, given the rival's choice of output. This project examines the case with two competing symmetric firms with complete information. Both firms choose output simultaneously. 1. Firms Firm $i$ chooses to produce quantity $q_i$ of a homogeneous product in a market of two firms, with both firms having the same constant marginal cost $c$. The inverse demand function for firm $i$ is assumed to be $p_i(q_i,q_j)=a-q_i-q_j$, where $c<a$. The profit function becomes $$\pi_i(q_i,q_j) = p_i(q_i,q_j)q_i-q_i c$$ The firms' best response functions $(BR_i)$ are calculated by maximizing the profit function, $$\underset{q_i}{\textbf{max}} \hspace{0.2cm} \pi_i(q_i,q_j)$$ This results in $$BR_i(q_j) = \frac{a-c-q_j}{2}$$ Each firm's best response is optimal given the best response chosen by the rival, with no incentive to deviate. If the quantity chosen by each firm is to be a Nash equilibrium, it must satisfy the best response functions. Given symmetry, the Nash equilibrium output is $$q_i^* = \frac{a-c}{3}$$
###Code
#1.a import modules
import numpy as np
import sympy as sm
from numpy import array
from scipy import linalg
from scipy import optimize
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
2. Define model
###Code
#2.a Define symbols
a = sm.symbols('a')
c = sm.symbols('c')
q1 = sm.symbols('q_1')
q2 = sm.symbols('q_2')
pi_1 = sm.symbols('\pi_1')
pi_2 = sm.symbols('\pi_2')
# 2.b Inverse demand function
def inverse_demand(q1,q2,a):
inverse = a - q1 - q2
return inverse
# 2.c cost functions
def cost(q,c):
cost_1 = q * c
return cost_1
# 2.d Profit functions
def profit_firm1(q1,q2,a,c):
pi_1 = (inverse_demand(q1,q2,a)) * q1 - cost(q1,c)
return pi_1
def profit_firm2(q1,q2,a,c):
pi_2 = inverse_demand(q1,q2,a) * q2 - cost(q2,c)
return pi_2
###Output
_____no_output_____
###Markdown
3. How to solve the model $\underline{\text{Solve in three steps:}}$ 1. **maximize** the profit function wrt. $q_i$ (find FOCs) and set them to zero 2. **isolate** $q_i$ from the first order conditions (find BR functions) 3. **substitute** $q_j$ into $q_i$ (find Nash equilibrium) 3.1 First order conditions The first order conditions are listed below, where FOC1 denotes the condition for firm 1, and FOC2 for firm 2.
###Code
FOC_1 = sm.diff(profit_firm1(q1,q2,a,c),q1) #derivative wrt. q1
FOC_2 = sm.diff(profit_firm2(q1,q2,a,c),q2)
print(f'FOC1: {FOC_1} = 0 \nFOC2: {FOC_2} = 0\n')
###Output
FOC1: a - c - 2*q_1 - q_2 = 0
FOC2: a - c - q_1 - 2*q_2 = 0
###Markdown
3.2 Best response functions The best response function for firm 1 is derived by isolating $q_1$ in FOC1, and the same for firm 2 in FOC2.
###Code
BR_1_sympy = sm.solve(FOC_1,q1) #solve for q1
BR_2_sympy = sm.solve(FOC_2,q2)
print(f'BR_1(q2) = q1 = {BR_1_sympy} \nBR_2(q1) = q2 = {BR_2_sympy}\n')
###Output
BR_1(q2) = q1 = [a/2 - c/2 - q_2/2]
BR_2(q1) = q2 = [a/2 - c/2 - q_1/2]
###Markdown
3.3 Nash Equilibrium The Nash Equilibrium can be solved for by substituting $q_1$ into $q_2$ which yields
###Code
BR1_sympy = a/2-c/2-q2/2
BR2_sympy = a/2-c/2-q1/2 - q2
SUB = BR2_sympy.subs(q1,BR1_sympy)
NE_q = sm.solve(SUB,q2)
quan = (a-c)/3
SUB2 = inverse_demand(q1,q2,a).subs(q1,quan)
NE_p = SUB2.subs(q2,quan)
print(f'q^*_i = {NE_q}')
print(f'p^* = {NE_p}')
###Output
q^*_i = [a/3 - c/3]
p^* = a/3 + 2*c/3
###Markdown
This shows the Nash Equilibrium quantity, $q_i^*$, and the equilibrium price $p^*$, where $c_1=c_2$. 4. Example of how to solve the problem In this section I derive the best response function for each firm by maximizing their respective profit functions. The best response functions are then used to find the Nash Equilibrium, i.e. the optimal choice of quantity given the other firm's choice of quantity. This procedure is described in section 3. 4.1 Model analysis
###Code
#4.a the optimization setup does not always work without running the code below.
#del a, c1, c2, q1, q2, pi_1, pi_2
#4.b Define best response functions
q0=0
def BR_1_analysis(q2,a,c):
opt_q1 = optimize.minimize(lambda q0: -profit_firm1(q0,q2,a,c), q2).x[0]
return opt_q1
def BR_2_analysis(q1,a,c):
opt_q2 = optimize.minimize(lambda q0: -profit_firm2(q1,q0,a,c), q1).x[0]
return opt_q2
def conditions_analysis(q,param):
u = q[0] - BR_1_analysis(q[1],param[0],param[1])
y = q[1] - BR_2_analysis(q[0],param[0],param[1])
return [u,y]
#4.c Define parametervalues [a,c]
param = [11,2]
#4.d Solve optimization problem
initial_values = [1,1]
opt_q_i = optimize.fsolve(conditions_analysis,initial_values, args = (param))
opt_p = param[0]/3 + 2*param[1]/3
profit_i = opt_p * opt_q_i - param[1] * opt_q_i
print(f'With parameter values of [a,c] = {param}, the Nash equilibrium quantities is (q1,q2) = {opt_q_i}.\nThis gives an equilibrium price of p = {opt_p}.')
print(f'Both companies achieve same profit of (pi_1, pi_2) = {profit_i}.\n')
###Output
With parameter values of [a,c] = [11, 2], the Nash equilibrium quantities are (q1,q2) = [2.99999999 2.99999999].
This gives an equilibrium price of p = 5.0.
Both companies achieve the same profit of (pi_1, pi_2) = [8.99999998 8.99999998].
###Markdown
4.2 Graphical analysis of best response functions
###Code
#4.e Define reaction functions
def q1_graphic(q2,param): #parameters can be changed in section 4, code 4.c
return (param[0]-param[1]-q2)/2
def q2_graphic(q1,param):
return (param[0]-param[1]-q1)/2
#4.f Create best response to multiple q values
range_q1 = np.linspace(-10,20,100)
range_q2 = np.linspace(-10,20,100)
q1_plot = q1_graphic(range_q2,param)
q2_plot = q2_graphic(range_q1,param)
#4.g Plot best response functions
fig = plt.figure(figsize=(12,5))
plt.style.use('fivethirtyeight')
ax_fig1 = fig.add_subplot(1,2,1)
plt.plot(q1_plot,range_q2, label = '$BR_1(q_2)$')
plt.plot(range_q1,q2_plot, label = '$BR_2(q_1)$')
plt.title('Figure 1 - Best response functions', fontsize=15)
plt.plot(opt_q_i,opt_q_i,'ko')
plt.xlabel('Quantity produced by firm 1')
plt.ylabel('Quantity produced by firm 2')
ax_fig1.set_xlim(0,10)
ax_fig1.set_ylim(0,10)
ax_fig1.grid(True)
plt.legend(loc="best")
print("Note: the Figure is misleading with the negative quantities on the axis'; negative quantities are not feasible.\nI have not find a way to solve this problem yet.")
###Output
Note: the Figure is misleading with the negative quantities on the axes; negative quantities are not feasible.
I have not found a way to solve this problem yet.
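###Markdown
One possible way to address the issue noted above (a small sketch that only reuses the functions and parameters already defined): clip the best responses at zero so that only feasible, non-negative quantities are drawn.
###Code
#4.j Plot best response functions restricted to the feasible (non-negative) region
range_q2_pos = np.linspace(0, param[0]-param[1], 100)
range_q1_pos = np.linspace(0, param[0]-param[1], 100)
fig_pos = plt.figure(figsize=(6,5))
ax_pos = fig_pos.add_subplot(1,1,1)
ax_pos.plot(np.maximum(q1_graphic(range_q2_pos, param), 0), range_q2_pos, label='$BR_1(q_2)$')
ax_pos.plot(range_q1_pos, np.maximum(q2_graphic(range_q1_pos, param), 0), label='$BR_2(q_1)$')
ax_pos.plot(opt_q_i, opt_q_i, 'ko')
ax_pos.set_xlim(0,10)
ax_pos.set_ylim(0,10)
ax_pos.set_xlabel('Quantity produced by firm 1')
ax_pos.set_ylabel('Quantity produced by firm 2')
ax_pos.legend(loc='best')
ax_pos.grid(True)
###Output
_____no_output_____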
###Markdown
The two best response functions in Figure 1 intersect at the black dot, which represents the Nash Equilibrium quantities found in section 4.1. Both functions decrease in the rival firm's quantity; the intuition is that whenever one firm increases output it is optimal for the rival to decrease its quantity. 4.3 How does increasing marginal cost affect the equilibrium?
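###Markdown
A quick analytical check before running the numbers: from the closed-form solution $q_i^* = \frac{a-c}{3}$ and $p^* = \frac{a+2c}{3}$ derived in section 3.3, $$ \frac{\partial q_i^*}{\partial c} = -\frac{1}{3} < 0, \qquad \frac{\partial p^*}{\partial c} = \frac{2}{3} > 0, $$ so a higher marginal cost should lower each firm's equilibrium quantity and raise the equilibrium price. The loop below confirms this numerically.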
###Code
#4.h Calculate equilibrium at different costs
c_vec = np.linspace(1,10,10)
q_vec = []
for c in c_vec:
q = (param[0]-c)/3
q_vec.append(q)
pi = (param[0]-q-q-c)*q
p = param[0]/3 + 2*c/3
print(f'a = {param[0]}, c = {c} --> q = {q:.2f}, p = {p:.2f} --> Profit = {pi:.2f}')
#4.i Code does not work (want to plot marginal cost against equilibrium quantity)
#plt.plot(c_vec,q)
#plt.title('Figure 2 - Equilibrium quantity given c', fontsize=15)
#plt.xlabel('Marginal cost')
#plt.ylabel('Equilibrium quantity')
#ax_fig1.grid(True)
#q = [(param[0]-c)/3 for c in c_vec]
q_vec = (param[0]-c_vec)/3
plt.plot(c_vec,q_vec)
plt.title('Figure 2 - Equilibrium quantity given c', fontsize=15)
plt.xlabel('Marginal cost')
plt.ylabel('Equilibrium quantity')
ax_fig1.grid(True)
###Output
_____no_output_____
###Markdown
This output shows that increasing marginal costs decrease the quantity produced in equilibrium per firm. The intuition is that higher marginal costs make it more expensive for the firms to produce goods, which leads them to increase prices by lowering the amount of goods produced. 5. What happens if more firms enter the market? I will now introduce a third firm. Each firm's profit is now $$\pi_i(q_i,q_j,q_k) = p_i(q_i,q_j,q_k)q_i-q_i c, \hspace{0.2cm}\\ p_i=a-q_1-q_2-q_3$$ The firms' best response functions $(BR_i)$ are calculated by maximizing the profit function. $$\underset{q_i}{\textbf{max}} \hspace{0.2cm} \pi_i(q_i,q_j,q_k) $$ This results in $$BR_i(q_j,q_k) = \frac{a-c-q_j-q_k}{2}$$ The Nash equilibrium quantity becomes $$q_i^* = \frac{a-c}{4}$$ The parameter values for $a$ and $c$ are set in the code below. Results will be compared with the results from section 4.1 in order to conclude what effect a third firm has on the market. $\underline{\text{Solve in three steps:}}$ 1. **maximize** profit function wrt. $q_i$ (find FOCs) 2. **isolate** $q_i$ from the first order conditions (find BR functions) 3. **substitute** $q_j$ and $q_k$ into $q_i$ (find Nash equilibrium)
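###Markdown
As a quick check of the three-firm formula above, using the values $a=20$ and $c=3$ that are set in the code below: $q_i^* = \frac{20-3}{4} = 4.25$ and $p^* = a - 3q_i^* = 20 - 12.75 = 7.25$, which is what the numerical solver should reproduce.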
###Code
#5.a define inverse demand function
def inverse_morefirms(q1,q2,q3,a):
demand = a - q1 - q2 - q3
return demand
#5.b define profit functions
def profit_firm1_morefirms(q1,q2,q3,a,c):
return inverse_morefirms(q1,q2,q3,a) * q1 - cost(q1,c)
def profit_firm2_morefirms(q1,q2,q3,a,c):
return inverse_morefirms(q1,q2,q3,a) * q2 - cost(q2,c)
def profit_firm3_morefirms(q1,q2,q3,a,c):
return inverse_morefirms(q1,q2,q3,a) * q3 - cost(q3,c)
#5.c define bestresponse functions by maximizing profits
def BR1_morefirms(q2,q3,a,c):
optimal_q1 = optimize.minimize(lambda q0: -profit_firm1_morefirms(q0,q2,q3,a,c), q0).x[0]
return optimal_q1
def BR2_morefirms(q1,q3,a,c):
optimal_q2 = optimize.minimize(lambda q0: -profit_firm2_morefirms(q1,q0,q3,a,c), q0).x[0]
return optimal_q2
def BR3_morefirms(q1,q2,a,c):
optimal_q3 = optimize.minimize(lambda q0: -profit_firm3_morefirms(q1,q2,q0,a,c), q0).x[0]
return optimal_q3
#5.d conditions
def conditions_morefirms(q,param):
u = q[0] - BR1_morefirms(q[1],q[2],param[0],param[1])
y = q[1] - BR2_morefirms(q[0],q[2],param[0],param[1])
z = q[2] - BR3_morefirms(q[0],q[1],param[0],param[1])
return [u,y,z]
param = [20,3]
#5.e solution and print
initial_values = [1,1,1]
opt_qi_morefirms = optimize.fsolve(conditions_morefirms,initial_values, args = (param))
opt_p_morefirms = param[0] - 3/4 * (param[0]-param[1])
profit_i_morefirms = opt_p_morefirms * opt_qi_morefirms - param[1] * opt_qi_morefirms
print(f'With parameter values of [a,c] = {param}, the Nash equilibrium quantities are (q1,q2,q3) = {opt_qi_morefirms}.\nThis gives an equilibrium price of p = {opt_p_morefirms}.')
print(f'All three firms achieve the same profit of (pi_1, pi_2, pi_3) = {profit_i_morefirms}.\n')
def inverse_nfirms(a,q_vec):
demand = a - np.sum(q_vec)
return demand
def profit_firm_i(a,c,q_vec,i,q_i):
# update q_i
q_vec[i]=q_i
return inverse_nfirms(a,q_vec) * q_i - cost(q_i,c)
def BR_nfirms(a,c,q_vec,q_i,i):
optimal_q_i = optimize.minimize(lambda q0: -profit_firm_i(a,c,q_vec,i,q0),x0=np.mean(q_vec)).x[0]
return optimal_q_i
def conditions_nfirms(q_vec,param):
conds = [q_i-BR_nfirms(param[0],param[1],q_vec,q_i,i) for i,q_i in enumerate(q_vec)]
return conds
def solve_model(param,n):
q0_vec = [1 for i in range(n)] # n firms
res = optimize.root(conditions_nfirms,q0_vec, args = (param))
opt_qi_nfirms = res.x
opt_p_nfirms = param[0]-np.sum(opt_qi_nfirms)
return opt_qi_nfirms,opt_p_nfirms
n_vec = range(2,20)
p_vec = [solve_model(param,n)[1] for n in n_vec]
plt.plot(n_vec,p_vec)
plt.title('Equilibrium price given n', fontsize=15)
plt.xlabel('Number of firms n')
plt.ylabel('Equilibrium price')
ax_fig1.grid(True)
def conditions_nfirms_fast(q_i,param,n):
# Assume all firms react the same
q_vec = [q_i[0] for i in range(n)]
cond = q_i[0]-BR_nfirms(param[0],param[1],q_vec,q_i,0)
return cond
def solve_model_fast(param,n):
#q0_vec = [1 for i in range(n)] # n firms
q0 = [1]
res = optimize.root(conditions_nfirms_fast,q0, args = (param,n))
opt_qi_nfirms = res.x[0]
opt_p_nfirms = param[0]-opt_qi_nfirms*n
return opt_qi_nfirms,opt_p_nfirms
n_vec = range(2,50)
p_vec = [solve_model_fast(param,n)[1] for n in n_vec]
plt.plot(n_vec,p_vec)
plt.title('Equilibrium price given n', fontsize=15)
plt.xlabel('Number of firms n')
plt.ylabel('Equilibrium price')
ax_fig1.grid(True)
###Output
_____no_output_____ |
.ipynb_checkpoints/TwitterUsingTwitter-checkpoint.ipynb | ###Markdown
Example 1. Authorizing an application to access Twitter account data
###Code
import twitter
# XXX: Go to http://dev.twitter.com/apps/new to create an app and get values
# for these credentials, which you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information on Twitter's OAuth implementation.
# access_token = "4576964834-5ZretFH6XZs605C6lUyWKBBqwe3O2OvLg84mZKZ"
# access_token_secret = "y9QehXEmGkM4rYYkAGPi7AwDX665qpxOF7UQxjsv0R1yW"
# consumer_key = "5rEhPV8THryOeTmDVVHi03axp"
# consumer_secret = "beo4w9LPA1evBexDD3evhVUrO1OFhTwVrpeiozo4HZv5srtBGf"
CONSUMER_KEY ="5rEhPV8THryOeTmDVVHi03axp"
CONSUMER_SECRET ="beo4w9LPA1evBexDD3evhVUrO1OFhTwVrpeiozo4HZv5srtBGf"
OAUTH_TOKEN ="4576964834-5ZretFH6XZs605C6lUyWKBBqwe3O2OvLg84mZKZ"
OAUTH_TOKEN_SECRET ="y9QehXEmGkM4rYYkAGPi7AwDX665qpxOF7UQxjsv0R1yW"
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET,CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
# Nothing to see by displaying twitter_api except that it's now a defined variable
print (twitter_api)
type(twitter)
dir(twitter)
help(twitter)
## Example 1. Accessing Twitter's API for development purposes
import twitter
def oauth_login():
# XXX: Go to http://twitter.com/apps/new to create an app and get values
# for these credentials that you'll need to provide in place of these
# empty string values that are defined as placeholders.
# See https://dev.twitter.com/docs/auth/oauth for more information
# on Twitter's OAuth implementation.
CONSUMER_KEY ="5rEhPV8THryOeTmDVVHi03axp"
CONSUMER_SECRET ="beo4w9LPA1evBexDD3evhVUrO1OFhTwVrpeiozo4HZv5srtBGf"
OAUTH_TOKEN ="4576964834-5ZretFH6XZs605C6lUyWKBBqwe3O2OvLg84mZKZ"
OAUTH_TOKEN_SECRET ="y9QehXEmGkM4rYYkAGPi7AwDX665qpxOF7UQxjsv0R1yW"
auth = twitter.oauth.OAuth(OAUTH_TOKEN, OAUTH_TOKEN_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
twitter_api = twitter.Twitter(auth=auth)
return twitter_api
# Sample usage
twitter_api = oauth_login()
# Nothing to see by displaying twitter_api except that it's now a defined variable
print (twitter_api)
###Output
_____no_output_____ |
FirstLast_FittingData.ipynb | ###Markdown
First Last - Fitting Data
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
###Output
_____no_output_____
###Markdown
Power on the Moon----* The Apollo lunar mission deployed a series of experiments on the Moon.* The experiment package was called the Apollo Lunar Surface Experiments Package [(ALSEP)](https://en.wikipedia.org/wiki/Apollo_Lunar_Surface_Experiments_Package)* The ALSEP was powered by a radioisotope thermoelectric generator [(RTG)](https://en.wikipedia.org/wiki/Radioisotope_thermoelectric_generator)----* An RTG is basically a fist-sized slug of Pu-238 wrapped in a material that generates electric power when heated.* Since the RTG is powered by a radioisotope, the output power decreases over time as the radioisotope decays. Read in the datafile The data file `/Data/Apollo_RTG.csv` contains the power output of the Apollo 12 RTG as a function of time. The data columns are* [Day] - Days on the Moon* [Power] - RTG power output in Watts Plot the Data* Day vs. Power* Use the OO interface to matplotlib* Fit the function with a polynomial (degree >= 3)* Plot the fit with the data- Output size w:11in, h:8.5in- Make the plot look nice (including clear labels) Power over time* All of your answers should be formatted as sentences* For example: `The power on day 0 is VALUE Watts`* Do not pick the complex roots! 1 - What was the power output on Day 0? 2 - How many years after landing could you still power a 60 W lightbulb? 3 - How many years after landing could you still power a 5 W USB device? 4 - How many years after landing until the power output is 0 W? --- Fitting data to a function* The datafile `./Data/linedata.csv` contains two columns of data* Use the OO interface to matplotlib* Plot the data (with labels!)* Fit the function below to the data* Find the values `(A,C,W)` that best fit the data- Output size w:11in, h:8.5in- Make the plot look nice (including clear labels) ---- Fit a gaussian of the form:$$ \Large f(x) = A e^{-\frac{(x - C)^2}{W}} $$* A = amplitude of the gaussian* C = x-value of the central peak of the gaussian* W = width of the gaussian --- Stellar Spectra The file `./Data/StarData.csv` is a spectrum of a main sequence star* Col 1 - Wavelength `[angstroms]`* Col 2 - Flux `[normalized to 0->1]` Read in the Data Plot the Data* Use the OO interface to matplotlib* Output size w:11in, h:8.5in* Make the plot look nice (including clear labels and a legend) Use [Wien's law](https://en.wikipedia.org/wiki/Wien%27s_displacement_law) to determine the temperature of the Star* You will need to find the wavelength where the Flux is at a maximum* Use the Astropy units and constants - do not hardcode
###Code
from astropy import units as u
from astropy import constants as const
###Output
_____no_output_____
###Markdown
Fitting Data
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy import units as u
from astropy import constants as const
from scipy.optimize import curve_fit
###Output
_____no_output_____
###Markdown
--- Power on the Moon---* The Apollo lunar mission deployed a series of experiments on the Moon.* The experiment package was called the Apollo Lunar Surface Experiments Package [(ALSEP)](https://en.wikipedia.org/wiki/Apollo_Lunar_Surface_Experiments_Package)* The ALSEP was powered by a radioisotope thermoelectric generator [(RTG)](https://en.wikipedia.org/wiki/Radioisotope_thermoelectric_generator)* An RTG is basically a fist-sized slug of Pu-238 wrapped in a material that generates electric power when heated.* Since the RTG is powered by a radioisotope, the output power decreases over time as the radioisotope decays. --- Read in the datafile The data file `/Data/Apollo_RTG.csv` contains the power output of the Apollo 12 RTG as a function of time. The data columns are* [Day] - Days on the Moon* [Power] - RTG power output in Watts Plot the Data* Day vs. Power* Fit the function with a (degree >= 3) polynomial* Plot the fit with the data* Output size w:11in, h:8.5in* Make the plot look nice (including clear labels) Power over time* All of your answers should be formatted as sentences* For example: `The power on day 0 is VALUE Watts`* Do not pick the complex roots! 1 - What was the power output on Day 0? 2 - How many years after landing could you still power a 60 W lightbulb? 3 - How many years after landing could you still power a 5 W USB device? 4 - How many years after landing until the power output is 0 W? --- Fitting data to a function* The datafile `./Data/linedata.csv` contains two columns of data* Plot the data (with labels!)* Fit the function below to the data* Find the values `(A,C,W)` that best fit the data- Output size w:11in, h:8.5in- Make the plot look nice (including clear labels) ---- Fit a gaussian of the form:$$ \huge f(x) = A e^{-\frac{(x - C)^2}{W}} $$* A = amplitude of the gaussian* C = x-value of the central peak of the gaussian* W = width of the gaussian --- Stellar Spectra The file `./Data/StarData.csv` is a spectrum of a main sequence star* Col 1 - Wavelength `[angstroms]`* Col 2 - Flux `[normalized to 0->1]` Read in the Data Plot the Data* Output size w:11in, h:8.5in* Make the plot look nice (including clear labels and a legend) Use [Wien's law](https://en.wikipedia.org/wiki/Wien%27s_displacement_law) to determine the temperature of the Star* You will need to find the wavelength where the Flux is at a maximum* Use the Astropy units and constants - do not hardcode [Planck's Law](https://en.wikipedia.org/wiki/Planck%27s_law)* [Planck's Law](https://en.wikipedia.org/wiki/Planck%27s_law) describes the spectrum emitted by a blackbody at a temperature T* You will want to look at the $\lambda$ version* Write a function to calculate the blackbody flux, at the above temperature, for all of your data_wavelength points* Use the Astropy units and constants - do not hardcode* Scale the blackbody flux to `[0->1]`* Add a column to the table: `Blackbody`
###Code
# Write a function
# Apply the function
# Normalize and add column
###Output
_____no_output_____
###Markdown
First Last - Fitting Data
###Code
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
###Output
_____no_output_____
###Markdown
Power on the Moon----* The Apollo lunar mission deployed a series of experiments on the Moon.* The experiment package was called the Apollo Lunar Surface Experiments Package [(ALSEP)](https://en.wikipedia.org/wiki/Apollo_Lunar_Surface_Experiments_Package)* The ALSEP was powered by a radioisotope thermoelectric generator [(RTG)](https://en.wikipedia.org/wiki/Radioisotope_thermoelectric_generator)----* An RTG is basically a fist-sized slug of Pu-238 wrapped in a material that generates electric power when heated.* Since the RTG is powered by a radioisotope, the output power decreases over time as the radioisotope decays. Read in the datafile The data file `/Data/Apollo_RTG.csv` contains the power output of the Apollo 12 RTG as a function of time. The data columns are* [Day] - Days on the Moon* [Power] - RTG power output in Watts Plot the Data* Day vs. Power* Use the OO interface to matplotlib* Fit the function with a polynomial (degree >= 3)* Plot the fit with the data- Output size w:11in, h:8.5in- Make the plot look nice (including clear labels) Power over time* All of your answers should be formatted as sentences* For example: `The power on day 0 is VALUE Watts` 1 - What was the power output on Day 0? 2 - How many years after landing could you still power a 60 W lightbulb? 3 - How many years after landing could you still power a 5 W USB device? 4 - How many years after landing until the power output is 0 W? --- Fitting data to a function* The datafile `./Data/linedata.csv` contains two columns of data* Use the OO interface to matplotlib* Plot the data (with labels!)* Fit the function below to the data* Find the values `(A,C,W)` that best fit the data- Output size w:11in, h:8.5in- Make the plot look nice (including clear labels) ---- Fit a gaussian of the form:$$ \Large f(x) = A e^{-\frac{(x - C)^2}{W}} $$* A = amplitude of the gaussian* C = x-value of the central peak of the gaussian* W = width of the gaussian Due Mon Feb 25 - 1 pm- `Make sure to change the filename to your name!`- `Make sure to change the Title to your name!`- `File -> Download as -> HTML (.html)`- `upload your .html and .ipynb file to the class Canvas page` ---- Ravenclaw The file `./Data/StarData.csv` is a spectrum of a main sequence star* Col 1 - Wavelength `[angstroms]`* Col 2 - Flux `[normalized to 0->1]` Read in the Data Plot the Data* Use the OO interface to matplotlib* Output size w:11in, h:8.5in* Make the plot look nice (including clear labels and a legend) Use [Wien's law](https://en.wikipedia.org/wiki/Wien%27s_displacement_law) to determine the temperature of the Star
###Code
from astropy import units as u
from astropy import constants as const
###Output
_____no_output_____ |
.ipynb_checkpoints/D10-Supervised_Machine_Learning (1)-checkpoint.ipynb | ###Markdown
Day 10 Class Exercises: Supervised Machine Learning Background. For these class exercises, we will be using the wine quality dataset which can be found at this URL: https://archive.ics.uci.edu/ml/datasets/wine+quality. We will be using the supervised machine learning tools from the lessons to determine a model that can use physicochemical measurements of wine as a predictor of quality. The data for these exercises can be found in the `data` directory of this repository. Additionally, with these class exercises we learn a few new things. When new knowledge is introduced you'll see the icon shown on the right: Get Started Import the Numpy, Pandas, Matplotlib (matplotlib magic), Seaborn and sklearn packages.
###Code
%matplotlib inline
# Data Management
import numpy as np
import pandas as pd
# Visualization
import seaborn as sns
import matplotlib.pyplot as plt
# Machine learning
from sklearn import model_selection
from sklearn import preprocessing
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
###Output
_____no_output_____
###Markdown
Exercise 1. Review the data once more Load the wine quality data used in the Seaborn class exercises from Day 9. As a reminder, you can read about this dataset from the file [../data/winequality.names](../data/winequality.names) Next, read in the file named `winequality-red.csv`. This data, despite the `csv` suffix, is separated using a semicolon.
###Code
wine = pd.read_csv('../data/winequality-red.csv', sep=";")
wine.head()
###Output
_____no_output_____ |
jupyter-notebooks/inference_via_ext_software/WetGrass_inf_pymc3.ipynb | ###Markdown
WetGrass analyzed using PyMC3. Inferring prob of some nodes conditioned on other nodes having given states.
###Code
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pymc3 as pm3
import scipy.stats as stats
import pprint as pp
np.random.seed(1234)
# plt.style.use('ggplot')
# plots don't show on notebook unless use this
%matplotlib inline
import os
import sys
cur_dir_path = os.getcwd()
print(cur_dir_path)
os.chdir('../../')
qfog_path = os.getcwd()
print(qfog_path)
sys.path.insert(0,qfog_path)
import importlib
mm = importlib.import_module("jupyter-notebooks.inference_via_ext_software.ModelMaker_PyMC3")
from graphs.BayesNet import *
# build BayesNet object bnet from bif file
in_path = "examples_cbnets/WetGrass.bif"
bnet = BayesNet.read_bif(in_path, False)
# build model (with all nodes observed) from bnet
prefix0 = "jupyter-notebooks/" +\
"inference_via_ext_software/model_examples_c/"
file_prefix = prefix0 + "WetGrass_inf_obs_all"
obs_vertices = 'all'
mod_file = mm.ModelMaker_PyMC3.write_model_for_inf(file_prefix, bnet, obs_vertices)
###Output
_____no_output_____
###Markdown
.py file with model can be found here: model_examples_c/WetGrass_inf_obs_all_pymc3.py
###Code
# enter observed data here
data_Cloudy = np.array([0], dtype=int)
data_Rain = None
data_Sprinkler = None
data_WetGrass = np.array([1], dtype=int)
# -i option allows it to access notebook's namespace
%run -i $mod_file
# sample model
chain_length = 100
with mod:
trace = pm3.sample(chain_length)
print(trace)
pm3.traceplot(trace,figsize=(20,10));
###Output
_____no_output_____
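###Markdown
The sampled trace can be turned into approximate marginal probabilities for the unobserved nodes. This is only a sketch; it assumes the auto-generated model names its random variables after the node names ('Rain' and 'Sprinkler').
###Code
# Approximate P(node = state | evidence) from the MCMC samples.
# Assumes the generated model uses the node names as its variable names.
for name in ['Rain', 'Sprinkler']:
    samples = trace[name]
    print(name, 'P(0) ~', np.mean(samples == 0), ' P(1) ~', np.mean(samples == 1))
###Output
_____no_output_____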
###Markdown
Exact results using brute force (enumeration) and junction tree algorithms
###Code
from inference.JoinTreeEngine import *
from inference.EnumerationEngine import *
jtree_eng = JoinTreeEngine(bnet)
brute_eng = EnumerationEngine(bnet)
# introduce some evidence
bnet.get_node_named("Cloudy").active_states = [0]
bnet.get_node_named("WetGrass").active_states = [1]
# print node distributions
node_list = jtree_eng.bnet_ord_nodes
jtree_pot_list = jtree_eng.get_unipot_list(node_list)
brute_pot_list = brute_eng.get_unipot_list(node_list)
for k in range(len(node_list)):
print("brute:", brute_pot_list[k])
print("jtree:", jtree_pot_list[k])
print('')
###Output
brute: ['Cloudy']
[ 1. 0.]
jtree: ['Cloudy']
[ 1. 0.]
brute: ['Rain']
[ 0.34839842 0.65160158]
jtree: ['Rain']
[ 0.34839842 0.65160158]
brute: ['Sprinkler']
[ 0.13119789 0.86880211]
jtree: ['Sprinkler']
[ 0.13119789 0.86880211]
brute: ['WetGrass']
[ 0. 1.]
jtree: ['WetGrass']
[ 0. 1.]
|
Decomposition.ipynb | ###Markdown
###Code
from collections import defaultdict
from functools import reduce, lru_cache
from itertools import product
import numpy as np
PAULIS = {
"I": np.eye(2, dtype=complex),
"X": np.array([[0, 1], [1, 0]], dtype=complex),
"Y": np.array([[0, -1j], [1j, 0]], dtype=complex),
"Z": np.array([[1, 0], [0, -1]], dtype=complex),
}
def decompose(H):
"""Decomposes a Hermitian matrix in to a linear sum of tensor products of
Pauli matrices.
Args:
H (ndarray): Hermitian matrix of dimension (2^n x 2^n).
Prints/Returns:
components (defaultdict): Dictionary with tensor products of Pauli
matrices as keys, and corresponding (non-zero) coefficients as values,
that decompose H.
"""
n = int(np.log2(len(H)))
dims = 2 ** n
if H.shape != (dims, dims):
raise ValueError("The input must be a 2^n x 2^n dimensional matrix.")
basis_key = ["".join(k) for k in product(PAULIS.keys(), repeat=n)]
components = defaultdict(int)
for i, val in enumerate(product(PAULIS.values(), repeat=n)):
basis_mat = reduce(np.kron, val)
coeff = H.reshape(-1).dot(basis_mat.reshape(-1)) / dims
coeff = np.real_if_close(coeff).item()
if not np.allclose(coeff, 0):
components[basis_key[i]] = coeff
print(components)
from datetime import datetime
start=datetime.now()
H = np.array([[ 1.5, 0, 0, 0.5, 0, 0.5, 0.5, 0],
[ 0, 0.5, 0, 0, 0, 0, 0, 0.5],
[ 0, 0, 0.5, 0, 0, 0, 0, 0.5],
[0.5, 0, 0, -0.5, 0, 0, 0, 0],
[0, 0, 0, 0, 0.5, 0, 0, 0.5],
[0.5, 0, 0, 0, 0, -0.5, 0, 0],
[0.5, 0, 0, 0, 0, 0, -0.5, 0],
[0, 0.5, 0.5, 0, 0.5, 0, 0, -1.5]], dtype=np.complex128)
decompose(H)
print(datetime.now()-start)
###Output
defaultdict(<class 'int'>, {'IIZ': 0.5, 'IXX': 0.25, 'IYY': -0.25, 'IZI': 0.5, 'XIX': 0.25, 'XXI': 0.25, 'YIY': -0.25, 'YYI': -0.25, 'ZII': 0.5})
0:00:00.018253
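###Markdown
As a quick sanity check (a sketch that reuses `PAULIS` and `H` from the cell above), the printed coefficients can be recombined into a matrix and compared against the original:
###Code
from functools import reduce

# Coefficients printed by decompose(H) above.
components = {'IIZ': 0.5, 'IXX': 0.25, 'IYY': -0.25, 'IZI': 0.5,
              'XIX': 0.25, 'XXI': 0.25, 'YIY': -0.25, 'YYI': -0.25, 'ZII': 0.5}
# Rebuild H as the weighted sum of the corresponding Pauli tensor products.
H_rebuilt = sum(coeff * reduce(np.kron, [PAULIS[p] for p in label])
                for label, coeff in components.items())
print(np.allclose(H_rebuilt, H))  # expected: True
###Output
_____no_output_____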
###Markdown
TIME-SERIES DECOMPOSITION**File:** Decomposition.ipynb**Course:** Data Science Foundations: Data Mining in Python IMPORT LIBRARIES
###Code
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.dates import DateFormatter
from statsmodels.tsa.seasonal import seasonal_decompose
###Output
_____no_output_____
###Markdown
LOAD AND PREPARE DATA
###Code
df = pd.read_csv('data/AirPassengers.csv', parse_dates=['Month'], index_col=['Month'])
###Output
_____no_output_____
###Markdown
PLOT DATA
###Code
fig, ax = plt.subplots()
plt.xlabel('Year: 1949-1960')
plt.ylabel('Monthly Passengers (1000s)')
plt.title('Monthly Intl Air Passengers')
plt.plot(df, color='black')
ax.xaxis.set_major_formatter(DateFormatter('%Y'))
###Output
_____no_output_____
###Markdown
DECOMPOSE TIME SERIES - Decompose the time series into three components: trend, seasonal, and residuals or noise.- This command also plots the components. - The argument `period` specifies that there are 12 observations (i.e., months) in the cycle.- By default, `seasonal_decompose` performs an additive (as opposed to multiplicative) decomposition.
###Code
# Set the figure size
plt.rcParams['figure.figsize'] = [7, 8]
# Plot the decomposition components
sd = seasonal_decompose(df, period=12).plot()
###Output
_____no_output_____
###Markdown
- For growth over time, it may be more appropriate to use a multiplicative trend.- The approach can show consistent changes by percentage.- In this approach, the residuals should be centered on 1 instead of 0.
###Code
sd = seasonal_decompose(df, model='multiplicative').plot()
###Output
_____no_output_____
###Markdown
Decomposition Decomposition involves separating a time series into its seasonality, trend, and residual components for further analysis. You can predict the residuals and add (or multiply) them back to the trend and seasonality.
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.tsa.api as smt
%matplotlib inline
###Output
/Users/michaelbeale/anaconda3/lib/python3.6/site-packages/statsmodels/compat/pandas.py:56: FutureWarning: The pandas.core.datetools module is deprecated and will be removed in a future version. Please use the pandas.tseries module instead.
from pandas.core import datetools
###Markdown
Basics Decomposition is usually additive or multiplicative. Below is a formula for how the different types of decomposition models work (where _t_ is a point in time). Additive: Value = Seasonality of _t_ + Trend of _t_ + Residual of _t_ Multiplicative: Value = Seasonality of _t_ × Trend of _t_ × Residual of _t_ Additive vs Multiplicative
###Code
additive = []
for x in range(0, 30, 2):
additive.append(x+3)
additive.append(x)
x = [x for x in range(len(additive))]
plt.plot(x, additive)
###Output
_____no_output_____
###Markdown
If the magnitude of the seasonality is not increasing, an additive method should be used. Here you can see that the differences between the peaks and valleys remain relatively constant.
###Code
multiplicative = []
for x in range(0, 30, 2):
multiplicative.append(x + x)
multiplicative.append(x)
x = [x for x in range(len(additive))]
plt.plot(x, multiplicative)
###Output
_____no_output_____
###Markdown
In the above example the seasonal variance is increasing, indicating that a multiplicative model would be more appropriate. Decomposing Models Classical Decomposition
###Code
air = pd.read_csv('data/international-airline-passengers.csv', header=0, index_col=0, parse_dates=[0])
# seasonal_decompose() params
# freq defaults to 'air.index.inferred_freq' if pandas array but this needs to be an integer
# for example - if you had 15 minute data and there was weekly seasonality freq=24*60/15*7
# filt defaults to a symmetric moving average - https://ec.europa.eu/eurostat/sa-elearning/symmetric-trend-filter
# moving median average would be more robust to anomalies but not letting anomalies effect the mean
# model defaults to additive, other option is multiplicative
# two_sided defaults to True - does it use a center fit or only for the past
d = smt.seasonal_decompose(air, model='multiplicative') #use multiplicative because the amplitude is increasing
fig = d.plot()
fig.set_size_inches(15,8)
###Output
_____no_output_____
###Markdown
* The trend estimates for the first 6 and last 6 measurements are unavailable because of the two-sided symmetric moving average for monthly data. If `two_sided=False`, then the first 12 measurements would be unavailable.* Classical decomposition only works if seasonality remains constant from year to year. This is typically only an issue with longer time series* Classical decomposition is not robust enough to handle unusual data (outliers) X-12-ARIMA decomposition * for monthly or quarterly data* trend estimate is available for all points* seasonal component is allowed to change slowly over time
###Code
# implementation coming soon
###Output
_____no_output_____
###Markdown
STL decomposition STL(Seasonal and Trend decomposition using Loess) has several advantages over the classical decomposition method and X-12-ARIMA:* Unlike X-12-ARIMA, STL will handle any type of seasonality, not only monthly and quarterly data.* The seasonal component is allowed to change over time, and the rate of change can be controlled by the user.* The smoothness of the trend-cycle can also be controlled by the user.* It can be robust to outliers (i.e., the user can specify a robust decomposition). So occasional unusual observations will not affect the estimates of the trend-cycle and seasonal components. They will, however, affect the remainder component.* Doesn't handle trading day or calendar variation* Only for additive models, but you can take the logs of data for multiplicative
###Code
# implementation coming soon. No good python libraries for this ATM
###Output
_____no_output_____
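###Markdown
Although no implementation is included above, newer statsmodels releases (0.11 and later) do ship an STL implementation in `statsmodels.tsa.seasonal.STL`. A minimal sketch, assuming such a version is installed and that the `air` DataFrame loaded earlier is still in scope; the log transform follows the note above about using logs for multiplicative series.
###Code
# Sketch only: requires statsmodels >= 0.11, which is newer than the version imported above.
from statsmodels.tsa.seasonal import STL

passengers = np.log(air.iloc[:, 0])   # log turns the multiplicative pattern into an additive one
stl_result = STL(passengers, period=12, robust=True).fit()   # robust=True downweights outliers
fig = stl_result.plot()
fig.set_size_inches(15, 8)
###Output
_____no_output_____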
###Markdown
Using Decomposition for forecasting
###Code
# coming soon
###Output
_____no_output_____ |
code/notebooks/mqtt-test.ipynb | ###Markdown
Simple MQTT Test
###Code
!pip3 install paho-mqtt
from functions import *
import paho.mqtt.client as mqtt
from random import randrange, uniform
# connection callback
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# message received callback
def on_message(client, userdata, msg):
print(msg.topic + " " + str(msg.payload))
client.publish("/out", "received an input...")
# set up the client
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(MQTT_USERNAME, MQTT_PASSWORD)
client.connect(MQTT_BROKER, MQTT_PORT, MQTT_KEEPALIVE)
# publish messages...
for index in range(5):
timestamp = time.time()
msg = "Mqtt Message"
ret_code = client.publish(MQTT_TOPIC_TEST, payload=msg, qos=0, retain=False)
print("Retcode : {retcode}, mid : {mid}".format(retcode=ret_code.rc, mid=ret_code.mid))
time.sleep(1)
# subscribe
client.subscribe(MQTT_TOPIC_UPTIME)
# process the MQTT business
client.loop_forever()
###Output
_____no_output_____ |
Notebooks/Bonus03-KerasPart2/Bonus03-Keras-5-RNN.ipynb | ###Markdown
Copyright (c) 2017-21 Andrew GlassnerPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. Deep Learning: A Visual Approach by Andrew Glassner, https://glassner.com Order: https://nostarch.com/deep-learning-visual-approach GitHub: https://github.com/blueberrymusic------ What's in this notebookThis notebook is provided to help you work with Keras and TensorFlow. It accompanies the bonus chapters for my book. The code is in Python3, using the versions of libraries as of April 2021.Note that I've included the output cells in this saved notebook, but Jupyter doesn't save the variables or data that were used to generate them. To recreate any cell's output, evaluate all the cells from the start up to that cell. A convenient way to experiment is to first choose "Restart & Run All" from the Kernel menu, so that everything's been defined and is up to date. Then you can experiment using the variables, data, functions, and other stuff defined in this notebook. Bonus Chapter 3 - Notebook 5: RNN curves
###Code
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import math
random_seed = 42
# Workaround for Keras issues on Mac computers (you can comment this
# out if you're not on a Mac, or not having problems)
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# Make a File_Helper for saving and loading files.
save_files = True
import os, sys, inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.dirname(current_dir)) # path to parent dir
from DLBasics_Utilities import File_Helper
file_helper = File_Helper(save_files)
def sum_of_sines(number_of_steps, d_theta, skip_steps, freqs, amps, phases):
'''Add together multiple sine waves and return a list of values that is
number_of_steps long. d_theta is the step (in radians) between samples.
skip_steps determines the start of the sequence. The lists freqs, amps,
    and phases should all be the same length (but we don't check!)'''
values = []
for step_num in range(number_of_steps):
angle = d_theta * (step_num + skip_steps)
sum = 0
for wave in range(len(freqs)):
y = amps[wave] * math.sin(freqs[wave]*(phases[wave] + angle))
sum += y
values.append(sum)
return np.array(values)
def sum_of_upsloping_sines(number_of_steps, d_theta, skip_steps, freqs, amps, phases):
'''Like sum_of_sines(), but always sloping upwards'''
np.random.seed(42)
values = []
for step_num in range(number_of_steps):
angle = d_theta * (step_num + skip_steps)
sum = 0
for wave in range(len(freqs)):
y = amps[wave] * math.sin(freqs[wave]*(phases[wave] + angle))
sum += y
values.append(sum)
if step_num > 0:
sum_change = sum - prev_sum
if sum_change < 0:
values[-1] *= -1
if step_num == 1:
values[-2] *= -1
prev_sum = sum
return np.array(values)
def samples_and_targets_from_sequence(sequence, window_size):
'''Return lists of samples and targets built from overlapping
windows of the given size. Windows start at the beginning of
the input sequence and move right by 1 element.'''
samples = []
targets = []
for i in range(sequence.shape[0]-window_size):
sample = sequence[i:i+window_size]
target = sequence[i+window_size]
samples.append(sample)
targets.append(target[0])
return (np.array(samples), np.array(targets))
def make_data(data_sequence_number, training_length):
training_sequence = test_sequence = []
test_length = 200
theta_step = .057
if data_sequence_number == 0:
freqs_list = [1, 2]
amps_list = [1, 2]
phases_list = [0, 0]
data_maker = sum_of_sines
elif data_sequence_number == 1:
freqs_list = [1.1, 1.7, 3.1, 7]
amps_list = [1,2,2,3]
phases_list = [0,0,0,0]
data_maker = sum_of_sines
elif data_sequence_number == 2:
freqs_list = [1.1, 1.7, 3.1, 7]
amps_list = [1,2,2,3]
phases_list = [0,0,0,0]
data_maker = sum_of_upsloping_sines
else:
print("***** ERROR! Unknown data_sequence_number = ",data_sequence_number)
training_sequence = data_maker(training_length, theta_step, 0, freqs_list, amps_list, phases_list)
test_sequence = data_maker(test_length, theta_step, 2*training_length, freqs_list, amps_list, phases_list)
return (training_sequence, test_sequence)
def show_data_sets(training_length):
for i in range(0, 3):
(training_sequence, test_sequence) = make_data(i, training_length)
plt.figure(figsize=(8,3))
plt.subplot(1, 2, 1)
plt.plot(training_sequence)
plt.title('training sequence, set '+str(i))
plt.xlabel('index')
plt.ylabel('value')
plt.subplot(1, 2, 2)
plt.plot(test_sequence)
plt.title('test sequence, set '+str(i))
plt.xlabel('index')
plt.ylabel('value')
plt.tight_layout()
file_helper.save_figure('RNN-data-set-'+str(i))
plt.show()
show_data_sets(training_length=200)
def scale_sequences(training_sequence, test_sequence):
# reshape train and test sequences to form needed by MinMaxScaler
training_sequence = np.reshape(training_sequence, (training_sequence.shape[0], 1))
test_sequence = np.reshape(test_sequence, (test_sequence.shape[0], 1))
Min_max_scaler = MinMaxScaler(feature_range=(0, 1))
Min_max_scaler.fit(training_sequence)
scaled_training_sequence = Min_max_scaler.transform(training_sequence)
scaled_test_sequence = Min_max_scaler.transform(test_sequence)
return (Min_max_scaler, scaled_training_sequence, scaled_test_sequence)
# chop up train and test sequences into overlapping windows of the given size
def chop_up_sequences(training_sequence, test_sequence, window_size):
(X_train, y_train) = samples_and_targets_from_sequence(training_sequence, window_size)
(X_test, y_test) = samples_and_targets_from_sequence(test_sequence, window_size)
return (X_train, y_train, X_test, y_test)
def make_data_set(data_sequence_number, window_size, training_length):
(training_sequence, test_sequence) = make_data(data_sequence_number, training_length)
(Min_max_scaler, scaled_training_sequence, scaled_test_sequence) = \
scale_sequences(training_sequence, test_sequence)
(X_train, y_train, X_test, y_test)= chop_up_sequences(scaled_training_sequence, scaled_test_sequence, window_size)
return (Min_max_scaler, X_train, y_train, X_test, y_test, training_sequence, test_sequence)
# build and run the first model.
def make_model(model_number, window_size):
model = Sequential()
if model_number == 0:
model.add(LSTM(3, input_shape=[window_size, 1]))
model.add(Dense(1, activation=None))
elif model_number == 1:
model.add(LSTM(3, return_sequences=True, input_shape=[window_size, 1]))
model.add(LSTM(3))
model.add(Dense(1, activation=None))
elif model_number == 2:
model.add(LSTM(9, return_sequences=True, input_shape=[window_size, 1]))
model.add(LSTM(6, return_sequences=True))
model.add(LSTM(3))
model.add(Dense(1, activation=None))
else:
print("*** ERROR: make_model unknown model_number = ",model_number)
model.compile(loss='mean_squared_error', optimizer='adam')
return model
def build_and_compare(model_number, data_set_number, window_size, training_length, epochs):
np.random.seed(random_seed)
model = make_model(model_number, window_size)
(Min_max_scaler, X_train, y_train, X_test, y_test, training_sequence, test_sequence) = \
make_data_set(data_set_number, window_size, training_length)
history = model.fit(X_train, y_train, epochs=epochs, batch_size=1, verbose=0)
# Predict
y_train_predict = np.ravel(model.predict(X_train))
y_test_predict = np.ravel(model.predict(X_test))
# invert transformation
inverse_y_train_predict = Min_max_scaler.inverse_transform([y_train_predict])
inverse_y_test_predict = Min_max_scaler.inverse_transform([y_test_predict])
plot_string = '-dataset-'+str(data_set_number)+'-window-'+str(window_size)+\
'-model_number-'+str(model_number)+'-length-'+str(training_length)+'-epochs-'+str(epochs)
plt.plot(history.history['loss'])
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Loss for data set '+str(data_set_number)+', window '+str(window_size))
file_helper.save_figure('RNN-loss'+plot_string)
plt.show()
# plot training and predictions
plt.plot(training_sequence, label="train", color='black', linewidth=2, zorder=20)
skip_values = np.array(window_size*(np.nan,))
flat_predict = np.ravel(inverse_y_train_predict)
plot_predict = np.append(skip_values, flat_predict)
plt.plot(plot_predict, label="train predict", color='red', linewidth=2, zorder=10)
plt.legend(loc='best')
plt.xlabel('index')
plt.ylabel('train and prediction')
plt.title('training set '+str(data_set_number)+', window '+str(window_size))
file_helper.save_figure('RNN-train-predictions'+plot_string)
plt.show()
plt.plot(test_sequence, label="test", color='black', linewidth=2, zorder=20)
skip_values = np.array(window_size*(np.nan,))
flat_predict = np.ravel(inverse_y_test_predict)
plot_predict = np.append(skip_values, flat_predict)
plt.plot(plot_predict, label="test predict", color='red', linewidth=2, zorder=10)
plt.legend(loc='best')
plt.xlabel('index')
plt.ylabel('test and prediction')
plt.title('test set '+str(data_set_number)+', window '+str(window_size))
plt.tight_layout()
file_helper.save_figure('RNN-test-predictions'+plot_string)
plt.show()
###Output
_____no_output_____
###Markdown
Slow Alert! If you're running without a GPU (and maybe even if you are), the last two of these runs will take a while. It might be hours, or even days. Plan ahead!
###Code
build_and_compare(model_number=0, data_set_number=0, window_size=1, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=0, window_size=3, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=0, window_size=5, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=1, window_size=1, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=1, window_size=3, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=1, window_size=5, training_length=200, epochs=100)
build_and_compare(model_number=0, data_set_number=2, window_size=5, training_length=200, epochs=100)
build_and_compare(model_number=1, data_set_number=2, window_size=5, training_length=200, epochs=100)
build_and_compare(model_number=2, data_set_number=2, window_size=5, training_length=200, epochs=100)
#build_and_compare(model_number=2, data_set_number=2, window_size=13, training_length=2000, epochs=100)
#build_and_compare(model_number=2, data_set_number=2, window_size=13, training_length=20000, epochs=100)
###Output
_____no_output_____ |
functions/3)types_of_args.ipynb | ###Markdown
Types of Arguments
- **Default Arguments**
In this, you can give the default value to a parameter while defining a function which it will take **if the value is not passed** while calling the function
###Code
def add(a=3,b=9):
print(a+b)
add(2)
add(2,7)
add()
###Output
11
9
12
###Markdown
- **Keyword Arguments**
These are the values you can give to parameters **while calling the function**
The special thing here is that the **order of the keyword arguments does not matter** as long as the **identifiers** are correct
###Code
def inc(a,b,c):
print(a+1,b+1,c+1)
inc(2,3,4)
inc(b=4,a=2,c=5)
###Output
3 4 5
3 5 6
###Markdown
- **Arbritrary Arguments(Args)**
These are used to store an **unknown number** of arguments.
Whatever arguments are received are stored in a **tuple**
###Code
def mult(*num):
for i in num:
print(i*5,end=" ")
mult(2,3)
print("\n")
mult(2,3,4,5,6)
###Output
10 15
10 15 20 25 30
###Markdown
- **Keyword Arbitrary Arguments(Kwargs)**
These are used to store an **unknown number** of keyword arguments but with each argument having keywords assigned to them.
The keyword arguments are stored in the form of a **dictionary**.
###Code
def login(**details):
print("Username:",details["username"],"Password:",details["password"])
login(username="Arjun",password="hello")
###Output
Username: Arjun Password: hello
|
Kinect_project/Neural Network/Neural Newtork.ipynb | ###Markdown
Neural Network We'll now use a Neural Network to predict the players identity.
###Code
%matplotlib notebook
import pylab as plt
import numpy as np
import seaborn as sns; sns.set()
import keras
from keras.models import Sequential, Model
from keras.layers import Dense
from keras.optimizers import Adam
from sklearn.decomposition import PCA
###Output
Using TensorFlow backend.
###Markdown
We'll start with the features and the topology proposed by last year's group and train the NN with them. Afterwards we'll try with our features (generated per wave instead of per balloon).
###Code
data = np.genfromtxt('../features/kate_data_julien_sarah.csv', delimiter=',')
np.random.shuffle(data)
training_ratio = 0.85
l = len(data)
X = data[:,:-1]
y = data[:,-1]
X_train = X[:int(l*training_ratio)]
X_test = X[int(l*training_ratio):]
y_train = y[:int(l*training_ratio)]/2
y_test = y[int(l*training_ratio):]/2
y_train = keras.utils.np_utils.to_categorical(y_train.astype(int))
y_test = keras.utils.np_utils.to_categorical(y_test.astype(int))
###Output
_____no_output_____
###Markdown
Dimensionality reduction with PCA
###Code
mu = X_train.mean(axis=0)
U,s,V = np.linalg.svd(X_train - mu, full_matrices=False)
Zpca = np.dot(X_train - mu, V.transpose())
Rpca = np.dot(Zpca[:,:2], V[:2,:]) + mu # reconstruction
err = np.sum((X_train-Rpca)**2)/Rpca.shape[0]/Rpca.shape[1]
print('PCA reconstruction error with 2 PCs: ' + str(round(err,3)));
print(max(Zpca[:,0]))
print(min(Zpca[:,0]))
print(max(Zpca[:,1]))
print(min(Zpca[:,1]))
print(np.argmax(Zpca[:,0]))
print(np.argmax(Zpca[:,1]))
###Output
PCA reconstruction error with 2 PCs: 7.471
2234.6621633472378
-22.639794956373947
128.40015756257122
-369.87814850662505
94
94
###Markdown
Building and training of a dnn
###Code
m = Sequential()
m.add(Dense(150, activation='relu', input_shape=(105,)))
#m.add(Dense(150, activation='relu'))
m.add(Dense(150, activation='relu'))
m.add(Dense(150, activation='relu'))
m.add(Dense(50, activation='relu'))
m.add(Dense(2, activation='sigmoid'))
m.compile(loss='categorical_crossentropy', optimizer = Adam(), metrics=['accuracy'])
history = m.fit(X_train, y_train, batch_size=10, epochs=20, verbose=1, validation_data = (X_test, y_test))
y_pred = m.predict(X_test)
accuracy = m.evaluate(X_test, y_test)[1]
print("Précision old features: %.2f" % accuracy)
###Output
240/240 [==============================] - 0s 37us/step
Précision old features: 0.81
###Markdown
Now let's try with our features and with a different topology. Since we have computed a 12-dimensional feature vector, it would make no sense to use layers with more than 100 neurons as last year's group did.
###Code
X = np.genfromtxt('../features/features_wave_julian_sarah.csv', delimiter=',')
y = np.genfromtxt('../features/output_wave_julian_sarah.csv', delimiter=',')
p = np.random.permutation(len(X))
X, y = X[p], y[p]
training_ratio = 0.85
l = len(y)
X_train = X[:int(l*training_ratio)]
X_test = X[int(l*training_ratio):]
y_train = y[:int(l*training_ratio)]/2
y_test = y[int(l*training_ratio):]/2
y_train = keras.utils.np_utils.to_categorical(y_train.astype(int))
y_test = keras.utils.np_utils.to_categorical(y_test.astype(int))
###Output
_____no_output_____
###Markdown
In our case the features have 12 dimensions instead. Let's apply the NN directly, without PCA, so we can compare later
###Code
m = Sequential()
m.add(Dense(15, activation='relu', input_shape=(12,)))
m.add(Dense(15, activation='relu'))
m.add(Dense(15, activation='relu'))
m.add(Dense(4, activation='relu'))
m.add(Dense(2, activation='sigmoid'))
m.compile(loss='categorical_crossentropy', optimizer = Adam(), metrics=['accuracy'])
history = m.fit(X_train, y_train, batch_size=10, epochs=20, verbose=1, validation_data = (X_test, y_test))
accuracy = m.evaluate(X_test, y_test)[1]
print("Précision New features: %.2f" % accuracy)
###Output
240/240 [==============================] - 0s 29us/step
Précision New features: 0.88
###Markdown
We got a precision of 88%, which is better than last year's 81%. But we would need to average over several runs to compare properly; a sketch of such an averaging loop is given below, before the PCA experiment. Let's now apply PCA to reduce the dimension to 3
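###Markdown
A minimal sketch of the averaging idea mentioned above, reusing `X`, `y` and the same architecture so the comparison does not depend on a single random split; the number of runs and epochs here are arbitrary choices.
###Code
# Average test accuracy over several random train/test splits (sketch only).
n_runs = 5
accuracies = []
for run in range(n_runs):
    perm = np.random.permutation(len(X))
    Xs, ys = X[perm], y[perm]
    split = int(len(ys) * 0.85)
    ytr = keras.utils.np_utils.to_categorical((ys[:split] / 2).astype(int))
    yte = keras.utils.np_utils.to_categorical((ys[split:] / 2).astype(int))
    m_avg = Sequential()
    m_avg.add(Dense(15, activation='relu', input_shape=(12,)))
    m_avg.add(Dense(15, activation='relu'))
    m_avg.add(Dense(15, activation='relu'))
    m_avg.add(Dense(4, activation='relu'))
    m_avg.add(Dense(2, activation='sigmoid'))
    m_avg.compile(loss='categorical_crossentropy', optimizer=Adam(), metrics=['accuracy'])
    m_avg.fit(Xs[:split], ytr, batch_size=10, epochs=20, verbose=0)
    accuracies.append(m_avg.evaluate(Xs[split:], yte, verbose=0)[1])
print("Mean accuracy over %d runs: %.3f" % (n_runs, np.mean(accuracies)))
###Output
_____no_output_____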
###Code
model_pca3 = PCA(n_components=3)
# On entraîne notre modèle (fit) sur les données
model_pca3.fit(X)
# On applique le résultat sur nos données :
X_reduced3 = model_pca3.transform(X)
training_ratio = 0.85
l = len(y)
X_train = X_reduced3[:int(l*training_ratio)]
X_test = X_reduced3[int(l*training_ratio):]
y_train = y[:int(l*training_ratio)]/2
y_test = y[int(l*training_ratio):]/2
y_train = keras.utils.np_utils.to_categorical(y_train.astype(int))
y_test = keras.utils.np_utils.to_categorical(y_test.astype(int))
m = Sequential()
m.add(Dense(20, activation='relu', input_shape=(3,)))
#m.add(Dense(20, activation='relu'))
m.add(Dense(20, activation='relu'))
m.add(Dense(20, activation='relu'))
m.add(Dense(5, activation='relu'))
m.add(Dense(2, activation='sigmoid'))
m.compile(loss='categorical_crossentropy', optimizer = Adam(), metrics=['accuracy'])
history = m.fit(X_train, y_train, batch_size=10, epochs=20, verbose=1, validation_data = (X_test, y_test))
###Output
Train on 1360 samples, validate on 240 samples
Epoch 1/20
1360/1360 [==============================] - 1s 555us/step - loss: 0.5776 - acc: 0.7301 - val_loss: 0.5513 - val_acc: 0.7500
Epoch 2/20
1360/1360 [==============================] - 0s 91us/step - loss: 0.4966 - acc: 0.7831 - val_loss: 0.5360 - val_acc: 0.7583
Epoch 3/20
1360/1360 [==============================] - 0s 87us/step - loss: 0.4722 - acc: 0.7860 - val_loss: 0.5485 - val_acc: 0.7333
Epoch 4/20
1360/1360 [==============================] - 0s 88us/step - loss: 0.4613 - acc: 0.7897 - val_loss: 0.5361 - val_acc: 0.7583
Epoch 5/20
1360/1360 [==============================] - 0s 86us/step - loss: 0.4488 - acc: 0.7949 - val_loss: 0.5459 - val_acc: 0.7458
Epoch 6/20
1360/1360 [==============================] - 0s 87us/step - loss: 0.4395 - acc: 0.7963 - val_loss: 0.5291 - val_acc: 0.7375
Epoch 7/20
1360/1360 [==============================] - 0s 102us/step - loss: 0.4388 - acc: 0.7934 - val_loss: 0.5254 - val_acc: 0.7417
Epoch 8/20
1360/1360 [==============================] - 0s 97us/step - loss: 0.4293 - acc: 0.7919 - val_loss: 0.5402 - val_acc: 0.7292
Epoch 9/20
1360/1360 [==============================] - 0s 98us/step - loss: 0.4341 - acc: 0.7963 - val_loss: 0.5272 - val_acc: 0.7375
Epoch 10/20
1360/1360 [==============================] - 0s 95us/step - loss: 0.4172 - acc: 0.8029 - val_loss: 0.5676 - val_acc: 0.7333
Epoch 11/20
1360/1360 [==============================] - 0s 90us/step - loss: 0.4199 - acc: 0.8022 - val_loss: 0.5253 - val_acc: 0.7458
Epoch 12/20
1360/1360 [==============================] - 0s 95us/step - loss: 0.4155 - acc: 0.8000 - val_loss: 0.5548 - val_acc: 0.7250
Epoch 13/20
1360/1360 [==============================] - 0s 90us/step - loss: 0.4158 - acc: 0.7963 - val_loss: 0.5477 - val_acc: 0.7375
Epoch 14/20
1360/1360 [==============================] - 0s 107us/step - loss: 0.4137 - acc: 0.8007 - val_loss: 0.5076 - val_acc: 0.7500
Epoch 15/20
1360/1360 [==============================] - 0s 101us/step - loss: 0.4128 - acc: 0.8007 - val_loss: 0.5281 - val_acc: 0.7458
Epoch 16/20
1360/1360 [==============================] - 0s 89us/step - loss: 0.4045 - acc: 0.8088 - val_loss: 0.5103 - val_acc: 0.7500
Epoch 17/20
1360/1360 [==============================] - 0s 88us/step - loss: 0.4058 - acc: 0.7978 - val_loss: 0.5145 - val_acc: 0.7417
Epoch 18/20
1360/1360 [==============================] - 0s 91us/step - loss: 0.4036 - acc: 0.8110 - val_loss: 0.5468 - val_acc: 0.7583
Epoch 19/20
1360/1360 [==============================] - 0s 89us/step - loss: 0.4095 - acc: 0.8037 - val_loss: 0.5118 - val_acc: 0.7583
Epoch 20/20
1360/1360 [==============================] - 0s 87us/step - loss: 0.4061 - acc: 0.8110 - val_loss: 0.5160 - val_acc: 0.7500
|
jwolf-AI/tensorflow2_tutorials_chinese-master/105-example_regression.ipynb | ###Markdown
TensorFlow 2.0 Tutorial - Regression TensorFlow 2 tutorials, Zhihu column: https://zhuanlan.zhihu.com/c_1091021863043624960 In a regression problem, the goal is to predict a continuous-valued output, such as a price or a probability. We use the classic Auto MPG dataset and build a model to predict the fuel efficiency of cars from the late 1970s and early 1980s. To do this, we provide the model with descriptions of many cars from that period. These descriptions include the following attributes: cylinders, displacement, horsepower, and weight.
###Code
from __future__ import absolute_import, division, print_function
import pathlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
print(tf.__version__)
###Output
2.0.0-alpha0
###Markdown
1. The Auto MPG dataset Get the data
###Code
dataset_path = keras.utils.get_file('auto-mpg.data',
'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data')
print(dataset_path)
###Output
Downloading data from https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data
32768/30286 [================================] - 1s 25us/step
/home/czy/.keras/datasets/auto-mpg.data
###Markdown
Read the data using pandas
###Code
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
dataset.tail()
###Output
_____no_output_____
###Markdown
2. Data preprocessing Clean the data
###Code
print(dataset.isna().sum())
dataset = dataset.dropna()
origin = dataset.pop('Origin')
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()
###Output
MPG 0
Cylinders 0
Displacement 0
Horsepower 6
Weight 0
Acceleration 0
Model Year 0
Origin 0
dtype: int64
###Markdown
Split the data into training and test sets
###Code
train_dataset = dataset.sample(frac=0.8,random_state=0)
test_dataset = dataset.drop(train_dataset.index)
###Output
_____no_output_____
###Markdown
Inspect the data Look at the joint distribution of a few pairs of columns from the training set.
###Code
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")
###Output
_____no_output_____
###Markdown
Overall statistics:
###Code
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats
###Output
_____no_output_____
###Markdown
Separate the labels from the features
###Code
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
###Output
_____no_output_____
###Markdown
Normalize the data It is good practice to normalize features that use different scales and ranges. Although the model might converge without feature normalization, it makes training more difficult and it makes the resulting model dependent on the choice of units used in the input.
###Code
def norm(x):
return (x - train_stats['mean']) / train_stats['std']
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)
###Output
_____no_output_____
###Markdown
3. Build the model
###Code
def build_model():
model = keras.Sequential([
layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
optimizer = tf.keras.optimizers.RMSprop(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
model = build_model()
model.summary()
example_batch = normed_train_data[:10]
example_result = model.predict(example_batch)
example_result
###Output
_____no_output_____
###Markdown
4. Train the model
###Code
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 100 == 0: print('')
print('.', end='')
EPOCHS = 1000
history = model.fit(
normed_train_data, train_labels,
epochs=EPOCHS, validation_split = 0.2, verbose=0,
callbacks=[PrintDot()])
###Output
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
....................................................................................................
###Markdown
View the training history
###Code
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
hist.tail()
def plot_history(history):
hist = pd.DataFrame(history.history)
hist['epoch'] = history.epoch
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Abs Error [MPG]')
plt.plot(hist['epoch'], hist['mae'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mae'],
label = 'Val Error')
plt.ylim([0,5])
plt.legend()
plt.figure()
plt.xlabel('Epoch')
plt.ylabel('Mean Square Error [$MPG^2$]')
plt.plot(hist['epoch'], hist['mse'],
label='Train Error')
plt.plot(hist['epoch'], hist['val_mse'],
label = 'Val Error')
plt.ylim([0,20])
plt.legend()
plt.show()
plot_history(history)
###Output
_____no_output_____
###Markdown
Use early stopping
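The `EarlyStopping` callback below stops training once `val_loss` has failed to improve for `patience=10` consecutive epochs. As a small aside (not in the original tutorial), TF 2.x can also roll the model back to the best weights seen so far; a sketch:

```python
# Hedged variant: additionally restore the weights from the best validation epoch.
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10,
                                           restore_best_weights=True)
```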
###Code
model = build_model()
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
validation_split = 0.2, verbose=0, callbacks=[early_stop, PrintDot()])
plot_history(history)
###Output
..........................................................
###Markdown
Test the model
###Code
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=0)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))
###Output
Testing set Mean Abs Error: 1.85 MPG
###Markdown
5. Make predictions
###Code
test_predictions = model.predict(normed_test_data).flatten()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
###Output
_____no_output_____ |
quantum-with-qiskit/Q80_Multiple_Control_Constructions.ipynb | ###Markdown
$ \newcommand{\bra}[1]{\langle 1|} $$ \newcommand{\ket}[1]{|1\rangle} $$ \newcommand{\braket}[2]{\langle 1|2\rangle} $$ \newcommand{\dot}[2]{ 1 \cdot 2} $$ \newcommand{\biginner}[2]{\left\langle 1,2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{1} 2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{1}} $$ \newcommand{\mypar}[1]{\left( 1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( 1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{1}2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert 1 \right\rVert } $$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} 1 \mspace{-1.5mu} \rfloor } $$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}1}}} $$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}1}}} $$ \newcommand{\redbit}[1] {\mathbf{{\color{red}1}}} $$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}1}}} $$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}1}}} $ Multiple Control Constructions _prepared by Maksim Dimitrijev and Abuzer Yakaryilmaz_[](https://youtu.be/eoFJdS5BwkA) Remember that when appying CNOT gate, NOT operator is applied to the target qubit if the control qubit is in state $\ket{1}$:$$ CNOT= \mymatrix{cc|cc}{\blackbit{1} & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & \bluebit{1} \\ 0 & 0 & \bluebit{1} & 0} . $$How can we obtain the following operator, in which the NOT operator is applied to the target qubit if the control qubit is in state $ \ket{0} $?$$ C_0NOT = \mymatrix{cc|cc}{0 & \bluebit{1} & 0 & 0 \\ \bluebit{1} & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & \blackbit{1}} . $$As also mentioned in the notebook [Operators on Multiple Bits](../classical-systems/CS40_Operators_on_Multiple_Bits.ipynb), we can apply a $ NOT $ operator on the control bit before applying $ CNOT $ operator so that the $ NOT $ operator is applied to the target qubit when the control qubit has been in state $ \ket{0} $. To recover the previous value of the control qubit, we apply the $ NOT $ operator once more after the $ CNOT $ operator. In short: apply $ NOT $ operator to the control qubit, apply $ CNOT $ operator, and, apply $ NOT $ operator to the control qubit.We can implement this idea in Qiskit as follows.
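Written as matrices (with the control qubit as the leftmost tensor factor, as in the matrices above), this trick is the conjugation identity $ C_0NOT = (X \otimes I) \, CNOT \, (X \otimes I) $, which works because $X$ is its own inverse.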
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
q = QuantumRegister(2, "q")
c = ClassicalRegister(2, "c")
qc = QuantumCircuit(q,c)
qc.x(q[1])
qc.cx(q[1],q[0])
# Returning control qubit to the initial state
qc.x(q[1])
job = execute(qc,Aer.get_backend('unitary_simulator'), shots = 1)
U=job.result().get_unitary(qc,decimals=3)
print("CNOT(0) = ")
for row in U:
s = ""
for value in row:
s = s + str(round(value.real,2)) + " "
print(s)
qc.draw(output="mpl", reverse_bits=True)
###Output
_____no_output_____
###Markdown
By using this trick, more complex conditional operators can be implemented. CCNOTNow we introduce $ CCNOT $ gate: **controlled-controlled-not operator** ([Toffoli gate](https://en.wikipedia.org/wiki/Toffoli_gate)), which is controlled by two qubits. The implementation of $CCNOT$ gate in Qiskit is as follows: circuit.ccx(control-qubit1,control-qubit2,target-qubit)That is, $ NOT $ operator is applied to the target qubit when both control qubits are in state $\ket{1}$. Its matrix representation is as follows:$$ CCNOT = \mymatrix{cc|cc|cc|cc}{\blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & \bluebit{1} \\ 0 & 0 & 0 & 0 & 0 & 0 & \bluebit{1} & 0}. $$ Task 1Implement each of the following operators in Qiskit by using three qubits. Verify your implementation by using "unitary_simulator" backend. $$ C_0C_0NOT = \mymatrix{cc|cc|cc|cc}{0 & \bluebit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \bluebit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}, ~~ C_0C_1NOT = \mymatrix{cc|cc|cc|cc}{ \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & \bluebit{1} & 0 & 0 & 0 & 0 \\ 0 & 0 & \bluebit{1} & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}, ~~ \mbox{and} ~~ C_1C_0NOT = \mymatrix{cc|cc|cc|cc}{\blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & \bluebit{1} & 0 & 0 \\ 0 & 0 & 0 & 0 & \bluebit{1} & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}. $$
###Code
#
# your solution is here
#
###Output
_____no_output_____
###Markdown
click for our solution More controlsHere we present basic methods on how to implement $ NOT $ gates controlled by more than two qubits by using $CNOT$, $ CCNOT $, and some ancilla (auxiliary) qubits. *(Note that Qiskit has a method called "mct" to implement such gates. Another multiple-controlled operator in Qiskit is "mcrz".)* Implementation of CCCNOT gateWe give the implementation of $ CCCNOT $ gate: $NOT$ operator is applied to target qubit when the control qubits are in state $ \ket{111} $. This gate requires 4 qubits. We also use an auxiliary qubit. Our qubits are $ q_{aux}, q_3, q_2, q_1, q_0 $, and the auxiliary qubit $q_{aux}$ should be in state $\ket{0}$ after each use. The implementation of the $ CCCNOT $ gate in Qiskit is given below. The short explanations are given as comments.
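As an aside (not part of the original notebook), the built-in method mentioned above can produce the same multi-controlled gate in a single call; a rough sketch, assuming a Qiskit version in which `QuantumCircuit.mct` accepts a list of control qubits and a target qubit (newer releases expose this as `mcx`):

```python
# Hypothetical sketch of Qiskit's built-in multi-controlled NOT; the API varies across versions.
from qiskit import QuantumRegister, QuantumCircuit
q = QuantumRegister(4, "q")
qc = QuantumCircuit(q)
qc.mct([q[3], q[2], q[1]], q[0])  # NOT on q[0], controlled by q[3], q[2], q[1]
qc.draw(output="mpl", reverse_bits=True)
```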
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qaux = QuantumRegister(1,"qaux")
q = QuantumRegister(4,"q")
c = ClassicalRegister(4,"c")
qc = QuantumCircuit(q,qaux,c)
# step 1: set qaux to |1> if both q3 and q2 are in |1>
qc.ccx(q[3],q[2],qaux[0])
# step 2: apply NOT gate to q0 if both qaux and q1 are in |1>
qc.ccx(qaux[0],q[1],q[0])
# step 3: set qaux back to |0> if both q3 and q2 are in |1> by reversing the effect of step 1
qc.ccx(q[3],q[2],qaux[0])
qc.draw(output="mpl",reverse_bits=True)
###Output
_____no_output_____
###Markdown
Now, we execute this circuit on every possible input and verify the correctness of the implementation experimentally.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
all_inputs=[]
for q3 in ['0','1']:
for q2 in ['0','1']:
for q1 in ['0','1']:
for q0 in ['0','1']:
all_inputs.append(q3+q2+q1+q0)
# print(all_inputs)
print("input --> output")
for the_input in all_inputs:
# create the circuit
qaux = QuantumRegister(1,"qaux")
q = QuantumRegister(4,"q")
c = ClassicalRegister(4,"c")
qc = QuantumCircuit(q,qaux,c)
# set the initial value of the circuit w.r.t. the input
if the_input[0] =='1': qc.x(q[3])
if the_input[1] =='1': qc.x(q[2])
if the_input[2] =='1': qc.x(q[1])
if the_input[3] =='1': qc.x(q[0])
# implement the CCNOT gates
qc.ccx(q[3],q[2],qaux[0])
qc.ccx(qaux[0],q[1],q[0])
qc.ccx(q[3],q[2],qaux[0])
# measure the main quantum register
qc.measure(q,c)
# execute the circuit
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1)
counts = job.result().get_counts(qc)
for key in counts: the_output = key
printed_str = the_input[0:3]+" "+the_input[3]+" --> "+the_output[0:3]+" "+the_output[3]
if (the_input!=the_output): printed_str = printed_str + " the output is different than the input"
print(printed_str)
###Output
_____no_output_____
###Markdown
Task 2Provide an implementation of the NOT operator controlled by 4 qubits ($CCCCNOT$) in Qiskit. Verify its correctness by executing your solution on all possible inputs. (See the above example)*You may use two auxiliary qubits.*
###Code
#
# your solution is here
#
###Output
_____no_output_____
###Markdown
click for our solution Task 3Repeat Task 2 for the operator $C_1C_0C_1C_0NOT$: $NOT$ operator is applied to the target qubit if the four control qubits are in state $ \ket{1010} $.
###Code
#
# your solution is here
#
###Output
_____no_output_____
###Markdown
click for our solution Task 4 (extra)Write a function taking a binary string "$ b_1 b_2 b_3 b_4 $" that repeats Task 2 for the operator $ C_{b_1}C_{b_2}C_{b_3}C_{b_4}NOT $ gate, where $ b_1,\ldots,b_4 $ are bits and the $ NOT $ operator is applied to the target qubit if the control qubits are in state $ \ket{b_1b_2b_3b_4} $.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
def c4not(control_state='1111'):
#
# your code is here
#
# try different values
#c4not()
#c4not('1001')
c4not('0011')
#c4not('1101')
#c4not('0000')
###Output
_____no_output_____
###Markdown
$ \newcommand{\bra}[1]{\langle 1|} $$ \newcommand{\ket}[1]{|1\rangle} $$ \newcommand{\braket}[2]{\langle 1|2\rangle} $$ \newcommand{\dot}[2]{ 1 \cdot 2} $$ \newcommand{\biginner}[2]{\left\langle 1,2\right\rangle} $$ \newcommand{\mymatrix}[2]{\left( \begin{array}{1} 2\end{array} \right)} $$ \newcommand{\myvector}[1]{\mymatrix{c}{1}} $$ \newcommand{\myrvector}[1]{\mymatrix{r}{1}} $$ \newcommand{\mypar}[1]{\left( 1 \right)} $$ \newcommand{\mybigpar}[1]{ \Big( 1 \Big)} $$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $$ \newcommand{\onehalf}{\frac{1}{2}} $$ \newcommand{\donehalf}{\dfrac{1}{2}} $$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $$ \newcommand{\vzero}{\myvector{1\\0}} $$ \newcommand{\vone}{\myvector{0\\1}} $$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $$ \newcommand{\myarray}[2]{ \begin{array}{1}2\end{array}} $$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $$ \newcommand{\norm}[1]{ \left\lVert 1 \right\rVert } $$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} 1 \mspace{-1.5mu} \rfloor } $$ \newcommand{\greenbit}[1] {\mathbf{{\color{green}1}}} $$ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}1}}} $$ \newcommand{\redbit}[1] {\mathbf{{\color{red}1}}} $$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}1}}} $$ \newcommand{\blackbit}[1] {\mathbf{{\color{black}1}}} $ Multiple Control Constructions _prepared by Maksim Dimitrijev and Abuzer Yakaryilmaz_[](https://youtu.be/eoFJdS5BwkA) Remember that when appying CNOT gate, NOT operator is applied to the target qubit if the control qubit is in state $\ket{1}$:$$ CNOT= \mymatrix{cc|cc}{\blackbit{1} & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & \bluebit{1} \\ 0 & 0 & \bluebit{1} & 0} . $$How can we obtain the following operator, in which the NOT operator is applied to the target qubit if the control qubit is in state $ \ket{0} $?$$ C_0NOT = \mymatrix{cc|cc}{0 & \bluebit{1} & 0 & 0 \\ \bluebit{1} & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & \blackbit{1}} . $$As also mentioned in the notebook [Operators on Multiple Bits](../classical-systems/CS40_Operators_on_Multiple_Bits.ipynb), we can apply a $ NOT $ operator on the control bit before applying $ CNOT $ operator so that the $ NOT $ operator is applied to the target qubit when the control qubit has been in state $ \ket{0} $. To recover the previous value of the control qubit, we apply the $ NOT $ operator once more after the $ CNOT $ operator. In short: apply $ NOT $ operator to the control qubit, apply $ CNOT $ operator, and, apply $ NOT $ operator to the control qubit.We can implement this idea in Qiskit as follows.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
q = QuantumRegister(2, "q")
c = ClassicalRegister(2, "c")
qc = QuantumCircuit(q,c)
qc.x(q[1])
qc.cx(q[1],q[0])
# Returning control qubit to the initial state
qc.x(q[1])
job = execute(qc,Aer.get_backend('unitary_simulator'), shots = 1)
U=job.result().get_unitary(qc,decimals=3)
print("CNOT(0) = ")
for row in U:
s = ""
for value in row:
s = s + str(round(value.real,2)) + " "
print(s)
qc.draw(output="mpl", reverse_bits=True)
###Output
CNOT(0) =
0.0 1.0 0.0 0.0
1.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0
0.0 0.0 0.0 1.0
###Markdown
By using this trick, more complex conditional operators can be implemented. CCNOTNow we introduce $ CCNOT $ gate: **controlled-controlled-not operator** ([Toffoli gate](https://en.wikipedia.org/wiki/Toffoli_gate)), which is controlled by two qubits. The implementation of $CCNOT$ gate in Qiskit is as follows: circuit.ccx(control-qubit1,control-qubit2,target-qubit)That is, $ NOT $ operator is applied to the target qubit when both control qubits are in state $\ket{1}$. Its matrix representation is as follows:$$ CCNOT = \mymatrix{cc|cc|cc|cc}{\blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & 0 & \bluebit{1} \\ 0 & 0 & 0 & 0 & 0 & 0 & \bluebit{1} & 0}. $$ Task 1Implement each of the following operators in Qiskit by using three qubits. Verify your implementation by using "unitary_simulator" backend. $$ C_0C_0NOT = \mymatrix{cc|cc|cc|cc}{0 & \bluebit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \bluebit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}, ~~ C_0C_1NOT = \mymatrix{cc|cc|cc|cc}{ \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & \bluebit{1} & 0 & 0 & 0 & 0 \\ 0 & 0 & \bluebit{1} & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 \\ 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}, ~~ \mbox{and} ~~ C_1C_0NOT = \mymatrix{cc|cc|cc|cc}{\blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\ 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & \blackbit{1} & 0 & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & \bluebit{1} & 0 & 0 \\ 0 & 0 & 0 & 0 & \bluebit{1} & 0 & 0 & 0 \\ \hline 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1} & 0 \\ 0 & 0 & 0 & 0 & 0 & 0 & 0 & \blackbit{1}}. $$
###Code
#
# your solution is here
#
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
q = QuantumRegister(3,"q")
c = ClassicalRegister(3,"c")
qc = QuantumCircuit(q,c)
qc.x(q[2])
qc.x(q[1])
qc.ccx(q[2],q[1],q[0])
qc.x(q[2])
qc.x(q[1])
job = execute(qc,Aer.get_backend('unitary_simulator'), shots = 1)
U=job.result().get_unitary(qc,decimals=3)
print("CCNOT(00) = ")
for row in U:
s = ""
for value in row:
s = s + str(round(value.real,2)) + " "
print(s)
qc.draw(output="mpl",reverse_bits=True)
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
q = QuantumRegister(3,"q")
c = ClassicalRegister(3,"c")
qc = QuantumCircuit(q,c)
qc.x(q[2])
qc.ccx(q[2],q[1],q[0])
qc.x(q[2])
job = execute(qc,Aer.get_backend('unitary_simulator'), shots = 1)
U=job.result().get_unitary(qc,decimals=3)
print("CCNOT(01) = ")
for row in U:
s = ""
for value in row:
s = s + str(round(value.real,2)) + " "
print(s)
qc.draw(output="mpl",reverse_bits=True)
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
q = QuantumRegister(3,"q")
c = ClassicalRegister(3,"c")
qc = QuantumCircuit(q,c)
qc.x(q[1])
qc.ccx(q[2],q[1],q[0])
qc.x(q[1])
job = execute(qc,Aer.get_backend('unitary_simulator'), shots = 1)
U=job.result().get_unitary(qc,decimals=3)
print("CCNOT(10) = ")
for row in U:
s = ""
for value in row:
s = s + str(round(value.real,2)) + " "
print(s)
qc.draw(output="mpl",reverse_bits=True)
###Output
CCNOT(00) =
0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0
1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
CCNOT(01) =
1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
CCNOT(10) =
1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0
0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0
0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0
###Markdown
click for our solution More controlsHere we present basic methods on how to implement $ NOT $ gates controlled by more than two qubits by using $CNOT$, $ CCNOT $, and some ancilla (auxiliary) qubits. *(Note that Qiskit has a method called "mct" to implement such gates. Another multiple-controlled operator in Qiskit is "mcrz".)* Implementation of CCCNOT gateWe give the implementation of $ CCCNOT $ gate: $NOT$ operator is applied to target qubit when the control qubits are in state $ \ket{111} $. This gate requires 4 qubits. We also use an auxiliary qubit. Our qubits are $ q_{aux}, q_3, q_2, q_1, q_0 $, and the auxiliary qubit $q_{aux}$ should be in state $\ket{0}$ after each use. The implementation of the $ CCCNOT $ gate in Qiskit is given below. The short explanations are given as comments.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qaux = QuantumRegister(1,"qaux")
q = QuantumRegister(4,"q")
c = ClassicalRegister(4,"c")
qc = QuantumCircuit(q,qaux,c)
# step 1: set qaux to |1> if both q3 and q2 are in |1>
qc.ccx(q[3],q[2],qaux[0])
# step 2: apply NOT gate to q0 if both qaux and q1 are in |1>
qc.ccx(qaux[0],q[1],q[0])
# step 3: set qaux back to |0> if both q3 and q2 are in |1> by reversing the effect of step 1
qc.ccx(q[3],q[2],qaux[0])
qc.draw(output="mpl",reverse_bits=True)
###Output
_____no_output_____
###Markdown
Now, we execute this circuit on every possible input and verify the correctness of the implementation experimentally.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
all_inputs=[]
for q3 in ['0','1']:
for q2 in ['0','1']:
for q1 in ['0','1']:
for q0 in ['0','1']:
all_inputs.append(q3+q2+q1+q0)
# print(all_inputs)
print("input --> output")
for the_input in all_inputs:
# create the circuit
qaux = QuantumRegister(1,"qaux")
q = QuantumRegister(4,"q")
c = ClassicalRegister(4,"c")
qc = QuantumCircuit(q,qaux,c)
# set the initial value of the circuit w.r.t. the input
if the_input[0] =='1': qc.x(q[3])
if the_input[1] =='1': qc.x(q[2])
if the_input[2] =='1': qc.x(q[1])
if the_input[3] =='1': qc.x(q[0])
# implement the CCNOT gates
qc.ccx(q[3],q[2],qaux[0])
qc.ccx(qaux[0],q[1],q[0])
qc.ccx(q[3],q[2],qaux[0])
# measure the main quantum register
qc.measure(q,c)
# execute the circuit
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1)
counts = job.result().get_counts(qc)
for key in counts: the_output = key
printed_str = the_input[0:3]+" "+the_input[3]+" --> "+the_output[0:3]+" "+the_output[3]
if (the_input!=the_output): printed_str = printed_str + " the output is different than the input"
print(printed_str)
###Output
input --> output
000 0 --> 000 0
000 1 --> 000 1
001 0 --> 001 0
001 1 --> 001 1
010 0 --> 010 0
010 1 --> 010 1
011 0 --> 011 0
011 1 --> 011 1
100 0 --> 100 0
100 1 --> 100 1
101 0 --> 101 0
101 1 --> 101 1
110 0 --> 110 0
110 1 --> 110 1
111 0 --> 111 1 the output is different than the input
111 1 --> 111 0 the output is different than the input
###Markdown
Task 2Provide an implementation of the NOT operator controlled by 4 qubits ($CCCCNOT$) in Qiskit. Verify its correctness by executing your solution on all possible inputs. (See the above example)*You may use two auxiliary qubits.*
###Code
#
# your solution is here
#
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.draw(output="mpl",reverse_bits=True)
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
all_inputs=[]
for q4 in ['0','1']:
for q3 in ['0','1']:
for q2 in ['0','1']:
for q1 in ['0','1']:
for q0 in ['0','1']:
all_inputs.append(q4+q3+q2+q1+q0)
#print(all_inputs)
print("input --> output")
for the_input in all_inputs:
# create the circuit
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
# set the initial value of the circuit w.r.t. the input
if the_input[0] =='1': qc.x(q[4])
if the_input[1] =='1': qc.x(q[3])
if the_input[2] =='1': qc.x(q[2])
if the_input[3] =='1': qc.x(q[1])
if the_input[4] =='1': qc.x(q[0])
# implement the CCNOT gates
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
# measure the main quantum register
qc.measure(q,c)
# execute the circuit
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1)
counts = job.result().get_counts(qc)
for key in counts: the_output = key
printed_str = the_input[0:4]+" "+the_input[4]+" --> "+the_output[0:4]+" "+the_output[4]
if (the_input!=the_output): printed_str = printed_str + " the output is different than the input"
print(printed_str)
###Output
input --> output
0000 0 --> 0000 0
0000 1 --> 0000 1
0001 0 --> 0001 0
0001 1 --> 0001 1
0010 0 --> 0010 0
0010 1 --> 0010 1
0011 0 --> 0011 0
0011 1 --> 0011 1
0100 0 --> 0100 0
0100 1 --> 0100 1
0101 0 --> 0101 0
0101 1 --> 0101 1
0110 0 --> 0110 0
0110 1 --> 0110 1
0111 0 --> 0111 0
0111 1 --> 0111 1
1000 0 --> 1000 0
1000 1 --> 1000 1
1001 0 --> 1001 0
1001 1 --> 1001 1
1010 0 --> 1010 0
1010 1 --> 1010 1
1011 0 --> 1011 0
1011 1 --> 1011 1
1100 0 --> 1100 0
1100 1 --> 1100 1
1101 0 --> 1101 0
1101 1 --> 1101 1
1110 0 --> 1110 0
1110 1 --> 1110 1
1111 0 --> 1111 1 the output is different than the input
1111 1 --> 1111 0 the output is different than the input
###Markdown
click for our solution Task 3Repeat Task 2 for the operator $C_1C_0C_1C_0NOT$: $NOT$ operator is applied to the target qubit if the four control qubits are in state $ \ket{1010} $.
###Code
#
# your solution is here
#
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
qc.x(q[3])
qc.x(q[1])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.x(q[3])
qc.x(q[1])
qc.draw(output="mpl",reverse_bits=True)
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
all_inputs=[]
for q4 in ['0','1']:
for q3 in ['0','1']:
for q2 in ['0','1']:
for q1 in ['0','1']:
for q0 in ['0','1']:
all_inputs.append(q4+q3+q2+q1+q0)
#print(all_inputs)
print("input --> output")
for the_input in all_inputs:
# create the circuit
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
# set the initial value of the circuit w.r.t. the input
if the_input[0] =='1': qc.x(q[4])
if the_input[1] =='1': qc.x(q[3])
if the_input[2] =='1': qc.x(q[2])
if the_input[3] =='1': qc.x(q[1])
if the_input[4] =='1': qc.x(q[0])
# implement the CCNOT gates
qc.x(q[3])
qc.x(q[1])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.x(q[3])
qc.x(q[1])
# measure the main quantum register
qc.measure(q,c)
# execute the circuit
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1)
counts = job.result().get_counts(qc)
for key in counts: the_output = key
printed_str = the_input[0:4]+" "+the_input[4]+" --> "+the_output[0:4]+" "+the_output[4]
if (the_input!=the_output): printed_str = printed_str + " the output is different than the input"
print(printed_str)
###Output
input --> output
0000 0 --> 0000 0
0000 1 --> 0000 1
0001 0 --> 0001 0
0001 1 --> 0001 1
0010 0 --> 0010 0
0010 1 --> 0010 1
0011 0 --> 0011 0
0011 1 --> 0011 1
0100 0 --> 0100 0
0100 1 --> 0100 1
0101 0 --> 0101 0
0101 1 --> 0101 1
0110 0 --> 0110 0
0110 1 --> 0110 1
0111 0 --> 0111 0
0111 1 --> 0111 1
1000 0 --> 1000 0
1000 1 --> 1000 1
1001 0 --> 1001 0
1001 1 --> 1001 1
1010 0 --> 1010 1 the output is different than the input
1010 1 --> 1010 0 the output is different than the input
1011 0 --> 1011 0
1011 1 --> 1011 1
1100 0 --> 1100 0
1100 1 --> 1100 1
1101 0 --> 1101 0
1101 1 --> 1101 1
1110 0 --> 1110 0
1110 1 --> 1110 1
1111 0 --> 1111 0
1111 1 --> 1111 1
###Markdown
click for our solution Task 4 (extra)Write a function taking a binary string "$ b_1 b_2 b_3 b_4 $" that repeats Task 2 for the operator $ C_{b_1}C_{b_2}C_{b_3}C_{b_4}NOT $ gate, where $ b_1,\ldots,b_4 $ are bits and the $ NOT $ operator is applied to the target qubit if the control qubits are in state $ \ket{b_1b_2b_3b_4} $.
###Code
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
all_inputs=[]
for q4 in ['0','1']:
for q3 in ['0','1']:
for q2 in ['0','1']:
for q1 in ['0','1']:
for q0 in ['0','1']:
all_inputs.append(q4+q3+q2+q1+q0)
#print(all_inputs)
def c4not(control_state='1111'):
#
# drawing the circuit
#
print("Control state is",control_state)
print("Drawing the circuit:")
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
for b in range(4):
if control_state[b] == '0':
qc.x(q[4-b])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
for b in range(4):
if control_state[b] == '0':
qc.x(q[4-b])
display(qc.draw(output="mpl",reverse_bits=True))
#
# executing the operator on all possible inputs
#
print("Control state is",control_state)
print("input --> output")
for the_input in all_inputs:
# create the circuit
qaux = QuantumRegister(2,"qaux")
q = QuantumRegister(5,"q")
c = ClassicalRegister(5,"c")
qc = QuantumCircuit(q,qaux,c)
# set the initial value of the circuit w.r.t. the input
if the_input[0] =='1': qc.x(q[4])
if the_input[1] =='1': qc.x(q[3])
if the_input[2] =='1': qc.x(q[2])
if the_input[3] =='1': qc.x(q[1])
if the_input[4] =='1': qc.x(q[0])
# implement the CCNOT gates
for b in range(4):
if control_state[b] == '0':
qc.x(q[4-b])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
qc.ccx(qaux[1],qaux[0],q[0])
qc.ccx(q[4],q[3],qaux[1])
qc.ccx(q[2],q[1],qaux[0])
for b in range(4):
if control_state[b] == '0':
qc.x(q[4-b])
# measure the main quantum register
qc.measure(q,c)
# execute the circuit
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1)
counts = job.result().get_counts(qc)
for key in counts: the_output = key
printed_str = the_input[0:4]+" "+the_input[4]+" --> "+the_output[0:4]+" "+the_output[4]
if (the_input!=the_output): printed_str = printed_str + " the output is different than the input"
print(printed_str)
# try different values
#c4not()
#c4not('1001')
c4not('0011')
#c4not('1101')
#c4not('0000')
###Output
Control state is 0011
Drawing the circuit:
Thesis(CNN).ipynb | ###Markdown
Setup
###Code
# from google.colab import drive
# drive.mount('/content/drive/')
# %cd drive/MyDrive/skripsi2.8/
# uncomment the three lines above when using a hosted (Colab) runtime
!python --version
import tensorflow as tf, matplotlib.pyplot as plt, numpy as np, matplotlib.pyplot as plt, matplotlib.patches as patches, os
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from framework.utils import bbox_utils, data_utils, drawing_utils, eval_utils, io_utils, train_utils
from framework.models import faster_rcnn
# tf.autograph.set_verbosity(0)
# import logging
# logging.getLogger("tensorflow").setLevel(logging.ERROR)
tf.__version__
# !pip list
###Output
_____no_output_____
###Markdown
Read Dataset from TFRecord
###Code
AUTOTUNE = tf.data.experimental.AUTOTUNE
IMAGE_SIZE = 512 # make sure had same size with the picture
def read_tfrecord(example):
tfrecord_format = {
"filename": tf.io.FixedLenFeature([], tf.string),
"pic": tf.io.FixedLenFeature([], tf.string),
"bbox": tf.io.FixedLenFeature([], tf.string),
"label": tf.io.FixedLenFeature([], tf.string)
}
example = tf.io.parse_single_example(example, tfrecord_format)
filename = tf.cast(example["filename"], tf.string)
image = tf.io.parse_tensor(example["pic"], out_type = tf.uint8)
bbox = tf.io.parse_tensor(example["bbox"], out_type = tf.float32)
label = tf.io.parse_tensor(example["label"], out_type = tf.int32)
return {"filename": filename, "image": image, "bbox": bbox, "label": label}
def load_dataset(filenames):
ignore_order = tf.data.Options()
ignore_order.experimental_deterministic = False # disable order, increase speed
dataset = tf.data.TFRecordDataset(filenames) # automatically interleaves reads from multiple files
dataset = dataset.with_options(ignore_order) # uses data as soon as it streams in, rather than in its original order
dataset = dataset.map(read_tfrecord)
return dataset
def get_dataset(filenames, train=True):
dataset = load_dataset(filenames)
if train:
dataset = dataset.shuffle(2048)
dataset = dataset.prefetch(buffer_size=AUTOTUNE)
return dataset
def read_label_map(label_map_path):
item_id = None
item_name = None
items = {}
with open(label_map_path, "r") as file:
for line in file:
line = line.replace(" ", "").strip()  # assign the result: str.replace() returns a new string
if line == "item{":
pass
elif line == "}":
pass
elif "id" in line:
item_id = int(line.split(":", 1)[1].strip())
elif "name" in line:
item_name = line.split(":", 1)[1].replace("'", "").replace("\"", "").strip()
if item_id is not None and item_name is not None:
items[item_name] = item_id
item_id = None
item_name = None
return items
label_map_path = "./data_preparation/label_map.pbtxt"
label_map_dict = read_label_map(label_map_path)
def get_label_text(result, doc = label_map_dict):
for key, value in doc.items():
if(value == result + 1):
return key
return "Unpredictable"
image_type = "normal"
train_data = get_dataset(f"./data_preparation/{image_type}_train.tfrecord")
test_data = get_dataset(f"./data_preparation/{image_type}_test.tfrecord", train=False)
train_data
def show_data(data, n):
for dat in data.take(n):
plt.imshow(dat["image"])
for coord in dat["bbox"]: # bbox is ymin, xmin, ymax, xmax
coord *= IMAGE_SIZE
rect = patches.Rectangle(
(coord[1].numpy(), coord[0].numpy()), # x1, y1
coord[3].numpy() - coord[1].numpy(), # width
coord[2].numpy() - coord[0].numpy(), # height
linewidth = 2, edgecolor = "r", fill = False)
plt.gca().add_patch(rect)
plt.show()
show_data(train_data, 20)
###Output
_____no_output_____
###Markdown
Init Variable
###Code
batch_size = 4
epochs = 1
# backbone = "vgg16"
# backbone = "mobilenet_v2"
backbone = "resnet50"
hyper_params = train_utils.get_hyper_params(backbone)
train_total_item = len(list(train_data))
labels = list(label_map_dict.keys())
print(labels)
# We add 1 class for background
hyper_params["total_labels"] = len(labels) + 1
train_data = train_data.map(lambda data : data_utils.preprocessing_before_frcnn(data, IMAGE_SIZE, IMAGE_SIZE))
data_shapes = data_utils.get_data_shapes()
padding_values = data_utils.get_padding_values()
train_data = train_data.padded_batch(batch_size, padded_shapes=data_shapes, padding_values=padding_values)
anchors = bbox_utils.generate_anchors(hyper_params)
frcnn_train_feed = train_utils.faster_rcnn_generator(train_data, anchors, hyper_params)
if (backbone == "vgg16"):
from framework.models.rpn_vgg16 import get_rpn_model
elif (backbone == "mobilenet_v2"):
from framework.models.rpn_mobilenet_v2 import get_rpn_model
elif (backbone == "resnet50"):
from framework.models.rpn_resnet50 import get_rpn_model
###Output
_____no_output_____
###Markdown
Train Model
###Code
# Load weights
frcnn_weight_path = io_utils.get_weight_path("faster_rcnn", backbone)
frcnn_model_path = io_utils.get_model_path("faster_rcnn", backbone)
load_weights = False
load_model = False
if load_model:
frcnn_model = tf.keras.models.load_model(frcnn_model_path, custom_objects={
"RoIBBox": faster_rcnn.RoIBBox,
"RoIDelta": faster_rcnn.RoIDelta,
"RoIPooling": faster_rcnn.RoIPooling
})
# frcnn_model.compile(optimizer=tf.optimizers.SGD(momentum=7e-1)) # uncomment if you need to recompile
else:
rpn_model, feature_extractor = get_rpn_model(hyper_params)
frcnn_model = faster_rcnn.get_model_frcnn(
feature_extractor, rpn_model, anchors, hyper_params)
frcnn_model.compile(optimizer=tf.optimizers.SGD(learning_rate=92e-5, momentum=8e-1))
faster_rcnn.init_model_frcnn(frcnn_model, hyper_params)
if load_weights:
frcnn_model.load_weights(frcnn_weight_path)
log_path = io_utils.get_log_path("faster_rcnn", backbone)
checkpoint_callback = ModelCheckpoint(frcnn_model_path, monitor="loss")
weight_checkpoint_callback = ModelCheckpoint(
frcnn_weight_path,monitor="loss",
save_best_only=True, save_weights_only=True)
tensorboard_callback = TensorBoard(log_dir=log_path)
# history_path = io_utils.get_history_path("faster_rcnn", backbone)
# csv_callback = tf.keras.callbacks.CSVLogger(history_path,
# separator=",", append=True)
# total_epoch = 0
## result: :
# schedule_lr_callback = tf.keras.callbacks.LearningRateScheduler(
# lambda ep: 1e-5 * 10 ** ((ep + total_epoch) / 30))
step_size_train = train_utils.get_step_size(train_total_item, batch_size)
history = frcnn_model.fit(frcnn_train_feed,
steps_per_epoch=step_size_train,
verbose = 1,
epochs=epochs,
callbacks=[weight_checkpoint_callback,
checkpoint_callback,
tensorboard_callback])
###Output
_____no_output_____
###Markdown
Learning Rate Hypothesis
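The learning rates on the x-axis below are reconstructed from the exponential schedule of the (commented-out) `LearningRateScheduler` run that produced this history, i.e. $\mathrm{lr}(e) = 10^{-5} \cdot 10^{\,e/30}$ at epoch $e$; the usual reading of such a plot is to pick a rate somewhat below the point where the loss starts to blow up.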
###Code
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd, numpy as np
rcParams['figure.figsize'] = (18, 8)
rcParams['axes.spines.top'] = False
rcParams['axes.spines.right'] = False
history_path = io_utils.get_history_path("faster_rcnn", backbone)
hist_data = pd.read_csv(history_path)
history_loss = np.array(hist_data["loss"])
learning_rates = 1e-5 * (10 ** (np.arange(len(history_loss)) / 30))
plt.semilogx(
learning_rates,
history_loss,
lw=3, color='#000'
)
plt.title('Learning rate vs. loss', size=20)
plt.xlabel('Learning rate', size=14)
plt.ylabel('Loss', size=14);
###Output
_____no_output_____
###Markdown
Evaluate Model
###Code
labels = ["bg"] + labels
test_total_item = len(list(test_data))
test_data = test_data.map(lambda data : data_utils.preprocessing_before_frcnn(
data, IMAGE_SIZE, IMAGE_SIZE))
test_data = test_data.padded_batch(
batch_size, padded_shapes=data_shapes, padding_values=padding_values)
load_path = io_utils.get_weight_path("faster_rcnn", backbone)
rpn_model, feature_extractor = get_rpn_model(hyper_params)
frcnn_test_model = faster_rcnn.get_model_frcnn(feature_extractor, rpn_model, anchors, hyper_params, mode="test")
frcnn_test_model.load_weights(load_path)
step_size = train_utils.get_step_size(test_total_item, batch_size)
pred_bboxes, pred_labels, pred_scores = frcnn_test_model.predict(test_data, steps=step_size, verbose=1)
stats, hist = eval_utils.evaluate_predictions(test_data, pred_bboxes, pred_labels, pred_scores, labels, batch_size)
print("DONE")
stats
hist
###Output
_____no_output_____ |
3_uq/1_lstm/xx1_lstm_deep_ensemble_noise.ipynb | ###Markdown
Method: LSTM Dataset: Lorenz-96, F = 8 Purpose: Uncertainty Quantification - Deep Ensemble 1. Set-up
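For reference (a standard recipe, not code taken from this notebook), a deep ensemble here means training $M$ copies of the LSTM that differ only in their random seed and summarizing their forecasts pointwise by the ensemble mean and spread: $\hat{\mu}_t = \frac{1}{M}\sum_{m=1}^{M} \hat{y}_t^{(m)}$ and $\hat{\sigma}_t^2 = \frac{1}{M}\sum_{m=1}^{M} \big(\hat{y}_t^{(m)} - \hat{\mu}_t\big)^2$.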
###Code
# GPU
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Package
import sys
sys.path.append("../..")
from create_data import load_data
from utils import * # assumed to provide NUM_TEST, WARM_UP_TEST, EARLY_STOPPING and sigmoid used below
import numpy as np
import matplotlib.pyplot as plt
from time import time
from functools import partial
import jax
import jax.numpy as jnp
from jax.nn.initializers import glorot_normal, normal
from jax.example_libraries import optimizers
train, test = load_data("Lorenz 96, F = 8", "../../data/lorenz8", 0.5)
np.random.seed(1)
train.data = train.data + np.random.normal(0, 1e-1, train.data.shape)
print(f"Train size: {train.data.shape}")
print(f"Test size: {test.data.shape}")
###Output
Train size: (90000, 40)
Test size: (90000, 40)
###Markdown
**Create test set**
###Code
L_forecast_test = 400 # steps to forecast forward (when testing)
np.random.seed(1)
data_test = test.data
T_test, data_dim = data_test.shape
possible_idx = T_test - (L_forecast_test + 1) # minus number of steps forward, and the warm-up period
T_indices = np.random.randint(0, possible_idx, size = NUM_TEST)
t_past_batch = np.repeat(T_indices[:, None], WARM_UP_TEST, axis = 1).astype(int) # warm-up indices: each start index repeated WARM_UP_TEST times
t_pred_batch = (T_indices[:, None] + np.arange(1, 1 + L_forecast_test)[None, :].astype(int))
X_test = data_test[t_past_batch]
y_test = data_test[t_pred_batch]
print(f"Test input size: {X_test.shape}") # Number of test points x input length x dim
print(f"Test output size: {y_test.shape}") # Number of test points x horizon x dim
###Output
Test input size: (100, 2000, 40)
Test output size: (100, 400, 40)
###Markdown
2. LSTM Implementation
###Code
def LSTM(h_dim, data_dim, W_init = glorot_normal(), b_init = normal()):
"""
args:
====
h_dim: dimension of the internal state
data_dim: dimensionity of the time series
outputs:
======
init_fun: function to initialize the parameters
process: function to process a time-series and compute the final prediction and final internal state
forecast: function that, given a pair (internal-state, input), computes the next T predictions
"""
def init_fun(rng):
"""
This function initialize the weights of the RNN
args:
====
rng: jax RNG
outputs:
======
params: a tuple of parameters
"""
# Forget Layer
k1, k2, k3 = jax.random.split(rng, num = 3)
fU = W_init(k1, (h_dim, data_dim))
fW = W_init(k2, (h_dim, h_dim))
fb = b_init(k3, (h_dim,))
# Input Layer
k1, k2, k3 = jax.random.split(rng, num = 3)
iU = W_init(k1, (h_dim, data_dim))
iW = W_init(k2, (h_dim, h_dim))
ib = b_init(k3, (h_dim,))
# Candidate layer
k1, k2, k3 = jax.random.split(rng, num = 3)
gU = W_init(k1, (h_dim, data_dim))
gW = W_init(k2, (h_dim, h_dim))
gb = b_init(k3, (h_dim,))
# Output layer
k1, k2, k3 = jax.random.split(rng, num = 3)
oU = W_init(k1, (h_dim, data_dim))
oW = W_init(k2, (h_dim, h_dim))
ob = b_init(k3, (h_dim,))
# Dense layer (hidden -> y)
k1, k2 = jax.random.split(rng, num = 2)
dO = W_init(k1, (data_dim, h_dim))
db = b_init(k2, (data_dim,))
params = fU, fW, fb, iU, iW, ib, gU, gW, gb, oU, oW, ob, dO, db
return params
def process(params, time_series):
"""
This function takes a time-series in input, pass it through the RNN,
and finally outputs the last prediction and internal state
args:
====
params: tuple of parameters
time_series: data of dimension (T, dim_data)
outputs:
=======
c_final: jax vector of dimension h_dim
h_final: jax vector of dimension h_dim
pred_traj[-1]: last prediction
"""
fU, fW, fb, iU, iW, ib, gU, gW, gb, oU, oW, ob, dO, db = params
c_zero = np.zeros((h_dim, ))
h_zero = np.zeros((h_dim, ))
# forward pass
def process_internal(start, x):
c, h = start
forget_gate = sigmoid(jnp.dot(fU, x) + jnp.dot(fW, h) + fb)
input_gate = sigmoid(jnp.dot(iU, x) + jnp.dot(iW, h) + ib)
cand_gate = jnp.tanh(jnp.dot(gU, x) + jnp.dot(gW, h) + gb)
c_new = sigmoid(forget_gate * c + input_gate * cand_gate)
output_gate = sigmoid(jnp.dot(oU, x) + jnp.dot(oW, h) + ob)
h_new = jnp.tanh(c_new) * output_gate
y = x + dO @ h_new + db
return (c_new, h_new), y
(c_final, h_final), pred_traj = jax.lax.scan(process_internal, (c_zero, h_zero), time_series)
return (c_final, h_final), pred_traj[-1]
def forecast(params, internal_states, x_input, horizon):
"""
This function takes in an internal state and a first input and produces
predictions over a finite horizon.
args:
====
params: tuple of parameters
internal_states = (c_internal, h_internal): internal state values of c and h
x_input: jax vector of dimension dim_data
horizon: horizon of the prediction
outputs:
=======
preds: a trajectory of predictions of dimension (horizon, dim_data)
"""
c_internal, h_internal = internal_states
# extract parameters
fU, fW, fb, iU, iW, ib, gU, gW, gb, oU, oW, ob, dO, db = params
# forward pass
def forecast_internal(triple_c_h_x, _ ):
cell, hidden, x = triple_c_h_x
forget_gate = sigmoid(jnp.dot(fU, x) + jnp.dot(fW, hidden) + fb)
input_gate = sigmoid(jnp.dot(iU, x) + jnp.dot(iW, hidden) + ib)
cand_gate = jnp.tanh(jnp.dot(gU, x) + jnp.dot(gW, hidden) + gb)
c_new = sigmoid(forget_gate * cell + input_gate * cand_gate)
output_gate = sigmoid(jnp.dot(oU, x) + jnp.dot(oW, hidden) + ob)
h_new = jnp.tanh(c_new) * output_gate
y = x + dO @ h_new + db
return (c_new, h_new, y), y
_, pred_traj = jax.lax.scan(forecast_internal, (c_internal, h_internal, x_input), None, length=horizon)
# return the trajectory of predictions
return pred_traj
return init_fun, process, forecast
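# Usage sketch (names and shapes are illustrative only, not executed here):
# init_fun, process, forecast = LSTM(h_dim=100, data_dim=40)
# params = init_fun(jax.random.PRNGKey(0))
# (c, h), first_pred = process(params, x_past)         # x_past: (T_past, 40)
# future = forecast(params, (c, h), first_pred, 400)   # future: (400, 40)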
def get_parameters(nn_size, seed, batch_size, L_past, L_forecast_train,
num_epoch, lr_schedule, early_stopping = EARLY_STOPPING,
early_stopping_baseline = 1.1):
assert len(num_epoch) == len(lr_schedule)
def training(x, y, init_params):
@jax.jit
def step(i, opt_state, x_batch, y_batch):
params = get_params(opt_state)
value, g = jax.value_and_grad(mse)(params, x_batch, y_batch)
opt_state = opt_update(i, g, opt_state)
return get_params(opt_state), opt_state, value
@partial(jax.jit, static_argnums=2)
def make_forecast(params, x_batch, horizon):
# pass the data through the RNN.
# note that "preds" is the first forecasts
hs, preds = process_batch(params, x_batch)
# compute the (L_forecast-1) next forecasts
y_pred = forecast_batch(params, hs, preds, horizon-1)
#stick all the forecasts together
y_pred = jnp.concatenate([preds[:, None,:], y_pred], axis=1)
return y_pred
@jax.jit
def mse(params, x_batch, y_truth):
"""
For each time-series in a batch, forecasts over a finite horizon
and compute the MSE.
args:
====
params: neural parameters
x_batch: a batch of inputs with dimension (batch_size, T_past, dim_data)
y_truth: a batch of values to forecasts with dimension (batch_size, T_future, dim_data)
outputs:
=======
MSE: MSE between forecasts and targets
"""
# horizon of the forecast
L_forecast = y_truth.shape[1]
y_pred = make_forecast(params, x_batch, L_forecast)
#compute MSE
error = y_pred - y_truth
mu_loss = jnp.mean(error**2)
return mu_loss
start = time()
loss_train_traj = []
loss_train_all_traj = []
overall_best_params = init_params
overall_best_mse = 999999999
# train/val split
t_size = int(0.9 * train_size)
v_size = train_size - t_size
T_indices_val = np.arange(t_size, train_size - (L_forecast_test//2 + L_past))
t_start_val = T_indices_val[::10]
t_past_batch_val = (t_start_val[:,None] + np.arange(L_past)[None,:]).astype(int)
t_pred_batch_val = (t_start_val[:,None] + np.arange(L_past,L_past+L_forecast_test//2)[None,:]).astype(int)
x_val = data_test[t_past_batch_val]
y_val = data_test[t_pred_batch_val]
print("Backpropogation start", end = "\n\n")
for i, lr in enumerate(lr_schedule):
opt_init, opt_update, get_params = optimizers.adam(step_size = lr)
opt_state = opt_init(overall_best_params)
counter = 0
best_mse = 999999999
for epoch in range(num_epoch[i]):
e_start = time()
# randomize the order of the training data
T_indices = np.arange(t_size - (L_forecast_train + L_past))
np.random.shuffle(T_indices)
# training
loss_epoch_train = []
for k in range(t_size // batch_size + 1):
# create a batch of data
t_start = T_indices[np.arange(k*batch_size, (k+1)*batch_size).astype(int) % len(T_indices)] # start of each time series in the batch
# create 2d array of dimension (batch_size, L_past) containing all the time indices
                    t_past_batch = (t_start[:,None] + np.arange(L_past)[None,:]).astype(int) # (batch_size, L_past) indices of the past window
t_pred_batch = (t_start[:,None] + np.arange(L_past,L_past+L_forecast_train)[None,:]).astype(int)
#create batch of dimension (batch_size, L_past, data_dim)
x_batch = x[t_past_batch]
y_batch = y[t_pred_batch]
params, opt_state, loss_current = step(k, opt_state, x_batch, y_batch) # update
loss_epoch_train.append(loss_current)
mse_train = np.mean(loss_epoch_train)
# validation
mse_val = mse(params, x_val, y_val)
if best_mse > mse_val: # Improvement
counter = 0
best_mse = mse_val
best_params = params
else:
counter += 1
e_end = time()
if (epoch + 1) % 10 == 0:
print(f"Epoch {epoch + 1}: Time taken = {e_end - e_start:.2f} | Train loss = {mse_train:.7f} | Val loss = {mse_val: .7f}")
if best_mse < early_stopping_baseline and counter >= early_stopping:
print(f"EARLY STOPPING. Epoch {epoch + 1}: Train loss = {mse_train:.7f} | Val loss = {mse_val: .7f}")
break
print(f"Best Validation MSE: {best_mse:.7f}")
if best_mse < overall_best_mse: # Best round so far
print("IMPROVED VALIDATION MSE")
overall_best_mse = best_mse
overall_best_params = best_params
print()
end = time()
print(f"Total time: {end - start:.2f}")
return overall_best_params
start = time()
x, y = train.data[:-1], train.data[1:]
    train_size, data_dim = x.shape
np.random.seed(seed)
key = jax.random.PRNGKey(seed)
# Initialize LSTM
init_fun, process, forecast = LSTM(nn_size, data_dim) # LSTM Network
process_batch = jax.jit(jax.vmap(process, in_axes=(None,0)))
forecast_batch = jax.jit(jax.vmap(forecast, in_axes=(None,0,0,None)), static_argnums=3)
init_params = init_fun(key)
final_params = training(x, y, init_params)
end = time()
print(f"Complete. Time taken: {end - start:.2f}s")
return final_params, (process_batch, forecast_batch)
def get_test_pred(data_test, params, lstm_fx):
@partial(jax.jit, static_argnums=2)
def make_forecast(params, x_batch, horizon):
pbatch, fbatch = lstm_fx
# pass the data through the RNN.
# note that "preds" is the first forecasts
hs, preds = pbatch(params, x_batch)
# compute the (L_forecast-1) next forecasts
y_pred = fbatch(params, hs, preds, horizon-1)
#stick all the forecasts together
y_pred = jnp.concatenate([preds[:, None,:], y_pred], axis=1)
return y_pred
@jax.jit
def loss(params, x_batch, y_truth):
"""
        For each time series in a batch, forecast over a finite horizon
        and compute the MSE.
args:
====
params: neural parameters
x_batch: a batch of inputs with dimension (batch_size, T_past, dim_data)
        y_truth: a batch of values to forecast, with dimension (batch_size, T_future, dim_data)
outputs:
=======
MSE: MSE between forecasts and targets
"""
# horizon of the forecast
L_forecast = y_truth.shape[1]
y_pred = make_forecast(params, x_batch, L_forecast)
#compute MSE
error = y_pred - y_truth
return jnp.mean(error**2)
start = time()
num_data_test, L_past, data_dim = data_test.shape # testing ex, # steps used before, dim of data
mu_pred = make_forecast(params, data_test, L_forecast_test)
end = time()
print(f"Testing complete. Time taken: {end - start:.2f}")
return np.array(mu_pred)
###Output
_____no_output_____
###Markdown
3. Best Parameters
###Code
nn_size = 500
L_forecast_train = 4
L_past = 4
b_size = 128 # Batch size
lr_list = [1e-3, 1e-4, 1e-5, 1e-6] # Learning rate schedule
epoch_list = [200, 200, 200, 200] # Number of epochs for each learning rate
###Output
_____no_output_____
###Markdown
4. Ensemble
###Code
res_folder = os.path.join("results", "ensemble_noise")
def run_seed(seed):
"""
    Runs the experiment with the best hyperparameters for the given seed and saves the test-set predictions to a pickle file in `res_folder`.
"""
params, lstm_fx = get_parameters(nn_size = nn_size, seed = seed, batch_size = b_size, L_past = L_past,
L_forecast_train = L_forecast_train, num_epoch = epoch_list, lr_schedule = lr_list,
early_stopping = 50)
mean_pred = get_test_pred(X_test, params, lstm_fx)
file_name = "mu_preds_" + str(seed) + ".pkl"
save_obj(mean_pred, res_folder, file_name)
###Output
_____no_output_____
###Markdown
4.1 Seed 2
###Code
run_seed(2)
###Output
Backpropogation start
Epoch 10: Time taken = 3.18 | Train loss = 0.0210914 | Val loss = 1.4901116
Epoch 20: Time taken = 3.10 | Train loss = 0.0183117 | Val loss = 1.4255195
Epoch 30: Time taken = 3.00 | Train loss = 0.0167163 | Val loss = 1.3324105
Epoch 40: Time taken = 2.73 | Train loss = 0.0157636 | Val loss = 1.3327079
Epoch 50: Time taken = 3.04 | Train loss = 0.0151601 | Val loss = 1.1851038
Epoch 60: Time taken = 2.83 | Train loss = 0.0147429 | Val loss = 1.1440040
Epoch 70: Time taken = 3.05 | Train loss = 0.0144956 | Val loss = 1.1824741
Epoch 80: Time taken = 3.01 | Train loss = 0.0142956 | Val loss = 1.0966786
Epoch 90: Time taken = 3.26 | Train loss = 0.0141324 | Val loss = 1.1376482
Epoch 100: Time taken = 2.69 | Train loss = 0.0140164 | Val loss = 1.0813930
Epoch 110: Time taken = 3.60 | Train loss = 0.0139145 | Val loss = 1.1544344
Epoch 120: Time taken = 3.20 | Train loss = 0.0138075 | Val loss = 1.0966303
Epoch 130: Time taken = 3.06 | Train loss = 0.0137025 | Val loss = 1.0782758
Epoch 140: Time taken = 3.09 | Train loss = 0.0136452 | Val loss = 1.1125820
Epoch 150: Time taken = 2.54 | Train loss = 0.0135839 | Val loss = 1.1285257
Epoch 160: Time taken = 3.69 | Train loss = 0.0135023 | Val loss = 1.0910026
EARLY STOPPING. Epoch 169: Train loss = 0.0134670 | Val loss = 1.0744010
Best Validation MSE: 1.0191057
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 3.36 | Train loss = 0.0137457 | Val loss = 1.1522685
Epoch 20: Time taken = 3.11 | Train loss = 0.0136666 | Val loss = 1.1105072
Epoch 30: Time taken = 2.76 | Train loss = 0.0136052 | Val loss = 1.0738761
Epoch 40: Time taken = 3.18 | Train loss = 0.0135283 | Val loss = 1.1116254
Epoch 50: Time taken = 3.39 | Train loss = 0.0134687 | Val loss = 1.1171609
EARLY STOPPING. Epoch 53: Train loss = 0.0134604 | Val loss = 1.0930943
Best Validation MSE: 1.0351499
Epoch 10: Time taken = 3.03 | Train loss = 0.0137475 | Val loss = 1.1696079
Epoch 20: Time taken = 3.05 | Train loss = 0.0136628 | Val loss = 1.0646881
Epoch 30: Time taken = 2.90 | Train loss = 0.0135919 | Val loss = 1.0979109
Epoch 40: Time taken = 3.03 | Train loss = 0.0135214 | Val loss = 1.0784936
Epoch 50: Time taken = 2.80 | Train loss = 0.0134678 | Val loss = 1.1515434
Epoch 60: Time taken = 3.19 | Train loss = 0.0134436 | Val loss = 1.1221559
Epoch 70: Time taken = 3.88 | Train loss = 0.0133664 | Val loss = 1.1079613
EARLY STOPPING. Epoch 72: Train loss = 0.0133715 | Val loss = 1.1145399
Best Validation MSE: 1.0266500
Epoch 10: Time taken = 3.12 | Train loss = 0.0137587 | Val loss = 1.1260900
Epoch 20: Time taken = 3.39 | Train loss = 0.0136383 | Val loss = 1.0656167
Epoch 30: Time taken = 3.43 | Train loss = 0.0136095 | Val loss = 1.1148870
Epoch 40: Time taken = 2.57 | Train loss = 0.0135197 | Val loss = 1.1180706
Epoch 50: Time taken = 3.09 | Train loss = 0.0134742 | Val loss = 1.0852952
Epoch 60: Time taken = 3.04 | Train loss = 0.0134234 | Val loss = 1.1239181
Epoch 70: Time taken = 3.08 | Train loss = 0.0133765 | Val loss = 1.1091535
Epoch 80: Time taken = 3.04 | Train loss = 0.0133300 | Val loss = 1.0896865
Epoch 90: Time taken = 2.80 | Train loss = 0.0132844 | Val loss = 1.0658733
Epoch 100: Time taken = 2.78 | Train loss = 0.0132452 | Val loss = 1.1023629
Epoch 110: Time taken = 3.43 | Train loss = 0.0132009 | Val loss = 1.1187969
EARLY STOPPING. Epoch 113: Train loss = 0.0132014 | Val loss = 1.1028196
Best Validation MSE: 1.0382642
Total time: 1277.62
Complete. Time taken: 1277.67s
Testing complete. Time taken: 1.33
###Markdown
4.2 Seed 4
###Code
run_seed(4)
###Output
Backpropogation start
Epoch 10: Time taken = 2.98 | Train loss = 0.0210425 | Val loss = 1.5425571
Epoch 20: Time taken = 2.76 | Train loss = 0.0182973 | Val loss = 1.3830163
Epoch 30: Time taken = 2.90 | Train loss = 0.0167179 | Val loss = 1.2682315
Epoch 40: Time taken = 2.85 | Train loss = 0.0157937 | Val loss = 1.2692807
Epoch 50: Time taken = 3.03 | Train loss = 0.0151793 | Val loss = 1.2295845
Epoch 60: Time taken = 3.03 | Train loss = 0.0147691 | Val loss = 1.1624203
Epoch 70: Time taken = 2.67 | Train loss = 0.0145008 | Val loss = 1.0727284
Epoch 80: Time taken = 2.85 | Train loss = 0.0143103 | Val loss = 1.0734706
Epoch 90: Time taken = 2.94 | Train loss = 0.0141386 | Val loss = 1.0980810
Epoch 100: Time taken = 2.88 | Train loss = 0.0139899 | Val loss = 1.0766590
Epoch 110: Time taken = 2.98 | Train loss = 0.0139254 | Val loss = 1.0828035
Epoch 120: Time taken = 2.71 | Train loss = 0.0138112 | Val loss = 1.1124989
Epoch 130: Time taken = 2.89 | Train loss = 0.0137299 | Val loss = 1.0630125
Epoch 140: Time taken = 2.63 | Train loss = 0.0136524 | Val loss = 1.1127362
Epoch 150: Time taken = 2.57 | Train loss = 0.0135843 | Val loss = 1.1110221
Epoch 160: Time taken = 2.68 | Train loss = 0.0135241 | Val loss = 1.0838360
Epoch 170: Time taken = 3.02 | Train loss = 0.0134596 | Val loss = 1.1025939
EARLY STOPPING. Epoch 172: Train loss = 0.0134457 | Val loss = 1.0671771
Best Validation MSE: 1.0247201
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 3.05 | Train loss = 0.0137154 | Val loss = 1.0419832
Epoch 20: Time taken = 2.99 | Train loss = 0.0136592 | Val loss = 1.1367559
Epoch 30: Time taken = 3.18 | Train loss = 0.0135820 | Val loss = 1.1084652
Epoch 40: Time taken = 2.88 | Train loss = 0.0135119 | Val loss = 1.0923442
Epoch 50: Time taken = 3.04 | Train loss = 0.0134512 | Val loss = 1.1473956
Epoch 60: Time taken = 2.82 | Train loss = 0.0133974 | Val loss = 1.0894985
EARLY STOPPING. Epoch 60: Train loss = 0.0133974 | Val loss = 1.0894985
Best Validation MSE: 1.0419832
Epoch 10: Time taken = 2.40 | Train loss = 0.0137232 | Val loss = 1.0818902
Epoch 20: Time taken = 3.90 | Train loss = 0.0136336 | Val loss = 1.1053430
Epoch 30: Time taken = 3.15 | Train loss = 0.0135780 | Val loss = 1.0719372
Epoch 40: Time taken = 2.28 | Train loss = 0.0135225 | Val loss = 1.0896695
Epoch 50: Time taken = 2.39 | Train loss = 0.0134499 | Val loss = 1.1439047
Epoch 60: Time taken = 2.97 | Train loss = 0.0133997 | Val loss = 1.0518383
EARLY STOPPING. Epoch 62: Train loss = 0.0134052 | Val loss = 1.1811001
Best Validation MSE: 1.0166689
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 3.20 | Train loss = 0.0136435 | Val loss = 1.0680068
Epoch 20: Time taken = 3.90 | Train loss = 0.0135505 | Val loss = 1.1393838
Epoch 30: Time taken = 3.36 | Train loss = 0.0134931 | Val loss = 1.1248623
Epoch 40: Time taken = 3.15 | Train loss = 0.0134449 | Val loss = 1.0844883
Epoch 50: Time taken = 3.67 | Train loss = 0.0133947 | Val loss = 1.1276841
Epoch 60: Time taken = 3.30 | Train loss = 0.0133438 | Val loss = 1.1354203
Epoch 70: Time taken = 3.96 | Train loss = 0.0132878 | Val loss = 1.1252869
Epoch 80: Time taken = 3.05 | Train loss = 0.0132515 | Val loss = 1.0979859
Epoch 90: Time taken = 3.60 | Train loss = 0.0132112 | Val loss = 1.0842342
Epoch 100: Time taken = 2.86 | Train loss = 0.0131849 | Val loss = 1.1258181
Epoch 110: Time taken = 3.18 | Train loss = 0.0131601 | Val loss = 1.1193913
Epoch 120: Time taken = 4.03 | Train loss = 0.0131007 | Val loss = 1.1101488
Epoch 130: Time taken = 3.16 | Train loss = 0.0130796 | Val loss = 1.0959141
Epoch 140: Time taken = 2.92 | Train loss = 0.0130652 | Val loss = 1.1097416
EARLY STOPPING. Epoch 143: Train loss = 0.0130441 | Val loss = 1.1302928
Best Validation MSE: 1.0475791
Total time: 1350.08
Complete. Time taken: 1350.10s
Testing complete. Time taken: 1.10
###Markdown
4.3 Seed 6
###Code
run_seed(6)
###Output
Backpropogation start
Epoch 10: Time taken = 2.29 | Train loss = 0.0210563 | Val loss = 1.5862337
Epoch 20: Time taken = 2.31 | Train loss = 0.0183553 | Val loss = 1.3565298
Epoch 30: Time taken = 2.30 | Train loss = 0.0167770 | Val loss = 1.3186829
Epoch 40: Time taken = 2.34 | Train loss = 0.0158133 | Val loss = 1.2009823
Epoch 50: Time taken = 2.34 | Train loss = 0.0152119 | Val loss = 1.1598649
Epoch 60: Time taken = 2.30 | Train loss = 0.0147860 | Val loss = 1.1357266
Epoch 70: Time taken = 2.31 | Train loss = 0.0145276 | Val loss = 1.1013349
Epoch 80: Time taken = 2.32 | Train loss = 0.0143264 | Val loss = 1.1118903
Epoch 90: Time taken = 2.30 | Train loss = 0.0141771 | Val loss = 1.1153542
Epoch 100: Time taken = 2.34 | Train loss = 0.0140467 | Val loss = 1.0773672
Epoch 110: Time taken = 2.30 | Train loss = 0.0139424 | Val loss = 1.0772125
Epoch 120: Time taken = 2.32 | Train loss = 0.0138516 | Val loss = 1.0408517
Epoch 130: Time taken = 2.31 | Train loss = 0.0137449 | Val loss = 1.0483608
Epoch 140: Time taken = 2.31 | Train loss = 0.0136685 | Val loss = 1.0912251
Epoch 150: Time taken = 2.33 | Train loss = 0.0136060 | Val loss = 1.1440814
EARLY STOPPING. Epoch 157: Train loss = 0.0135677 | Val loss = 1.1673924
Best Validation MSE: 1.0353289
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 2.31 | Train loss = 0.0138692 | Val loss = 1.0705205
Epoch 20: Time taken = 2.32 | Train loss = 0.0137606 | Val loss = 1.0828068
Epoch 30: Time taken = 2.37 | Train loss = 0.0136739 | Val loss = 1.0793589
Epoch 40: Time taken = 2.31 | Train loss = 0.0136225 | Val loss = 1.1316221
Epoch 50: Time taken = 2.32 | Train loss = 0.0135593 | Val loss = 1.0967340
Epoch 60: Time taken = 2.32 | Train loss = 0.0135106 | Val loss = 1.0532078
Epoch 70: Time taken = 2.32 | Train loss = 0.0134591 | Val loss = 1.0621719
Epoch 80: Time taken = 2.31 | Train loss = 0.0134124 | Val loss = 1.0612108
EARLY STOPPING. Epoch 84: Train loss = 0.0133965 | Val loss = 1.0978452
Best Validation MSE: 1.0224943
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 2.32 | Train loss = 0.0136048 | Val loss = 1.0925174
Epoch 20: Time taken = 2.31 | Train loss = 0.0135306 | Val loss = 1.0911491
Epoch 30: Time taken = 2.32 | Train loss = 0.0134825 | Val loss = 1.1142278
Epoch 40: Time taken = 2.35 | Train loss = 0.0134383 | Val loss = 1.1035403
Epoch 50: Time taken = 2.31 | Train loss = 0.0133744 | Val loss = 1.0894923
Epoch 60: Time taken = 2.31 | Train loss = 0.0133441 | Val loss = 1.0766196
Epoch 70: Time taken = 2.31 | Train loss = 0.0132860 | Val loss = 1.1126961
Epoch 80: Time taken = 2.31 | Train loss = 0.0132638 | Val loss = 1.0514114
EARLY STOPPING. Epoch 84: Train loss = 0.0132310 | Val loss = 1.1405092
Best Validation MSE: 1.0382783
Epoch 10: Time taken = 2.30 | Train loss = 0.0136121 | Val loss = 1.0538472
Epoch 20: Time taken = 2.31 | Train loss = 0.0135399 | Val loss = 1.1208901
Epoch 30: Time taken = 2.35 | Train loss = 0.0134887 | Val loss = 1.0811682
Epoch 40: Time taken = 2.36 | Train loss = 0.0134229 | Val loss = 1.0956341
Epoch 50: Time taken = 2.32 | Train loss = 0.0133740 | Val loss = 1.1090360
Epoch 60: Time taken = 2.32 | Train loss = 0.0133343 | Val loss = 1.0870464
Epoch 70: Time taken = 2.32 | Train loss = 0.0132868 | Val loss = 1.1192895
Epoch 80: Time taken = 2.37 | Train loss = 0.0132624 | Val loss = 1.1205571
Epoch 90: Time taken = 2.33 | Train loss = 0.0132080 | Val loss = 1.1562276
Epoch 100: Time taken = 2.32 | Train loss = 0.0131710 | Val loss = 1.0978723
Epoch 110: Time taken = 2.40 | Train loss = 0.0131285 | Val loss = 1.1184219
EARLY STOPPING. Epoch 113: Train loss = 0.0131244 | Val loss = 1.0859983
Best Validation MSE: 1.0398266
Total time: 1024.96
Complete. Time taken: 1024.98s
Testing complete. Time taken: 0.72
###Markdown
4.4 Seed 8
###Code
run_seed(8)
###Output
Backpropogation start
Epoch 10: Time taken = 2.32 | Train loss = 0.0209519 | Val loss = 1.4217958
Epoch 20: Time taken = 2.29 | Train loss = 0.0181985 | Val loss = 1.3043104
Epoch 30: Time taken = 2.32 | Train loss = 0.0166485 | Val loss = 1.3349144
Epoch 40: Time taken = 2.37 | Train loss = 0.0157471 | Val loss = 1.3182245
Epoch 50: Time taken = 2.32 | Train loss = 0.0151494 | Val loss = 1.1731933
Epoch 60: Time taken = 2.32 | Train loss = 0.0147478 | Val loss = 1.1946197
Epoch 70: Time taken = 2.34 | Train loss = 0.0144723 | Val loss = 1.1160094
Epoch 80: Time taken = 2.33 | Train loss = 0.0142739 | Val loss = 1.1537226
Epoch 90: Time taken = 2.32 | Train loss = 0.0141554 | Val loss = 1.1256329
Epoch 100: Time taken = 2.36 | Train loss = 0.0139980 | Val loss = 1.0551010
Epoch 110: Time taken = 2.33 | Train loss = 0.0139153 | Val loss = 1.0766619
Epoch 120: Time taken = 2.35 | Train loss = 0.0138175 | Val loss = 1.0675601
Epoch 130: Time taken = 2.34 | Train loss = 0.0137134 | Val loss = 1.1269122
Epoch 140: Time taken = 2.34 | Train loss = 0.0136372 | Val loss = 1.1029820
EARLY STOPPING. Epoch 149: Train loss = 0.0135799 | Val loss = 1.0742617
Best Validation MSE: 1.0114433
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 2.36 | Train loss = 0.0139113 | Val loss = 1.0629774
Epoch 20: Time taken = 2.33 | Train loss = 0.0138082 | Val loss = 1.0547382
Epoch 30: Time taken = 2.33 | Train loss = 0.0137019 | Val loss = 1.1402183
Epoch 40: Time taken = 2.33 | Train loss = 0.0136564 | Val loss = 1.0828077
Epoch 50: Time taken = 2.37 | Train loss = 0.0135801 | Val loss = 1.1178313
Epoch 60: Time taken = 2.36 | Train loss = 0.0135102 | Val loss = 1.1253059
Epoch 70: Time taken = 2.33 | Train loss = 0.0134678 | Val loss = 1.0747007
Epoch 80: Time taken = 2.33 | Train loss = 0.0134247 | Val loss = 1.0623728
Epoch 90: Time taken = 2.34 | Train loss = 0.0133660 | Val loss = 1.0724630
Epoch 100: Time taken = 2.33 | Train loss = 0.0133113 | Val loss = 1.1300181
EARLY STOPPING. Epoch 108: Train loss = 0.0133004 | Val loss = 1.1147361
Best Validation MSE: 1.0209694
Epoch 10: Time taken = 2.33 | Train loss = 0.0138981 | Val loss = 1.0645012
Epoch 20: Time taken = 2.37 | Train loss = 0.0138243 | Val loss = 1.0792927
Epoch 30: Time taken = 2.32 | Train loss = 0.0137383 | Val loss = 1.0788300
Epoch 40: Time taken = 2.31 | Train loss = 0.0136685 | Val loss = 1.1387329
Epoch 50: Time taken = 2.34 | Train loss = 0.0135759 | Val loss = 1.0848686
EARLY STOPPING. Epoch 52: Train loss = 0.0135700 | Val loss = 1.0813973
Best Validation MSE: 1.0572135
Epoch 10: Time taken = 2.32 | Train loss = 0.0138948 | Val loss = 1.0447154
Epoch 20: Time taken = 2.31 | Train loss = 0.0138218 | Val loss = 1.1221601
Epoch 30: Time taken = 2.31 | Train loss = 0.0137338 | Val loss = 1.0614462
Epoch 40: Time taken = 2.32 | Train loss = 0.0136342 | Val loss = 1.0658587
Epoch 50: Time taken = 2.32 | Train loss = 0.0136014 | Val loss = 1.0965577
Epoch 60: Time taken = 2.33 | Train loss = 0.0135090 | Val loss = 1.0929385
Epoch 70: Time taken = 2.33 | Train loss = 0.0134710 | Val loss = 1.0831658
Epoch 80: Time taken = 2.46 | Train loss = 0.0134309 | Val loss = 1.0992556
Epoch 90: Time taken = 2.32 | Train loss = 0.0133799 | Val loss = 1.0839257
Epoch 100: Time taken = 2.35 | Train loss = 0.0133205 | Val loss = 1.0733787
EARLY STOPPING. Epoch 104: Train loss = 0.0133085 | Val loss = 1.0982710
Best Validation MSE: 1.0329014
Total time: 969.03
Complete. Time taken: 969.04s
Testing complete. Time taken: 0.70
###Markdown
4.5 Seed 42
###Code
run_seed(42)
###Output
Backpropogation start
Epoch 10: Time taken = 2.33 | Train loss = 0.0210738 | Val loss = 1.5679168
Epoch 20: Time taken = 2.34 | Train loss = 0.0182678 | Val loss = 1.2847627
Epoch 30: Time taken = 2.35 | Train loss = 0.0166720 | Val loss = 1.3003093
Epoch 40: Time taken = 2.34 | Train loss = 0.0157464 | Val loss = 1.2352611
Epoch 50: Time taken = 2.33 | Train loss = 0.0151357 | Val loss = 1.1601344
Epoch 60: Time taken = 2.33 | Train loss = 0.0147572 | Val loss = 1.1760615
Epoch 70: Time taken = 2.34 | Train loss = 0.0144957 | Val loss = 1.1625829
Epoch 80: Time taken = 2.34 | Train loss = 0.0142884 | Val loss = 1.0961379
Epoch 90: Time taken = 2.35 | Train loss = 0.0141369 | Val loss = 1.2282528
Epoch 100: Time taken = 2.33 | Train loss = 0.0140183 | Val loss = 1.0731691
Epoch 110: Time taken = 2.33 | Train loss = 0.0139008 | Val loss = 1.1389533
Epoch 120: Time taken = 2.34 | Train loss = 0.0138114 | Val loss = 1.1423104
Epoch 130: Time taken = 2.34 | Train loss = 0.0137417 | Val loss = 1.1421491
Epoch 140: Time taken = 2.34 | Train loss = 0.0136794 | Val loss = 1.1007330
Epoch 150: Time taken = 2.37 | Train loss = 0.0135943 | Val loss = 1.1221503
EARLY STOPPING. Epoch 152: Train loss = 0.0135618 | Val loss = 1.1452370
Best Validation MSE: 1.0223283
IMPROVED VALIDATION MSE
Epoch 10: Time taken = 2.39 | Train loss = 0.0138992 | Val loss = 1.0603896
Epoch 20: Time taken = 2.34 | Train loss = 0.0137715 | Val loss = 1.0926119
Epoch 30: Time taken = 2.35 | Train loss = 0.0137241 | Val loss = 1.1092016
Epoch 40: Time taken = 2.34 | Train loss = 0.0136576 | Val loss = 1.0803857
Epoch 50: Time taken = 2.34 | Train loss = 0.0135782 | Val loss = 1.1426219
Epoch 60: Time taken = 2.43 | Train loss = 0.0135126 | Val loss = 1.1242877
Epoch 70: Time taken = 2.34 | Train loss = 0.0134480 | Val loss = 1.0801755
EARLY STOPPING. Epoch 79: Train loss = 0.0134015 | Val loss = 1.0958464
Best Validation MSE: 1.0393889
Epoch 10: Time taken = 2.35 | Train loss = 0.0138789 | Val loss = 1.1089312
Epoch 20: Time taken = 2.34 | Train loss = 0.0137828 | Val loss = 1.1450919
Epoch 30: Time taken = 2.35 | Train loss = 0.0137121 | Val loss = 1.1277918
Epoch 40: Time taken = 2.35 | Train loss = 0.0136324 | Val loss = 1.0405763
Epoch 50: Time taken = 2.35 | Train loss = 0.0135498 | Val loss = 1.1003051
Epoch 60: Time taken = 2.34 | Train loss = 0.0135275 | Val loss = 1.0838794
Epoch 70: Time taken = 2.35 | Train loss = 0.0134707 | Val loss = 1.1213584
Epoch 80: Time taken = 2.34 | Train loss = 0.0134209 | Val loss = 1.0785521
Epoch 90: Time taken = 2.38 | Train loss = 0.0133576 | Val loss = 1.0747241
EARLY STOPPING. Epoch 90: Train loss = 0.0133576 | Val loss = 1.0747241
Best Validation MSE: 1.0405763
Epoch 10: Time taken = 2.34 | Train loss = 0.0138903 | Val loss = 1.1188700
Epoch 20: Time taken = 2.34 | Train loss = 0.0138049 | Val loss = 1.1237378
Epoch 30: Time taken = 2.34 | Train loss = 0.0137102 | Val loss = 1.1474231
Epoch 40: Time taken = 2.34 | Train loss = 0.0136381 | Val loss = 1.0784142
Epoch 50: Time taken = 2.35 | Train loss = 0.0135823 | Val loss = 1.1166005
Epoch 60: Time taken = 2.34 | Train loss = 0.0135125 | Val loss = 1.1591243
Epoch 70: Time taken = 2.34 | Train loss = 0.0134682 | Val loss = 1.1359279
Epoch 80: Time taken = 2.34 | Train loss = 0.0134171 | Val loss = 1.1332902
Epoch 90: Time taken = 2.34 | Train loss = 0.0133756 | Val loss = 1.1133301
Epoch 100: Time taken = 2.59 | Train loss = 0.0133166 | Val loss = 1.1456987
EARLY STOPPING. Epoch 101: Train loss = 0.0133108 | Val loss = 1.0775003
Best Validation MSE: 1.0398022
Total time: 994.43
Complete. Time taken: 994.44s
Testing complete. Time taken: 0.71
###Markdown
4.6 Compilation
###Code
mu_preds = []
for dirpath, dirnames, filenames in os.walk(res_folder):
for f in filenames:
mu_preds.append(load_obj(os.path.join(res_folder, f)))
mu_preds = np.array(mu_preds)
print(f"mean preds shape: {mu_preds.shape}")
###Output
mean preds shape: (5, 100, 400, 40)
###Markdown
5. Analyze results 5.1 Mean
###Code
mixture_pred_all_mean = mu_preds.mean(axis = 0)
desc_name = "lstm_nn" + str(nn_size) + "_ensemble"
res_ensemble = PointExperimentResult(mixture_pred_all_mean - y_test,
desc_name)
res_ensemble.plot_rmse(error_thresh = 0.5)
res_ensemble.plot_rmse(error_thresh = 1.)
res_ensemble.get_loss([0.2, 0.5, 1, 2, 3])
###Output
Median NRMSE at t = 0.2: 0.206
Median NRMSE at t = 0.5: 0.456
Median NRMSE at t = 1: 0.814
Median NRMSE at t = 2: 1.020
Median NRMSE at t = 3: 1.058
###Markdown
5.2 Variance **Visualise for one dataset**
###Code
idx = 0
plt.plot(mu_preds.var(axis = 0)[idx].mean(axis = 1))
plt.grid("on")
plt.xlabel("Time steps")
plt.ylabel("Variance")
plt.show()
###Output
_____no_output_____
###Markdown
5.3 Negative Log LH
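For reference (an added note, not part of the original analysis): the quantity computed below is an ensemble-based variant of the negative log-likelihood of a diagonal Gaussian, whose standard per-time-step form in $d$ dimensions is $$-\log p(y) = \tfrac{1}{2}\Big(d\log(2\pi) + \sum_{i=1}^{d}\log \sigma_i^{2} + \sum_{i=1}^{d}\frac{(y_i-\mu_i)^{2}}{\sigma_i^{2}}\Big),$$ with the ensemble mean and standard deviation of the predictions playing the roles of $\mu$ and $\sigma$.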
###Code
def neg_log_LH(mean_pred, sd_pred):
d = 40
constant_loss = d * np.log(2 * np.pi)
mu_loss = (mean_pred - y_test)**2
return 0.5 * (constant_loss + d * np.log(sd_pred) + (mu_loss / sd_pred**2)).mean(axis = (0, 2))
std_dev = mu_preds.std(axis = 0)
plt.plot(neg_log_LH(mixture_pred_all_mean, std_dev))
plt.title("Negative Log LH against time")
plt.xlabel("Time steps")
plt.ylabel("Negative Log LH")
plt.grid("on")
plt.show()
print(f"Overall negative log LH: {neg_log_LH(mixture_pred_all_mean, std_dev).mean():.5f}")
###Output
Overall negative log LH: 27.20171
|
Regression_in_TF2_x.ipynb | ###Markdown
Linear Regression
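For reference (an added note matching the closed-form estimates in the cell below): with $x$ = area and $y$ = price, the least-squares coefficients are $$W = \frac{\sum_i y_i\,(x_i - \bar{x})}{\sum_i (x_i - \bar{x})^{2}}, \qquad b = \bar{y} - W\,\bar{x},$$ which is the usual simple-linear-regression estimator (the numerator equals $\sum_i (y_i - \bar{y})(x_i - \bar{x})$ because $\sum_i (x_i - \bar{x}) = 0$).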
###Code
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
np.random.seed(0)
area = 2.5 * np.random.randn(100) + 25
price = 25 * area + 5 + np.random.randint(20,50, size=len(area))
data = np.array([area, price])
data = pd.DataFrame(data=data.T, columns=['area', 'price'])
plt.scatter(data['area'], data['price'])
plt.show()
W = sum(price*(area-np.mean(area))) / sum((area-np.mean(area))**2)
b = np.mean(price) - W*np.mean(area)
print("The regression coefficients are", W,b)
y_pred = W * area + b
plt.plot(area, y_pred, color='red',label="Predicted Price")
plt.scatter(data['area'], data['price'], label="Training Data")
plt.xlabel("Area")
plt.ylabel("Price")
plt.legend()
###Output
_____no_output_____
###Markdown
Multiple linear regression with Estimator API
###Code
from tensorflow import feature_column as fc
numeric_column = fc.numeric_column
categorical_column_with_vocabulary_list = fc.categorical_column_with_vocabulary_list
featcols = [
tf.feature_column.numeric_column("area"),
tf.feature_column.categorical_column_with_vocabulary_list("type",["bungalow","apartment"])
]
def train_input_fn():
features = {"area":[1000,2000,4000,1000,2000,4000],
"type":["bungalow","bungalow","house",
"apartment","apartment","apartment"]}
labels = [ 500 , 1000 , 1500 , 700 , 1300 , 1900 ]
return features, labels
model = tf.estimator.LinearRegressor(featcols)
model.train(train_input_fn, steps=200)
def predict_input_fn():
features = {"area":[1500,1800],
"type":["house","apt"]}
return features
predictions = model.predict(predict_input_fn)
print(next(predictions))
print(next(predictions))
###Output
WARNING:tensorflow:Input graph does not use tf.data.Dataset or contain a QueueRunner. That means predict yields forever. This is probably a mistake.
INFO:tensorflow:Calling model_fn.
INFO:tensorflow:Done calling model_fn.
INFO:tensorflow:Graph was finalized.
INFO:tensorflow:Restoring parameters from /tmp/tmpsok_v84i/model.ckpt-200
INFO:tensorflow:Running local_init_op.
INFO:tensorflow:Done running local_init_op.
###Markdown
Boston House price prediction
###Code
from tensorflow.keras.datasets import boston_housing
(X_train,y_train), (X_test, y_test) = boston_housing.load_data()
features = ['CRIM', 'ZN',
'INDUS','CHAS','NOX','RM','AGE',
'DIS', 'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
x_train_df = pd.DataFrame(X_train, columns= features)
x_test_df = pd.DataFrame(X_test, columns= features)
y_train_df = pd.DataFrame(y_train, columns=['MEDV'])
y_test_df = pd.DataFrame(y_test, columns=['MEDV'])
print(x_train_df.head())
y_train_df.head()
feature_columns = []
for feature_name in features:
feature_columns.append(fc.numeric_column(feature_name, dtype=tf.float32))
def estimator_input_fn(df_data, df_label, epochs=10, shuffle=True, batch_size=32):
def input_function():
ds = tf.data.Dataset.from_tensor_slices((dict(df_data), df_label))
if shuffle:
ds = ds.shuffle(100)
ds = ds.batch(batch_size).repeat(epochs)
return ds
return input_function
train_input_fn = estimator_input_fn(x_train_df, y_train_df)
val_input_fn = estimator_input_fn(x_test_df, y_test_df, epochs=1, shuffle=False)
linear_est = tf.estimator.LinearRegressor(feature_columns=feature_columns, model_dir = 'logs/func/')
linear_est.train(train_input_fn, steps=100)
result = linear_est.evaluate(val_input_fn)
result = linear_est.predict(val_input_fn)
for pred,exp in zip(result, y_test[:32]):
print("Predicted Value: ", pred['predictions'][0], "Expected: ", exp)
###Output
INFO:tensorflow:Calling model_fn.
###Markdown
MNIST using estimators
###Code
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
(X_train, y_train), (X_test, y_test) = keras.datasets.mnist.load_data()
train_data = X_train/np.float32(255)
train_labels = y_train.astype(np.int32)
eval_data = X_test/np.float32(255)
eval_labels = y_test.astype(np.int32)
feature_columns = [
tf.feature_column.numeric_column("x", shape=[28,28])
]
classifier = tf.estimator.LinearClassifier(
feature_columns=feature_columns,
n_classes=10,
model_dir="mnist_model/"
)
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=10)
val_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
eval_results = classifier.evaluate(input_fn=val_input_fn)
print(eval_results)
###Output
_____no_output_____ |
Test1.ipynb | ###Markdown
###Code
from enum import Enum
class Test1(Enum):
ONE = 1
Test1.ONE
###Output
_____no_output_____
###Markdown
###Code
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
import numpy as np
x = np.linspace(0, 5, 10, endpoint=False)
y = multivariate_normal.pdf(x, mean=2.5, cov=0.5); y
fig1 = plt.figure()
ax = fig1.add_subplot(111)
ax.plot(x, y)
x, y = np.mgrid[-1:1:.01, -1:1:.01]
pos = np.dstack((x, y))
rv = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]])
fig2 = plt.figure()
ax2 = fig2.add_subplot(111)
ax2.contourf(x, y, rv.pdf(pos))
###Output
_____no_output_____
###Markdown
###Code
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5, 6, 7, 8, 9]
y1 = [1, 3, 5, 3, 1, 3, 5, 3, 1]
y2 = [2, 4, 6, 4, 2, 4, 6, 4, 2]
plt.plot(x, y1, label="line L")
plt.plot(x, y2, label="line H")
plt.plot()
plt.xlabel("x axis")
plt.ylabel("y axis")
plt.title("Test Line Graph Vuong")
plt.legend()
plt.show()
###Output
_____no_output_____
###Markdown
Winston's code
###Code
###Output
_____no_output_____
###Markdown
My first Jupyter Notebook Trying out a few different things
###Code
from matplotlib.pylab import *
a = float(input("Enter the coefficient of the x^2 term: "))
b = float(input("Enter the coefficient of the x term: "))
c = float(input("Enter the constant term: "))
funksjon = "f(x) = " + str(a) + "x^2"
if b < 0:
funksjon = funksjon + " – " + str(abs(b)) + "x"
elif b > 0:
funksjon = funksjon + " + " + str(b) + "x"
if c < 0:
funksjon = funksjon + " – " + str(abs(c))
elif c > 0:
funksjon = funksjon + " + " + str(c)
print(funksjon)
x = linspace(-5, 5, 1001)
print(str(x[1]))
y = a*x**2 + b*x + c
plot(x, y)
xlabel("x")
ylabel("y")
grid()
xlim(-5,3)
axhline(y=0, color="k")
axvline(x=0, color="k")
show()
###Output
Enter the coefficient of the x^2 term: 1
Enter the coefficient of the x term: 2
Enter the constant term: -2
f(x) = 1.0x^2 + 2.0x – 2.0
-4.99
###Markdown
###Code
from vega_datasets import data
stocks = data.stocks()
import altair as alt
alt.Chart(stocks).mark_line().encode(
x='date:T',
y='price',
color='symbol'
).interactive(bind_y=False)
# To determine which version you're using:
#!pip show tensorflow
# For the current version:
#!pip install --upgrade tensorflow
# For a specific version:
!pip install tensorflow==2.0.0-alpha
# For the latest nightly build:
#!pip install tf-nightly
from google.colab import files
uploaded = files.upload()
for fn in uploaded.keys():
print('User uploaded file "{name}" with length {length} bytes'.format(
name=fn, length=len(uploaded[fn])))
###Output
_____no_output_____ |
Conceptors/Re_implement_CN.ipynb | ###Markdown
Compare word similarity scores and calculate Spearman Correlation
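As a reference for the ranking comparison implemented below (added note, not in the original code): for $n$ word pairs, Spearman's rank correlation between the human-score ranking and the embedding-based ranking can be written, ignoring ties, as $$\rho = 1 - \frac{6\sum_{i=1}^{n} d_i^{2}}{n(n^{2}-1)},$$ where $d_i$ is the difference between the two ranks of pair $i$; the code below builds the two rank-position lists and lets `scipy.stats.spearmanr` compute $\rho$.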
###Code
def get_sim(data_f_name, cn_f_name, cn_mat, alpha):
cn_data = eval(cn_f_name)
#word_pairs = set(list(cn_data.keys()))
fin = io.open(data_f_name, 'r', encoding='utf-8', newline='\n', errors='ignore')
dataset = []
word_vec = []
keys = []
ls_word = list(cn_data.vocab)
#line_num = 0
for line in fin:
# if line_num > 0:
tokens = line.rstrip().split()
if tokens[0] in cn_data.vocab and tokens[1] in cn_data.vocab:
dataset.append(((tokens[0], tokens[1]), float(tokens[2])))
id1 = ls_word.index(tokens[0])
id2 = ls_word.index(tokens[1])
word_vec.append(cn_mat[id1])
word_vec.append(cn_mat[id2])
keys.append(tokens[0])
keys.append(tokens[1])
#line_num +=1
dataset.sort(key = lambda score: -score[1]) #sort based on score
# print(cn_data['gem'])
cn_dataset = {}
cn_dataset_list = []
for ((word1, word2), score) in dataset:
#print(word1, word2)
id1 = ls_word.index(word1)
id2 = ls_word.index(word2)
sim_score = 1 - cosine_similarity(cn_mat[id1].reshape(1,-1), cn_mat[id2].reshape(1,-1))
cn_dataset[(word1, word2)] = sim_score
cn_dataset_list.append(((word1, word2),sim_score))
cn_dataset_list.sort(key = lambda score: score[1])
spearman_list1=[]
spearman_list2=[]
for pos_1, (pair, score_1) in enumerate(dataset):
score_2 = cn_dataset[pair]
pos_2 = cn_dataset_list.index((pair, score_2))
spearman_list1.append(pos_1)
spearman_list2.append(pos_2)
rho = spearmanr(spearman_list1, spearman_list2)
return rho[0]
def get_sim_large_data(data_f_name, cn_f_name, C, alpha):
cn_data = eval(cn_f_name)
#word_pairs = set(list(cn_data.keys()))
fin = io.open(data_f_name, 'r', encoding='utf-8', newline='\n', errors='ignore')
dataset = []
ls_word = list(cn_data.vocab)
#line_num = 0
for line in fin:
# if line_num > 0:
tokens = line.rstrip().split()
if tokens[0] in cn_data.vocab and tokens[1] in cn_data.vocab:
dataset.append(((tokens[0], tokens[1]), float(tokens[2])))
#line_num +=1
dataset.sort(key = lambda score: -score[1]) #sort based on score
# print(cn_data['gem'])
cn_dataset = {}
cn_dataset_list = []
for ((word1, word2), score) in dataset:
#print(word1, word2)
sim_score = 1 - cosine_similarity( cn_data[word1] - (C @ cn_data[word1]).reshape(1,-1) , cn_data[word2] - (C@ cn_data[word2]).reshape(1,-1))
cn_dataset[(word1, word2)] = sim_score
cn_dataset_list.append(((word1, word2),sim_score))
cn_dataset_list.sort(key = lambda score: score[1])
spearman_list1=[]
spearman_list2=[]
for pos_1, (pair, score_1) in enumerate(dataset):
score_2 = cn_dataset[pair]
pos_2 = cn_dataset_list.index((pair, score_2))
spearman_list1.append(pos_1)
spearman_list2.append(pos_2)
rho = spearmanr(spearman_list1, spearman_list2)
return rho[0]
dataSets = ['EN-RG-65.txt', 'EN-WS-353-ALL.txt', 'EN-RW-STANFORD.txt', 'EN-MEN-TR-3k.txt', 'EN-MTurk-287.txt', 'EN-SIMLEX-999.txt', 'EN-SimVerb-3500.txt']
for dataset in dataSets:
dataSetAddress = '/content/'+ dataset
print('evaluating the data set', dataSetAddress)
print(' Fasttext2')
print('With CN ', "%.4f" % get_sim_large_data(dataSetAddress, 'fasttext2', fasttext2_concept_mat, 1))
print('No CN ', "%.4f" % get_sim_no_cn(dataSetAddress, 'fasttext2'))
###Output
evaluating the data set /content/EN-RG-65.txt
Fasttext2
With CN 0.8755
No CN 0.8526
evaluating the data set /content/EN-WS-353-ALL.txt
Fasttext2
With CN 0.7904
No CN 0.7921
evaluating the data set /content/EN-RW-STANFORD.txt
Fasttext2
With CN 0.6135
No CN 0.5949
evaluating the data set /content/EN-MEN-TR-3k.txt
Fasttext2
With CN 0.8466
No CN 0.8362
evaluating the data set /content/EN-MTurk-287.txt
Fasttext2
With CN 0.7323
No CN 0.7254
evaluating the data set /content/EN-SIMLEX-999.txt
Fasttext2
With CN 0.5168
No CN 0.5051
evaluating the data set /content/EN-SimVerb-3500.txt
Fasttext2
With CN 0.4348
No CN 0.4304
###Markdown
Without CN post-processing
###Code
def get_sim_no_cn(data_f_name, f_name):
model = eval(f_name)
fin = io.open(data_f_name, 'r', encoding='utf-8', newline='\n', errors='ignore')
data = []
#line_num = 0
for line in fin:
#if line_num > 0:
tokens = line.rstrip().split()
if tokens[0] in model.vocab and tokens[1] in model.vocab:
data.append(((tokens[0], tokens[1]), float(tokens[2])))
# line_num +=1
data.sort(key = lambda score: -score[1]) #sort based on score
dataset = {}
dataset_list = []
for ((word1, word2), score) in data:
sim_score = 1 - cosine_similarity(model[word1].reshape(1,-1), model[word2].reshape(1,-1))
dataset[(word1, word2)] = sim_score
dataset_list.append(((word1, word2),sim_score))
dataset_list.sort(key = lambda score: score[1])
spearman_list1=[]
spearman_list2=[]
for pos_1, (pair, score_1) in enumerate(data):
score_2 = dataset[pair]
pos_2 = dataset_list.index((pair, score_2))
spearman_list1.append(pos_1)
spearman_list2.append(pos_2)
rho = spearmanr(spearman_list1, spearman_list2)
return rho[0]
dataSets = ['EN-RG-65.txt', 'EN-WS-353-ALL.txt', 'EN-RW-STANFORD.txt', 'EN-MEN-TR-3k.txt', 'EN-MTurk-287.txt', 'EN-SIMLEX-999.txt', 'EN-SimVerb-3500.txt']
for dataset in dataSets:
dataSetAddress = '/content/'+ dataset
print('evaluating the data set', dataSetAddress)
print('Fasttext ', 'GloVe ', 'w2v ')
print("%.4f" % get_sim_no_cn(dataSetAddress, 'fasttext'), "%.4f" % get_sim_no_cn(dataSetAddress, 'glove'), "%.4f" % get_sim_no_cn(dataSetAddress, 'w2v'))
dataSets = ['EN-RG-65.txt', 'EN-WS-353-ALL.txt', 'EN-RW-STANFORD.txt', 'EN-MEN-TR-3k.txt', 'EN-MTurk-287.txt', 'EN-SIMLEX-999.txt', 'EN-SimVerb-3500.txt']
for dataset in dataSets:
dataSetAddress = '/content/'+ dataset
print('evaluating the data set', dataSetAddress)
print('Fasttext2')
print("%.4f" % get_sim_no_cn(dataSetAddress, 'fasttext2'))
###Output
evaluating the data set /content/EN-RG-65.txt
Fasttext2
0.8587
evaluating the data set /content/EN-WS-353-ALL.txt
Fasttext2
0.7915
evaluating the data set /content/EN-RW-STANFORD.txt
Fasttext2
0.5948
evaluating the data set /content/EN-MEN-TR-3k.txt
Fasttext2
0.8364
evaluating the data set /content/EN-MTurk-287.txt
Fasttext2
0.7262
evaluating the data set /content/EN-SIMLEX-999.txt
Fasttext2
0.5038
evaluating the data set /content/EN-SimVerb-3500.txt
Fasttext2
0.4304
###Markdown
Results
###Code
dataSets = ['EN-RG-65.txt', 'EN-WS-353-ALL.txt', 'EN-RW-STANFORD.txt', 'EN-MEN-TR-3k.txt', 'EN-MTurk-287.txt', 'EN-SIMLEX-999.txt', 'EN-SimVerb-3500.txt']
for dataset in dataSets:
dataSetAddress = '/content/'+ dataset
print('evaluating the data set', dataSetAddress)
print(' Fasttext ', 'GloVe ', 'w2v ')
print('With CN',"%.4f" % get_sim(dataSetAddress, 'fasttext',cn_fasttext_mat, alpha =2), "%.4f" % get_sim(dataSetAddress, 'glove', cn_glove_mat, alpha =2), "%.4f" % get_sim(dataSetAddress, 'w2v', cn_w2v_mat, alpha =2))
print('NO CN',"%.4f" % get_sim_no_cn(dataSetAddress, 'fasttext'), "%.4f" % get_sim_no_cn(dataSetAddress, 'glove'), "%.4f" % get_sim_no_cn(dataSetAddress, 'w2v'))
###Output
evaluating the data set /content/EN-RG-65.txt
Fasttext GloVe w2v
With CN 0.8621 0.7840 0.7892
NO CN 0.8400 0.7510 0.7391
evaluating the data set /content/EN-WS-353-ALL.txt
Fasttext GloVe w2v
With CN 0.7336 0.7908 0.6930
NO CN 0.7334 0.7385 0.6935
evaluating the data set /content/EN-RW-STANFORD.txt
Fasttext GloVe w2v
With CN 0.5369 0.5898 0.5804
NO CN 0.5231 0.5101 0.5578
evaluating the data set /content/EN-MEN-TR-3k.txt
Fasttext GloVe w2v
With CN 0.8062 0.8338 0.7867
NO CN 0.7902 0.8011 0.7705
evaluating the data set /content/EN-MTurk-287.txt
Fasttext GloVe w2v
With CN 0.7141 0.7107 0.6681
NO CN 0.7072 0.6908 0.6831
evaluating the data set /content/EN-SIMLEX-999.txt
Fasttext GloVe w2v
With CN 0.4584 0.4853 0.4682
NO CN 0.4521 0.4073 0.4419
evaluating the data set /content/EN-SimVerb-3500.txt
Fasttext GloVe w2v
With CN 0.3652 0.3636 0.3830
NO CN 0.3603 0.2843 0.3654
###Markdown
Experiment 2: STS Benchmark I re-implement the STS task by evaluating CN post-processed word vectors on the 2012-2017 SemEval STS tasks. Load STS datasets
###Code
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/stsbenchmark/sts-dev.csv
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/stsbenchmark/sts-mt.csv
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/stsbenchmark/sts-other.csv
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/stsbenchmark/sts-test.csv
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/stsbenchmark/sts-train.csv
!pwd
!ls
import io
def load_sts_dataset(fname):
fin = io.open(fname, 'r', encoding='utf-8', newline='\n', errors='ignore')
# For a STS dataset, loads the relevant information: the sentences and their human rated similarity score.
sent_pairs = []
for line in fin:
items = line.rstrip().split('\t')
if len(items) == 7 or len(items) == 9:
sent_pairs.append((re.sub("[^0-9]", "", items[2]) + '-' + items[1] , items[5], items[6], float(items[4])))
elif len(items) == 6 or len(items) == 8:
sent_pairs.append((re.sub("[^0-9]", "", items[1]) + '-' + items[0] , items[4], items[5], float(items[3])))
else:
print('data format is wrong!!!')
return pd.DataFrame(sent_pairs, columns=["year_task", "sent_1", "sent_2", "sim"])
def load_all_sts_dataset():
# Loads all of the STS datasets
resourceFile = '/content/'
sts_train = load_sts_dataset(resourceFile + 'sts-train.csv')
sts_dev = load_sts_dataset(resourceFile + "sts-dev.csv")
sts_test = load_sts_dataset(resourceFile + "sts-test.csv")
sts_other = load_sts_dataset(resourceFile + "sts-other.csv")
sts_mt = load_sts_dataset(resourceFile +"sts-mt.csv")
sts_all = pd.concat([sts_train, sts_dev, sts_test, sts_other, sts_mt ])
return sts_all
sts_all = load_all_sts_dataset()
###Output
_____no_output_____
###Markdown
Load dataset by year-task
###Code
def load_by_task_year(sts_all):
sts_task_year = {}
for i in sts_all['year_task']:
indices = [index for index, x in enumerate(sts_all['year_task']) if x == i]
sts_task_year[i] = sts_all.iloc[indices]
return sts_task_year
sts_year_task = load_by_task_year(sts_all)
print(sts_year_task.keys())
print(sts_year_task['2012-MSRvid'][0:5])
###Output
dict_keys(['2012-MSRvid', '2014-images', '2015-images', '2014-deft-forum', '2012-MSRpar', '2014-deft-news', '2013-headlines', '2014-headlines', '2015-headlines', '2016-headlines', '2017-track5.en-en', '2015-answers-forums', '2016-answer-answer', '2012-surprise.OnWN', '2013-FNWN', '2013-OnWN', '2014-OnWN', '2014-tweet-news', '2015-belief', '2016-plagiarism', '2016-question-question', '2012-SMTeuroparl', '2012-surprise.SMTnews', '2016-postediting'])
year_task sent_1 \
0 2012-MSRvid A plane is taking off.
1 2012-MSRvid A man is playing a large flute.
2 2012-MSRvid A man is spreading shreded cheese on a pizza.
3 2012-MSRvid Three men are playing chess.
4 2012-MSRvid A man is playing the cello.
sent_2 sim
0 An air plane is taking off. 5.00
1 A man is playing a flute. 3.80
2 A man is spreading shredded cheese on an uncoo... 3.80
3 Two men are playing chess. 2.60
4 A man seated is playing the cello. 4.25
###Markdown
Load dataset by year
###Code
sts_year = {}
def load_by_year(sts_all):
for year in ['2012', '2013', '2014', '2015', '2016', '2017']:
indices = [index for index, x in enumerate(sts_all['year_task'])if year in x]
# store year as dictionary, [year: year-task]
#year_task = sts_all.iloc[indices]
sts_year[year] = sts_all.iloc[indices]
return sts_year
sts_year = load_by_year(sts_all)
print(len(sts_year.keys()))
print(sts_year['2016'][:5])
###Output
6
year_task sent_1 \
5552 2016-headlines Driver backs into stroller with child, drives off
5553 2016-headlines Spain Princess Testifies in Historic Fraud Probe
5554 2016-headlines Senate confirms Obama nominee to key appeals c...
5555 2016-headlines U.N. rights chief presses Egypt on Mursi deten...
5556 2016-headlines US Senate confirms Janet Yellen as US Federal ...
sent_2 sim
5552 Driver backs into mom, stroller with child the... 4.0
5553 Spain princess testifies in historic fraud probe 5.0
5554 Senate approves Obama nominee to key appeals c... 5.0
5555 UN Rights Chief Presses Egypt on Morsi Detention 5.0
5556 Senate confirms Janet Yellen as next Federal R... 5.0
###Markdown
Preparation for STS Evaluation* Define Sentence class, which has raw data and tokenized data* Get similarity scores based on embeddings
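For reference (added note, mirroring the functions below): each sentence is embedded as the average of the word vectors of its in-vocabulary, lower-case tokens, and the similarity of a sentence pair is the cosine of the two averaged vectors, $$s(a, b) = \cos\!\Big(\tfrac{1}{|a|}\sum_{w \in a} v_w,\; \tfrac{1}{|b|}\sum_{w \in b} v_w\Big),$$ which is later compared to the human ratings with a Pearson correlation per task and year.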
###Code
class Sentence:
def __init__(self, sentence):
self.raw = sentence
normalized = sentence.replace("‘", "'").replace("’", "'")
self.tokens = [token.lower() for token in nltk.word_tokenize(normalized)]
def sen_sim(sentences1, sentences2, cn_fname, cn_mat):
model = eval(cn_fname)
embeddings = []
ls_word = list(model.vocab)
for sent_1, sent_2 in zip(sentences1, sentences2):
tokens1 = sent_1.tokens
tokens2 = sent_2.tokens
tokens1 = [token for token in tokens1 if token in model.vocab and token.islower()]
tokens2 = [token for token in tokens2 if token in model.vocab and token.islower()]
ids1 = [ls_word.index(token) for token in tokens1 ]
ids2 = [ls_word.index(token) for token in tokens2 ]
embedding1 = np.average([cn_mat[id] for id in ids1], axis = 0)
embedding2 = np.average([cn_mat[id] for id in ids2], axis = 0)
if isinstance(embedding1, float) or isinstance(embedding2, float):
embeddings.append(np.zeros(300))
embeddings.append(np.zeros(300))
else:
embeddings.append(embedding1)
embeddings.append(embedding2)
sim_score = [cosine_similarity(embeddings[id*2].reshape(1, -1), embeddings[id*2+1].reshape(1, -1))[0][0] for id in range(len(embeddings)//2)]
return sim_score
def no_cn_sen_sim(sentences1, sentences2, fname):
model = eval(fname)
embeddings = []
for sent_1, sent_2 in zip(sentences1, sentences2):
tokens1 = sent_1.tokens
tokens2 = sent_2.tokens
tokens1 = [token for token in tokens1 if token in model.vocab and token.islower()]
tokens2 = [token for token in tokens2 if token in model.vocab and token.islower()]
embedding1 = np.average([model[token] for token in tokens1], axis = 0)
embedding2 = np.average([model[token] for token in tokens2], axis = 0)
if isinstance(embedding1, float) or isinstance(embedding2, float):
embeddings.append(np.zeros(300))
embeddings.append(np.zeros(300))
else:
embeddings.append(embedding1)
embeddings.append(embedding2)
sim_score = [cosine_similarity(embeddings[id*2].reshape(1, -1), embeddings[id*2+1].reshape(1, -1))[0][0] for id in range(len(embeddings)//2)]
return sim_score
###Output
_____no_output_____
###Markdown
Results
###Code
model_list = ['glove', 'w2v', 'fasttext']
pearson_cors = {}
pearson_cors_no_cn = {}
mat = []
for year_task in sts_all['year_task'].unique():
for model in model_list:
if model == 'glove':
mat = cn_glove_mat
elif model == 'w2v':
mat = cn_w2v_mat
elif model == 'fasttext':
mat = cn_fasttext_mat
sentences1=[Sentence(sent1) for sent1 in sts_year_task[year_task]['sent_1']]
sentences2=[Sentence(sent2) for sent2 in sts_year_task[year_task]['sent_2']]
sim = sen_sim(sentences1, sentences2, model, mat)
pearson_correlation = round(scipy.stats.pearsonr(sim, sts_year_task[year_task]['sim'])[0] * 100,2)
pearson_cors[(model, year_task)] = pearson_correlation
sim2 = no_cn_sen_sim(sentences1, sentences2, model)
pearson_correlation_no_cn = round(scipy.stats.pearsonr(sim2, sts_year_task[year_task]['sim'])[0] * 100,2)
pearson_cors_no_cn[(model, year_task)] = pearson_correlation_no_cn
count = 0
for (i,j) in pearson_cors.keys():
if count % 3 ==0:
print('')
count +=1
print('With CN',i, j, pearson_cors[(i,j)])
print('NO CN',i, j, pearson_cors_no_cn[(i,j)])
###Output
_____no_output_____
###Markdown
Re-implementation of Conceptor Negation* Word-similarity task* STS (Semantic Textual Similarity) tasks Data For both tasks, I used the small GloVe and word2vec word vector datasets, as well as the Fasttext English 1M word vector dataset. Small GloVe: https://drive.google.com/uc?id=1U_UGB2vyTuTIcbV_oeDtJCtAtlFMvXOM Small word2vec: https://drive.google.com/uc?id=1j_b4TRpL3f0HQ8mV17_CtOXp862YjxxB Fasttext 1M: https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki-news-300d-1M.vec.zip
###Code
import numpy as np
import scipy, requests, codecs, os, re, nltk, itertools, csv
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.cluster import AgglomerativeClustering, KMeans
import tensorflow as tf
from scipy.stats import spearmanr
import pandas as pd
import functools as ft
import os
import io
nltk.download('punkt')
!ls
!pip install -q gdown
!gdown https://drive.google.com/uc?id=1U_UGB2vyTuTIcbV_oeDtJCtAtlFMvXOM # download a small subset of glove
!gdown https://drive.google.com/uc?id=1j_b4TRpL3f0HQ8mV17_CtOXp862YjxxB # download a small subset of word2vec
!ls
!gdown https://drive.google.com/uc?id=1Zl6a75Ybf8do9uupmrJWKQMnvqqme4fh
print(len(list(fasttext2.vocab)))
print(list(fasttext2.vectors)[0:5])
!wget https://s3-us-west-1.amazonaws.com/fasttext-vectors/wiki-news-300d-1M.vec.zip
!unzip wiki-news-300d-1M.vec.zip
!ls
!du -h wiki-news-300d-1M.vec
###Output
2.2G wiki-news-300d-1M.vec
###Markdown
Load Fasttext, small GloVe and small word2vec data
###Code
import gensim
from gensim.models.keyedvectors import KeyedVectors
fasttext2 = KeyedVectors.load_word2vec_format('/content/' + 'fasttext.bin', binary=True)
print('The fasttext embedding has been loaded!')
fasttext = KeyedVectors.load_word2vec_format('/content/' + 'wiki-news-300d-1M.vec')
!python -m gensim.scripts.glove2word2vec -i small_glove.txt -o small_glove_w2v.txt
!python -m gensim.scripts.glove2word2vec -i small_word2vec.txt -o small_w2v_w2v.txt
glove = KeyedVectors.load_word2vec_format('/content/' + 'small_glove_w2v.txt')
w2v = KeyedVectors.load_word2vec_format('/content/' + 'small_w2v_w2v.txt')
###Output
_____no_output_____
###Markdown
Post-processing with CN
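For orientation (a reference formulation of what the cells below compute, stated here rather than adding anything new): with the word vectors stacked as columns of $X$ and the correlation matrix $R = XX^{\top}/n$, the conceptor matrix is $$C = R\,\big(R + \alpha^{-2} I\big)^{-1},$$ and conceptor negation (CN) replaces every word vector $x$ by $$\tilde{x} = (I - C)\,x.$$ The code below uses $\alpha = 2$ for the full vocabularies and $\alpha = 1$ for the Wikipedia-frequency-restricted variant.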
###Code
#Use this func for data size smaller than 1M
def cn_mat(pre_cn_f_name, alpha):
pre_cn_data = eval(pre_cn_f_name)
#word_pairs = set(list(cn_data.keys()))
cn_mat = pre_cn_data.vectors
word_vec = np.array(cn_mat, dtype = float).T
num_word = word_vec.shape[1]
num_vec = word_vec.shape[0]
print(num_word, num_vec)
corr_mat = word_vec.dot(word_vec.T) /num_word
print('got corr_mat')
concept_mat = corr_mat @ np.linalg.inv(corr_mat + alpha ** (-2) * np.eye(num_vec))
    print('got concept_mat')
new_mat = ((np.eye(num_vec)-concept_mat)@word_vec).T
print('got new_mat')
return new_mat
cn_fasttext_mat = cn_mat('fasttext', alpha = 2)
print('CN preprocess done for fasttext data')
cn_glove_mat = cn_mat('glove', alpha = 2)
print('CN preprocess done for glove data')
cn_w2v_mat = cn_mat('w2v', alpha =2)
print('CN preprocess done for w2v data')
!wget https://raw.githubusercontent.com/IlyaSemenov/wikipedia-word-frequency/master/results/enwiki-20150602-words-frequency.txt
wikiWordsPath = '/content/' + 'enwiki-20150602-words-frequency.txt'
wikiWords = []
with open(wikiWordsPath, "r+") as f_in:
for line in f_in:
one_line = line.split(' ')
if int(one_line[1]) > 200:
wikiWords.append(one_line[0])
!git clone https://github.com/PrincetonML/SIF
wikiWordsPath = '/content/' + '/SIF/auxiliary_data/enwiki_vocab_min200.txt' # https://github.com/PrincetonML/SIF/blob/master/auxiliary_data/enwiki_vocab_min200.txt
wikiWords = []
with open(wikiWordsPath, "r+") as f_in:
for line in f_in:
wikiWords.append(line.split(' ')[0])
print(len(wikiWords))
from numpy.linalg import norm, inv, eig
def reduced_cn_mat(wordVecModel_str, alpha = 1):
# compute the prototype conceptor with alpha = 1
wordVecModel = eval(wordVecModel_str)
word_in_wiki_and_model = set(list(wordVecModel.vocab)).intersection(set(wikiWords))
x_collector_indices = []
for word in word_in_wiki_and_model:
x_collector_indices.append(wordVecModel.vocab[word].index)
# put the word vectors in columns
x_collector = wordVecModel.vectors[x_collector_indices,:].T
nrWords = x_collector.shape[1] # number of total words
print(nrWords)
R = x_collector.dot(x_collector.T) / nrWords # calculate the correlation matrix
concept_mat = R @ inv(R + alpha ** (-2) * np.eye(300))# calculate the conceptor matrix
return concept_mat
fasttext2_concept_mat = reduced_cn_mat('fasttext2', alpha = 1)
print('CN preprocess done for fasttext2 data')
print(len(fasttext2_concept_mat))
###Output
300
###Markdown
Experiment 1: Word similarity evaluation I re-implemented the word similarity task by evaluating CN post-processed word vectors on 7 standard word similarity datasets, namely the RG65 (Rubenstein and Goodenough, 1965), the WordSim-353 (WS) (Finkelstein et al., 2002), the rare-words (RW) (Luong, Socher, and Manning, 2013), the MEN dataset (Bruni, Tran, and Baroni, 2014), the MTurk (Radinsky et al., 2011), the SimLex-999 (SimLex) (Hill, Reichart, and Korhonen, 2015), and the SimVerb-3500 (Gerz et al., 2016). Load word similarity text data
###Code
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-MEN-TR-3k.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-MTurk-287.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-RG-65.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-RW-STANFORD.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-SIMLEX-999.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-SimVerb-3500.txt
!wget https://raw.githubusercontent.com/liutianlin0121/Conceptor-Negation-WV/master/data/wordSimData/EN-WS-353-ALL.txt
!ls
!pwd
###Output
/content
|
Fitness_Data_Analysis.ipynb | ###Markdown
###Code
###Output
_____no_output_____ |
03_workflow.ipynb | ###Markdown
Workflow> Define static or dynamic workflow for automatically updating, training and deploying your ML model! ***input:*** Workflow definition parameters ***output:*** python or snakemake script for running the workflow ***description:*** While you are developing your ML application, you might prefer running the notebooks manually again and again. However, once you have deployed your model into production it becomes impractical and compromises scalability, modularity and the principle of ease of reproducibility. This happens regardless of what 'production' means to you - it might well be that you are just running the notebooks and viewing the results directly from them. Whatever you are doing, having a single command to run the whole workflow makes things so much easier. Workflow automation is also the part of the work where you'll probably notice a lot of bugs and non-robustness in your notebooks. Probably a lot more than you anticipated, but try not to get frustrated! Debugging is a big and important part of the work. In this notebook we explain alternatives for automating workflows, either as a static or a dynamic workflow. By following these examples (and further documentation on [papermill](https://papermill.readthedocs.io/) and [Snakemake](https://snakemake.readthedocs.io/)) you can parameterize your notebooks, run them automatically in a workflow, and even parameterize and automate the workflow definition. With this template, you can easily define very complex and versatile workflows that are well documented in a notebook. We selected these tools for the template because they have stable community support, they are relatively easy to use and they fit our needs. There are also other tools for workflow management and orchestration that may better suit your needs. Feel free to use them. For more information, see for example this [comparison of workflow tools for Python](https://medium.com/@Minyus86/comparison-of-pipeline-workflow-packages-airflow-luigi-gokart-metaflow-kedro-pipelinex-5daf57c17e7). Import relevant modules
###Code
import numpy as np
import pandas as pd
import papermill as pm
###Output
_____no_output_____
###Markdown
Define notebook parameters make direct derivations from the parameters: How to run parameterized notebooks with papermill Papermill allows parameterizing and running notebooks from a Python runtime with `papermill.execute_notebook(input, output, parameters)`. The `input` parameter is the notebook to be run. The `output` parameter is the filepath where a copy of the executed notebook is saved with the results. This can be the same as the `input`, but you probably want to keep it separate - otherwise your version control may get messy. In this example executed notebooks are saved under `results/notebooks`. The `parameters` cell allows you to change settings of the notebooks. You may have noticed that in the beginning of each notebook there is a cell with a comment `# This cell is tagged parameters`. That cell has been given the `Parameters` tag. The template notebooks already contain the tag, but you can check the [papermill documentation](https://papermill.readthedocs.io/en/latest/usage-parameterize.html) on how to do it in different notebook editors. In this cell, variables are assigned. What papermill does is that any parameters given to the `execute_notebook` function are listed in a new cell right below the one tagged with parameters. The listed parameters overwrite the default assignments. This is why you should not do anything else but simple assignments in the parameters cell. Let's show an example. Let's run the 'loss' notebook with and without changing the 'seed' parameter. Copies of the notebooks executed with different settings are stored in `results/notebooks`. The copies are saved with an underscore prefix `_notebook.ipynb` so that they are ignored by nbdev.
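For illustration (a hypothetical cell, not copied from the example notebooks), a tagged parameters cell and the cell papermill injects might look like this:
###Code
# Hypothetical illustration of a cell tagged "parameters": default values, simple assignments only.
seed = 0

# If the notebook is executed with pm.execute_notebook(..., parameters={"seed": 1}),
# papermill injects a new cell directly below the tagged one containing:
# seed = 1
# which overrides the default assignment above.
###Output
_____no_output_____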
###Code
# slow
# run model notebook with default parameters
_ = pm.execute_notebook(
"02_loss.ipynb",
"results/notebooks/_02_loss_default_params.ipynb",
)
# slow
# run model notebook with 'seed' -parameter changed from 0 to 1
_ = pm.execute_notebook(
"02_loss.ipynb", "results/notebooks/_02_loss_seed_1.ipynb", parameters={"seed": 1}
)
###Output
_____no_output_____
###Markdown
You can now open the notebooks and compare the results. Now we could just define and run the complete workflow from this notebook: just define which notebooks to run, in which order and with which parameters. Then just run this notebook and the workflow is executed. We could even go further and parameterize this notebook to get parameterizable workflow execution. Then, we could use papermill in another application to run this notebook to execute the rest of the workflow. However, this approach has two main restrictions. The workflow execution script would not be included in this documentation, and it does not allow dynamic workflows because launching Snakemake from inside a Python runtime will cause all sorts of problems. Static executable workflow with papermill If we want to automatically run the workflow, we need to create an executable script to run it. We can define it in this notebook to include it in our documentation:
###Code
%%writefile static_workflow.py
# execute workflow of the example notebooks
# to run the script, call python static_workflow.py workflow_setup.yaml
# this file has been added to .gitignore
# NOTE: use curly brackets only to format in global variables!
# hint: you can include additional parameters with sys.argv
# import relevant libraries
import papermill as pm
import os
import sys
import yaml
# update modules before running just to be sure
os.system('nbdev_build_lib')
# run data notebook
_ = pm.execute_notebook('00_data.ipynb', # input
'results/notebooks/_00_data.ipynb', # output
parameters = dict(seed = 0) # params (optional)
)
# run model notebook
_ = pm.execute_notebook('01_model.ipynb',
'results/notebooks/_01_model.ipynb')
# run loss notebook
_ = pm.execute_notebook('02_loss.ipynb',
'results/_02_loss.ipynb')
# optional (uncomment): make backup of the index and workflow notebooks:
# os.system('cp {workflow} {save_notebooks_to}{workflow}')
###Output
_____no_output_____
###Markdown
Parameterized static executable workflow with papermill What if some of your input files change, or you would like to run your workflow with a slightly different setup? Just as we parameterized the components of the workflow, we might want to parameterize the workflow definition. You can either read parameters directly from sys.argv, from Python's argparse (see the small sketch below) or, like in this example, from a configuration file. Let's begin by defining the configuration file. We use the [yaml](https://yaml.org/) format, because it is easy to write and read by both humans and machines. The file is defined in this notebook and written directly into the `workflow_setup.yml` file. The config file is added to `.gitignore` - it is already defined in this notebook, so we do not need double versioning. See [here](https://stackoverflow.com/questions/1773805/how-can-i-parse-a-yaml-file-in-python/17740431774043) how to use yaml with Python.
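For completeness, here is a minimal sketch of the argparse alternative mentioned above. It is not used by this template, and the flag names are illustrative only:
```
import argparse

parser = argparse.ArgumentParser(description="Run the notebook workflow")
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--save-notebooks-to", default="results/notebooks/")
args = parser.parse_args()
# argparse exposes '--save-notebooks-to' as args.save_notebooks_to, e.g.:
# pm.execute_notebook('02_loss.ipynb',
#                     args.save_notebooks_to + '_02_loss.ipynb',
#                     parameters={'seed': args.seed})
```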
###Code
%%writefile workflow_setup.yml
---
notebooks: # workflow notebook setup
index: # name of notebook
notebook: index.ipynb # notebook file
data:
notebook: 00_data.ipynb
params: # notebook parameters
seed: 0
model:
notebook: 01_model.ipynb
params:
seed: 0
loss:
notebook: 02_loss.ipynb
params:
seed: 0
utils: # general workflow settings
save_notebooks_to: results/notebooks/
notebook_save_prefix: _
###Output
Overwriting workflow_setup.yml
###Markdown
Let's take a look at how the setup looks when loaded as a Python dictionary:
###Code
import yaml
with open("workflow_setup.yml", "r") as f:
setup_dict = yaml.load(f, Loader=yaml.Loader)
setup_dict
###Output
_____no_output_____
###Markdown
Now run the cell below to create the execution script. The code is not run in this notebook, but written in the file `static_workflow.py`:
###Code
%%writefile static_workflow.py
# execute workflow of the example notebooks
# to run the script, call python static_workflow.py workflow_setup.yaml
# this file has been added to .gitignore
# NOTE: use curly brackets only to format in global variables!
# hint: you can include additional parameters with sys.argv
# import relevant libraries
import papermill as pm
import os
import sys
import yaml
## parse arguments from workflow_setup.yaml
configfilename = sys.argv[1]
with open(configfilename, 'r') as f:
config = yaml.load(f, Loader = yaml.Loader)
# variables
notebooks = config['notebooks']
data = notebooks['data']
model = notebooks['model']
loss = notebooks['loss']
utils = config['utils']
# update modules before running just to be sure
os.system('nbdev_build_lib')
# run data notebook
_ = pm.execute_notebook(data['notebook'], # input
utils['save_notebooks_to'] \
+ utils['notebook_save_prefix'] \
+ data['notebook'], # output
parameters = data['params'] # params
)
# run model notebook
_ = pm.execute_notebook(model['notebook'],
utils['save_notebooks_to'] \
+ utils['notebook_save_prefix'] \
+ model['notebook'],
parameters = model['params'])
# run loss notebook
_ = pm.execute_notebook(loss['notebook'],
utils['save_notebooks_to'] \
+ utils['notebook_save_prefix'] \
+ loss['notebook'],
parameters = loss['params'])
# optional (uncomment): make backup of the index and workflow notebooks:
# os.system('cp {workflow} {save_notebooks_to}{workflow}')
###Output
Overwriting static_workflow.py
###Markdown
If you open the file `static_workflow.py`, you will see that the notebook names and parameters are no longer hard-coded: the script reads them from the configuration file at runtime. Now, you can run the workflow:
###Code
# slow
# run this in your terminal to also run the nbdev_build_lib
!python static_workflow.py workflow_setup.yml
###Output
sh: 1: nbdev_build_lib: not found
Executing: 0%| | 0/58 [00:00<?, ?cell/s][IPKernelApp] WARNING | Unknown error in handling startup files:
Executing: 100%|██████████████████████████████| 58/58 [00:19<00:00, 3.77cell/s]
Executing: 0%| | 0/45 [00:00<?, ?cell/s][IPKernelApp] WARNING | Unknown error in handling startup files:
Executing: 100%|██████████████████████████████| 45/45 [00:18<00:00, 1.91cell/s]
Executing: 0%| | 0/44 [00:00<?, ?cell/s][IPKernelApp] WARNING | Unknown error in handling startup files:
Executing: 100%|██████████████████████████████| 44/44 [00:13<00:00, 4.33cell/s]
###Markdown
You can again make a visible copy of the hidden notebooks folder just like above (remember to delete it afterwards) and view the notebooks. You can change some of the notebook parameters and rerun the workflow to see how it affects the results. You see that static workflow definition is quite simple. In the script above, we did not define any inputs, outputs or the relation of the different steps. It's good to keep things that way, unless there is a reason not to. It might be that we have multiple, changing data sources, a complex workflow structure, a need for parallelization or other issues making it difficult to hard-code the steps required in your workflow. Then, you might need a dynamic workflow. Dynamic executable workflow with Snakemake Snakemake is a tool that will automatically determine which steps to run based on inputs and outputs. It's like GNU make, but for Python: easy to read and write, but powerful. Unfortunately it is impossible to cover all the properties of the tool here, but see its documentation and internet discussion for ideas. Here we only cover a tiny portion of the possibilities of Snakemake, but it can do very complex things. Snakemake executes the workflow as a rule-based directed acyclic graph (DAG). Each workflow step is determined by a rule, which consists of inputs, outputs and execution of code. The inputs and outputs are files (data, config, source code, notebooks, images, tables etc.). The code executed can either be shell commands or Python, written directly into the Snakefile. Let's consider a workflow where we have two parallel rules 1a and 1b, and one consecutive rule 2. In addition we have rule all, which determines the whole workflow. The rules 1a and 1b only depend on their own input. Rule 2 depends on the outputs of both rules 1a and 1b. Rule 2 also has an additional input independent of the other rules. We can visualize the workflow as a DAG where rules 1a and 1b run in parallel and both feed into rule 2. If we turned it into a Snakefile script, it would look something like this:

    rule all:            # used to determine the whole workflow
        input:
            output_2
    rule 1a:             # parallel to rule 1b
        input:
            input_1a
        output:
            output_1a
        run:
            # run python commands
            Python script to run rule 1a
    rule 1b:             # parallel to rule 1a
        input:
            input_1b
        output:
            output_1b
        shell:
            # you can also run shell commands
            shell script to run rule 1b
    rule 2:              # consecutive to rules 1a and 1b
        input:
            output_1a,   # depends on rule 1a
            output_1b,   # depends on rule 1b
            input_2      # independent input
        output:
            output_2
        run:
            Python script to run rule 2

Snakemake can either be used to run a single rule, or the complete workflow based on rule all. Based on the inputs and outputs, Snakemake will determine which other rules then need to be executed. In our example, the notebooks constitute the nodes of the DAG. Based on the changes in input and output files since the last execution, Snakemake determines which steps need to be run. If no changes are observed, Snakemake does not do anything. For example, if we change the input of rule 1b, rules 1b and 2 are executed. If we make a change to the input of rule 2, only rule 2 will be executed. If we want to rerun all steps without changing anything, we can just touch the inputs of the independent rules: `touch input_1a input_1b`. The following script will be written into a Snakefile that you can run to execute the workflow.
###Code
%%writefile Snakefile
# import relevant libraries
import papermill as pm
import os
# determine global variables, wildcards etc.
# all: final output of the workflow
rule all:
input:
'results/LogisticRegressionClassifier.pkl' # trained model
# data
rule data:
input:
'00_data.ipynb' # every notebook is it's own input
# if we had input files they should be listed here, separated with comma ,
output: # Snakemake checks that after running these files are created / updated
'data/preprocessed_data/dataset_clean_switzerland_cleveland.csv', # clean dataset
'data/preprocessed_data/dataset_toy_switzerland_cleveland.csv', # toy dataset
'ml_project_template/data.py', # plot functions
'results/notebooks/_00_data.ipynb' # copy of the executed notebook
run:
# run notebook with papermill
_ = pm.execute_notebook('00_data.ipynb', # input
'results/notebooks/_00_data.ipynb', # output
parameters = {'seed':0}) # params (optional)
os.system('nbdev_build_lib --fname 00_data.ipynb') # build data.py
rule model:
input:
'01_model.ipynb',
'data/preprocessed_data/dataset_toy_switzerland_cleveland.csv',
output:
'ml_project_template/model.py', # model class
'results/notebooks/_01_model.ipynb'
run:
_ = pm.execute_notebook('01_model.ipynb', # we could also use '{input[0]}'
'results/notebooks/_01_model.ipynb') # we could also use '{output[1]}'
os.system('nbdev_build_lib --fname 01_model.ipynb')
rule loss:
input:
'02_loss.ipynb',
'data/preprocessed_data/dataset_clean_switzerland_cleveland.csv',
'ml_project_template/data.py',
'ml_project_template/model.py'
output:
'results/LogisticRegressionClassifier.pkl', # trained model
'results/notebooks/_02_loss.ipynb'
run:
_ = pm.execute_notebook('02_loss.ipynb',
'results/notebooks/_02_loss.ipynb')
os.system('nbdev_build_lib --fname 02_loss.ipynb')
###Output
Overwriting Snakefile
###Markdown
One thing to notice is that Snakemake should be used from the terminal directly, not from a Python script or notebook. To run Snakemake, call:

    snakemake -n        # dry-run snakemake to check what would be done
    snakemake --jobs 1  # run workflow

The --jobs parameter (you can also use -j) is the maximum number of CPU cores to use with the pipeline (local/cluster/cloud cores). Usually 1 is enough with simple pipelines, since our primary workflow step is a notebook, and most Python operations are difficult to parallelize. Parameterized dynamic executable workflow with Snakemake Again, we might want to parameterize the notebook for scalability. For simple parameterization, use the rule `params` directive:

    rule model:
        input:
            ...
        output:
            ...
        params:
            seed = 0
        run:
            _ = pm.execute_notebook(..., ..., parameters = {'seed': params.seed})

For complex parameterization, including the parameterization of inputs and outputs, consider using a config file. Snakemake can automatically load a json or yaml file into a Python dictionary:

    # in config.yml:
    rules:
        data:
            ...
        model:
            input:
                - 01_model.ipynb
                - data/preprocessed_data/dataset_toy_switzerland_cleveland.csv
            parameters:
                seed: 0
        ...

    # in Snakefile:
    # automatically load the json or yaml file into the python dictionary 'config'
    configfile: "config.yml"
    rule model:
        input:
            config['rules']['model']['input']
        output:
            ...
        run:
            _ = pm.execute_notebook(..., ..., parameters = {'seed': config['rules']['model']['parameters']['seed']})

For more information, see the [Snakemake documentation](https://snakemake.readthedocs.io/). Alternative: Use this notebook to create an API If you just want to create a conventional Python application, you can use this notebook to create main.py so that you can run your module as a Python application. Then, you should change the name of this notebook and the default_exp location to main. This requires that you define and export all functions and classes to modules so that they can be used elsewhere. This is a bit messy, and diminishes many of the benefits of notebook development because instead of just running the notebooks you already created, you have to redefine all the steps required all over again. However, sometimes this might be what you want to do, so we wanted to mention it here. Pseudo example of how to define main.py in a notebook:
###Code
%%script False
## remove this line and the line above if you want to run and export the code (note that it needs editing to work)
### export main
# remove the two extra # in the line above
import numpy as np
import sys
import data, model, loss
def get_input():
"""
get input from user
"""
pass
def main(filename):
"""
run main loop
"""
# load data
X, y = data.load(filename)
# initialize model, fit, optimize
m = model.LogisticRegressionClassifier(X, y).fit().optimize()
# main loop
input_data = get_input()
while(input_data):
if input_data[0] == 'predict':
# predict on input
print(m.predict(input_data[1]))
else:
# do other things
pass
if __name__ == "__main__":
"""
run with: python ml_project_template filename
"""
np.random.seed(0)
filename = sys.argv[1]
main(filename)
%%script False
## remove this line and the line above if you want to run the code (note that it needs editing to work)
## test the main loop in this notebook and interact with the application
main()
###Output
Couldn't find program: 'False'
###Markdown
Workflow> Define workflow for automatically updating, training and deploying your ML model! ***input:*** data, model & loss notebooks and related modules ***output:*** script for executing the ML model update workflow ***description:*** An ML model update workflow allows you to automatically reload your data, train, evaluate and deploy your model. Note that by following the notebook templates you have already done most of the work - the notebooks **are** the workflow! So, in this notebook you define a script to automatically execute the other notebooks with the [papermill](https://papermill.readthedocs.io/) tool. Note that you can pass parameters to the notebooks! You can either define a static workflow, where every step is recreated every time, or a dynamic workflow, where only the parts of the workflow affected by changes since the last model update are recreated. For dynamic workflows we encourage utilizing the [Snakemake](https://snakemake.readthedocs.io/) tool. Here we present a super simple static workflow example that you can build upon in your project. Edit this and other text cells to describe your project. Remember that you can utilize the `export` tag to export cell commands to `[your_module]/workflow.py`. Import relevant modules
###Code
# export
import papermill
from pathlib import Path
import os
# your code here
###Output
_____no_output_____
###Markdown
Define notebook parameters
###Code
# this cell is tagged with 'parameters'
seed = 0
# your code here
###Output
_____no_output_____
###Markdown
make direct derivations from the parameters:
###Code
# your code here
###Output
_____no_output_____
###Markdown
Define workflow Here we present a tiny example you can try running yourself and then extend to your needs. Note that if you run `nbdev_build_lib`, the script is exported to `[your_module]/workflow.py`. Then, you can run `python [your_module]/workflow.py` to run the workflow automatically!
###Code
# export
"""
A workflow to re-run your machine learning workflow automatically.
This example script will
- rebuild your python module
- run data notebook to reload and clean data
- run model notebook to sw test your model
- run loss notebook to train and evaluate your model with full data,
and save or deploy it for further use
Feel free to edit!
"""
cwd = Path().cwd()
save_notebooks_to = cwd / "results" / "notebooks"
# Hint! you can also create time or setup -stamped folders to store your results!
# make sure changes are updated to module
# (this will do nothing if you run the workflow from inside a notebook)
os.system("nbdev_build_lib")
# run workflow
for notebook in ["00_data.ipynb", "01_model.ipynb", "02_loss.ipynb"]:
papermill.execute_notebook(
notebook, # this notebook will be executed
save_notebooks_to
/ ("_" + notebook), # this is where the executed notebook will be saved
# (notebooks named with '_' -prefix are ignored by nbdev build_lib & build_docs!)
parameters={"seed": 1}, # you can change notebook parameters
kernel_name="python38myenv",
) # note: change kernel according to your project setup!
###Output
_____no_output_____
###Markdown
Workflow Define workflow for automatically updating, training and deploying your ML model! ***input:*** data, model & loss notebooks and related modules ***output:*** script for executing the ML model update workflow ***description:*** An ML model update workflow allows you to automatically reload your data, train, evaluate and deploy your model. Note that by following the notebook templates you have already done most of the work - the notebooks **are** the workflow! So, in this notebook you define a script to automatically execute the other notebooks with the [papermill](https://papermill.readthedocs.io/) tool. Note that you can pass parameters to the notebooks! You can either define a static workflow, where every step is recreated every time, or a dynamic workflow, where only the parts of the workflow affected by changes since the last model update are recreated. For dynamic workflows we encourage utilizing the [Snakemake](https://snakemake.readthedocs.io/) tool. Here we present a super simple static workflow example that you can build upon in your project. Edit this and other text cells to describe your project. Remember that you can utilize the `export` tag to export cell commands to `[your_module]/workflow.py`. Import relevant modules
###Code
# export
import papermill
from pathlib import Path
import os
# your code here
###Output
_____no_output_____
###Markdown
Define notebook parameters
###Code
# this cell is tagged with 'parameters'
seed = 0
# your code here
###Output
_____no_output_____
###Markdown
make direct derivations from the parameters:
###Code
# your code here
###Output
_____no_output_____
###Markdown
Define workflow Here we present a tiny example you can try running yourself and then extend to your needs. Note that if you run `nbdev_build_lib`, the script is exported to `[your_module]/workflow.py`. Then, you can run `python [your_module]/workflow.py` to run the workflow automatically!
###Code
# export
"""
A workflow to re-run your machine learning workflow automatically.
This example script will
- rebuild your python module
- run data notebook to reload and clean data
- run model notebook to sw test your model
- run loss notebook to train and evaluate your model with full data,
and save or deploy it for further use
Feel free to edit!
"""
cwd = Path().cwd()
save_notebooks_to = cwd / "results" / "notebooks"
# Hint! you can also create time or setup -stamped folders to store your results!
# make sure changes are updated to module
# (this will do nothing if you run the workflow from inside a notebook)
os.system("nbdev_build_lib")
# run workflow
for notebook in ["00_data.ipynb", "01_model.ipynb", "02_loss.ipynb"]:
papermill.execute_notebook(
notebook, # this notebook will be executed
save_notebooks_to
/ ("_" + notebook), # this is where the executed notebook will be saved
# (notebooks named with '_' -prefix are ignored by nbdev build_lib & build_docs!)
parameters={"seed": 1}, # you can change notebook parameters
kernel_name="python38myenv",
) # note: change kernel according to your project setup!
###Output
_____no_output_____ |
self-paced-labs/vertex-ai/vertex-challenge-lab/vertex-challenge-lab-solution.ipynb | ###Markdown
Building and deploying machine learning solutions with Vertex AI: Challenge Lab This Challenge Lab is recommended for students who have enrolled in the [**Building and deploying machine learning solutions with Vertex AI**](). You will be given a scenario and a set of tasks. Instead of following step-by-step instructions, you will use the skills learned from the labs in the quest to figure out how to complete the tasks on your own! An automated scoring system (shown on the Qwiklabs lab instructions page) will provide feedback on whether you have completed your tasks correctly. When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook. Are you ready for the challenge? Scenario You were recently hired as a Machine Learning Engineer at a startup movie review website. Your manager has tasked you with building a machine learning model to classify the sentiment of user movie reviews as positive or negative. These predictions will be used as an input in downstream movie rating systems and to surface top supportive and critical reviews on the movie website application. The challenge: your business requirements are that you have just 6 weeks to productionize a model that achieves greater than 75% accuracy to improve upon an existing bootstrapped solution. Furthermore, after doing some exploratory analysis in your startup's data warehouse, you found that you only have a small dataset of 50k text reviews to build a higher performing solution. To build and deploy a high-performance machine learning model with limited data quickly, you will walk through training and deploying a custom TensorFlow BERT sentiment classifier for online predictions on Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai) platform. Vertex AI is Google Cloud's next generation machine learning development platform where you can leverage the latest ML pre-built components and AutoML to significantly enhance your development productivity, scale your workflow and decision making with your data, and accelerate time to value. First, you will progress through a typical experimentation workflow where you will build your model from pre-trained BERT components from TF-Hub and `tf.keras` classification layers to train and evaluate your model in a Vertex Notebook. You will then package your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you will define and run a Kubeflow Pipeline on Vertex Pipelines that trains and deploys your model to a Vertex Endpoint that you will query for online predictions. Learning objectives * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv).* Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry).* Define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to train and deploy your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines).* Query your model on a [**Vertex Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) using online predictions. Setup Define constants
###Code
# Add installed library dependencies to Python PATH variable.
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
# Retrieve and set PROJECT_ID and REGION environment variables.
# TODO: fill in PROJECT_ID.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
REGION = 'us-central1'
# Create a globally unique Google Cloud Storage bucket for artifact storage.
GCS_BUCKET = f"gs://{PROJECT_ID}-vertex-challenge-lab"
!gsutil mb -l $REGION $GCS_BUCKET
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import os
import shutil
import logging
# TensorFlow model building libraries.
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
# Re-create the AdamW optimizer used in the original BERT paper.
from official.nlp import optimization
# Libraries for data and plot model training metrics.
import pandas as pd
import matplotlib.pyplot as plt
# Import the Vertex AI Python SDK.
from google.cloud import aiplatform as vertexai
###Output
_____no_output_____
###Markdown
Initialize Vertex AI Python SDK Initialize the Vertex AI Python SDK with your GCP Project, Region, and Google Cloud Storage Bucket.
###Code
vertexai.init(project=PROJECT_ID, location=REGION, staging_bucket=GCS_BUCKET)
###Output
_____no_output_____
###Markdown
Build and train your model locally in a Vertex Notebook Note: this lab adapts and extends the official [TensorFlow BERT text classification tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert) to utilize Vertex AI services. See the tutorial for additional coverage on fine-tuning BERT models using TensorFlow. Lab dataset In this lab, you will use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment) that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Data ingestion and processing code has been provided for you below: Import dataset
###Code
DATA_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
LOCAL_DATA_DIR = "."
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname="aclImdb_v1.tar.gz",
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
train_dir = os.path.join(dataset_dir, "train")
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, "unsup")
shutil.rmtree(remove_dir)
return dataset_dir
DATASET_DIR = download_data(data_url=DATA_URL, local_data_dir=LOCAL_DATA_DIR)
# Create a dictionary to iteratively add data pipeline and model training hyperparameters.
HPARAMS = {
# Set a random sampling seed to prevent data leakage in data splits from files.
"seed": 42,
# Number of training and inference examples.
"batch-size": 32
}
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, HPARAMS)
AUTOTUNE = tf.data.AUTOTUNE
CLASS_NAMES = raw_train_ds.class_names
train_ds = raw_train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's print a few example reviews:
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review {i}: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({CLASS_NAMES[label]})')
###Output
_____no_output_____
###Markdown
Choose a pre-trained BERT model to fine-tune for higher accuracy [**Bidirectional Encoder Representations from Transformers (BERT)**](https://arxiv.org/abs/1810.04805v2) is a transformer-based text representation model pre-trained on massive datasets (3+ billion words) that can be fine-tuned for state-of-the-art results on many natural language processing (NLP) tasks. Since its release in 2018 by Google researchers, it has transformed the field of NLP research and has come to form a core part of significant improvements to [Google Search](https://www.blog.google/products/search/search-language-understanding-bert). To meet your business requirements of achieving higher accuracy on a small dataset (20k training examples), you will use a technique called transfer learning to combine a pre-trained BERT encoder and classification layers to fine-tune a new higher performing model for binary sentiment classification. For this lab, you will use a smaller BERT model that trades some accuracy for faster training times. The Small BERT models are instances of the original BERT architecture with a smaller number L of layers (i.e., residual blocks) combined with a smaller hidden size H and a matching smaller number A of attention heads, as published by Iulia Turc, Ming-Wei Chang, Kenton Lee, Kristina Toutanova: ["Well-Read Students Learn Better: On the Importance of Pre-training Compact Models"](https://arxiv.org/abs/1908.08962), 2019. They have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality. The following preprocessing and encoder models in the TensorFlow 2 SavedModel format use the implementation of BERT from the [TensorFlow Models Github repository](https://github.com/tensorflow/models/tree/master/official/nlp/bert) with the trained weights released by the authors of Small BERT.
###Code
HPARAMS.update({
# TF Hub BERT modules.
"tfhub-bert-preprocessor": "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3",
"tfhub-bert-encoder": "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2",
})
###Output
_____no_output_____
###Markdown
Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. Since this text preprocessor is a TensorFlow model, it can be included in your model directly. For fine-tuning, you will use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101). For the learning rate `initial-learning-rate`, you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps `n_warmup_steps`. In line with the BERT paper, the initial learning rate is smaller for fine-tuning.
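As a quick optional check (a sketch, not one of the graded lab steps), you can call the preprocessing model configured above on a raw string to see the tensors it produces; the three dictionary keys and the default sequence length of 128 come from the TF-Hub preprocessing module:
```
preprocess_sketch = hub.KerasLayer("https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3")
encoded = preprocess_sketch(tf.constant(["this is such an amazing movie!"]))
print(sorted(encoded.keys()))           # ['input_mask', 'input_type_ids', 'input_word_ids']
print(encoded["input_word_ids"].shape)  # (1, 128): token ids padded/truncated to 128 tokens
```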
###Code
HPARAMS.update({
# Model training hyperparameters for fine tuning and regularization.
"epochs": 3,
"initial-learning-rate": 3e-5,
"dropout": 0.1
})
epochs = HPARAMS['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
OPTIMIZER = optimization.create_optimizer(init_lr=HPARAMS['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
###Markdown
Build and compile a TensorFlow BERT sentiment classifier Next, you will define and compile your model by assembling pre-built TF-Hub components and tf.keras layers.
###Code
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
model = build_text_classifier(HPARAMS, OPTIMIZER)
# Visualize your fine-tuned BERT sentiment classifier.
tf.keras.utils.plot_model(model)
TEST_REVIEW = ['this is such an amazing movie!']
BERT_RAW_RESULT = model(tf.constant(TEST_REVIEW))
print(BERT_RAW_RESULT)
###Output
_____no_output_____
###Markdown
Train and evaluate your BERT sentiment classifier
###Code
HPARAMS.update({
# TODO: save your BERT sentiment classifier locally. Save it to './bert-sentiment-classifier-local'
"model-dir": "./bert-sentiment-classifier-local"
})
###Output
_____no_output_____
###Markdown
**Note:** training your model locally will take about 8-10 minutes.
###Code
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
# dataset_dir = download_data(data_url, data_dir)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
Based on the `History` object returned by `model.fit()`, you can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history = train_evaluate(HPARAMS)
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'r', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right');
###Output
_____no_output_____
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Based on the plots above, you should see model accuracy of around 78-80%, which exceeds your business requirements target of greater than 75% accuracy. Containerize your model code Now that you trained and evaluated your model locally in a Vertex Notebook as part of an experimentation workflow, your next step is to train and deploy your model on Google Cloud's Vertex AI platform. To train your BERT classifier on Google Cloud, you will package your Python training scripts and write a Dockerfile that specifies your ML model code, its dependencies, and execution instructions. You will build your custom container with Cloud Build, whose instructions are specified in `cloudbuild.yaml`, and publish your container to your Artifact Registry. This workflow gives you the opportunity to use the same container as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. You will walk through creating the following project structure for your ML model code:```|--/bert-sentiment-classifier |--/trainer |--__init__.py |--model.py |--task.py |--Dockerfile |--cloudbuild.yaml |--requirements.txt``` 1. Write a `model.py` training script First, you will tidy up your local TensorFlow model training code from above into a training script.
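The folders above need to exist before the `%%writefile` cells below can write into them. A minimal sketch (an assumption; your lab environment may already provide this skeleton) that creates the package layout:
```
import os

MODEL_DIR = "bert-sentiment-classifier"
os.makedirs(os.path.join(MODEL_DIR, "trainer"), exist_ok=True)
# An empty __init__.py makes `trainer` importable as a Python package inside the container.
with open(os.path.join(MODEL_DIR, "trainer", "__init__.py"), "a"):
    pass
```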
###Code
MODEL_DIR = "bert-sentiment-classifier"
%%writefile {MODEL_DIR}/trainer/model.py
import os
import shutil
import logging
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from official.nlp import optimization
DATA_URL = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
LOCAL_DATA_DIR = './tmp/data'
AUTOTUNE = tf.data.AUTOTUNE
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname='aclImdb_v1.tar.gz',
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
return dataset_dir
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
dataset_dir = download_data(data_url=DATA_URL,
local_data_dir=LOCAL_DATA_DIR)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(dataset_dir=dataset_dir,
hparams=hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
2. Write a `task.py` file as an entrypoint to your custom model container
###Code
%%writefile {MODEL_DIR}/trainer/task.py
import os
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten.
parser.add_argument('--model-dir', dest='model-dir',
default=os.environ['AIP_MODEL_DIR'], type=str, help='GCS URI for saving model artifacts.')
# Model training args.
parser.add_argument('--tfhub-bert-preprocessor', dest='tfhub-bert-preprocessor',
default='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', type=str, help='TF-Hub URL.')
parser.add_argument('--tfhub-bert-encoder', dest='tfhub-bert-encoder',
default='https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2', type=str, help='TF-Hub URL.')
parser.add_argument('--initial-learning-rate', dest='initial-learning-rate', default=3e-5, type=float, help='Learning rate for optimizer.')
parser.add_argument('--epochs', dest='epochs', default=3, type=int, help='Training iterations.')
parser.add_argument('--batch-size', dest='batch-size', default=32, type=int, help='Number of examples during each training iteration.')
parser.add_argument('--dropout', dest='dropout', default=0.1, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.')
parser.add_argument('--seed', dest='seed', default=42, type=int, help='Random number generator seed to prevent overlap between train and val sets.')
args = parser.parse_args()
hparams = args.__dict__
model.train_evaluate(hparams)
###Output
_____no_output_____
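If you want to sanity-check the entrypoint before building the container, a local smoke test is possible. This is a sketch under the assumptions that `trainer/__init__.py` exists and that the notebook environment already has the training dependencies installed; note that it runs a real (slow) one-epoch training pass:
```
import os
import subprocess

# Vertex Training normally injects AIP_MODEL_DIR; for a local run we set it by hand.
env = dict(os.environ, AIP_MODEL_DIR="/tmp/bert-model-smoke-test")
subprocess.run(
    ["python", "-m", "trainer.task", "--epochs", "1"],
    cwd="bert-sentiment-classifier",  # run from the package root so `trainer` is importable
    env=env,
    check=True,
)
```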
###Markdown
3. Write a `Dockerfile` for your custom model container Third, you will write a `Dockerfile` that contains instructions to package your model code in `bert-sentiment-classifier` and specifies the dependencies needed to execute it together in a Docker container.
###Code
%%writefile {MODEL_DIR}/Dockerfile
# Specifies base image and tag.
# https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
FROM us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-5:latest
# Sets the container working directory.
WORKDIR /root
# Copies the requirements.txt into the container to reduce network calls.
COPY requirements.txt .
# Installs additional packages.
RUN pip3 install -U -r requirements.txt
# b/203105209 Removes unneeded file from TF2.5 CPU image for python_module CustomJob training.
# Will be removed on subsequent public Vertex images.
RUN rm -rf /var/sitecustomize/sitecustomize.py
# Copies the trainer code to the docker image.
COPY . /trainer
# Sets the container working directory.
WORKDIR /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
###Output
_____no_output_____
###Markdown
4. Write a `requirements.txt` file to specify additional ML code dependencies These are additional dependencies for your model code that are not included in the pre-built Vertex TensorFlow images, such as TF-Hub, the TensorFlow AdamW optimizer, and TensorFlow Text, which are needed for importing and working with pre-trained TensorFlow BERT models.
###Code
%%writefile {MODEL_DIR}/requirements.txt
tf-models-official==2.5.0
tensorflow-text==2.5.0
tensorflow-hub==0.12.0
###Output
_____no_output_____
###Markdown
Use Cloud Build to build and submit your model container to Google Cloud Artifact Registry Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry).Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results.**Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for subsequent builds. 1. Create Artifact Registry for custom container images
###Code
ARTIFACT_REGISTRY="bert-sentiment-classifier"
# TODO: create a Docker Artifact Registry using the gcloud CLI.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/artifacts/repositories/create
!gcloud artifacts repositories create {ARTIFACT_REGISTRY} \
--repository-format=docker \
--location={REGION} \
--description="Artifact registry for ML custom training images for sentiment classification"
###Output
_____no_output_____
###Markdown
2. Create `cloudbuild.yaml` instructions
###Code
IMAGE_NAME="bert-sentiment-classifier"
IMAGE_TAG="latest"
IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}/{IMAGE_NAME}:{IMAGE_TAG}"
cloudbuild_yaml = f"""steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-t', '{IMAGE_URI}', '.' ]
images:
- '{IMAGE_URI}'"""
with open(f"{MODEL_DIR}/cloudbuild.yaml", "w") as fp:
fp.write(cloudbuild_yaml)
###Output
_____no_output_____
###Markdown
3. Build and submit your container image to Artifact Registry using Cloud Build **Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for faster subsequent builds.
###Code
# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/builds/submit
# Hint: make sure the config flag is pointed at {MODEL_DIR}/cloudbuild.yaml defined above and you include your model directory.
!gcloud builds submit {MODEL_DIR} --timeout=20m --config {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____
###Markdown
Define a pipeline using the KFP V2 SDK To address your business requirements and get your higher performing model into production to deliver value faster, you will define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to orchestrate the training and deployment of your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) below.
###Code
import datetime
# google_cloud_pipeline_components includes pre-built KFP components for interfacing with Vertex AI services.
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
DISPLAY_NAME = "bert-sentiment-{}".format(TIMESTAMP)
GCS_BASE_OUTPUT_DIR= f"{GCS_BUCKET}/{MODEL_DIR}-{TIMESTAMP}"
USER = "dougkelly" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(GCS_BUCKET, USER)
print(f"Model display name: {DISPLAY_NAME}")
print(f"GCS dir for model training artifacts: {GCS_BASE_OUTPUT_DIR}")
print(f"GCS dir for pipeline artifacts: {PIPELINE_ROOT}")
# Pre-built Vertex model serving container for deployment.
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-5:latest"
###Output
_____no_output_____
###Markdown
The pipeline consists of two components:* `CustomContainerTrainingJobRunOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.1.6/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.CustomContainerTrainingJobRunOp): trains your custom model container using Vertex Training. This is the same as configuring a Vertex Custom Container Training Job using the Vertex Python SDK you covered in the Vertex AI: Qwik Start lab.* `ModelDeployOp`: deploys a given model to a Vertex Prediction Endpoint for online predictions.
###Code
@dsl.pipeline(name="bert-sentiment-classification", pipeline_root=PIPELINE_ROOT)
def pipeline(
project: str = PROJECT_ID,
location: str = REGION,
staging_bucket: str = GCS_BUCKET,
display_name: str = DISPLAY_NAME,
container_uri: str = IMAGE_URI,
model_serving_container_uri: str = SERVING_IMAGE_URI,
gcs_base_output_dir: str = GCS_BASE_OUTPUT_DIR,
):
#TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using
# the remaining arguments in the pipeline constructor.
# Hint: Refer to the component documentation link above if needed as well.
model_train_evaluate_op = gcc_aip.CustomContainerTrainingJobRunOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
staging_bucket=staging_bucket,
# WorkerPool arguments.
replica_count=1,
machine_type="c2-standard-4",
# TODO: fill in the remaining arguments from the pipeline constructor.
display_name=display_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_uri,
base_output_dir=gcs_base_output_dir,
)
# Pre-built KFP ModelDeployOp component to create Endpoint and deploy model to it
# for online predictions.
model_deploy_op = gcc_aip.ModelDeployOp(
# Link to model training component through output model artifact.
model=model_train_evaluate_op.outputs["model"],
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
# WorkerPool arguments.
machine_type="n1-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile the pipeline
###Code
from kfp.v2 import compiler
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="bert-sentiment-classification.json"
)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines The `PipelineJob` is configured below and triggered through the `run()` method.Note: this pipeline run will take about 28 minutes to train and deploy your model. Follow along with the execution using the URL from the job output below.
###Code
vertex_pipelines_job = vertexai.pipeline_jobs.PipelineJob(
display_name="bert-sentiment-classification",
template_path="bert-sentiment-classification.json",
parameter_values={
"project": PROJECT_ID,
"location": REGION,
"staging_bucket": GCS_BUCKET,
"display_name": DISPLAY_NAME,
"container_uri": IMAGE_URI,
"model_serving_container_uri": SERVING_IMAGE_URI,
"gcs_base_output_dir": GCS_BASE_OUTPUT_DIR},
enable_caching=True,
)
vertex_pipelines_job.run()
###Output
_____no_output_____
###Markdown
Query deployed model on Vertex Endpoint for online predictions Finally, you will retrieve the `Endpoint` deployed by the pipeline and use it to query your model for online predictions.Configure the `Endpoint()` function below with the following parameters:* `endpoint_name`: A fully-qualified endpoint resource name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or "456" when project and location are initialized or passed.* `project_id`: GCP project.* `location`: GCP region.Call `predict()` to return a prediction for a test review.
###Code
# Retrieve your deployed Endpoint name from your pipeline.
ENDPOINT_NAME = vertexai.Endpoint.list()[0].name
#TODO: Generate online predictions using your Vertex Endpoint.
endpoint = vertexai.Endpoint(
endpoint_name=ENDPOINT_NAME,
project=PROJECT_ID,
location=REGION)
#TODO: write a movie review to test your model e.g. "The Dark Knight is the best Batman movie!"
test_review = "The Dark Knight is the best Batman movie!"
# TODO: use your Endpoint to return prediction for your test_review.
prediction = endpoint.predict([test_review])
print(prediction)
# Use a sigmoid function to compress your model output between 0 and 1. For binary classification, a threshold of 0.5 is typically applied
# so if the output is >= 0.5 then the predicted sentiment is "Positive" and < 0.5 is a "Negative" prediction.
print(tf.sigmoid(prediction.predictions[0]))
###Output
_____no_output_____
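If you want to label several reviews at once, the prediction call above can be wrapped in a small helper. This is a sketch that assumes, as in the cell above, that each element of `predictions` comes back as a single-element list containing the raw logit:
```
def classify_reviews(reviews, threshold=0.5):
    """Return (review, label, probability) tuples from the deployed endpoint."""
    response = endpoint.predict(list(reviews))
    results = []
    for review, logit in zip(reviews, response.predictions):
        prob = float(tf.sigmoid(logit[0]))
        label = "positive" if prob >= threshold else "negative"
        results.append((review, label, prob))
    return results

classify_reviews(["The Dark Knight is the best Batman movie!",
                  "A dull, predictable plot and wooden acting."])
```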
###Markdown
Next steps Congratulations! You walked through a full experimentation, containerization, and MLOps workflow on Vertex AI. First, you built, trained, and evaluated a BERT sentiment classifier model in a Vertex Notebook. You then packaged your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you defined and ran a Kubeflow Pipeline on Vertex Pipelines that trained and deployed your model container to a Vertex Endpoint that you queried for online predictions. License
###Code
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Building and deploying machine learning solutions with Vertex AI: Challenge Lab This Challenge Lab is recommended for students who have enrolled in the [**Building and deploying machine learning solutions with Vertex AI**](). You will be given a scenario and a set of tasks. Instead of following step-by-step instructions, you will use the skills learned from the labs in the quest to figure out how to complete the tasks on your own! An automated scoring system (shown on the Qwiklabs lab instructions page) will provide feedback on whether you have completed your tasks correctly.When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook.Are you ready for the challenge? Scenario You were recently hired as a Machine Learning Engineer at a startup movie review website. Your manager has tasked you with building a machine learning model to classify the sentiment of user movie reviews as positive or negative. These predictions will be used as an input in downstream movie rating systems and to surface top supportive and critical reviews on the movie website application. The challenge: your business requirements are that you have just 6 weeks to productionize a model that achieves greater than 75% accuracy to improve upon an existing bootstrapped solution. Furthermore, after doing some exploratory analysis in your startup's data warehouse, you found that you only have a small dataset of 50k text reviews to build a higher-performing solution.To build and deploy a high-performance machine learning model with limited data quickly, you will walk through training and deploying a custom TensorFlow BERT sentiment classifier for online predictions on Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai) platform. Vertex AI is Google Cloud's next generation machine learning development platform where you can leverage the latest ML pre-built components and AutoML to significantly enhance your development productivity, scale your workflow and decision making with your data, and accelerate time to value.First, you will progress through a typical experimentation workflow where you will build your model from pre-trained BERT components from TF-Hub and `tf.keras` classification layers to train and evaluate your model in a Vertex Notebook. You will then package your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you will define and run a Kubeflow Pipeline on Vertex Pipelines that trains and deploys your model to a Vertex Endpoint that you will query for online predictions. Learning objectives * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv).* Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry).* Define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to train and deploy your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines).* Query your model on a [**Vertex Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) using online predictions. Setup Define constants
###Code
# Add installed library dependencies to Python PATH variable.
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
# Retrieve and set PROJECT_ID and REGION environment variables.
# TODO: fill in PROJECT_ID.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
REGION = 'us-central1'
# Create a globally unique Google Cloud Storage bucket for artifact storage.
GCS_BUCKET = f"gs://{PROJECT_ID}-vertex-challenge-lab"
!gsutil mb -l $REGION $GCS_BUCKET
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import os
import shutil
import logging
# TensorFlow model building libraries.
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
# Re-create the AdamW optimizer used in the original BERT paper.
from official.nlp import optimization
# Libraries for data and plot model training metrics.
import pandas as pd
import matplotlib.pyplot as plt
# Import the Vertex AI Python SDK.
from google.cloud import aiplatform as vertexai
###Output
_____no_output_____
###Markdown
Initialize Vertex AI Python SDK Initialize the Vertex AI Python SDK with your GCP Project, Region, and Google Cloud Storage Bucket.
###Code
vertexai.init(project=PROJECT_ID, location=REGION, staging_bucket=GCS_BUCKET)
###Output
_____no_output_____
###Markdown
Build and train your model locally in a Vertex Notebook Note: this lab adapts and extends the official [TensorFlow BERT text classification tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert) to utilize Vertex AI services. See the tutorial for additional coverage on fine-tuning BERT models using TensorFlow. Lab dataset In this lab, you will use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment) that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Data ingestion and processing code has been provided for you below: Import dataset
###Code
DATA_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
LOCAL_DATA_DIR = "."
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname="aclImdb_v1.tar.gz",
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
train_dir = os.path.join(dataset_dir, "train")
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, "unsup")
shutil.rmtree(remove_dir)
return dataset_dir
DATASET_DIR = download_data(data_url=DATA_URL, local_data_dir=LOCAL_DATA_DIR)
# Create a dictionary to iteratively add data pipeline and model training hyperparameters.
HPARAMS = {
# Set a random sampling seed to prevent data leakage in data splits from files.
"seed": 42,
# Number of training and inference examples.
"batch-size": 32
}
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, HPARAMS)
AUTOTUNE = tf.data.AUTOTUNE
CLASS_NAMES = raw_train_ds.class_names
train_ds = raw_train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's print a few example reviews:
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review {i}: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({CLASS_NAMES[label]})')
###Output
_____no_output_____
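###Markdown
Before training, it can be useful to confirm the expected split sizes (roughly 20k train, 5k validation, and 25k test reviews). The quick check below is an optional addition and simply counts the batches in each split:
###Code
for split_name, split_ds in [("train", raw_train_ds), ("validation", raw_val_ds), ("test", raw_test_ds)]:
    n_batches = tf.data.experimental.cardinality(split_ds).numpy()
    print(f"{split_name}: {n_batches} batches of up to {HPARAMS['batch-size']} examples")
###Output
_____no_output_____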
###Markdown
Choose a pre-trained BERT model to fine-tune for higher accuracy [**Bidirectional Encoder Representations from Transformers (BERT)**](https://arxiv.org/abs/1810.04805v2) is a transformer-based text representation model pre-trained on massive datasets (3+ billion words) that can be fine-tuned for state-of-the-art results on many natural language processing (NLP) tasks. Since its release in 2018 by Google researchers, it has transformed the field of NLP research and come to form a core part of significant improvements to [Google Search](https://www.blog.google/products/search/search-language-understanding-bert). To meet your business requirements of achieving higher accuracy on a small dataset (20k training examples), you will use a technique called transfer learning to combine a pre-trained BERT encoder and classification layers to fine-tune a new, higher-performing model for binary sentiment classification. For this lab, you will use a smaller BERT model that trades some accuracy for faster training times.The Small BERT models are instances of the original BERT architecture with a smaller number L of layers (i.e., residual blocks) combined with a smaller hidden size H and a matching smaller number A of attention heads, as published by Iulia Turc, Ming-Wei Chang, Kenton Lee, Kristina Toutanova: ["Well-Read Students Learn Better: On the Importance of Pre-training Compact Models"](https://arxiv.org/abs/1908.08962), 2019.They have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality.The following preprocessing and encoder models in the TensorFlow 2 SavedModel format use the implementation of BERT from the [TensorFlow Models Github repository](https://github.com/tensorflow/models/tree/master/official/nlp/bert) with the trained weights released by the authors of Small BERT.
###Code
HPARAMS.update({
# TF Hub BERT modules.
"tfhub-bert-preprocessor": "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3",
"tfhub-bert-encoder": "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2",
})
###Output
_____no_output_____
###Markdown
Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. Since this text preprocessor is a TensorFlow model, it can be included in your model directly. For fine-tuning, you will use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101).For the learning rate `initial-learning-rate`, you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps `n_warmup_steps`. In line with the BERT paper, the initial learning rate is smaller for fine-tuning.
###Code
HPARAMS.update({
# Model training hyperparameters for fine tuning and regularization.
"epochs": 3,
"initial-learning-rate": 3e-5,
"dropout": 0.1
})
epochs = HPARAMS['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
OPTIMIZER = optimization.create_optimizer(init_lr=HPARAMS['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
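###Markdown
To make the schedule concrete: with 20k training reviews and a batch size of 32 there are 625 batches per epoch, so 3 epochs give 1,875 training steps and the linear warm-up covers the first 10% of them (187 steps). The cell below just prints the values computed above; it is an optional sanity check.
###Code
print(f"Steps per epoch:   {steps_per_epoch}")
print(f"Total train steps: {n_train_steps}")
print(f"Warm-up steps:     {n_warmup_steps} (~{n_warmup_steps / n_train_steps:.0%} of training)")
###Output
_____no_output_____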
###Markdown
Build and compile a TensorFlow BERT sentiment classifier Next, you will define and compile your model by assembling pre-built TF-Hub components and tf.keras layers.
###Code
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
model = build_text_classifier(HPARAMS, OPTIMIZER)
# Visualize your fine-tuned BERT sentiment classifier.
tf.keras.utils.plot_model(model)
TEST_REVIEW = ['this is such an amazing movie!']
BERT_RAW_RESULT = model(tf.constant(TEST_REVIEW))
print(BERT_RAW_RESULT)
###Output
_____no_output_____
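###Markdown
If you are curious about what the preprocessing layer feeds into the encoder, you can run it on the test review directly. For this TF-Hub preprocessor the output should be a dict of `input_word_ids`, `input_mask`, and `input_type_ids` tensors padded to a fixed sequence length (128 tokens by default); the cell below is an optional inspection step, not part of the lab tasks.
###Code
inspect_preprocessor = hub.KerasLayer(HPARAMS['tfhub-bert-preprocessor'])
for key, tensor in inspect_preprocessor(tf.constant(TEST_REVIEW)).items():
    print(f"{key}: shape={tensor.shape}, dtype={tensor.dtype}")
###Output
_____no_output_____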
###Markdown
Train and evaluate your BERT sentiment classifier
###Code
HPARAMS.update({
# TODO: Save your BERT sentiment classifier locally.
# Hint: Save it to './bert-sentiment-classifier-local'. Note the key name in model.save().
"model-dir": "./bert-sentiment-classifier-local"
})
###Output
_____no_output_____
###Markdown
**Note:** training your model locally will take about 8-10 minutes.
###Code
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
# dataset_dir = download_data(data_url, data_dir)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
Based on the `History` object returned by `model.fit()`, you can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history = train_evaluate(HPARAMS)
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# 'r' plots the training loss as a red line.
plt.plot(epochs, loss, 'r', label='Training loss')
# 'b' plots the validation loss as a blue line.
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right');
###Output
_____no_output_____
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Based on the plots above, you should see model accuracy of around 78-80%, which exceeds your business requirements target of greater than 75% accuracy. Containerize your model code Now that you trained and evaluated your model locally in a Vertex Notebook as part of an experimentation workflow, your next step is to train and deploy your model on Google Cloud's Vertex AI platform. To train your BERT classifier on Google Cloud, you will package your Python training scripts and write a Dockerfile that specifies your ML model code, its dependencies, and how to execute it. You will build your custom container with Cloud Build, whose build instructions are specified in `cloudbuild.yaml`, and publish your container to your Artifact Registry. This workflow gives you the opportunity to run the same container as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. You will walk through creating the following project structure for your ML model code:```|--/bert-sentiment-classifier |--/trainer |--__init__.py |--model.py |--task.py |--Dockerfile |--cloudbuild.yaml |--requirements.txt``` 1. Write a `model.py` training scriptFirst, you will tidy up your local TensorFlow model training code from above into a training script.
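###Code
# Optional addition: the lab environment usually provides this directory skeleton already.
# If you are running the notebook from scratch, create it so the %%writefile commands
# below have a destination, and add the empty __init__.py that makes `trainer` a package.
!mkdir -p bert-sentiment-classifier/trainer
!touch bert-sentiment-classifier/trainer/__init__.py
###Output
_____no_output_____
###Markdown
With the directory structure in place, write the training script: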
###Code
MODEL_DIR = "bert-sentiment-classifier"
%%writefile {MODEL_DIR}/trainer/model.py
import os
import shutil
import logging
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from official.nlp import optimization
DATA_URL = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
LOCAL_DATA_DIR = './tmp/data'
AUTOTUNE = tf.data.AUTOTUNE
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname='aclImdb_v1.tar.gz',
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
return dataset_dir
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
dataset_dir = download_data(data_url=DATA_URL,
local_data_dir=LOCAL_DATA_DIR)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(dataset_dir=dataset_dir,
hparams=hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
2. Write a `task.py` file as an entrypoint to your custom model container
###Code
%%writefile {MODEL_DIR}/trainer/task.py
import os
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten.
parser.add_argument('--model-dir', dest='model-dir',
default=os.environ['AIP_MODEL_DIR'], type=str, help='GCS URI for saving model artifacts.')
# Model training args.
parser.add_argument('--tfhub-bert-preprocessor', dest='tfhub-bert-preprocessor',
default='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', type=str, help='TF-Hub URL.')
parser.add_argument('--tfhub-bert-encoder', dest='tfhub-bert-encoder',
default='https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2', type=str, help='TF-Hub URL.')
parser.add_argument('--initial-learning-rate', dest='initial-learning-rate', default=3e-5, type=float, help='Learning rate for optimizer.')
parser.add_argument('--epochs', dest='epochs', default=3, type=int, help='Training iterations.')
parser.add_argument('--batch-size', dest='batch-size', default=32, type=int, help='Number of examples during each training iteration.')
parser.add_argument('--dropout', dest='dropout', default=0.1, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.')
parser.add_argument('--seed', dest='seed', default=42, type=int, help='Random number generator seed to prevent overlap between train and val sets.')
args = parser.parse_args()
hparams = args.__dict__
model.train_evaluate(hparams)
###Output
_____no_output_____
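###Markdown
A detail worth noting in `task.py`: the `dest` values deliberately keep their hyphens so that `args.__dict__` produces keys such as `'model-dir'` and `'batch-size'`, matching the keys that `model.py` looks up in its `hparams` dict. The standalone snippet below is only an illustration of that behaviour and is not part of the lab code.
###Code
import argparse

demo_parser = argparse.ArgumentParser()
demo_parser.add_argument('--model-dir', dest='model-dir', default='gs://example-bucket/model', type=str)
demo_parser.add_argument('--batch-size', dest='batch-size', default=32, type=int)
demo_hparams = demo_parser.parse_args([]).__dict__
print(demo_hparams)  # {'model-dir': 'gs://example-bucket/model', 'batch-size': 32}
###Output
_____no_output_____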
###Markdown
3. Write a `Dockerfile` for your custom model container Third, you will write a `Dockerfile` that contains instructions to package your model code in `bert-sentiment-classifier` as well as specifies your model code's dependencies needed for execution together in a Docker container.
###Code
%%writefile {MODEL_DIR}/Dockerfile
# Specifies base image and tag.
# https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
FROM us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-6:latest
# Sets the container working directory.
WORKDIR /root
# Copies the requirements.txt into the container to reduce network calls.
COPY requirements.txt .
# Installs additional packages.
RUN pip3 install -U -r requirements.txt
# b/203105209 Removes unneeded file from TF2.5 CPU image for python_module CustomJob training.
# Will be removed on subsequent public Vertex images.
RUN rm -rf /var/sitecustomize/sitecustomize.py
# Copies the trainer code to the docker image.
COPY . /trainer
# Sets the container working directory.
WORKDIR /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
###Output
_____no_output_____
###Markdown
4. Write a `requirements.txt` file to specify additional ML code dependencies These are dependencies for your model code that are not included in the pre-built Vertex TensorFlow images, such as TF-Hub, the TensorFlow AdamW optimizer, and TensorFlow Text, which are needed for importing and working with pre-trained TensorFlow BERT models.
###Code
%%writefile {MODEL_DIR}/requirements.txt
tf-models-official==2.6.0
tensorflow-text==2.6.0
tensorflow-hub==0.12.0
###Output
_____no_output_____
###Markdown
Use Cloud Build to build and submit your model container to Google Cloud Artifact Registry Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry).Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results.**Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for subsequent builds. 1. Create Artifact Registry for custom container images
###Code
ARTIFACT_REGISTRY="bert-sentiment-classifier"
# TODO: create a Docker Artifact Registry using the gcloud CLI.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/artifacts/repositories/create
!gcloud artifacts repositories create {ARTIFACT_REGISTRY} \
--repository-format=docker \
--location={REGION} \
--description="Artifact registry for ML custom training images for sentiment classification"
###Output
_____no_output_____
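###Markdown
Optionally, you can confirm the repository was created before pushing any images to it (this verification step is an addition, not a lab requirement):
###Code
!gcloud artifacts repositories describe {ARTIFACT_REGISTRY} --location={REGION}
###Output
_____no_output_____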
###Markdown
2. Create `cloudbuild.yaml` instructions
###Code
IMAGE_NAME="bert-sentiment-classifier"
IMAGE_TAG="latest"
IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}/{IMAGE_NAME}:{IMAGE_TAG}"
cloudbuild_yaml = f"""steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-t', '{IMAGE_URI}', '.' ]
images:
- '{IMAGE_URI}'"""
with open(f"{MODEL_DIR}/cloudbuild.yaml", "w") as fp:
fp.write(cloudbuild_yaml)
###Output
_____no_output_____
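###Markdown
You can print the generated file to double-check the image URI before submitting the build (optional):
###Code
!cat {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____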
###Markdown
3. Build and submit your container image to Artifact Registry using Cloud Build **Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for faster subsequent builds.
###Code
# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/builds/submit
# Hint: make sure the config flag is pointed at {MODEL_DIR}/cloudbuild.yaml defined above and you include your model directory.
!gcloud builds submit {MODEL_DIR} --timeout=20m --config {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____
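###Markdown
Once the build finishes, you can verify that the image landed in your Artifact Registry repository. This check is an optional addition:
###Code
!gcloud artifacts docker images list {REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}
###Output
_____no_output_____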
###Markdown
Define a pipeline using the KFP V2 SDK To address your business requirements and get your higher performing model into production to deliver value faster, you will define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to orchestrate the training and deployment of your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) below.
###Code
import datetime
# google_cloud_pipeline_components includes pre-built KFP components for interfacing with Vertex AI services.
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
DISPLAY_NAME = "bert-sentiment-{}".format(TIMESTAMP)
GCS_BASE_OUTPUT_DIR= f"{GCS_BUCKET}/{MODEL_DIR}-{TIMESTAMP}"
USER = "dougkelly" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(GCS_BUCKET, USER)
print(f"Model display name: {DISPLAY_NAME}")
print(f"GCS dir for model training artifacts: {GCS_BASE_OUTPUT_DIR}")
print(f"GCS dir for pipeline artifacts: {PIPELINE_ROOT}")
# Pre-built Vertex model serving container for deployment.
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest"
###Output
_____no_output_____
###Markdown
The pipeline consists of three components:* `CustomContainerTrainingJobRunOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.htmlgoogle_cloud_pipeline_components.aiplatform.CustomContainerTrainingJobRunOp): trains your custom model container using Vertex Training. This is the same as configuring a Vertex Custom Container Training Job using the Vertex Python SDK you covered in the Vertex AI: Qwik Start lab.* `EndpointCreateOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.htmlgoogle_cloud_pipeline_components.aiplatform.EndpointCreateOp): Creates a Google Cloud Vertex Endpoint resource that maps physical machine resources with your model to enable it to serve online predictions. Online predictions have low latency requirements; providing resources to the model in advance reduces latency. * `ModelDeployOp`[(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.htmlgoogle_cloud_pipeline_components.aiplatform.ModelDeployOp): deploys your model to a Vertex Prediction Endpoint for online predictions.
###Code
@dsl.pipeline(name="bert-sentiment-classification", pipeline_root=PIPELINE_ROOT)
def pipeline(
project: str = PROJECT_ID,
location: str = REGION,
staging_bucket: str = GCS_BUCKET,
display_name: str = DISPLAY_NAME,
container_uri: str = IMAGE_URI,
model_serving_container_image_uri: str = SERVING_IMAGE_URI,
base_output_dir: str = GCS_BASE_OUTPUT_DIR,
):
#TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using
# the remaining arguments in the pipeline constructor.
# Hint: Refer to the component documentation link above if needed as well.
model_train_evaluate_op = gcc_aip.CustomContainerTrainingJobRunOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
staging_bucket=staging_bucket,
# WorkerPool arguments.
replica_count=1,
machine_type="n1-standard-4",
# TODO: fill in the remaining arguments from the pipeline constructor.
display_name=display_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_image_uri,
base_output_dir=base_output_dir,
)
# Create a Vertex Endpoint resource in parallel with model training.
endpoint_create_op = gcc_aip.EndpointCreateOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
display_name=display_name
)
# Deploy your model to the created Endpoint resource for online predictions.
model_deploy_op = gcc_aip.ModelDeployOp(
# Link to model training component through output model artifact.
model=model_train_evaluate_op.outputs["model"],
# Link to the created Endpoint.
endpoint=endpoint_create_op.outputs["endpoint"],
# Define prediction request routing. {"0": 100} indicates 100% of traffic
# to the ID of the current model being deployed.
traffic_split={"0": 100},
# WorkerPool arguments.
dedicated_resources_machine_type="n1-standard-4",
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=2
)
###Output
_____no_output_____
###Markdown
Compile the pipeline
###Code
from kfp.v2 import compiler
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="bert-sentiment-classification.json"
)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines The `PipelineJob` is configured below and triggered through the `run()` method.Note: This pipeline run will take around 30-40 minutes to train and deploy your model. Follow along with the execution using the URL from the job output below.
###Code
vertex_pipelines_job = vertexai.pipeline_jobs.PipelineJob(
display_name="bert-sentiment-classification",
template_path="bert-sentiment-classification.json",
parameter_values={
"project": PROJECT_ID,
"location": REGION,
"staging_bucket": GCS_BUCKET,
"display_name": DISPLAY_NAME,
"container_uri": IMAGE_URI,
"model_serving_container_image_uri": SERVING_IMAGE_URI,
"base_output_dir": GCS_BASE_OUTPUT_DIR},
enable_caching=True,
)
vertex_pipelines_job.run()
###Output
_____no_output_____
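###Markdown
After the pipeline completes, the training component uploads a `Model` resource to Vertex AI. As an optional check (not required by the lab), you can list the models in your project and confirm that the newly trained sentiment classifier appears:
###Code
for uploaded_model in vertexai.Model.list():
    print(uploaded_model.display_name, uploaded_model.resource_name)
###Output
_____no_output_____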
###Markdown
Query deployed model on Vertex Endpoint for online predictions Finally, you will retrieve the `Endpoint` deployed by the pipeline and use it to query your model for online predictions.Configure the `Endpoint()` function below with the following parameters:* `endpoint_name`: A fully-qualified endpoint resource name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or "456" when project and location are initialized or passed.* `project`: GCP project.* `location`: GCP region.Call `predict()` to return a prediction for a test review.
###Code
# Retrieve your deployed Endpoint name from your pipeline.
ENDPOINT_NAME = vertexai.Endpoint.list()[0].name
#TODO: Generate online predictions using your Vertex Endpoint.
endpoint = vertexai.Endpoint(
endpoint_name=ENDPOINT_NAME,
project=PROJECT_ID,
location=REGION)
#TODO: write a movie review to test your model e.g. "The Dark Knight is the best Batman movie!"
test_review = "The Dark Knight is the best Batman movie!"
# TODO: use your Endpoint to return prediction for your test_review.
prediction = endpoint.predict([test_review])
print(prediction)
# Use a sigmoid function to compress your model output between 0 and 1. For binary classification, a threshold of 0.5 is typically applied
# so if the output is >= 0.5 then the predicted sentiment is "Positive" and < 0.5 is a "Negative" prediction.
print(tf.sigmoid(prediction.predictions[0]))
###Output
_____no_output_____
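###Markdown
You can also send several reviews in a single request, since `predict()` accepts a list of instances. The sketch below reuses the thresholding logic from the comments above; the example reviews are illustrative additions.
###Code
more_reviews = [
    "The Dark Knight is the best Batman movie!",
    "This was a waste of two hours.",
]
batch_prediction = endpoint.predict(more_reviews)
for review, raw_logit in zip(more_reviews, batch_prediction.predictions):
    probability = float(tf.sigmoid(raw_logit).numpy().squeeze())
    sentiment = "Positive" if probability >= 0.5 else "Negative"
    print(f"{sentiment} ({probability:.2f}): {review}")
###Output
_____no_output_____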
###Markdown
Next steps Congratulations! You walked through a full experimentation, containerization, and MLOps workflow on Vertex AI. First, you built, trained, and evaluated a BERT sentiment classifier model in a Vertex Notebook. You then packaged your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you defined and ran a Kubeflow Pipeline on Vertex Pipelines that trained and deployed your model container to a Vertex Endpoint that you queried for online predictions. License
###Code
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Building and deploying machine learning solutions with Vertex AI: Challenge Lab This Challenge Lab is recommended for students who have enrolled in the [**Building and deploying machine learning solutions with Vertex AI**](). You will be given a scenario and a set of tasks. Instead of following step-by-step instructions, you will use the skills learned from the labs in the quest to figure out how to complete the tasks on your own! An automated scoring system (shown on the Qwiklabs lab instructions page) will provide feedback on whether you have completed your tasks correctly.When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook.Are you ready for the challenge? Scenario You were recently hired as a Machine Learning Engineer at a startup movie review website. Your manager has tasked you with building a machine learning model to classify the sentiment of user movie reviews as positive or negative. These predictions will be used as an input in downstream movie rating systems and to surface top supportive and critical reviews on the movie website application. The challenge: your business requirements are that you have just 6 weeks to productionize a model that achieves greater than 75% accuracy to improve upon an existing bootstrapped solution. Furthermore, after doing some exploratory analysis in your startup's data warehouse, you found that you only have a small dataset of 50k text reviews to build a higher-performing solution.To build and deploy a high-performance machine learning model with limited data quickly, you will walk through training and deploying a custom TensorFlow BERT sentiment classifier for online predictions on Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai) platform. Vertex AI is Google Cloud's next generation machine learning development platform where you can leverage the latest ML pre-built components and AutoML to significantly enhance your development productivity, scale your workflow and decision making with your data, and accelerate time to value.First, you will progress through a typical experimentation workflow where you will build your model from pre-trained BERT components from TF-Hub and `tf.keras` classification layers to train and evaluate your model in a Vertex Notebook. You will then package your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you will define and run a Kubeflow Pipeline on Vertex Pipelines that trains and deploys your model to a Vertex Endpoint that you will query for online predictions. Learning objectives * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv).* Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry).* Define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to train and deploy your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines).* Query your model on a [**Vertex Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) using online predictions. Setup Define constants
###Code
# Add installed library dependencies to Python PATH variable.
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
# Retrieve and set PROJECT_ID and REGION environment variables.
# TODO: fill in PROJECT_ID.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
REGION = 'us-central1'
# Create a globally unique Google Cloud Storage bucket for artifact storage.
GCS_BUCKET = f"gs://{PROJECT_ID}-vertex-challenge-lab"
!gsutil mb -l $REGION $GCS_BUCKET
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import os
import shutil
import logging
# TensorFlow model building libraries.
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
# Re-create the AdamW optimizer used in the original BERT paper.
from official.nlp import optimization
# Libraries for data and plot model training metrics.
import pandas as pd
import matplotlib.pyplot as plt
# Import the Vertex AI Python SDK.
from google.cloud import aiplatform as vertexai
###Output
_____no_output_____
###Markdown
Initialize Vertex AI Python SDK Initialize the Vertex AI Python SDK with your GCP Project, Region, and Google Cloud Storage Bucket.
###Code
vertexai.init(project=PROJECT_ID, location=REGION, staging_bucket=GCS_BUCKET)
###Output
_____no_output_____
###Markdown
Build and train your model locally in a Vertex Notebook Note: this lab adapts and extends the official [TensorFlow BERT text classification tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert) to utilize Vertex AI services. See the tutorial for additional coverage on fine-tuning BERT models using TensorFlow. Lab dataset In this lab, you will use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment) that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Data ingestion and processing code has been provided for you below: Import dataset
###Code
DATA_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
LOCAL_DATA_DIR = "."
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname="aclImdb_v1.tar.gz",
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
train_dir = os.path.join(dataset_dir, "train")
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, "unsup")
shutil.rmtree(remove_dir)
return dataset_dir
DATASET_DIR = download_data(data_url=DATA_URL, local_data_dir=LOCAL_DATA_DIR)
# Create a dictionary to iteratively add data pipeline and model training hyperparameters.
HPARAMS = {
# Set a random sampling seed to prevent data leakage in data splits from files.
"seed": 42,
# Number of training and inference examples.
"batch-size": 32
}
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, HPARAMS)
AUTOTUNE = tf.data.AUTOTUNE
CLASS_NAMES = raw_train_ds.class_names
train_ds = raw_train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's print a few example reviews:
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review {i}: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({CLASS_NAMES[label]})')
###Output
_____no_output_____
###Markdown
Choose a pre-trained BERT model to fine-tune for higher accuracy [**Bidirectional Encoder Representations from Transformers (BERT)**](https://arxiv.org/abs/1810.04805v2) is a transformer-based text representation model pre-trained on massive datasets (3+ billion words) that can be fine-tuned for state-of-the-art results on many natural language processing (NLP) tasks. Since its release in 2018 by Google researchers, it has transformed the field of NLP research and come to form a core part of significant improvements to [Google Search](https://www.blog.google/products/search/search-language-understanding-bert). To meet your business requirements of achieving higher accuracy on a small dataset (20k training examples), you will use a technique called transfer learning to combine a pre-trained BERT encoder and classification layers to fine-tune a new, higher-performing model for binary sentiment classification. For this lab, you will use a smaller BERT model that trades some accuracy for faster training times.The Small BERT models are instances of the original BERT architecture with a smaller number L of layers (i.e., residual blocks) combined with a smaller hidden size H and a matching smaller number A of attention heads, as published by Iulia Turc, Ming-Wei Chang, Kenton Lee, Kristina Toutanova: ["Well-Read Students Learn Better: On the Importance of Pre-training Compact Models"](https://arxiv.org/abs/1908.08962), 2019.They have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality.The following preprocessing and encoder models in the TensorFlow 2 SavedModel format use the implementation of BERT from the [TensorFlow Models Github repository](https://github.com/tensorflow/models/tree/master/official/nlp/bert) with the trained weights released by the authors of Small BERT.
###Code
HPARAMS.update({
# TF Hub BERT modules.
"tfhub-bert-preprocessor": "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3",
"tfhub-bert-encoder": "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2",
})
###Output
_____no_output_____
###Markdown
Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. Since this text preprocessor is a TensorFlow model, it can be included in your model directly. For fine-tuning, you will use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101).For the learning rate `initial-learning-rate`, you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps `n_warmup_steps`. In line with the BERT paper, the initial learning rate is smaller for fine-tuning.
###Code
HPARAMS.update({
# Model training hyperparameters for fine tuning and regularization.
"epochs": 3,
"initial-learning-rate": 3e-5,
"dropout": 0.1
})
epochs = HPARAMS['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
OPTIMIZER = optimization.create_optimizer(init_lr=HPARAMS['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
###Markdown
Build and compile a TensorFlow BERT sentiment classifier Next, you will define and compile your model by assembling pre-built TF-Hub components and tf.keras layers.
###Code
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
model = build_text_classifier(HPARAMS, OPTIMIZER)
# Visualize your fine-tuned BERT sentiment classifier.
tf.keras.utils.plot_model(model)
TEST_REVIEW = ['this is such an amazing movie!']
BERT_RAW_RESULT = model(tf.constant(TEST_REVIEW))
print(BERT_RAW_RESULT)
###Output
_____no_output_____
###Markdown
Train and evaluate your BERT sentiment classifier
###Code
HPARAMS.update({
# TODO: Save your BERT sentiment classifier locally.
# Hint: Save it to './bert-sentiment-classifier-local'. Note the key name in model.save().
"model-dir": "./bert-sentiment-classifier-local"
})
###Output
_____no_output_____
###Markdown
**Note:** training your model locally will take about 8-10 minutes.
###Code
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
# dataset_dir = download_data(data_url, data_dir)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
Based on the `History` object returned by `model.fit()`, you can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history = train_evaluate(HPARAMS)
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# 'r' plots the training loss as a red line.
plt.plot(epochs, loss, 'r', label='Training loss')
# 'b' plots the validation loss as a blue line.
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right');
###Output
_____no_output_____
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Based on the plots above, you should see model accuracy of around 78-80%, which exceeds your business requirements target of greater than 75% accuracy. Containerize your model code Now that you trained and evaluated your model locally in a Vertex Notebook as part of an experimentation workflow, your next step is to train and deploy your model on Google Cloud's Vertex AI platform. To train your BERT classifier on Google Cloud, you will package your Python training scripts and write a Dockerfile that specifies your ML model code, its dependencies, and how to execute it. You will build your custom container with Cloud Build, whose build instructions are specified in `cloudbuild.yaml`, and publish your container to your Artifact Registry. This workflow gives you the opportunity to run the same container as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. You will walk through creating the following project structure for your ML model code:```|--/bert-sentiment-classifier |--/trainer |--__init__.py |--model.py |--task.py |--Dockerfile |--cloudbuild.yaml |--requirements.txt``` 1. Write a `model.py` training scriptFirst, you will tidy up your local TensorFlow model training code from above into a training script.
###Code
MODEL_DIR = "bert-sentiment-classifier"
%%writefile {MODEL_DIR}/trainer/model.py
import os
import shutil
import logging
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from official.nlp import optimization
DATA_URL = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
LOCAL_DATA_DIR = './tmp/data'
AUTOTUNE = tf.data.AUTOTUNE
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname='aclImdb_v1.tar.gz',
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
return dataset_dir
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
dataset_dir = download_data(data_url=DATA_URL,
local_data_dir=LOCAL_DATA_DIR)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(dataset_dir=dataset_dir,
hparams=hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
2. Write a `task.py` file as an entrypoint to your custom model container
###Code
%%writefile {MODEL_DIR}/trainer/task.py
import os
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten.
parser.add_argument('--model-dir', dest='model-dir',
default=os.environ['AIP_MODEL_DIR'], type=str, help='GCS URI for saving model artifacts.')
# Model training args.
parser.add_argument('--tfhub-bert-preprocessor', dest='tfhub-bert-preprocessor',
default='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', type=str, help='TF-Hub URL.')
parser.add_argument('--tfhub-bert-encoder', dest='tfhub-bert-encoder',
default='https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2', type=str, help='TF-Hub URL.')
parser.add_argument('--initial-learning-rate', dest='initial-learning-rate', default=3e-5, type=float, help='Learning rate for optimizer.')
parser.add_argument('--epochs', dest='epochs', default=3, type=int, help='Training iterations.')
parser.add_argument('--batch-size', dest='batch-size', default=32, type=int, help='Number of examples during each training iteration.')
parser.add_argument('--dropout', dest='dropout', default=0.1, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.')
parser.add_argument('--seed', dest='seed', default=42, type=int, help='Random number generator seed to prevent overlap between train and val sets.')
args = parser.parse_args()
hparams = args.__dict__
model.train_evaluate(hparams)
###Output
_____no_output_____
###Markdown
3. Write a `Dockerfile` for your custom model container Third, you will write a `Dockerfile` that contains instructions to package your model code in `bert-sentiment-classifier` and specifies the dependencies your model code needs to execute together in a Docker container.
###Code
%%writefile {MODEL_DIR}/Dockerfile
# Specifies base image and tag.
# https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
FROM us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-6:latest
# Sets the container working directory.
WORKDIR /root
# Copies the requirements.txt into the container to reduce network calls.
COPY requirements.txt .
# Installs additional packages.
RUN pip3 install -U -r requirements.txt
# b/203105209 Removes unneeded file from TF2.5 CPU image for python_module CustomJob training.
# Will be removed on subsequent public Vertex images.
RUN rm -rf /var/sitecustomize/sitecustomize.py
# Copies the trainer code to the docker image.
COPY . /trainer
# Sets the container working directory.
WORKDIR /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
###Output
_____no_output_____
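Optionally, if Docker is available on your notebook VM, you can sanity-check that the image builds locally before handing it to Cloud Build. This is a hedged convenience step, not a lab requirement; Cloud Build performs the build remotely either way.
```python
# Optional local sanity check (assumes Docker is installed on the notebook VM).
# Builds the image from the Dockerfile written above without pushing it anywhere.
!docker build -t bert-sentiment-classifier-local-check {MODEL_DIR}
```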
###Markdown
4. Write a `requirements.txt` file to specify additional ML code dependencies These are dependencies for your model code that are not included in the pre-built Vertex TensorFlow images, such as TF-Hub, the TensorFlow AdamW optimizer, and TensorFlow Text, which are needed for importing and working with pre-trained TensorFlow BERT models.
###Code
%%writefile {MODEL_DIR}/requirements.txt
tf-models-official==2.6.0
tensorflow-text==2.6.0
tensorflow-hub==0.12.0
###Output
_____no_output_____
###Markdown
Use Cloud Build to build and submit your model container to Google Cloud Artifact Registry Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry). Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results. **Note**: the initial build and submit step will take about 16 minutes, but Cloud Build is able to take advantage of caching for subsequent builds. 1. Create Artifact Registry for custom container images
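Cloud Build pushes the finished image to Artifact Registry on your behalf, so local Docker credentials are not strictly required. If you also want to pull or push images for this registry directly from the notebook VM, one hedged option is to register the regional Artifact Registry host with your local Docker credential helper:
```python
# Optional: only needed if you pull/push images to this registry from the notebook VM directly.
# Registers the regional Artifact Registry host with the local Docker credential helper.
!gcloud auth configure-docker {REGION}-docker.pkg.dev --quiet
```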
###Code
ARTIFACT_REGISTRY="bert-sentiment-classifier"
# TODO: create a Docker Artifact Registry using the gcloud CLI.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/artifacts/repositories/create
!gcloud artifacts repositories create {ARTIFACT_REGISTRY} \
--repository-format=docker \
--location={REGION} \
--description="Artifact registry for ML custom training images for sentiment classification"
###Output
_____no_output_____
###Markdown
2. Create `cloudbuild.yaml` instructions
###Code
IMAGE_NAME="bert-sentiment-classifier"
IMAGE_TAG="latest"
IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}/{IMAGE_NAME}:{IMAGE_TAG}"
cloudbuild_yaml = f"""steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-t', '{IMAGE_URI}', '.' ]
images:
- '{IMAGE_URI}'"""
with open(f"{MODEL_DIR}/cloudbuild.yaml", "w") as fp:
fp.write(cloudbuild_yaml)
###Output
_____no_output_____
###Markdown
3. Build and submit your container image to Artifact Registry using Cloud Build **Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for faster subsequent builds.
###Code
# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/builds/submit
# Hint: make sure the config flag is pointed at {MODEL_DIR}/cloudbuild.yaml defined above and you include your model directory.
!gcloud builds submit {MODEL_DIR} --timeout=20m --config {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____
###Markdown
Define a pipeline using the KFP V2 SDK To address your business requirements and get your higher performing model into production to deliver value faster, you will define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to orchestrate the training and deployment of your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) below.
###Code
import datetime
# google_cloud_pipeline_components includes pre-built KFP components for interfacing with Vertex AI services.
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
DISPLAY_NAME = "bert-sentiment-{}".format(TIMESTAMP)
GCS_BASE_OUTPUT_DIR= f"{GCS_BUCKET}/{MODEL_DIR}-{TIMESTAMP}"
USER = "dougkelly" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(GCS_BUCKET, USER)
print(f"Model display name: {DISPLAY_NAME}")
print(f"GCS dir for model training artifacts: {GCS_BASE_OUTPUT_DIR}")
print(f"GCS dir for pipeline artifacts: {PIPELINE_ROOT}")
# Pre-built Vertex model serving container for deployment.
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest"
###Output
_____no_output_____
###Markdown
Building and deploying machine learning solutions with Vertex AI: Challenge Lab This Challenge Lab is recommended for students who have enrolled in the [**Building and deploying machine learning solutions with Vertex AI**](). You will be given a scenario and a set of tasks. Instead of following step-by-step instructions, you will use the skills learned from the labs in the quest to figure out how to complete the tasks on your own! An automated scoring system (shown on the Qwiklabs lab instructions page) will provide feedback on whether you have completed your tasks correctly. When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook. Are you ready for the challenge? Scenario You were recently hired as a Machine Learning Engineer at a startup movie review website. Your manager has tasked you with building a machine learning model to classify the sentiment of user movie reviews as positive or negative. These predictions will be used as an input in downstream movie rating systems and to surface top supportive and critical reviews on the movie website application. The challenge: your business requirements are that you have just 6 weeks to productionize a model that achieves greater than 75% accuracy to improve upon an existing bootstrapped solution. Furthermore, after doing some exploratory analysis in your startup's data warehouse, you found that you only have a small dataset of 50k text reviews to build a higher performing solution. To build and deploy a high-performance machine learning model with limited data quickly, you will walk through training and deploying a custom TensorFlow BERT sentiment classifier for online predictions on Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai) platform. Vertex AI is Google Cloud's next generation machine learning development platform where you can leverage the latest ML pre-built components and AutoML to significantly enhance your development productivity, scale your workflow and decision making with your data, and accelerate time to value. First, you will progress through a typical experimentation workflow where you will build your model from pre-trained BERT components from TF-Hub and `tf.keras` classification layers to train and evaluate your model in a Vertex Notebook. You will then package your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you will define and run a Kubeflow Pipeline on Vertex Pipelines that trains and deploys your model to a Vertex Endpoint that you will query for online predictions. Learning objectives * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv). * Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry). * Define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to train and deploy your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines). * Query your model on a [**Vertex Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) using online predictions. Setup Define constants
###Code
# Add installed library dependencies to Python PATH variable.
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
# Retrieve and set PROJECT_ID and REGION environment variables.
# TODO: fill in PROJECT_ID.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
REGION = 'us-central1'
# Create a globally unique Google Cloud Storage bucket for artifact storage.
GCS_BUCKET = f"gs://{PROJECT_ID}-vertex-challenge-lab"
!gsutil mb -l $REGION $GCS_BUCKET
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import os
import shutil
import logging
# TensorFlow model building libraries.
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
# Re-create the AdamW optimizer used in the original BERT paper.
from official.nlp import optimization
# Libraries for data and plot model training metrics.
import pandas as pd
import matplotlib.pyplot as plt
# Import the Vertex AI Python SDK.
from google.cloud import aiplatform as vertexai
###Output
_____no_output_____
###Markdown
Initialize Vertex AI Python SDK Initialize the Vertex AI Python SDK with your GCP Project, Region, and Google Cloud Storage Bucket.
###Code
vertexai.init(project=PROJECT_ID, location=REGION, staging_bucket=GCS_BUCKET)
###Output
_____no_output_____
###Markdown
Build and train your model locally in a Vertex Notebook Note: this lab adapts and extends the official [TensorFlow BERT text classification tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert) to utilize Vertex AI services. See the tutorial for additional coverage on fine-tuning BERT models using TensorFlow. Lab dataset In this lab, you will use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment) that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Data ingestion and processing code has been provided for you below: Import dataset
###Code
DATA_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
LOCAL_DATA_DIR = "."
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname="aclImdb_v1.tar.gz",
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
train_dir = os.path.join(dataset_dir, "train")
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, "unsup")
shutil.rmtree(remove_dir)
return dataset_dir
DATASET_DIR = download_data(data_url=DATA_URL, local_data_dir=LOCAL_DATA_DIR)
# Create a dictionary to iteratively add data pipeline and model training hyperparameters.
HPARAMS = {
# Set a random sampling seed to prevent data leakage in data splits from files.
"seed": 42,
# Number of training and inference examples.
"batch-size": 32
}
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, HPARAMS)
AUTOTUNE = tf.data.AUTOTUNE
CLASS_NAMES = raw_train_ds.class_names
train_ds = raw_train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's print a few example reviews:
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review {i}: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({CLASS_NAMES[label]})')
###Output
_____no_output_____
###Markdown
Choose a pre-trained BERT model to fine-tune for higher accuracy [**Bidirectional Encoder Representations from Transformers (BERT)**](https://arxiv.org/abs/1810.04805v2) is a transformer-based text representation model pre-trained on massive datasets (3+ billion words) that can be fine-tuned for state-of-the-art results on many natural language processing (NLP) tasks. Since its release in 2018 by Google researchers, it has transformed the field of NLP research and come to form a core part of significant improvements to [Google Search](https://www.blog.google/products/search/search-language-understanding-bert). To meet your business requirements of achieving higher accuracy on a small dataset (20k training examples), you will use a technique called transfer learning to combine a pre-trained BERT encoder and classification layers to fine-tune a new, higher-performing model for binary sentiment classification. For this lab, you will use a smaller BERT model that trades some accuracy for faster training times. The Small BERT models are instances of the original BERT architecture with a smaller number L of layers (i.e., residual blocks) combined with a smaller hidden size H and a matching smaller number A of attention heads, as published by Iulia Turc, Ming-Wei Chang, Kenton Lee, Kristina Toutanova: ["Well-Read Students Learn Better: On the Importance of Pre-training Compact Models"](https://arxiv.org/abs/1908.08962), 2019. They have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size, and quality. The following preprocessing and encoder models in the TensorFlow 2 SavedModel format use the implementation of BERT from the [TensorFlow Models Github repository](https://github.com/tensorflow/models/tree/master/official/nlp/bert) with the trained weights released by the authors of Small BERT.
###Code
HPARAMS.update({
# TF Hub BERT modules.
"tfhub-bert-preprocessor": "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3",
"tfhub-bert-encoder": "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2",
})
###Output
_____no_output_____
###Markdown
Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. Since this text preprocessor is a TensorFlow model, it can be included in your model directly. For fine-tuning, you will use the same optimizer that BERT was originally trained with: "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101). For the learning rate `initial-learning-rate`, you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps (`n_warmup_steps`). In line with the BERT paper, the initial learning rate is smaller for fine-tuning.
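To see concretely what the preprocessing model produces, the short sketch below (an optional aside, not one of the lab's TODO tasks) runs a single review through the preprocessor URL stored in `HPARAMS` and prints the resulting tensors:
```python
# Optional sketch: inspect the tensors the TF-Hub BERT preprocessing model produces.
# Assumes HPARAMS already contains the 'tfhub-bert-preprocessor' URL set in the cell above.
preprocessor = hub.KerasLayer(HPARAMS['tfhub-bert-preprocessor'])
sample = preprocessor(tf.constant(['this is such an amazing movie!']))
for name, tensor in sample.items():
    # Expect input_word_ids, input_mask, and input_type_ids, each padded or
    # truncated to the preprocessor's default sequence length.
    print(name, tensor.shape, tensor.dtype)
```
With the defaults used below (20k training examples, batch size 32, and 3 epochs), the warm-up arithmetic works out to 625 steps per epoch, 1,875 total training steps, and 187 warm-up steps.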
###Code
HPARAMS.update({
# Model training hyperparameters for fine tuning and regularization.
"epochs": 3,
"initial-learning-rate": 3e-5,
"dropout": 0.1
})
epochs = HPARAMS['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
OPTIMIZER = optimization.create_optimizer(init_lr=HPARAMS['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
###Markdown
Build and compile a TensorFlow BERT sentiment classifier Next, you will define and compile your model by assembling pre-built TF-Hub components and tf.keras layers.
###Code
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
model = build_text_classifier(HPARAMS, OPTIMIZER)
# Visualize your fine-tuned BERT sentiment classifier.
tf.keras.utils.plot_model(model)
TEST_REVIEW = ['this is such an amazing movie!']
BERT_RAW_RESULT = model(tf.constant(TEST_REVIEW))
print(BERT_RAW_RESULT)
###Output
_____no_output_____
###Markdown
Train and evaluate your BERT sentiment classifier
###Code
HPARAMS.update({
# TODO: save your BERT sentiment classifier locally. Save it to './bert-sentiment-classifier-local'
"model-dir": "./bert-sentiment-classifier-local"
})
###Output
_____no_output_____
###Markdown
**Note:** training your model locally will take about 8-10 minutes.
###Code
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
# dataset_dir = download_data(data_url, data_dir)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
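    # Build and compile the model under MirroredStrategy for synchronous data-parallel
    # training on any available GPUs; with none present it falls back to the default device.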
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
Based on the `History` object returned by `model.fit()`, you can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history = train_evaluate(HPARAMS)
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'r', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right');
###Output
_____no_output_____
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Based on the plots above, you should see model accuracy of around 78-80%, which exceeds your business requirements target of greater than 75% accuracy. Containerize your model code Now that you have trained and evaluated your model locally in a Vertex Notebook as part of an experimentation workflow, your next step is to train and deploy your model on Google Cloud's Vertex AI platform. To train your BERT classifier on Google Cloud, you will package your Python training scripts and write a Dockerfile that specifies your ML model code, its dependencies, and execution instructions. You will build your custom container with Cloud Build, whose instructions are specified in `cloudbuild.yaml`, and publish your container to your Artifact Registry. This workflow lets you run the same container as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. You will walk through creating the following project structure for your ML model code:
```
|--/bert-sentiment-classifier
   |--/trainer
      |--__init__.py
      |--model.py
      |--task.py
   |--Dockerfile
   |--cloudbuild.yaml
   |--requirements.txt
```
1. Write a `model.py` training script First, you will tidy up your local TensorFlow model training code from above into a training script.
###Code
MODEL_DIR = "bert-sentiment-classifier"
%%writefile {MODEL_DIR}/trainer/model.py
import os
import shutil
import logging
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from official.nlp import optimization
DATA_URL = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
LOCAL_DATA_DIR = './tmp/data'
AUTOTUNE = tf.data.AUTOTUNE
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname='aclImdb_v1.tar.gz',
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
return dataset_dir
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
dataset_dir = download_data(data_url=DATA_URL,
local_data_dir=LOCAL_DATA_DIR)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(dataset_dir=dataset_dir,
hparams=hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
2. Write a `task.py` file as an entrypoint to your custom model container
###Code
%%writefile {MODEL_DIR}/trainer/task.py
import os
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten.
parser.add_argument('--model-dir', dest='model-dir',
default=os.environ['AIP_MODEL_DIR'], type=str, help='GCS URI for saving model artifacts.')
# Model training args.
parser.add_argument('--tfhub-bert-preprocessor', dest='tfhub-bert-preprocessor',
default='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', type=str, help='TF-Hub URL.')
parser.add_argument('--tfhub-bert-encoder', dest='tfhub-bert-encoder',
default='https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2', type=str, help='TF-Hub URL.')
parser.add_argument('--initial-learning-rate', dest='initial-learning-rate', default=3e-5, type=float, help='Learning rate for optimizer.')
parser.add_argument('--epochs', dest='epochs', default=3, type=int, help='Training iterations.')
parser.add_argument('--batch-size', dest='batch-size', default=32, type=int, help='Number of examples during each training iteration.')
parser.add_argument('--dropout', dest='dropout', default=0.1, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.')
parser.add_argument('--seed', dest='seed', default=42, type=int, help='Random number generator seed to prevent overlap between train and val sets.')
args = parser.parse_args()
hparams = args.__dict__
model.train_evaluate(hparams)
###Output
_____no_output_____
###Markdown
3. Write a `Dockerfile` for your custom model container Third, you will write a `Dockerfile` that contains instructions to package your model code in `bert-sentiment-classifier` and specifies the dependencies your model code needs to execute together in a Docker container.
###Code
%%writefile {MODEL_DIR}/Dockerfile
# Specifies base image and tag.
# https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
FROM us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-5:latest
# Sets the container working directory.
WORKDIR /root
# Copies the requirements.txt into the container to reduce network calls.
COPY requirements.txt .
# Installs additional packages.
RUN pip3 install -U -r requirements.txt
# b/203105209 Removes unneeded file from TF2.5 CPU image for python_module CustomJob training.
# Will be removed on subsequent public Vertex images.
RUN rm -rf /var/sitecustomize/sitecustomize.py
# Copies the trainer code to the docker image.
COPY . /trainer
# Sets the container working directory.
WORKDIR /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
###Output
_____no_output_____
###Markdown
4. Write a `requirements.txt` file to specify additional ML code dependencies These are dependencies for your model code that are not included in the pre-built Vertex TensorFlow images, such as TF-Hub, the TensorFlow AdamW optimizer, and TensorFlow Text, which are needed for importing and working with pre-trained TensorFlow BERT models.
###Code
%%writefile {MODEL_DIR}/requirements.txt
tf-models-official==2.5.0
tensorflow-text==2.5.0
tensorflow-hub==0.12.0
###Output
_____no_output_____
###Markdown
Use Cloud Build to build and submit your model container to Google Cloud Artifact Registry Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry). Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results. **Note**: the initial build and submit step will take about 16 minutes, but Cloud Build is able to take advantage of caching for subsequent builds. 1. Create Artifact Registry for custom container images
###Code
ARTIFACT_REGISTRY="bert-sentiment-classifier"
# TODO: create a Docker Artifact Registry using the gcloud CLI.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/artifacts/repositories/create
!gcloud artifacts repositories create {ARTIFACT_REGISTRY} \
--repository-format=docker \
--location={REGION} \
--description="Artifact registry for ML custom training images for sentiment classification"
###Output
_____no_output_____
###Markdown
2. Create `cloudbuild.yaml` instructions
###Code
IMAGE_NAME="bert-sentiment-classifier"
IMAGE_TAG="latest"
IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}/{IMAGE_NAME}:{IMAGE_TAG}"
cloudbuild_yaml = f"""steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-t', '{IMAGE_URI}', '.' ]
images:
- '{IMAGE_URI}'"""
with open(f"{MODEL_DIR}/cloudbuild.yaml", "w") as fp:
fp.write(cloudbuild_yaml)
###Output
_____no_output_____
###Markdown
3. Build and submit your container image to Artifact Registry using Cloud Build **Note**: the initial build and submit step will take about 16 minutes but Cloud Build is able to take advantage of caching for faster subsequent builds.
###Code
# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/builds/submit
# Hint: make sure the config flag is pointed at {MODEL_DIR}/cloudbuild.yaml defined above and you include your model directory.
!gcloud builds submit {MODEL_DIR} --timeout=20m --config {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____
###Markdown
Define a pipeline using the KFP V2 SDK To address your business requirements and get your higher performing model into production to deliver value faster, you will define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to orchestrate the training and deployment of your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) below.
###Code
import datetime
# google_cloud_pipeline_components includes pre-built KFP components for interfacing with Vertex AI services.
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
DISPLAY_NAME = "bert-sentiment-{}".format(TIMESTAMP)
GCS_BASE_OUTPUT_DIR= f"{GCS_BUCKET}/{MODEL_DIR}-{TIMESTAMP}"
USER = "dougkelly" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(GCS_BUCKET, USER)
print(f"Model display name: {DISPLAY_NAME}")
print(f"GCS dir for model training artifacts: {GCS_BASE_OUTPUT_DIR}")
print(f"GCS dir for pipeline artifacts: {PIPELINE_ROOT}")
# Pre-built Vertex model serving container for deployment.
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-5:latest"
###Output
_____no_output_____
###Markdown
The pipeline consists of two components: * `CustomContainerTrainingJobRunOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.1.6/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.CustomContainerTrainingJobRunOp): trains your custom model container using Vertex Training. This is the same as configuring a Vertex Custom Container Training Job using the Vertex Python SDK you covered in the Vertex AI: Qwik Start lab. * `ModelDeployOp`: deploys a given model to a Vertex Prediction Endpoint for online predictions.
###Code
@dsl.pipeline(name="bert-sentiment-classification", pipeline_root=PIPELINE_ROOT)
def pipeline(
project: str = PROJECT_ID,
location: str = REGION,
staging_bucket: str = GCS_BUCKET,
display_name: str = DISPLAY_NAME,
container_uri: str = IMAGE_URI,
model_serving_container_uri: str = SERVING_IMAGE_URI,
gcs_base_output_dir: str = GCS_BASE_OUTPUT_DIR,
):
#TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using
# the remaining arguments in the pipeline constructor.
# Hint: Refer to the component documentation link above if needed as well.
model_train_evaluate_op = gcc_aip.CustomContainerTrainingJobRunOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
staging_bucket=staging_bucket,
# WorkerPool arguments.
replica_count=1,
machine_type="c2-standard-4",
# TODO: fill in the remaining arguments from the pipeline constructor.
display_name=display_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_uri,
base_output_dir=gcs_base_output_dir,
)
    # b/203678136 `ModelDeployOp` runs on the latest image, which contains a backward-compatibility
    # breaking change. Temporary workaround for lab library dependency resolution.
gcc_aip.ModelDeployOp.component_spec.implementation.container.image = (
"gcr.io/ml-pipeline/google-cloud-pipeline-components:0.1.7")
# Pre-built KFP ModelDeployOp component to create Endpoint and deploy model to it
# for online predictions.
model_deploy_op = gcc_aip.ModelDeployOp(
# Link to model training component through output model artifact.
model=model_train_evaluate_op.outputs["model"],
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
# WorkerPool arguments.
machine_type="c2-standard-4",
)
###Output
_____no_output_____
###Markdown
Compile the pipeline
###Code
from kfp.v2 import compiler
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="bert-sentiment-classification.json"
)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines The `PipelineJob` is configured below and triggered through the `run()` method. Note: this pipeline run will take about 28 minutes to train and deploy your model. Follow along with the execution using the URL from the job output below.
###Code
vertex_pipelines_job = vertexai.pipeline_jobs.PipelineJob(
display_name="bert-sentiment-classification",
template_path="bert-sentiment-classification.json",
parameter_values={
"project": PROJECT_ID,
"location": REGION,
"staging_bucket": GCS_BUCKET,
"display_name": DISPLAY_NAME,
"container_uri": IMAGE_URI,
"model_serving_container_uri": SERVING_IMAGE_URI,
"gcs_base_output_dir": GCS_BASE_OUTPUT_DIR},
enable_caching=True,
)
vertex_pipelines_job.run()
###Output
_____no_output_____
###Markdown
Query deployed model on Vertex Endpoint for online predictions Finally, you will retrieve the `Endpoint` deployed by the pipeline and use it to query your model for online predictions. Configure the `Endpoint()` function below with the following parameters: * `endpoint_name`: A fully-qualified endpoint resource name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or "456" when project and location are initialized or passed. * `project`: GCP project. * `location`: GCP region. Call `predict()` to return a prediction for a test review.
###Code
# Retrieve your deployed Endpoint name from your pipeline.
ENDPOINT_NAME = vertexai.Endpoint.list()[0].name
#TODO: Generate online predictions using your Vertex Endpoint.
endpoint = vertexai.Endpoint(
endpoint_name=ENDPOINT_NAME,
project=PROJECT_ID,
location=REGION)
#TODO: write a movie review to test your model e.g. "The Dark Knight is the best Batman movie!"
test_review = "The Dark Knight is the best Batman movie!"
# TODO: use your Endpoint to return prediction for your test_review.
prediction = endpoint.predict([test_review])
print(prediction)
# Use a sigmoid function to compress your model output between 0 and 1. For binary classification, a threshold of 0.5 is typically applied
# so if the output is >= 0.5 then the predicted sentiment is "Positive" and < 0.5 is a "Negative" prediction.
print(tf.sigmoid(prediction.predictions[0]))
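# Optional sketch (not part of the lab tasks): convert the raw logit into a readable label.
# Assumes prediction.predictions[0] is a single-element list holding the model's logit.
score = float(tf.sigmoid(prediction.predictions[0][0]))
print("Positive" if score >= 0.5 else "Negative", f"(score={score:.3f})")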
###Output
_____no_output_____
###Markdown
Next steps Congratulations! You walked through a full experimentation, containerization, and MLOps workflow on Vertex AI. First, you built, trained, and evaluated a BERT sentiment classifier model in a Vertex Notebook. You then packaged your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you defined and ran a Kubeflow Pipeline on Vertex Pipelines that trained and deployed your model container to a Vertex Endpoint that you queried for online predictions. License
###Code
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
The pipeline consists of three components: * `CustomContainerTrainingJobRunOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.CustomContainerTrainingJobRunOp): trains your custom model container using Vertex Training. This is the same as configuring a Vertex Custom Container Training Job using the Vertex Python SDK you covered in the Vertex AI: Qwik Start lab. * `EndpointCreateOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.EndpointCreateOp): Creates a Google Cloud Vertex Endpoint resource that maps physical machine resources with your model to enable it to serve online predictions. Online predictions have low latency requirements; providing resources to the model in advance reduces latency. * `ModelDeployOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.ModelDeployOp): deploys your model to a Vertex Prediction Endpoint for online predictions.
###Code
@dsl.pipeline(name="bert-sentiment-classification", pipeline_root=PIPELINE_ROOT)
def pipeline(
project: str = PROJECT_ID,
location: str = REGION,
staging_bucket: str = GCS_BUCKET,
display_name: str = DISPLAY_NAME,
container_uri: str = IMAGE_URI,
model_serving_container_image_uri: str = SERVING_IMAGE_URI,
base_output_dir: str = GCS_BASE_OUTPUT_DIR,
):
#TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using
# the remaining arguments in the pipeline constructor.
# Hint: Refer to the component documentation link above if needed as well.
model_train_evaluate_op = gcc_aip.CustomContainerTrainingJobRunOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
staging_bucket=staging_bucket,
# WorkerPool arguments.
replica_count=1,
machine_type="n1-standard-4",
# TODO: fill in the remaining arguments from the pipeline constructor.
display_name=display_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_image_uri,
base_output_dir=base_output_dir,
)
# Create a Vertex Endpoint resource in parallel with model training.
endpoint_create_op = gcc_aip.EndpointCreateOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
display_name=display_name
)
# Deploy your model to the created Endpoint resource for online predictions.
model_deploy_op = gcc_aip.ModelDeployOp(
# Link to model training component through output model artifact.
model=model_train_evaluate_op.outputs["model"],
# Link to the created Endpoint.
endpoint=endpoint_create_op.outputs["endpoint"],
# Define prediction request routing. {"0": 100} indicates 100% of traffic
# to the ID of the current model being deployed.
traffic_split={"0": 100},
# WorkerPool arguments.
dedicated_resources_machine_type="n1-standard-4",
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=2
)
###Output
_____no_output_____
###Markdown
Compile the pipeline
###Code
from kfp.v2 import compiler
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="bert-sentiment-classification.json"
)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines The `PipelineJob` is configured below and triggered through the `run()` method. Note: this pipeline run will take about 28 minutes to train and deploy your model. Follow along with the execution using the URL from the job output below.
###Code
vertex_pipelines_job = vertexai.pipeline_jobs.PipelineJob(
display_name="bert-sentiment-classification",
template_path="bert-sentiment-classification.json",
parameter_values={
"project": PROJECT_ID,
"location": REGION,
"staging_bucket": GCS_BUCKET,
"display_name": DISPLAY_NAME,
"container_uri": IMAGE_URI,
"model_serving_container_image_uri": SERVING_IMAGE_URI,
"base_output_dir": GCS_BASE_OUTPUT_DIR},
enable_caching=True,
)
vertex_pipelines_job.run()
###Output
_____no_output_____
###Markdown
Query deployed model on Vertex Endpoint for online predictions Finally, you will retrieve the `Endpoint` deployed by the pipeline and use it to query your model for online predictions. Configure the `Endpoint()` function below with the following parameters: * `endpoint_name`: A fully-qualified endpoint resource name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or "456" when project and location are initialized or passed. * `project`: GCP project. * `location`: GCP region. Call `predict()` to return a prediction for a test review.
###Code
# Retrieve your deployed Endpoint name from your pipeline.
ENDPOINT_NAME = vertexai.Endpoint.list()[0].name
#TODO: Generate online predictions using your Vertex Endpoint.
endpoint = vertexai.Endpoint(
endpoint_name=ENDPOINT_NAME,
project=PROJECT_ID,
location=REGION)
#TODO: write a movie review to test your model e.g. "The Dark Knight is the best Batman movie!"
test_review = "The Dark Knight is the best Batman movie!"
# TODO: use your Endpoint to return prediction for your test_review.
prediction = endpoint.predict([test_review])
print(prediction)
# Use a sigmoid function to compress your model output between 0 and 1. For binary classification, a threshold of 0.5 is typically applied
# so if the output is >= 0.5 then the predicted sentiment is "Positive" and < 0.5 is a "Negative" prediction.
print(tf.sigmoid(prediction.predictions[0]))
###Output
_____no_output_____
###Markdown
Next steps Congratulations! You walked through a full experimentation, containerization, and MLOps workflow on Vertex AI. First, you built, trained, and evaluated a BERT sentiment classifier model in a Vertex Notebook. You then packaged your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you defined and ran a Kubeflow Pipeline on Vertex Pipelines that trained and deployed your model container to a Vertex Endpoint that you queried for online predictions. License
###Code
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
Building and deploying machine learning solutions with Vertex AI: Challenge Lab This Challenge Lab is recommended for students who have enrolled in the [**Building and deploying machine learning solutions with Vertex AI**](). You will be given a scenario and a set of tasks. Instead of following step-by-step instructions, you will use the skills learned from the labs in the quest to figure out how to complete the tasks on your own! An automated scoring system (shown on the Qwiklabs lab instructions page) will provide feedback on whether you have completed your tasks correctly. When you take a Challenge Lab, you will not be taught Google Cloud concepts. To build the solution to the challenge presented, use skills learned from the labs in the Quest this challenge lab is part of. You are expected to extend your learned skills and complete all the **`TODO:`** comments in this notebook. Are you ready for the challenge? Scenario You were recently hired as a Machine Learning Engineer at a startup movie review website. Your manager has tasked you with building a machine learning model to classify the sentiment of user movie reviews as positive or negative. These predictions will be used as an input in downstream movie rating systems and to surface top supportive and critical reviews on the movie website application. The challenge: your business requirements are that you have just 6 weeks to productionize a model that achieves greater than 75% accuracy to improve upon an existing bootstrapped solution. Furthermore, after doing some exploratory analysis in your startup's data warehouse, you found that you only have a small dataset of 50k text reviews to build a higher performing solution. To build and deploy a high-performance machine learning model with limited data quickly, you will walk through training and deploying a custom TensorFlow BERT sentiment classifier for online predictions on Google Cloud's [Vertex AI](https://cloud.google.com/vertex-ai) platform. Vertex AI is Google Cloud's next generation machine learning development platform where you can leverage the latest ML pre-built components and AutoML to significantly enhance your development productivity, scale your workflow and decision making with your data, and accelerate time to value. First, you will progress through a typical experimentation workflow where you will build your model from pre-trained BERT components from TF-Hub and `tf.keras` classification layers to train and evaluate your model in a Vertex Notebook. You will then package your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you will define and run a Kubeflow Pipeline on Vertex Pipelines that trains and deploys your model to a Vertex Endpoint that you will query for online predictions. Learning objectives * Train a TensorFlow model locally in a hosted [**Vertex Notebook**](https://cloud.google.com/vertex-ai/docs/general/notebooks?hl=sv). * Containerize your training code with [**Cloud Build**](https://cloud.google.com/build) and push it to [**Google Cloud Artifact Registry**](https://cloud.google.com/artifact-registry). * Define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to train and deploy your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines). * Query your model on a [**Vertex Endpoint**](https://cloud.google.com/vertex-ai/docs/predictions/getting-predictions) using online predictions. Setup Define constants
###Code
# Add installed library dependencies to Python PATH variable.
PATH=%env PATH
%env PATH={PATH}:/home/jupyter/.local/bin
# Retrieve and set PROJECT_ID and REGION environment variables.
# TODO: fill in PROJECT_ID.
PROJECT_ID = !(gcloud config get-value core/project)
PROJECT_ID = PROJECT_ID[0]
REGION = 'us-central1'
# Create a globally unique Google Cloud Storage bucket for artifact storage.
GCS_BUCKET = f"gs://{PROJECT_ID}-vertex-challenge-lab"
!gsutil mb -l $REGION $GCS_BUCKET
###Output
_____no_output_____
###Markdown
Import libraries
###Code
import os
import shutil
import logging
# TensorFlow model building libraries.
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
# Re-create the AdamW optimizer used in the original BERT paper.
from official.nlp import optimization
# Libraries for data and plot model training metrics.
import pandas as pd
import matplotlib.pyplot as plt
# Import the Vertex AI Python SDK.
from google.cloud import aiplatform as vertexai
###Output
_____no_output_____
###Markdown
Initialize Vertex AI Python SDK Initialize the Vertex AI Python SDK with your GCP Project, Region, and Google Cloud Storage Bucket.
###Code
vertexai.init(project=PROJECT_ID, location=REGION, staging_bucket=GCS_BUCKET)
###Output
_____no_output_____
###Markdown
Build and train your model locally in a Vertex Notebook Note: this lab adapts and extends the official [TensorFlow BERT text classification tutorial](https://www.tensorflow.org/text/tutorials/classify_text_with_bert) to utilize Vertex AI services. See the tutorial for additional coverage on fine-tuning BERT models using TensorFlow. Lab dataset In this lab, you will use the [Large Movie Review Dataset](https://ai.stanford.edu/~amaas/data/sentiment) that contains the text of 50,000 movie reviews from the Internet Movie Database. These are split into 25,000 reviews for training and 25,000 reviews for testing. The training and testing sets are balanced, meaning they contain an equal number of positive and negative reviews. Data ingestion and processing code has been provided for you below: Import dataset
###Code
DATA_URL = "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
LOCAL_DATA_DIR = "."
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname="aclImdb_v1.tar.gz",
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), "aclImdb")
train_dir = os.path.join(dataset_dir, "train")
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, "unsup")
shutil.rmtree(remove_dir)
return dataset_dir
DATASET_DIR = download_data(data_url=DATA_URL, local_data_dir=LOCAL_DATA_DIR)
# Create a dictionary to iteratively add data pipeline and model training hyperparameters.
HPARAMS = {
# Set a random sampling seed to prevent data leakage in data splits from files.
"seed": 42,
# Number of training and inference examples.
"batch-size": 32
}
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, HPARAMS)
AUTOTUNE = tf.data.AUTOTUNE
CLASS_NAMES = raw_train_ds.class_names
train_ds = raw_train_ds.prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.prefetch(buffer_size=AUTOTUNE)
###Output
_____no_output_____
###Markdown
Let's print a few example reviews:
###Code
for text_batch, label_batch in train_ds.take(1):
for i in range(3):
print(f'Review {i}: {text_batch.numpy()[i]}')
label = label_batch.numpy()[i]
print(f'Label : {label} ({CLASS_NAMES[label]})')
###Output
_____no_output_____
###Markdown
Choose a pre-trained BERT model to fine-tune for higher accuracy [**Bidirectional Encoder Representations from Transformers (BERT)**](https://arxiv.org/abs/1810.04805v2) is a transformer-based text representation model pre-trained on massive datasets (3+ billion words) that can be fine-tuned for state-of-the-art results on many natural language processing (NLP) tasks. Since its release in 2018 by Google researchers, it has transformed the field of NLP research and come to form a core part of significant improvements to [Google Search](https://www.blog.google/products/search/search-language-understanding-bert). To meet your business requirements of achieving higher accuracy on a small dataset (20k training examples), you will use a technique called transfer learning to combine a pre-trained BERT encoder and classification layers to fine-tune a new higher-performing model for binary sentiment classification. For this lab, you will use a smaller BERT model that trades some accuracy for faster training times. The Small BERT models are instances of the original BERT architecture with a smaller number L of layers (i.e., residual blocks) combined with a smaller hidden size H and a matching smaller number A of attention heads, as published by Iulia Turc, Ming-Wei Chang, Kenton Lee, Kristina Toutanova: ["Well-Read Students Learn Better: On the Importance of Pre-training Compact Models"](https://arxiv.org/abs/1908.08962), 2019. They have the same general architecture but fewer and/or smaller Transformer blocks, which lets you explore tradeoffs between speed, size and quality. The following preprocessing and encoder models in the TensorFlow 2 SavedModel format use the implementation of BERT from the [TensorFlow Models Github repository](https://github.com/tensorflow/models/tree/master/official/nlp/bert) with the trained weights released by the authors of Small BERT.
###Code
HPARAMS.update({
# TF Hub BERT modules.
"tfhub-bert-preprocessor": "https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3",
"tfhub-bert-encoder": "https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2",
})
###Output
_____no_output_____
###Markdown
Text inputs need to be transformed to numeric token ids and arranged in several Tensors before being input to BERT. TensorFlow Hub provides a matching preprocessing model for each of the BERT models discussed above, which implements this transformation using TF ops from the TF.text library. Since this text preprocessor is a TensorFlow model, it can be included in your model directly. For fine-tuning, you will use the same optimizer that BERT was originally trained with: the "Adaptive Moments" (Adam). This optimizer minimizes the prediction loss and does regularization by weight decay (not using moments), which is also known as [AdamW](https://arxiv.org/abs/1711.05101). For the learning rate `initial-learning-rate`, you will use the same schedule as BERT pre-training: linear decay of a notional initial learning rate, prefixed with a linear warm-up phase over the first 10% of training steps `n_warmup_steps`. In line with the BERT paper, the initial learning rate is smaller for fine-tuning.
###Code
HPARAMS.update({
# Model training hyperparameters for fine tuning and regularization.
"epochs": 3,
"initial-learning-rate": 3e-5,
"dropout": 0.1
})
epochs = HPARAMS['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
OPTIMIZER = optimization.create_optimizer(init_lr=HPARAMS['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
###Output
_____no_output_____
###Markdown
Build and compile a TensorFlow BERT sentiment classifier Next, you will define and compile your model by assembling pre-built TF-Hub components and tf.keras layers.
###Code
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
model = build_text_classifier(HPARAMS, OPTIMIZER)
# Visualize your fine-tuned BERT sentiment classifier.
tf.keras.utils.plot_model(model)
TEST_REVIEW = ['this is such an amazing movie!']
BERT_RAW_RESULT = model(tf.constant(TEST_REVIEW))
print(BERT_RAW_RESULT)
###Output
_____no_output_____
###Markdown
Train and evaluate your BERT sentiment classifier
###Code
HPARAMS.update({
# TODO: Save your BERT sentiment classifier locally.
# Hint: Save it to './bert-sentiment-classifier-local'. Note the key name in model.save().
"model-dir": "./bert-sentiment-classifier-local"
})
###Output
_____no_output_____
###Markdown
**Note:** training your model locally will take about 8-10 minutes.
###Code
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
# dataset_dir = download_data(data_url, data_dir)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(DATASET_DIR, hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
Based on the `History` object returned by `model.fit()`, you can plot the training and validation loss for comparison, as well as the training and validation accuracy:
###Code
history = train_evaluate(HPARAMS)
history_dict = history.history
print(history_dict.keys())
acc = history_dict['binary_accuracy']
val_acc = history_dict['val_binary_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
fig = plt.figure(figsize=(10, 6))
fig.tight_layout()
plt.subplot(2, 1, 1)
# "bo" is for "blue dot"
plt.plot(epochs, loss, 'r', label='Training loss')
# b is for "solid blue line"
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
# plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(2, 1, 2)
plt.plot(epochs, acc, 'r', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend(loc='lower right');
###Output
_____no_output_____
###Markdown
In this plot, the red lines represent the training loss and accuracy, and the blue lines are the validation loss and accuracy. Based on the plots above, you should see model accuracy of around 78-80%, which exceeds your business requirements target of greater than 75% accuracy. Containerize your model code Now that you trained and evaluated your model locally in a Vertex Notebook as part of an experimentation workflow, your next step is to train and deploy your model on Google Cloud's Vertex AI platform. To train your BERT classifier on Google Cloud, you will package your Python training scripts and write a Dockerfile that specifies your ML model code, its dependencies, and execution instructions. You will build your custom container with Cloud Build, whose instructions are specified in `cloudbuild.yaml`, and publish your container to your Artifact Registry. This workflow gives you the opportunity to reuse the same container as part of a portable and scalable [Vertex Pipelines](https://cloud.google.com/vertex-ai/docs/pipelines/introduction) workflow. You will walk through creating the following project structure for your ML model code:```|--/bert-sentiment-classifier |--/trainer |--__init__.py |--model.py |--task.py |--Dockerfile |--cloudbuild.yaml |--requirements.txt``` 1. Write a `model.py` training script First, you will tidy up your local TensorFlow model training code from above into a training script.
###Code
MODEL_DIR = "bert-sentiment-classifier"
%%writefile {MODEL_DIR}/trainer/model.py
import os
import shutil
import logging
import tensorflow as tf
import tensorflow_text as text
import tensorflow_hub as hub
from official.nlp import optimization
DATA_URL = 'https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz'
LOCAL_DATA_DIR = './tmp/data'
AUTOTUNE = tf.data.AUTOTUNE
def download_data(data_url, local_data_dir):
"""Download dataset.
Args:
data_url(str): Source data URL path.
local_data_dir(str): Local data download directory path.
Returns:
dataset_dir(str): Local unpacked data directory path.
"""
if not os.path.exists(local_data_dir):
os.makedirs(local_data_dir)
dataset = tf.keras.utils.get_file(
fname='aclImdb_v1.tar.gz',
origin=data_url,
untar=True,
cache_dir=local_data_dir,
cache_subdir="")
dataset_dir = os.path.join(os.path.dirname(dataset), 'aclImdb')
train_dir = os.path.join(dataset_dir, 'train')
# Remove unused folders to make it easier to load the data.
remove_dir = os.path.join(train_dir, 'unsup')
shutil.rmtree(remove_dir)
return dataset_dir
def load_datasets(dataset_dir, hparams):
"""Load pre-split tf.datasets.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
raw_train_ds(tf.dataset): Train split dataset (20k examples).
raw_val_ds(tf.dataset): Validation split dataset (5k examples).
raw_test_ds(tf.dataset): Test split dataset (25k examples).
"""
raw_train_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='training',
seed=hparams['seed'])
raw_val_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'train'),
batch_size=hparams['batch-size'],
validation_split=0.2,
subset='validation',
seed=hparams['seed'])
raw_test_ds = tf.keras.preprocessing.text_dataset_from_directory(
os.path.join(dataset_dir, 'test'),
batch_size=hparams['batch-size'])
return raw_train_ds, raw_val_ds, raw_test_ds
def build_text_classifier(hparams, optimizer):
"""Define and compile a TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
model(tf.keras.Model): A compiled TensorFlow model.
"""
text_input = tf.keras.layers.Input(shape=(), dtype=tf.string, name='text')
# TODO: Add a hub.KerasLayer for BERT text preprocessing using the hparams dict.
# Name the layer 'preprocessing' and store in the variable preprocessor.
preprocessor = hub.KerasLayer(hparams['tfhub-bert-preprocessor'], name='preprocessing')
encoder_inputs = preprocessor(text_input)
# TODO: Add a hub.KerasLayer for BERT text encoding using the hparams dict.
# Name the layer 'BERT_encoder' and store in the variable encoder.
encoder = hub.KerasLayer(hparams['tfhub-bert-encoder'], trainable=True, name='BERT_encoder')
outputs = encoder(encoder_inputs)
# For the fine-tuning you are going to use the `pooled_output` array which represents
# each input sequence as a whole. The shape is [batch_size, H].
# You can think of this as an embedding for the entire movie review.
classifier = outputs['pooled_output']
# Add dropout to prevent overfitting during model fine-tuning.
classifier = tf.keras.layers.Dropout(hparams['dropout'], name='dropout')(classifier)
classifier = tf.keras.layers.Dense(1, activation=None, name='classifier')(classifier)
model = tf.keras.Model(text_input, classifier, name='bert-sentiment-classifier')
loss = tf.keras.losses.BinaryCrossentropy(from_logits=True)
metrics = tf.metrics.BinaryAccuracy()
model.compile(optimizer=optimizer,
loss=loss,
metrics=metrics)
return model
def train_evaluate(hparams):
"""Train and evaluate TensorFlow BERT sentiment classifier.
Args:
hparams(dict): A dictionary containing model training arguments.
Returns:
history(tf.keras.callbacks.History): Keras callback that records training event history.
"""
dataset_dir = download_data(data_url=DATA_URL,
local_data_dir=LOCAL_DATA_DIR)
raw_train_ds, raw_val_ds, raw_test_ds = load_datasets(dataset_dir=dataset_dir,
hparams=hparams)
train_ds = raw_train_ds.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = raw_val_ds.cache().prefetch(buffer_size=AUTOTUNE)
test_ds = raw_test_ds.cache().prefetch(buffer_size=AUTOTUNE)
epochs = hparams['epochs']
steps_per_epoch = tf.data.experimental.cardinality(train_ds).numpy()
n_train_steps = steps_per_epoch * epochs
n_warmup_steps = int(0.1 * n_train_steps)
optimizer = optimization.create_optimizer(init_lr=hparams['initial-learning-rate'],
num_train_steps=n_train_steps,
num_warmup_steps=n_warmup_steps,
optimizer_type='adamw')
model = build_text_classifier(hparams=hparams, optimizer=optimizer)
logging.info(model.summary())
history = model.fit(x=train_ds,
validation_data=val_ds,
epochs=epochs)
logging.info("Test accuracy: %s", model.evaluate(test_ds))
# Export Keras model in TensorFlow SavedModel format.
model.save(hparams['model-dir'])
return history
###Output
_____no_output_____
###Markdown
2. Write a `task.py` file as an entrypoint to your custom model container
###Code
%%writefile {MODEL_DIR}/trainer/task.py
import os
import argparse
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Vertex custom container training args. These are set by Vertex AI during training but can also be overwritten.
parser.add_argument('--model-dir', dest='model-dir',
default=os.environ['AIP_MODEL_DIR'], type=str, help='GCS URI for saving model artifacts.')
# Model training args.
parser.add_argument('--tfhub-bert-preprocessor', dest='tfhub-bert-preprocessor',
default='https://tfhub.dev/tensorflow/bert_en_uncased_preprocess/3', type=str, help='TF-Hub URL.')
parser.add_argument('--tfhub-bert-encoder', dest='tfhub-bert-encoder',
default='https://tfhub.dev/tensorflow/small_bert/bert_en_uncased_L-2_H-128_A-2/2', type=str, help='TF-Hub URL.')
parser.add_argument('--initial-learning-rate', dest='initial-learning-rate', default=3e-5, type=float, help='Learning rate for optimizer.')
parser.add_argument('--epochs', dest='epochs', default=3, type=int, help='Training iterations.')
parser.add_argument('--batch-size', dest='batch-size', default=32, type=int, help='Number of examples during each training iteration.')
parser.add_argument('--dropout', dest='dropout', default=0.1, type=float, help='Float percentage of DNN nodes [0,1] to drop for regularization.')
parser.add_argument('--seed', dest='seed', default=42, type=int, help='Random number generator seed to prevent overlap between train and val sets.')
args = parser.parse_args()
hparams = args.__dict__
model.train_evaluate(hparams)
###Output
_____no_output_____
###Markdown
3. Write a `Dockerfile` for your custom model container Third, you will write a `Dockerfile` that contains instructions to package your model code in `bert-sentiment-classifier` and specifies the dependencies your model code needs to execute, together in a Docker container.
###Code
%%writefile {MODEL_DIR}/Dockerfile
# Specifies base image and tag.
# https://cloud.google.com/vertex-ai/docs/training/pre-built-containers
FROM us-docker.pkg.dev/vertex-ai/training/tf-cpu.2-6:latest
# Sets the container working directory.
WORKDIR /root
# Copies the requirements.txt into the container to reduce network calls.
COPY requirements.txt .
# Installs additional packages.
RUN pip3 install -U -r requirements.txt
# b/203105209 Removes unneeded file from TF2.5 CPU image for python_module CustomJob training.
# Will be removed on subsequent public Vertex images.
RUN rm -rf /var/sitecustomize/sitecustomize.py
# Copies the trainer code to the docker image.
COPY . /trainer
# Sets the container working directory.
WORKDIR /trainer
# Sets up the entry point to invoke the trainer.
ENTRYPOINT ["python", "-m", "trainer.task"]
###Output
_____no_output_____
###Markdown
4. Write a `requirements.txt` file to specify additional ML code dependencies These are additional dependencies for your model code that are not included in the pre-built Vertex TensorFlow images, such as TF-Hub, the TensorFlow AdamW optimizer, and TensorFlow Text, which are needed for importing and working with pre-trained TensorFlow BERT models.
###Code
%%writefile {MODEL_DIR}/requirements.txt
tf-models-official==2.6.0
tensorflow-text==2.6.0
tensorflow-hub==0.12.0
###Output
_____no_output_____
###Markdown
Use Cloud Build to build and submit your model container to Google Cloud Artifact Registry Next, you will use [Cloud Build](https://cloud.google.com/build) to build and upload your custom TensorFlow model container to [Google Cloud Artifact Registry](https://cloud.google.com/artifact-registry). Cloud Build brings reusability and automation to your ML experimentation by enabling you to reliably build, test, and deploy your ML model code as part of a CI/CD workflow. Artifact Registry provides a centralized repository for you to store, manage, and secure your ML container images. This will allow you to securely share your ML work with others and reproduce experiment results. **Note**: the initial build and submit step will take about 16 minutes, but Cloud Build is able to take advantage of caching for subsequent builds. 1. Create Artifact Registry for custom container images
###Code
ARTIFACT_REGISTRY="bert-sentiment-classifier"
# TODO: create a Docker Artifact Registry using the gcloud CLI.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/artifacts/repositories/create
!gcloud artifacts repositories create {ARTIFACT_REGISTRY} \
--repository-format=docker \
--location={REGION} \
--description="Artifact registry for ML custom training images for sentiment classification"
###Output
_____no_output_____
###Markdown
2. Create `cloudbuild.yaml` instructions
###Code
IMAGE_NAME="bert-sentiment-classifier"
IMAGE_TAG="latest"
IMAGE_URI=f"{REGION}-docker.pkg.dev/{PROJECT_ID}/{ARTIFACT_REGISTRY}/{IMAGE_NAME}:{IMAGE_TAG}"
cloudbuild_yaml = f"""steps:
- name: 'gcr.io/cloud-builders/docker'
args: [ 'build', '-t', '{IMAGE_URI}', '.' ]
images:
- '{IMAGE_URI}'"""
with open(f"{MODEL_DIR}/cloudbuild.yaml", "w") as fp:
fp.write(cloudbuild_yaml)
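# Optional sanity check: inspect the generated Cloud Build config before submitting it.
print(cloudbuild_yaml)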
###Output
_____no_output_____
###Markdown
3. Build and submit your container image to Artifact Registry using Cloud Build **Note**: the initial build and submit step will take about 16 minutes, but Cloud Build is able to take advantage of caching for faster subsequent builds.
###Code
# TODO: use Cloud Build to build and submit your custom model container to your Artifact Registry.
# Documentation link: https://cloud.google.com/sdk/gcloud/reference/builds/submit
# Hint: make sure the config flag is pointed at {MODEL_DIR}/cloudbuild.yaml defined above and you include your model directory.
!gcloud builds submit {MODEL_DIR} --timeout=20m --config {MODEL_DIR}/cloudbuild.yaml
###Output
_____no_output_____
###Markdown
Define a pipeline using the KFP V2 SDK To address your business requirements and get your higher performing model into production to deliver value faster, you will define a pipeline using the [**Kubeflow Pipelines (KFP) V2 SDK**](https://www.kubeflow.org/docs/components/pipelines/sdk/v2/v2-compatibility) to orchestrate the training and deployment of your model on [**Vertex Pipelines**](https://cloud.google.com/vertex-ai/docs/pipelines) below.
###Code
import datetime
# google_cloud_pipeline_components includes pre-built KFP components for interfacing with Vertex AI services.
from google_cloud_pipeline_components import aiplatform as gcc_aip
from kfp.v2 import dsl
TIMESTAMP=datetime.datetime.now().strftime('%Y%m%d%H%M%S')
DISPLAY_NAME = "bert-sentiment-{}".format(TIMESTAMP)
GCS_BASE_OUTPUT_DIR= f"{GCS_BUCKET}/{MODEL_DIR}-{TIMESTAMP}"
USER = "dougkelly" # <---CHANGE THIS
PIPELINE_ROOT = "{}/pipeline_root/{}".format(GCS_BUCKET, USER)
print(f"Model display name: {DISPLAY_NAME}")
print(f"GCS dir for model training artifacts: {GCS_BASE_OUTPUT_DIR}")
print(f"GCS dir for pipeline artifacts: {PIPELINE_ROOT}")
# Pre-built Vertex model serving container for deployment.
# https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_IMAGE_URI = "us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-6:latest"
###Output
_____no_output_____
###Markdown
The pipeline consists of three components:* `CustomContainerTrainingJobRunOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.CustomContainerTrainingJobRunOp): trains your custom model container using Vertex Training. This is the same as configuring a Vertex Custom Container Training Job using the Vertex Python SDK you covered in the Vertex AI: Qwik Start lab.* `EndpointCreateOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.EndpointCreateOp): Creates a Google Cloud Vertex Endpoint resource that maps physical machine resources with your model to enable it to serve online predictions. Online predictions have low latency requirements; providing resources to the model in advance reduces latency. * `ModelDeployOp` [(documentation)](https://google-cloud-pipeline-components.readthedocs.io/en/google-cloud-pipeline-components-0.2.0/google_cloud_pipeline_components.aiplatform.html#google_cloud_pipeline_components.aiplatform.ModelDeployOp): deploys your model to a Vertex Prediction Endpoint for online predictions.
###Code
@dsl.pipeline(name="bert-sentiment-classification", pipeline_root=PIPELINE_ROOT)
def pipeline(
project: str = PROJECT_ID,
location: str = REGION,
staging_bucket: str = GCS_BUCKET,
display_name: str = DISPLAY_NAME,
container_uri: str = IMAGE_URI,
model_serving_container_image_uri: str = SERVING_IMAGE_URI,
base_output_dir: str = GCS_BASE_OUTPUT_DIR,
):
#TODO: add and configure the pre-built KFP CustomContainerTrainingJobRunOp component using
# the remaining arguments in the pipeline constructor.
# Hint: Refer to the component documentation link above if needed as well.
model_train_evaluate_op = gcc_aip.CustomContainerTrainingJobRunOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
staging_bucket=staging_bucket,
# WorkerPool arguments.
replica_count=1,
machine_type="n1-standard-4",
# TODO: fill in the remaining arguments from the pipeline constructor.
display_name=display_name,
container_uri=container_uri,
model_serving_container_image_uri=model_serving_container_image_uri,
base_output_dir=base_output_dir,
)
# Create a Vertex Endpoint resource in parallel with model training.
endpoint_create_op = gcc_aip.EndpointCreateOp(
# Vertex AI Python SDK authentication parameters.
project=project,
location=location,
display_name=display_name
)
# Deploy your model to the created Endpoint resource for online predictions.
model_deploy_op = gcc_aip.ModelDeployOp(
# Link to model training component through output model artifact.
model=model_train_evaluate_op.outputs["model"],
# Link to the created Endpoint.
endpoint=endpoint_create_op.outputs["endpoint"],
# Define prediction request routing. {"0": 100} indicates 100% of traffic
# to the ID of the current model being deployed.
traffic_split={"0": 100},
# WorkerPool arguments.
dedicated_resources_machine_type="n1-standard-4",
dedicated_resources_min_replica_count=1,
dedicated_resources_max_replica_count=2
)
###Output
_____no_output_____
###Markdown
Compile the pipeline
###Code
from kfp.v2 import compiler
compiler.Compiler().compile(
pipeline_func=pipeline, package_path="bert-sentiment-classification.json"
)
###Output
_____no_output_____
###Markdown
Run the pipeline on Vertex Pipelines The `PipelineJob` is configured below and triggered through the `run()` method. Note: This pipeline run will take around 30-40 minutes to train and deploy your model. Follow along with the execution using the URL from the job output below.
###Code
vertex_pipelines_job = vertexai.pipeline_jobs.PipelineJob(
display_name="bert-sentiment-classification",
template_path="bert-sentiment-classification.json",
parameter_values={
"project": PROJECT_ID,
"location": REGION,
"staging_bucket": GCS_BUCKET,
"display_name": DISPLAY_NAME,
"container_uri": IMAGE_URI,
"model_serving_container_image_uri": SERVING_IMAGE_URI,
"base_output_dir": GCS_BASE_OUTPUT_DIR},
enable_caching=True,
)
vertex_pipelines_job.run()
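# run() blocks until the pipeline finishes. If you would rather return immediately and monitor
# the job from the Cloud Console, submit() is a non-blocking alternative (assumption: available
# in your installed google-cloud-aiplatform version).
# vertex_pipelines_job.submit()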
###Output
_____no_output_____
###Markdown
Query deployed model on Vertex Endpoint for online predictions Finally, you will retrieve the `Endpoint` deployed by the pipeline and use it to query your model for online predictions. Configure the `Endpoint()` function below with the following parameters:* `endpoint_name`: A fully-qualified endpoint resource name or endpoint ID. Example: "projects/123/locations/us-central1/endpoints/456" or "456" when project and location are initialized or passed.* `project`: GCP project.* `location`: GCP region. Call `predict()` to return a prediction for a test review.
###Code
# Retrieve your deployed Endpoint name from your pipeline.
ENDPOINT_NAME = vertexai.Endpoint.list()[0].name
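# Hypothetical alternative (not required by the lab): if your project has more than one
# endpoint, filter by the display name used in the pipeline instead of taking the first entry.
# ENDPOINT_NAME = vertexai.Endpoint.list(filter=f'display_name="{DISPLAY_NAME}"')[0].name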
#TODO: Generate online predictions using your Vertex Endpoint.
endpoint = vertexai.Endpoint(
endpoint_name=ENDPOINT_NAME,
project=PROJECT_ID,
location=REGION)
#TODO: write a movie review to test your model e.g. "The Dark Knight is the best Batman movie!"
test_review = "The Dark Knight is the best Batman movie!"
# TODO: use your Endpoint to return prediction for your test_review.
prediction = endpoint.predict([test_review])
print(prediction)
# Use a sigmoid function to compress your model output between 0 and 1. For binary classification, a threshold of 0.5 is typically
# applied: if the output is >= 0.5, the predicted sentiment is "Positive"; otherwise it is "Negative".
print(tf.sigmoid(prediction.predictions[0]))
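# Minimal follow-up sketch (not part of the original lab; assumes the endpoint returns a single
# logit per review): map the sigmoid probability to a label using the 0.5 threshold above.
probability = float(tf.sigmoid(prediction.predictions[0][0]))
print(f"Review sentiment: {'Positive' if probability >= 0.5 else 'Negative'} ({probability:.3f})")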
###Output
_____no_output_____
###Markdown
Next steps Congratulations! You walked through a full experimentation, containerization, and MLOps workflow on Vertex AI. First, you built, trained, and evaluated a BERT sentiment classifier model in a Vertex Notebook. You then packaged your model code into a Docker container to train on Google Cloud's Vertex AI. Lastly, you defined and ran a Kubeflow Pipeline on Vertex Pipelines that trained and deployed your model container to a Vertex Endpoint that you queried for online predictions. License
###Code
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____ |
experiments/tl_3v2/jitter1/cores-oracle.run1.framed/trials/7/trial.ipynb | ###Markdown
Transfer Learning Template
###Code
%load_ext autoreload
%autoreload 2
%matplotlib inline
import os, json, sys, time, random
import numpy as np
import torch
from torch.optim import Adam
from easydict import EasyDict
import matplotlib.pyplot as plt
from steves_models.steves_ptn import Steves_Prototypical_Network
from steves_utils.lazy_iterable_wrapper import Lazy_Iterable_Wrapper
from steves_utils.iterable_aggregator import Iterable_Aggregator
from steves_utils.ptn_train_eval_test_jig import PTN_Train_Eval_Test_Jig
from steves_utils.torch_sequential_builder import build_sequential
from steves_utils.torch_utils import get_dataset_metrics, ptn_confusion_by_domain_over_dataloader
from steves_utils.utils_v2 import (per_domain_accuracy_from_confusion, get_datasets_base_path)
from steves_utils.PTN.utils import independent_accuracy_assesment
from torch.utils.data import DataLoader
from steves_utils.stratified_dataset.episodic_accessor import Episodic_Accessor_Factory
from steves_utils.ptn_do_report import (
get_loss_curve,
get_results_table,
get_parameters_table,
get_domain_accuracies,
)
from steves_utils.transforms import get_chained_transform
###Output
_____no_output_____
###Markdown
Allowed Parameters These are allowed parameters, not defaults. Each of these values needs to be present in the injected parameters (the notebook will raise an exception if they are not present). Papermill uses the cell tag "parameters" to inject the real parameters below this cell. Enable tags to see what I mean
###Code
required_parameters = {
"experiment_name",
"lr",
"device",
"seed",
"dataset_seed",
"n_shot",
"n_query",
"n_way",
"train_k_factor",
"val_k_factor",
"test_k_factor",
"n_epoch",
"patience",
"criteria_for_best",
"x_net",
"datasets",
"torch_default_dtype",
"NUM_LOGS_PER_EPOCH",
"BEST_MODEL_PATH",
"x_shape",
}
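# Hypothetical sketch of how this template is usually parameterized: papermill injects a
# parameters cell directly below the cell tagged "parameters". The notebook/file names here
# are assumptions, not part of this trial.
# import papermill as pm
# pm.execute_notebook("trial.ipynb", "trial_out.ipynb", parameters={"lr": 1e-4, "seed": 1337})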
from steves_utils.CORES.utils import (
ALL_NODES,
ALL_NODES_MINIMUM_1000_EXAMPLES,
ALL_DAYS
)
from steves_utils.ORACLE.utils_v2 import (
ALL_DISTANCES_FEET_NARROWED,
ALL_RUNS,
ALL_SERIAL_NUMBERS,
)
standalone_parameters = {}
standalone_parameters["experiment_name"] = "STANDALONE PTN"
standalone_parameters["lr"] = 0.001
standalone_parameters["device"] = "cuda"
standalone_parameters["seed"] = 1337
standalone_parameters["dataset_seed"] = 1337
standalone_parameters["n_way"] = 8
standalone_parameters["n_shot"] = 3
standalone_parameters["n_query"] = 2
standalone_parameters["train_k_factor"] = 1
standalone_parameters["val_k_factor"] = 2
standalone_parameters["test_k_factor"] = 2
standalone_parameters["n_epoch"] = 50
standalone_parameters["patience"] = 10
standalone_parameters["criteria_for_best"] = "source_loss"
standalone_parameters["datasets"] = [
{
"labels": ALL_SERIAL_NUMBERS,
"domains": ALL_DISTANCES_FEET_NARROWED,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl"),
"source_or_target_dataset": "source",
"x_transforms": ["unit_mag", "minus_two"],
"episode_transforms": [],
"domain_prefix": "ORACLE_"
},
{
"labels": ALL_NODES,
"domains": ALL_DAYS,
"num_examples_per_domain_per_label": 100,
"pickle_path": os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
"source_or_target_dataset": "target",
"x_transforms": ["unit_power", "times_zero"],
"episode_transforms": [],
"domain_prefix": "CORES_"
}
]
standalone_parameters["torch_default_dtype"] = "torch.float32"
standalone_parameters["x_net"] = [
{"class": "nnReshape", "kargs": {"shape":[-1, 1, 2, 256]}},
{"class": "Conv2d", "kargs": { "in_channels":1, "out_channels":256, "kernel_size":(1,7), "bias":False, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":256}},
{"class": "Conv2d", "kargs": { "in_channels":256, "out_channels":80, "kernel_size":(2,7), "bias":True, "padding":(0,3), },},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features":80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 80*256, "out_features": 256}}, # 80 units per IQ pair
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features":256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
]
# Parameters relevant to results
# These parameters will basically never need to change
standalone_parameters["NUM_LOGS_PER_EPOCH"] = 10
standalone_parameters["BEST_MODEL_PATH"] = "./best_model.pth"
# Parameters
parameters = {
"experiment_name": "tl_3-jitter1v2:cores -> oracle.run1.framed",
"device": "cuda",
"lr": 0.0001,
"x_shape": [2, 256],
"n_shot": 3,
"n_query": 2,
"train_k_factor": 3,
"val_k_factor": 2,
"test_k_factor": 2,
"torch_default_dtype": "torch.float32",
"n_epoch": 50,
"patience": 3,
"criteria_for_best": "target_accuracy",
"x_net": [
{"class": "nnReshape", "kargs": {"shape": [-1, 1, 2, 256]}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 1,
"out_channels": 256,
"kernel_size": [1, 7],
"bias": False,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 256}},
{
"class": "Conv2d",
"kargs": {
"in_channels": 256,
"out_channels": 80,
"kernel_size": [2, 7],
"bias": True,
"padding": [0, 3],
},
},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm2d", "kargs": {"num_features": 80}},
{"class": "Flatten", "kargs": {}},
{"class": "Linear", "kargs": {"in_features": 20480, "out_features": 256}},
{"class": "ReLU", "kargs": {"inplace": True}},
{"class": "BatchNorm1d", "kargs": {"num_features": 256}},
{"class": "Linear", "kargs": {"in_features": 256, "out_features": 256}},
],
"NUM_LOGS_PER_EPOCH": 10,
"BEST_MODEL_PATH": "./best_model.pth",
"n_way": 16,
"datasets": [
{
"labels": [
"1-10.",
"1-11.",
"1-15.",
"1-16.",
"1-17.",
"1-18.",
"1-19.",
"10-4.",
"10-7.",
"11-1.",
"11-14.",
"11-17.",
"11-20.",
"11-7.",
"13-20.",
"13-8.",
"14-10.",
"14-11.",
"14-14.",
"14-7.",
"15-1.",
"15-20.",
"16-1.",
"16-16.",
"17-10.",
"17-11.",
"17-2.",
"19-1.",
"19-16.",
"19-19.",
"19-20.",
"19-3.",
"2-10.",
"2-11.",
"2-17.",
"2-18.",
"2-20.",
"2-3.",
"2-4.",
"2-5.",
"2-6.",
"2-7.",
"2-8.",
"3-13.",
"3-18.",
"3-3.",
"4-1.",
"4-10.",
"4-11.",
"4-19.",
"5-5.",
"6-15.",
"7-10.",
"7-14.",
"8-18.",
"8-20.",
"8-3.",
"8-8.",
],
"domains": [1, 2, 3, 4, 5],
"num_examples_per_domain_per_label": -1,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/cores.stratified_ds.2022A.pkl",
"source_or_target_dataset": "source",
"x_transforms": [
"unit_mag",
"jitter_256_1",
"lowpass_+/-10MHz",
"take_200",
],
"episode_transforms": [],
"domain_prefix": "C_",
},
{
"labels": [
"3123D52",
"3123D65",
"3123D79",
"3123D80",
"3123D54",
"3123D70",
"3123D7B",
"3123D89",
"3123D58",
"3123D76",
"3123D7D",
"3123EFE",
"3123D64",
"3123D78",
"3123D7E",
"3124E4A",
],
"domains": [32, 38, 8, 44, 14, 50, 20, 26],
"num_examples_per_domain_per_label": 2000,
"pickle_path": "/mnt/wd500GB/CSC500/csc500-main/datasets/oracle.Run1_framed_2000Examples_stratified_ds.2022A.pkl",
"source_or_target_dataset": "target",
"x_transforms": [
"unit_mag",
"jitter_256_1",
"take_200",
"resample_20Msps_to_25Msps",
],
"episode_transforms": [],
"domain_prefix": "O_",
},
],
"seed": 154325,
"dataset_seed": 154325,
}
# Set this to True if you want to run this template directly
STANDALONE = False
if STANDALONE:
print("parameters not injected, running with standalone_parameters")
parameters = standalone_parameters
if not 'parameters' in locals() and not 'parameters' in globals():
raise Exception("Parameter injection failed")
#Use an easy dict for all the parameters
p = EasyDict(parameters)
if "x_shape" not in p:
p.x_shape = [2,256] # Default to this if we don't supply x_shape
supplied_keys = set(p.keys())
if supplied_keys != required_parameters:
print("Parameters are incorrect")
if len(supplied_keys - required_parameters)>0: print("Shouldn't have:", str(supplied_keys - required_parameters))
if len(required_parameters - supplied_keys)>0: print("Need to have:", str(required_parameters - supplied_keys))
raise RuntimeError("Parameters are incorrect")
###################################
# Set the RNGs and make it all deterministic
###################################
np.random.seed(p.seed)
random.seed(p.seed)
torch.manual_seed(p.seed)
torch.use_deterministic_algorithms(True)
###########################################
# The stratified datasets honor this
###########################################
torch.set_default_dtype(eval(p.torch_default_dtype))
###################################
# Build the network(s)
# Note: It's critical to do this AFTER setting the RNG
###################################
x_net = build_sequential(p.x_net)
start_time_secs = time.time()
p.domains_source = []
p.domains_target = []
train_original_source = []
val_original_source = []
test_original_source = []
train_original_target = []
val_original_target = []
test_original_target = []
# global_x_transform_func = lambda x: normalize(x.to(torch.get_default_dtype()), "unit_power") # unit_power, unit_mag
# global_x_transform_func = lambda x: normalize(x, "unit_power") # unit_power, unit_mag
def add_dataset(
labels,
domains,
pickle_path,
x_transforms,
episode_transforms,
domain_prefix,
num_examples_per_domain_per_label,
source_or_target_dataset:str,
iterator_seed=p.seed,
dataset_seed=p.dataset_seed,
n_shot=p.n_shot,
n_way=p.n_way,
n_query=p.n_query,
train_val_test_k_factors=(p.train_k_factor,p.val_k_factor,p.test_k_factor),
):
if x_transforms == []: x_transform = None
else: x_transform = get_chained_transform(x_transforms)
if episode_transforms == []: episode_transform = None
else: raise Exception("episode_transforms not implemented")
episode_transform = lambda tup, _prefix=domain_prefix: (_prefix + str(tup[0]), tup[1])
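    # Note: the lambda above intentionally overrides the earlier episode_transform assignment;
    # it prefixes each episode's domain (e.g. "C_1", "O_32") so source and target domains stay
    # distinguishable after the datasets are aggregated.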
eaf = Episodic_Accessor_Factory(
labels=labels,
domains=domains,
num_examples_per_domain_per_label=num_examples_per_domain_per_label,
iterator_seed=iterator_seed,
dataset_seed=dataset_seed,
n_shot=n_shot,
n_way=n_way,
n_query=n_query,
train_val_test_k_factors=train_val_test_k_factors,
pickle_path=pickle_path,
x_transform_func=x_transform,
)
train, val, test = eaf.get_train(), eaf.get_val(), eaf.get_test()
train = Lazy_Iterable_Wrapper(train, episode_transform)
val = Lazy_Iterable_Wrapper(val, episode_transform)
test = Lazy_Iterable_Wrapper(test, episode_transform)
if source_or_target_dataset=="source":
train_original_source.append(train)
val_original_source.append(val)
test_original_source.append(test)
p.domains_source.extend(
[domain_prefix + str(u) for u in domains]
)
elif source_or_target_dataset=="target":
train_original_target.append(train)
val_original_target.append(val)
test_original_target.append(test)
p.domains_target.extend(
[domain_prefix + str(u) for u in domains]
)
else:
raise Exception(f"invalid source_or_target_dataset: {source_or_target_dataset}")
for ds in p.datasets:
add_dataset(**ds)
# from steves_utils.CORES.utils import (
# ALL_NODES,
# ALL_NODES_MINIMUM_1000_EXAMPLES,
# ALL_DAYS
# )
# add_dataset(
# labels=ALL_NODES,
# domains = ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "cores.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"cores_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle1_{u}"
# )
# from steves_utils.ORACLE.utils_v2 import (
# ALL_DISTANCES_FEET,
# ALL_RUNS,
# ALL_SERIAL_NUMBERS,
# )
# add_dataset(
# labels=ALL_SERIAL_NUMBERS,
# domains = list(set(ALL_DISTANCES_FEET) - {2,62,56}),
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "oracle.Run2_framed_2000Examples_stratified_ds.2022A.pkl"),
# source_or_target_dataset="source",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"oracle2_{u}"
# )
# add_dataset(
# labels=list(range(19)),
# domains = [0,1,2],
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "metehan.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"met_{u}"
# )
# # from steves_utils.wisig.utils import (
# # ALL_NODES_MINIMUM_100_EXAMPLES,
# # ALL_NODES_MINIMUM_500_EXAMPLES,
# # ALL_NODES_MINIMUM_1000_EXAMPLES,
# # ALL_DAYS
# # )
# import steves_utils.wisig.utils as wisig
# add_dataset(
# labels=wisig.ALL_NODES_MINIMUM_100_EXAMPLES,
# domains = wisig.ALL_DAYS,
# num_examples_per_domain_per_label=100,
# pickle_path=os.path.join(get_datasets_base_path(), "wisig.node3-19.stratified_ds.2022A.pkl"),
# source_or_target_dataset="target",
# x_transform_func=global_x_transform_func,
# domain_modifier=lambda u: f"wisig_{u}"
# )
###################################
# Build the dataset
###################################
train_original_source = Iterable_Aggregator(train_original_source, p.seed)
val_original_source = Iterable_Aggregator(val_original_source, p.seed)
test_original_source = Iterable_Aggregator(test_original_source, p.seed)
train_original_target = Iterable_Aggregator(train_original_target, p.seed)
val_original_target = Iterable_Aggregator(val_original_target, p.seed)
test_original_target = Iterable_Aggregator(test_original_target, p.seed)
# We only use the episodes themselves (not the domain labels), and we only train on the source datasets.
# Properly form the data using a transform lambda and Lazy_Iterable_Wrapper.
transform_lambda = lambda ex: ex[1] # Original is (<domain>, <episode>) so we strip down to episode only
train_processed_source = Lazy_Iterable_Wrapper(train_original_source, transform_lambda)
val_processed_source = Lazy_Iterable_Wrapper(val_original_source, transform_lambda)
test_processed_source = Lazy_Iterable_Wrapper(test_original_source, transform_lambda)
train_processed_target = Lazy_Iterable_Wrapper(train_original_target, transform_lambda)
val_processed_target = Lazy_Iterable_Wrapper(val_original_target, transform_lambda)
test_processed_target = Lazy_Iterable_Wrapper(test_original_target, transform_lambda)
datasets = EasyDict({
"source": {
"original": {"train":train_original_source, "val":val_original_source, "test":test_original_source},
"processed": {"train":train_processed_source, "val":val_processed_source, "test":test_processed_source}
},
"target": {
"original": {"train":train_original_target, "val":val_original_target, "test":test_original_target},
"processed": {"train":train_processed_target, "val":val_processed_target, "test":test_processed_target}
},
})
from steves_utils.transforms import get_average_magnitude, get_average_power
print(set([u for u,_ in val_original_source]))
print(set([u for u,_ in val_original_target]))
s_x, s_y, q_x, q_y, _ = next(iter(train_processed_source))
print(s_x)
# for ds in [
# train_processed_source,
# val_processed_source,
# test_processed_source,
# train_processed_target,
# val_processed_target,
# test_processed_target
# ]:
# for s_x, s_y, q_x, q_y, _ in ds:
# for X in (s_x, q_x):
# for x in X:
# assert np.isclose(get_average_magnitude(x.numpy()), 1.0)
# assert np.isclose(get_average_power(x.numpy()), 1.0)
###################################
# Build the model
###################################
# easyfsl only wants a tuple for the shape
model = Steves_Prototypical_Network(x_net, device=p.device, x_shape=tuple(p.x_shape))
optimizer = Adam(params=model.parameters(), lr=p.lr)
###################################
# train
###################################
jig = PTN_Train_Eval_Test_Jig(model, p.BEST_MODEL_PATH, p.device)
jig.train(
train_iterable=datasets.source.processed.train,
source_val_iterable=datasets.source.processed.val,
target_val_iterable=datasets.target.processed.val,
num_epochs=p.n_epoch,
num_logs_per_epoch=p.NUM_LOGS_PER_EPOCH,
patience=p.patience,
optimizer=optimizer,
criteria_for_best=p.criteria_for_best,
)
total_experiment_time_secs = time.time() - start_time_secs
###################################
# Evaluate the model
###################################
source_test_label_accuracy, source_test_label_loss = jig.test(datasets.source.processed.test)
target_test_label_accuracy, target_test_label_loss = jig.test(datasets.target.processed.test)
source_val_label_accuracy, source_val_label_loss = jig.test(datasets.source.processed.val)
target_val_label_accuracy, target_val_label_loss = jig.test(datasets.target.processed.val)
history = jig.get_history()
total_epochs_trained = len(history["epoch_indices"])
val_dl = Iterable_Aggregator((datasets.source.original.val,datasets.target.original.val))
confusion = ptn_confusion_by_domain_over_dataloader(model, p.device, val_dl)
per_domain_accuracy = per_domain_accuracy_from_confusion(confusion)
# Add a key to per_domain_accuracy for if it was a source domain
for domain, accuracy in per_domain_accuracy.items():
per_domain_accuracy[domain] = {
"accuracy": accuracy,
"source?": domain in p.domains_source
}
# Do an independent accuracy assessment JUST TO BE SURE!
# _source_test_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.test, p.device)
# _target_test_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.test, p.device)
# _source_val_label_accuracy = independent_accuracy_assesment(model, datasets.source.processed.val, p.device)
# _target_val_label_accuracy = independent_accuracy_assesment(model, datasets.target.processed.val, p.device)
# assert(_source_test_label_accuracy == source_test_label_accuracy)
# assert(_target_test_label_accuracy == target_test_label_accuracy)
# assert(_source_val_label_accuracy == source_val_label_accuracy)
# assert(_target_val_label_accuracy == target_val_label_accuracy)
experiment = {
"experiment_name": p.experiment_name,
"parameters": dict(p),
"results": {
"source_test_label_accuracy": source_test_label_accuracy,
"source_test_label_loss": source_test_label_loss,
"target_test_label_accuracy": target_test_label_accuracy,
"target_test_label_loss": target_test_label_loss,
"source_val_label_accuracy": source_val_label_accuracy,
"source_val_label_loss": source_val_label_loss,
"target_val_label_accuracy": target_val_label_accuracy,
"target_val_label_loss": target_val_label_loss,
"total_epochs_trained": total_epochs_trained,
"total_experiment_time_secs": total_experiment_time_secs,
"confusion": confusion,
"per_domain_accuracy": per_domain_accuracy,
},
"history": history,
"dataset_metrics": get_dataset_metrics(datasets, "ptn"),
}
ax = get_loss_curve(experiment)
plt.show()
get_results_table(experiment)
get_domain_accuracies(experiment)
print("Source Test Label Accuracy:", experiment["results"]["source_test_label_accuracy"], "Target Test Label Accuracy:", experiment["results"]["target_test_label_accuracy"])
print("Source Val Label Accuracy:", experiment["results"]["source_val_label_accuracy"], "Target Val Label Accuracy:", experiment["results"]["target_val_label_accuracy"])
json.dumps(experiment)
###Output
_____no_output_____ |
study_roadmaps/1_getting_started_roadmap/9_custom_network_builder/1) Create a simple custom network while debugging it.ipynb | ###Markdown
Goals Learn how to create custom network Table of Contents [0. Install](0) [1. Load Data](1) [2. Create and debug network](2) [3. Train](3) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version)
###Code
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# Select the requirements file as per OS and CUDA version
!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Dataset - Stanford Dogs classification dataset - https://www.kaggle.com/jessicali9530/stanford-dogs-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM" -O dogs-species-dataset.zip && rm -rf /tmp/cookies.txt
! unzip -qq dogs-species-dataset.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load data
###Code
gtf = prototype(verbose=1);
gtf.Prototype("project", "basic_custom_model");
###Output
Mxnet Version: 1.5.0
Experiment Details
Project: project
Experiment: basic_custom_model
Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.3_roadmaps/1_getting_started_roadmap/9_custom_network_builder/workspace/project/basic_custom_model/
###Markdown
Set Data params
###Code
gtf.Dataset_Params(dataset_path="dogs-species-dataset/train",
split=0.9,
input_size=224,
batch_size=2,
shuffle_data=True,
num_processors=3);
###Output
Dataset Details
Train path: dogs-species-dataset/train
Val path: None
CSV train path: None
CSV val path: None
Dataset Params
Input Size: 224
Batch Size: 2
Data Shuffle: True
Processors: 3
Train-val split: 0.9
###Markdown
Apply Transforms
###Code
gtf.apply_random_horizontal_flip(train=True, val=True);
gtf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
gtf.Dataset();
###Output
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 18522
Num val images: 2058
Num classes: 120
###Markdown
Create custom model with simultaneous debugging
###Code
network = [];
network.append(gtf.convolution(output_channels=16));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=32));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.flatten());
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=1024));
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=gtf.system_dict["dataset"]["params"]["num_classes"]));
gtf.debug_custom_model_design(network);
###Output
_____no_output_____
###Markdown
Create and setup model
###Code
gtf.Compile_Network(network, data_shape=(3, 224, 224));
###Output
Model Details
Loading pretrained model
Model Loaded on device
Model name: Custom Model
Num of potentially trainable layers: 16
Num of actual trainable layers: 16
###Markdown
Visualize with netron
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8081);
from IPython.display import Image
Image(filename='imgs/basic_custom_network.png')
###Output
_____no_output_____
###Markdown
Set Training params
###Code
gtf.Training_Params(num_epochs=5,
display_progress=True,
display_progress_realtime=True,
save_intermediate_models=False,
save_training_logs=True);
## Set Optimizer, losses and learning rate schedulers
gtf.optimizer_sgd(0.0001);
gtf.lr_fixed();
gtf.loss_softmax_crossentropy()
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____
###Markdown
Goals Learn how to create custom network Table of Contents [Install](0) [Load Data](1) [Create and debug network](2) [Train](3) Install Monk Using pip (Recommended) - colab (gpu) - All backends: `pip install -U monk-colab` - kaggle (gpu) - All backends: `pip install -U monk-kaggle` - cuda 10.2 - All backends: `pip install -U monk-cuda102` - Gluon backend: `pip install -U monk-gluon-cuda102` - Pytorch backend: `pip install -U monk-pytorch-cuda102` - Keras backend: `pip install -U monk-keras-cuda102` - cuda 10.1 - All backends: `pip install -U monk-cuda101` - Gluon backend: `pip install -U monk-gluon-cuda101` - Pytorch backend: `pip install -U monk-pytorch-cuda101` - Keras backend: `pip install -U monk-keras-cuda101` - cuda 10.0 - All backends: `pip install -U monk-cuda100` - Gluon backend: `pip install -U monk-gluon-cuda100` - Pytorch backend: `pip install -U monk-pytorch-cuda100` - Keras backend: `pip install -U monk-keras-cuda100` - cuda 9.2 - All backends: `pip install -U monk-cuda92` - Gluon backend: `pip install -U monk-gluon-cuda92` - Pytorch backend: `pip install -U monk-pytorch-cuda92` - Keras backend: `pip install -U monk-keras-cuda92` - cuda 9.0 - All backends: `pip install -U monk-cuda90` - Gluon backend: `pip install -U monk-gluon-cuda90` - Pytorch backend: `pip install -U monk-pytorch-cuda90` - Keras backend: `pip install -U monk-keras-cuda90` - cpu - All backends: `pip install -U monk-cpu` - Gluon backend: `pip install -U monk-gluon-cpu` - Pytorch backend: `pip install -U monk-pytorch-cpu` - Keras backend: `pip install -U monk-keras-cpu` Install Monk Manually (Not recommended) Step 1: Clone the library - git clone https://github.com/Tessellate-Imaging/monk_v1.git Step 2: Install requirements - Linux - Cuda 9.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt` - Cuda 9.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt` - Cuda 10.0 - `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt` - Cuda 10.1 - `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt` - Cuda 10.2 - `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt` - Windows - Cuda 9.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt` - Cuda 9.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt` - Cuda 10.0 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt` - Cuda 10.1 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt` - Cuda 10.2 (Experimental support) - `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt` - CPU (Non gpu system) - `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt` - Mac - CPU (Non gpu system) - `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt` - Misc - Colab (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt` - Kaggle (GPU) - `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt` Step 3: Add to system path (Required for every terminal or kernel run) - `import sys` - `sys.path.append("monk_v1/");` Dataset - Stanford Dogs classification dataset - https://www.kaggle.com/jessicali9530/stanford-dogs-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM" -O dogs-species-dataset.zip && rm -rf /tmp/cookies.txt
! unzip -qq dogs-species-dataset.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
#Using mxnet-gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load data
###Code
gtf = prototype(verbose=1);
gtf.Prototype("project", "basic_custom_model");
###Output
Mxnet Version: 1.5.1
Experiment Details
Project: project
Experiment: basic_custom_model
Dir: /home/ubuntu/Desktop/monk_pip_test/monk_v1/study_roadmaps/1_getting_started_roadmap/9_custom_network_builder/workspace/project/basic_custom_model/
###Markdown
Set Data params
###Code
gtf.Dataset_Params(dataset_path="dogs-species-dataset/train",
split=0.9,
input_size=224,
batch_size=2,
shuffle_data=True,
num_processors=3);
###Output
Dataset Details
Train path: dogs-species-dataset/train
Val path: None
CSV train path: None
CSV val path: None
Label Type: single
Dataset Params
Input Size: 224
Batch Size: 2
Data Shuffle: True
Processors: 3
Train-val split: 0.9
###Markdown
Apply Transforms
###Code
gtf.apply_random_horizontal_flip(train=True, val=True);
gtf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
gtf.Dataset();
###Output
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 18522
Num val images: 2058
Num classes: 120
###Markdown
Create custom model with simultaneous debugging
###Code
network = [];
network.append(gtf.convolution(output_channels=16));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=32));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.flatten());
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=1024));
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=gtf.system_dict["dataset"]["params"]["num_classes"]));
gtf.debug_custom_model_design(network);
###Output
_____no_output_____
###Markdown
Create and setup model
###Code
gtf.Compile_Network(network, data_shape=(3, 224, 224));
###Output
Model Details
Loading pretrained model
Model Loaded on device
Model name: Custom Model
Num of potentially trainable layers: 14
Num of actual trainable layers: 14
###Markdown
Visualize with netron
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8081);
from IPython.display import Image
Image(filename='imgs/basic_custom_network.png')
###Output
_____no_output_____
###Markdown
Set Training params
###Code
gtf.Training_Params(num_epochs=5,
display_progress=True,
display_progress_realtime=True,
save_intermediate_models=False,
save_training_logs=True);
## Set Optimizer, losses and learning rate schedulers
gtf.optimizer_sgd(0.0001);
gtf.lr_fixed();
gtf.loss_softmax_crossentropy()
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____
###Markdown
Goals Learn how to create custom network Table of Contents [0. Install](0) [1. Load Data](1) [2. Create and debug network](2) [3. Train](3) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version)
###Code
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# If using Colab, install using the commands below
!cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
#!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
#!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Dataset - Stanford Dogs classification dataset - https://www.kaggle.com/jessicali9530/stanford-dogs-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM" -O dogs-species-dataset.zip && rm -rf /tmp/cookies.txt
! unzip -qq dogs-species-dataset.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load data
###Code
gtf = prototype(verbose=1);
gtf.Prototype("project", "basic_custom_model");
###Output
Mxnet Version: 1.5.0
Experiment Details
Project: project
Experiment: basic_custom_model
Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.3_roadmaps/1_getting_started_roadmap/9_custom_network_builder/workspace/project/basic_custom_model/
###Markdown
Set Data params
###Code
gtf.Dataset_Params(dataset_path="dogs-species-dataset/train",
split=0.9,
input_size=224,
batch_size=2,
shuffle_data=True,
num_processors=3);
###Output
Dataset Details
Train path: dogs-species-dataset/train
Val path: None
CSV train path: None
CSV val path: None
Dataset Params
Input Size: 224
Batch Size: 2
Data Shuffle: True
Processors: 3
Train-val split: 0.9
###Markdown
Apply Transforms
###Code
gtf.apply_random_horizontal_flip(train=True, val=True);
gtf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
gtf.Dataset();
###Output
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 18522
Num val images: 2058
Num classes: 120
###Markdown
Create custom model with simultaneous debugging
###Code
network = [];
network.append(gtf.convolution(output_channels=16));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=32));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.flatten());
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=1024));
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=gtf.system_dict["dataset"]["params"]["num_classes"]));
gtf.debug_custom_model_design(network);
###Output
_____no_output_____
###Markdown
Create and setup model
###Code
gtf.Compile_Network(network, data_shape=(3, 224, 224));
###Output
Model Details
Loading pretrained model
Model Loaded on device
Model name: Custom Model
Num of potentially trainable layers: 16
Num of actual trainable layers: 16
###Markdown
Visualize with netron
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8081);
from IPython.display import Image
Image(filename='imgs/basic_custom_network.png')
###Output
_____no_output_____
###Markdown
Set Training params
###Code
gtf.Training_Params(num_epochs=5,
display_progress=True,
display_progress_realtime=True,
save_intermediate_models=False,
save_training_logs=True);
## Set Optimizer, losses and learning rate schedulers
gtf.optimizer_sgd(0.0001);
gtf.lr_fixed();
gtf.loss_softmax_crossentropy()
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____
###Markdown
Goals Learn how to create custom network Table of Contents [0. Install](0) [1. Load Data](1) [2. Create and debug network](2) [3. Train](3) Install Monk - git clone https://github.com/Tessellate-Imaging/monk_v1.git - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt - (Select the requirements file as per OS and CUDA version)
###Code
!git clone https://github.com/Tessellate-Imaging/monk_v1.git
# Select the requirements file as per OS and CUDA version
!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
###Output
_____no_output_____
###Markdown
Dataset - Stanford Dogs classification dataset - https://www.kaggle.com/jessicali9530/stanford-dogs-dataset
###Code
! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1b4tC_Pl1O80of7U-PJ7VExmszzSX3ZEM" -O dogs-species-dataset.zip && rm -rf /tmp/cookies.txt
! unzip -qq dogs-species-dataset.zip
###Output
_____no_output_____
###Markdown
Imports
###Code
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
###Output
_____no_output_____
###Markdown
Load data
###Code
gtf = prototype(verbose=1);
gtf.Prototype("project", "basic_custom_model");
###Output
Mxnet Version: 1.5.0
Experiment Details
Project: project
Experiment: basic_custom_model
Dir: /home/abhi/Desktop/Work/tess_tool/gui/v0.3/finetune_models/Organization/development/v5.3_roadmaps/1_getting_started_roadmap/9_custom_network_builder/workspace/project/basic_custom_model/
###Markdown
Set Data params
###Code
gtf.Dataset_Params(dataset_path="dogs-species-dataset/train",
split=0.9,
input_size=224,
batch_size=2,
shuffle_data=True,
num_processors=3);
###Output
Dataset Details
Train path: dogs-species-dataset/train
Val path: None
CSV train path: None
CSV val path: None
Dataset Params
Input Size: 224
Batch Size: 2
Data Shuffle: True
Processors: 3
Train-val split: 0.9
###Markdown
Apply Transforms
###Code
gtf.apply_random_horizontal_flip(train=True, val=True);
gtf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
###Output
_____no_output_____
###Markdown
Load Dataset
###Code
gtf.Dataset();
###Output
Pre-Composed Train Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Pre-Composed Val Transforms
[{'RandomHorizontalFlip': {'p': 0.5}}, {'Normalize': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}}]
Dataset Numbers
Num train images: 18522
Num val images: 2058
Num classes: 120
###Markdown
Create custom model with simultaneous debugging
###Code
network = [];
network.append(gtf.convolution(output_channels=16));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=32));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=64));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.convolution(output_channels=128));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
network.append(gtf.average_pooling(kernel_size=2));
gtf.debug_custom_model_design(network);
network.append(gtf.flatten());
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=1024));
network.append(gtf.dropout(drop_probability=0.2));
network.append(gtf.fully_connected(units=gtf.system_dict["dataset"]["params"]["num_classes"]));
gtf.debug_custom_model_design(network);
###Output
_____no_output_____
###Markdown
Create and setup model
###Code
gtf.Compile_Network(network, data_shape=(3, 224, 224));
###Output
Model Details
Loading pretrained model
Model Loaded on device
Model name: Custom Model
Num of potentially trainable layers: 16
Num of actual trainable layers: 16
###Markdown
Visualize with netron
###Code
gtf.Visualize_With_Netron(data_shape=(3, 224, 224), port=8081);
from IPython.display import Image
Image(filename='imgs/basic_custom_network.png')
###Output
_____no_output_____
###Markdown
Set Training params
###Code
gtf.Training_Params(num_epochs=5,
display_progress=True,
display_progress_realtime=True,
save_intermediate_models=False,
save_training_logs=True);
## Set Optimizer, losses and learning rate schedulers
gtf.optimizer_sgd(0.0001);
gtf.lr_fixed();
gtf.loss_softmax_crossentropy()
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
###Output
_____no_output_____ |
rpi-deeplearning/ipynb/demo.ipynb | ###Markdown
Example taken from [https://www.tensorflow.org/tutorials/keras/basic_classification](https://www.tensorflow.org/tutorials/keras/basic_classification).
###Code
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
train_images.shape
len(train_labels)
train_labels
test_images.shape
len(test_labels)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation=tf.nn.relu),
keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=5)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
predictions = model.predict(test_images)
predictions[0]
np.argmax(predictions[0])
test_labels[0]
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array[i], true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = (np.expand_dims(img,0))
print(img.shape)
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
plt.xticks(range(10), class_names, rotation=45)
plt.show()
prediction_result = np.argmax(predictions_single[0])
print(prediction_result)
###Output
9
|
ipynb/Germany-Brandenburg-SK-Cottbus.ipynb | ###Markdown
Germany: SK Cottbus (Brandenburg)* Homepage of project: https://oscovida.github.io* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Cottbus");
# load the data
cases, deaths, region_label = germany_get_region(landkreis="SK Cottbus")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: SK Cottbus (Brandenburg)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Cottbus", weeks=5);
overview(country="Germany", subregion="SK Cottbus");
compare_plot(country="Germany", subregion="SK Cottbus", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="SK Cottbus")
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 500 rows
pd.set_option("max_rows", 500)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____
###Markdown
Germany: SK Cottbus (Brandenburg)* Homepage of project: https://oscovida.github.io* Plots are explained at http://oscovida.github.io/plots.html* [Execute this Jupyter Notebook using myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)
###Code
import datetime
import time
start = datetime.datetime.now()
print(f"Notebook executed on: {start.strftime('%d/%m/%Y %H:%M:%S%Z')} {time.tzname[time.daylight]}")
%config InlineBackend.figure_formats = ['svg']
from oscovida import *
overview(country="Germany", subregion="SK Cottbus", weeks=5);
overview(country="Germany", subregion="SK Cottbus");
compare_plot(country="Germany", subregion="SK Cottbus", dates="2020-03-15:");
# load the data
cases, deaths = germany_get_region(landkreis="SK Cottbus")
# get population of the region for future normalisation:
inhabitants = population(country="Germany", subregion="SK Cottbus")
print(f'Population of country="Germany", subregion="SK Cottbus": {inhabitants} people')
# compose into one table
table = compose_dataframe_summary(cases, deaths)
# show tables with up to 1000 rows
pd.set_option("max_rows", 1000)
# display the table
table
###Output
_____no_output_____
###Markdown
Explore the data in your web browser- If you want to execute this notebook, [click here to use myBinder](https://mybinder.org/v2/gh/oscovida/binder/master?filepath=ipynb/Germany-Brandenburg-SK-Cottbus.ipynb)- and wait (~1 to 2 minutes)- Then press SHIFT+RETURN to advance code cell to code cell- See http://jupyter.org for more details on how to use Jupyter Notebook Acknowledgements:- Johns Hopkins University provides data for countries- Robert Koch Institute provides data for within Germany- Atlo Team for gathering and providing data from Hungary (https://atlo.team/koronamonitor/)- Open source and scientific computing community for the data tools- Github for hosting repository and html files- Project Jupyter for the Notebook and binder service- The H2020 project Photon and Neutron Open Science Cloud ([PaNOSC](https://www.panosc.eu/))--------------------
###Code
print(f"Download of data from Johns Hopkins university: cases at {fetch_cases_last_execution()} and "
f"deaths at {fetch_deaths_last_execution()}.")
# to force a fresh download of data, run "clear_cache()"
print(f"Notebook execution took: {datetime.datetime.now()-start}")
###Output
_____no_output_____ |
4. Time Series/Coursera/Exam Prep/05_forecasting_with_rnn.ipynb | ###Markdown
Forecasting with an RNN Setup
###Code
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
plt.plot(time[start:end], series[start:end], format, label=label)
plt.xlabel("Time")
plt.ylabel("Value")
if label:
plt.legend(fontsize=14)
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.4,
np.cos(season_time * 2 * np.pi),
1 / np.exp(3 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def white_noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
def window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
dataset = tf.data.Dataset.from_tensor_slices(series)
dataset = dataset.window(window_size + 1, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(window_size + 1))
dataset = dataset.shuffle(shuffle_buffer)
dataset = dataset.map(lambda window: (window[:-1], window[-1]))
dataset = dataset.batch(batch_size).prefetch(1)
return dataset
def model_forecast(model, series, window_size):
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size))
ds = ds.batch(32).prefetch(1)
forecast = model.predict(ds)
return forecast
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
###Output
_____no_output_____
###Markdown
Simple RNN Forecasting
###Code
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128) # batches of 128 windows, each 30 time steps long
model = keras.models.Sequential([
  keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), # add a channel dimension so inputs are 3D: https://www.tensorflow.org/api_docs/python/tf/expand_dims
                      input_shape=[None]), # sequences of any length are accepted
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100), # this is sequence to vector RNN
keras.layers.Dense(1),
  keras.layers.Lambda(lambda x: x * 200.0) # scale outputs up to the magnitude of the series, so the RNN can keep its internal values small
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule]) # note validation data is not used here b/c of learning rate finder
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = window_dataset(x_train, window_size, batch_size=128)
valid_set = window_dataset(x_valid, window_size, batch_size=128)
model = keras.models.Sequential([
keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1),
input_shape=[None]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.SimpleRNN(100),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1.5e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=50) # stop when the validation loss has not improved for 50 epochs
model_checkpoint = keras.callbacks.ModelCheckpoint( # save the model every time the validation loss improves
"my_checkpoint", save_best_only=True)
model.fit(train_set, epochs=500,
validation_data=valid_set, # notice two call backs here
callbacks=[early_stopping, model_checkpoint])
model = keras.models.load_model("my_checkpoint") # reload the checkpoint with the best validation performance
rnn_forecast = model_forecast(
model,
series[split_time - window_size:-1],
window_size)[:, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# mean absolute error of the forecast over the validation period
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____
###Markdown
Sequence-to-Sequence Forecasting
###Code
def seq2seq_window_dataset(series, window_size, batch_size=32,
shuffle_buffer=1000):
series = tf.expand_dims(series, axis=-1)
ds = tf.data.Dataset.from_tensor_slices(series)
ds = ds.window(window_size + 1, shift=1, drop_remainder=True)
ds = ds.flat_map(lambda w: w.batch(window_size + 1))
ds = ds.shuffle(shuffle_buffer)
    ds = ds.map(lambda w: (w[:-1], w[1:])) # inputs: the window minus its last value; targets: the window shifted one step ahead
return ds.batch(batch_size).prefetch(1)
for X_batch, Y_batch in seq2seq_window_dataset(tf.range(10), 3,
batch_size=1):
print("X:", X_batch.numpy())
print("Y:", Y_batch.numpy()) # labels y are sequence too with one shift.
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
                                   batch_size=128)
model = keras.models.Sequential([
  keras.layers.SimpleRNN(100, return_sequences=True, # no Lambda layer needed: the dataset already adds the channel dimension
                         input_shape=[None, 1]),
  keras.layers.SimpleRNN(100, return_sequences=True), # sequence-to-sequence: the RNN returns an output at every time step
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-7 * 10**(epoch / 30))
optimizer = keras.optimizers.SGD(lr=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-7, 1e-4, 0, 30])
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
# note two sequences here .... train_set & valid_set
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.SimpleRNN(100, return_sequences=True,
input_shape=[None, 1]),
keras.layers.SimpleRNN(100, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200.0)
])
optimizer = keras.optimizers.SGD(lr=1e-6, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
early_stopping = keras.callbacks.EarlyStopping(patience=10) # you can add model check point here.
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping])
rnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
###Output
_____no_output_____ |
.ipynb_checkpoints/18_stop_by_acc-checkpoint.ipynb | ###Markdown
Create setup dicts1. Word Unit+Labels: `morph, token, multitok`1. Char Arch: `char_lstm, char_cnn, no_char`1. Word Embedding: `ft_tok, ft_yap, ft_tok_oov, ft_yap_oov, w2v_tok, w2v_yap, no_word_embed`3 x 3 x 7 = **63 dicts**
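As a quick illustration (not part of the original pipeline), the 63 setups come from the cross product of the three lists above; the cells below then build the actual dicts for a subset of these combinations.
###Code
# Illustration only: enumerate the 3 x 3 x 7 = 63 setup names described above
from itertools import product

units = ['morph', 'token', 'multitok']
char_archs = ['char_lstm', 'char_cnn', 'no_char']
word_embeds = ['ft_tok', 'ft_yap', 'ft_tok_oov', 'ft_yap_oov', 'w2v_tok', 'w2v_yap', 'no_word_embed']

setup_names = ['.'.join(combo) for combo in product(units, char_archs, word_embeds)]
print(len(setup_names))  # 63
###Output
_____no_output_____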
###Code
data_folder = '../NER/data/for_ncrf'
datasets = {
'morph': {
'_unit': 'morpheme',
'_scheme': 'bioes',
'train_dir': 'morph_gold_train.bmes',
'dev_dir': 'morph_gold_dev.bmes',
'test_dir': 'morph_gold_test.bmes',
},
'token': {
'_unit': 'token',
'_scheme': 'bioes',
'train_dir': 'token_gold_train_fix.bmes',
'dev_dir': 'token_gold_dev_fix.bmes',
'test_dir': 'token_gold_test_fix.bmes',
},
'multitok': {
'_unit': 'token',
'_scheme': 'concat_bioes',
'seg': False,
'train_dir': 'token_gold_train_concat.bmes',
'dev_dir': 'token_gold_dev_concat.bmes',
'test_dir': 'token_gold_test_concat.bmes',
},
}
###Output
_____no_output_____
###Markdown
Create PER-LOC-ORG only datasets
###Code
trans_map = {
'ANG': None,
'DUC': None,
'EVE': None,
'FAC': 'LOC',
'GPE': 'LOC',
'LOC': 'LOC',
'ORG': 'ORG',
'PER': 'PER',
'WOA': None,
}
import re
cat_re = re.compile('.*\-([^\^]+)\^?')
for n, ds in datasets.items():
for k in ds:
if ('train' in k or 'dev' in k or 'test' in k):
path = os.path.join(data_folder, ds[k])
new_path = os.path.join(data_folder, ds[k].split('.')[0]+'_plo.bmes')
print(path)
print(new_path)
with open(new_path, 'w') as of:
for line in open(path, 'r'):
line = line.split(' ')
word = line[0].strip()
if word!='':
tag = line[-1].strip()
tags = tag.split('^')
#cat = cat_re.search(tag)
new_tags = []
for t in tags:
if t=='O':
new_tags.append('O')
else:
try:
bio, cat = t.split('-')
except:
print(line)
raise ValueError
if trans_map[cat] is None:
new_tags.append('O')
else:
new_tags.append(bio+'-'+trans_map[cat])
new_tag = '^'.join(new_tags)
of.write(word+' '+new_tag+'\n')
#print(word, tag, new_tag)
else:
of.write('\n')
#print('\n')
data_folder = '../NER/data/for_ncrf'
new_datasets = {
'morph': {
'_unit': 'morpheme',
'_scheme': 'bioes',
'train_dir': 'morph_gold_train_plo.bmes',
'dev_dir': 'morph_gold_dev_plo.bmes',
'test_dir': 'morph_gold_test_plo.bmes',
},
'token': {
'_unit': 'token',
'_scheme': 'bioes',
'train_dir': 'token_gold_train_fix_plo.bmes',
'dev_dir': 'token_gold_dev_fix_plo.bmes',
'test_dir': 'token_gold_test_fix_plo.bmes',
},
'multitok': {
'_unit': 'token',
'_scheme': 'concat_bioes',
'seg': False,
'train_dir': 'token_gold_train_concat_plo.bmes',
'dev_dir': 'token_gold_dev_concat_plo.bmes',
'test_dir': 'token_gold_test_concat_plo.bmes',
},
}
default_grid = {
# FIXED
'word_seq_feature': 'LSTM',
'word_emb_dim': 300,
'char_emb_dim': 30,
'iteration': 200,
'bilstm': True,
'norm_word_emb': False,
'norm_char_emb': False,
'ave_batch_loss': False,
'use_crf': True,
'l2': 1e-8,
'lstm_layer': 2,
'batch_size': 8,
'number_normalized': True,
'optimizer': 'SGD',
'lr_decay': 0.05,
'momentum': 0,
'nbest': 1,
'hidden_dim': 200,
'dropout': 0.5,
}
dataset_grids = {
'multitok': {
'learning_rate': 0.005,
},
'morph': {
'learning_rate': 0.01,
},
'token': {
'learning_rate': 0.01,
},
}
arch_grids = {
'char_lstm': {
'char_seq_feature': 'LSTM',
'use_char': True,
'char_hidden_dim': 70,
},
'char_cnn': {
'char_seq_feature': 'CNN',
'use_char': True,
'char_hidden_dim': 70,
'char_kernel_size': 7,
},
'no_char': {
'use_char': False,
},
}
word_embedding_files = {
#'ft_yap': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.yap_form.fasttext_skipgram.model.vec.nofirstline',
#'ft_tok': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.tokenized.fasttext_skipgram.model.vec.nofirstline',
'ft_oov_yap': 'data/htb_all_words.wikipedia.alt_tok.yap_form.fasttext_skipgram.txt',
'ft_oov_tok': 'data/htb_all_words.wikipedia.alt_tok.tokenized.fasttext_skipgram.txt',
#'w2v_yap': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.yap_form.word2vec_skipgram.txt.nofirstline',
#'w2v_tok': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.tokenized.word2vec_skipgram.txt.nofirstline',
#'glv_yap': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.yap_form.glove.txt',
#'glv_tok': '../wordembedding-hebrew/vectors_alt_tok/wikipedia.alt_tok.tokenized.glove.txt',
#'no_word': None,
}
models_folder = 'final_setup/plo_models'
conf_folder = 'final_setup/plo_conf'
json_folder = 'final_setup/plo_conf_json'
logs_folder = 'final_setup/plo_logs'
seed_num_options = np.arange(44, 54)
seed_num_options
def create_conf_dict(model_base_name, dataset, arch, emb_name, seed_num):
full_conf_dict = {}
full_conf_dict['status'] = 'train'
full_conf_dict['model_dir'] = os.path.join(models_folder, model_base_name)
for k, v in new_datasets[dataset].items():
if not k.startswith('_'):
if k in ['train_dir', 'dev_dir', 'test_dir']:
full_conf_dict[k] = os.path.join(data_folder, v)
else:
full_conf_dict[k] = v
if not(emb_name == 'no_word' or word_embedding_files[emb_name] is None):
full_conf_dict['word_emb_dir'] = word_embedding_files[emb_name]
full_conf_dict.update(default_grid)
full_conf_dict.update(dataset_grids[dataset])
full_conf_dict.update(arch_grids[arch])
return full_conf_dict
ds_embeds = {'morph': ['ft_oov_yap', 'ft_oov_tok'],
'token': ['ft_oov_tok'],
'multitok': ['ft_oov_tok']}
ds_archs = {'morph': ['char_cnn'],
'token': ['char_cnn'],
'multitok': ['char_lstm'],}
confs = {}
for dataset in datasets:
for arch in ds_archs[dataset]:
for emb_name in ds_embeds[dataset]:
for seed_num in seed_num_options:
model_base_name = '.'.join([dataset, arch, emb_name, str(seed_num)+'_seed'])
confs[model_base_name] = create_conf_dict(model_base_name, dataset,
arch, emb_name, seed_num)
len(confs)
import pickle
pickle.dump(confs, open('final_setup/plo_confs.pkl', 'wb'))
###Output
_____no_output_____
###Markdown
Create conf files for setup dicts1. Random Seed: 10 different `(44, 45, 46...)`1. `morph.charlstm.ft_tok.44_seed.conf`1. `multitok.nochar.no_word_embed.47_seed.conf`63 * 10 = **630 conf files**
###Code
if not os.path.exists(models_folder):
os.mkdir(models_folder)
if not os.path.exists(conf_folder):
os.mkdir(conf_folder)
if not os.path.exists(json_folder):
os.mkdir(json_folder)
if not os.path.exists(logs_folder):
os.mkdir(logs_folder)
for name, conf in confs.items():
conf_path = os.path.join(conf_folder, name+'.conf')
with open(conf_path, 'w', encoding='utf8') as of:
for k, v in conf.items():
of.write(k+'='+str(v)+'\n')
json_path = os.path.join(json_folder, name+'.json')
with open(json_path, 'w') as of:
of.write(json.dumps(conf))
###Output
_____no_output_____
###Markdown
Create `main.X.py` filesOnly difference is `seed_num`: - `main.44.py` will have `seed_num = 44` Create `final_setup_run.py`1. seed_num match: runs `.conf` files with matching `main.X.py` file **only**.1. Choose device. 1. Choose conf prefix.1. Skip confs that are running or ran already (using `.dset` file)
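The runner script itself is not part of this notebook. A minimal sketch of what such a driver could look like is shown below; the `--config` flag, the seed/file-name parsing and the GPU handling are assumptions for illustration, not the actual `final_setup_run.py`.
###Code
# Illustrative sketch of a final_setup_run.py-style driver (assumptions, not the original script).
# It pairs every .conf file with the main.X.py that has the matching seed number and skips
# configurations whose .dset file already exists (i.e. a run already started or finished).
import os
import subprocess

conf_folder = 'final_setup/plo_conf'      # conf files written above
models_folder = 'final_setup/plo_models'  # model_dir / .dset files live here
conf_prefix = 'morph'                     # assumption: only run confs starting with this prefix
device = '0'                              # assumption: GPU id

for fname in sorted(os.listdir(conf_folder)):
    if not (fname.endswith('.conf') and fname.startswith(conf_prefix)):
        continue
    seed_num = fname.split('.')[-2].replace('_seed', '')        # e.g. '44' from '...44_seed.conf'
    main_script = 'main.{}.py'.format(seed_num)                  # matching main.X.py
    dset_path = os.path.join(models_folder, fname.replace('.conf', '.dset'))
    if os.path.exists(dset_path) or not os.path.exists(main_script):
        continue                                                  # already running/ran, or no matching main
    env = dict(os.environ, CUDA_VISIBLE_DEVICES=device)
    subprocess.run(['python', main_script, '--config', os.path.join(conf_folder, fname)], env=env)
###Output
_____no_output_____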
###Code
emb_options = list(word_embedding_files.keys())+[None]
emb_options
###Output
_____no_output_____
###Markdown
Read logs
###Code
import pickle
confs = pickle.load( open('final_setup/plo_confs.pkl', 'rb'))
import re
import os
DEV_RES_LINE = re.compile('Dev: .*; acc: (?P<acc>[^,]+)(?:, p: (?P<p>[^,]+), r: (?P<r>[^,]+), f: (?P<f>[-\d\.]+))?')
#Dev: time: 0.94s speed: 536.09st/s; acc: 0.9043
#Dev: time: 3.42s, speed: 146.59st/s; acc: 0.9546, p: 0.7577, r: 0.6393, f: 0.6935
mtimes = []
res = []
archs = []
for f in os.scandir(logs_folder):
if f.name.startswith('.ipy'):
continue
mtimes.append(os.path.getmtime(f.path))
model_base_name = '.'.join(f.name.split('.')[:-1])
model_no_seed = '.'.join(f.name.split('.')[:-2])
unit, arch, w_embed, seed_num = f.name.split('.')[:-1]
archs.append(arch)
matching_conf = confs[model_base_name]
params = { 'model_base_name': model_base_name, 'arch': arch,
'unit': unit, 'w_embed': w_embed, 'seed_num': seed_num,
'model_no_seed': model_no_seed,}
params.update(matching_conf)
with open(f.path, 'r') as fp:
i= 0
for line in fp:
m = DEV_RES_LINE.match(line)
if m:
r = m.groupdict().copy()
for k, v in r.items():
if v is not None:
r[k] = float(v)
r.update(params)
r['epoch'] = i
i+=1
res.append(r)
rdf = pd.DataFrame(res)
rdf['model_file_name'] = rdf.model_base_name + '.' + rdf.epoch.astype(str) + '.model'
rdf['dset_file_name'] = rdf.model_base_name +'.dset'
rdf['char_seq_feature'] = rdf.char_seq_feature.fillna('NoChar')
rdf['relevant_score'] = rdf.f.fillna(rdf.acc)
def get_embed_unit(s):
if 'yap' in s:
return 'morph'
elif 'tok' in s:
return 'token'
return 'na'
def get_clash_match(s):
if s.embed_unit=='na':
return 'na'
elif s.embed_unit==s.input_unit:
return 'Match'
else:
return 'Clash'
rdf['input_unit'] = rdf.unit.apply(lambda x: 'morph' if x=='morph' else 'token')
rdf['embed_unit'] = rdf.w_embed.apply(get_embed_unit)
rdf['embed_type'] = rdf.w_embed.str.replace('_tok|_yap', '')
rdf['cm'] = rdf.apply(get_clash_match, axis=1)
erdf = rdf[(rdf.groupby(['seed_num', 'arch', 'unit', 'w_embed']).relevant_score.transform(max)==rdf.relevant_score) ]
erdf = erdf[(erdf.groupby(['seed_num', 'arch', 'unit', 'w_embed']).epoch.transform(min)==erdf.epoch) ]
erdf.shape
erdf.groupby(['unit', 'arch', 'w_embed']).seed_num.nunique().unstack()
print ('Mean time per run:', round((max(mtimes) - min(mtimes) )/ len(mtimes) / 60, 2), 'minutes')
erdf.groupby(['unit', 'arch', 'embed_type', 'cm']).relevant_score.mean().unstack([-2,-1]).mul(100).round(2)
import numpy as np
def perc(n):
def perc_(x):
return np.percentile(x, n)
perc_.__name__ = 'perc_%s' % n
return perc_
erdf.groupby(['unit', 'char_seq_feature']).relevant_score.agg(['max', 'min', 'mean', 'std', 'median', perc(95)]).mul(100).round(2)
erdf.groupby(['unit', 'char_seq_feature']).relevant_score.agg(['max', 'min', 'mean', 'std', 'median', perc(95)]).mul(100).round(2)
erdf.to_pickle('final_setup/plo_erdf.pkl')
###Output
_____no_output_____
###Markdown
Decode
###Code
output_folder = 'final_setup/plo_decode_output'
decode_conf_folder = 'final_setup/plo_decode_conf'
if not os.path.exists(output_folder):
os.mkdir(output_folder)
if not os.path.exists(decode_conf_folder):
os.mkdir(decode_conf_folder)
decode_sets = {
'morph': {
'morph_dev_gold': '../NER/data/for_ncrf/morph_gold_dev_plo.bmes',
'morph_dev_yap': '../NER/data/for_ncrf/morph_yap_dev_dummy_o.bmes',
'morph_test_gold': '../NER/data/for_ncrf/morph_gold_test_plo.bmes',
'morph_test_yap': '../NER/data/for_ncrf/morph_yap_test_dummy_o.bmes',
},
'token': {
'token_dev': '../NER/data/for_ncrf/token_gold_dev_fix_plo.bmes',
'token_test': '../NER/data/for_ncrf/token_gold_test_fix_plo.bmes',
},
'multitok': {
'token_dev': '../NER/data/for_ncrf/token_gold_dev_concat_plo.bmes',
'token_test': '../NER/data/for_ncrf/token_gold_test_concat_plo.bmes',
}
}
params = { 'status': 'decode' }
for i, row in erdf.iterrows():
unit = row['unit']
for name, set_path in decode_sets[unit].items():
row_par = params.copy()
row_par['load_model_dir'] = os.path.join(models_folder, row['model_file_name'])
row_par['dset_dir'] = os.path.join(models_folder, row['dset_file_name'])
row_par['decode_dir'] = os.path.join(output_folder, name+'.'+row['model_base_name']+'.bmes')
row_par['raw_dir'] = set_path
conf_path = os.path.join(decode_conf_folder, name+'.'+row['model_base_name']+'.decode.conf')
if not os.path.exists(conf_path):
with open(conf_path, 'w', encoding='utf8') as of:
for k, v in row_par.items():
of.write(k+'='+str(v)+'\n')
import os, re
pred_line = re.compile('Predict raw 1-best result has been written into file.*')
bads = []
for f in os.scandir('final_setup/plo_decode_logs'):
if f.name=='.ipynb_checkpoints' or f.name=='.log':
continue
with open(f.path, 'r') as fp:
data = fp.read()
if len(re.findall(pred_line, data))==0:
bads.append (f.name)
#os.remove(f.path)
sorted(bads)
from collections import Counter
xxx = []
for f in os.scandir('final_setup/plo_decode_output'):
if f.name=='.ipynb_checkpoints' or f.name=='.bmes':
continue
elif 'pruned' in f.name:
xxx.append('.'.join(f.name.split('.')[:-2]))
Counter(xxx).most_common()
###Output
_____no_output_____
###Markdown
Evaluate decoded folder
###Code
erdf = pd.read_pickle('final_setup/plo_erdf.pkl')
import sys
sys.path.append('../NER')
import ne_evaluate_mentions as nem
scores = {}
if os.path.exists('final_setup/plo_scores.pkl'):
scores = pickle.load(open('final_setup/plo_scores.pkl', 'rb'))
for file in os.scandir(output_folder):
if file.name=='.ipynb_checkpoints':
continue
gold_name, inp, arch, w_embed, seed_num = file.name.split('.')[:-1]
if (gold_name, inp, arch, w_embed, seed_num) not in scores:
if len(gold_name.split('_'))>2:
unit, pred_set, _ = gold_name.split('_')
gold_path = decode_sets[unit][unit+'_'+pred_set+'_gold']
else:
unit, pred_set = gold_name.split('_')
gold_path = decode_sets[unit][unit+'_'+pred_set]
p, r, f = nem.evaluate_files(gold_path, file)
scores[(gold_name, inp, arch, w_embed, seed_num)] = (p, r, f)
import pickle
pickle.dump(scores, open('final_setup/plo_scores.pkl', 'wb'))
score_tups = [(*k, *v) for k,v in scores.items()]
mev = pd.DataFrame(score_tups, columns=('gold_name', 'unit', 'arch',
'w_embed', 'seed_num',
'p_m', 'r_m', 'f_m'))
(mev[mev.gold_name.str.contains('dev')].groupby(['gold_name', 'unit', 'arch'])
.f_m.agg(['max', 'min', 'mean', 'std', 'median', perc(95)]))
(mev[mev.gold_name.str.contains('dev')].groupby(['gold_name', 'unit', 'arch']).size())
mev.head()
mev['pred_set'] = mev.gold_name.apply(lambda x: '_'.join(x.split('_')[1:]))
mev = mev.merge(erdf, how='left')
(mev[mev.pred_set.str.contains('dev')].groupby(['unit', 'pred_set', 'arch', 'embed_type', 'cm'])
.f_m.agg(['mean', 'std']).mul(100).round(2)
.assign(mean = lambda x: x['mean'].apply('{:,.2f}'.format).astype(str)+' ± '+ x['std'].round(1).astype(str))[['mean']]
.unstack([-2,-1]))
x = (mev[mev.pred_set.str.contains('dev')].groupby(['unit', 'pred_set', 'arch', 'embed_type', 'cm'])
.f_m.agg([ 'mean', 'std']).mul(100).round(2)
.assign(std = lambda x: x['std'].round(1))
.unstack([-2,-1]))
x.columns = x.columns.reorder_levels([1,2,0])
pd.set_option("max_columns", 30)
x.sort_index(axis=1)
mev[(mev.unit=='morph') & (mev.pred_set.str.contains('pruned')) & (mev.embed_type=='ft_oov') & (mev.arch=='char_cnn')].groupby(['pred_set','cm']).f_m.mean().unstack()
mev['pred_set_sub'] = mev.pred_set.apply(lambda x: x.split('_')[1] if '_' in x else '')
mev['pred_set_main'] = mev.pred_set.apply(lambda x: x.split('_')[0] )
(mev[((mev.unit!='morph') & (mev.embed_type=='ft_oov')
)
|
((mev.unit=='morph')
& (mev.embed_type=='ft_oov')
& (mev.arch=='char_cnn'))].groupby(['unit', 'pred_set_sub', 'cm', 'pred_set_main',])
.f_m.mean().unstack().mul(100).round(2)
.assign(ratio = lambda x: (x.test/x.dev -1).mul(100).round(1)))
mev.to_pickle('final_setup/plo_mev.pkl')
import os
from collections import defaultdict
for d in os.scandir('hp_search'):
if d.name.startswith('models'):
all_models_paths = defaultdict(list)
all_models_epoch = defaultdict(lambda: -1)
for f in os.scandir(d.path):
if f.name!='.model' and f.name.endswith('.model'):
a, c, _, e, _ = f.name.split('.')
e = int(e)
                all_models_epoch[(a,c)] = max(all_models_epoch[(a,c)], e)
all_models_paths[(a,c)].append((e, f.path))
for k, v in all_models_paths.items():
for e, path in v:
                if e!=all_models_epoch[k]:
                    pass  # os.remove(path)  # uncomment to delete checkpoints from earlier epochs
###Output
_____no_output_____ |
_sources/curriculum-notebooks/Science/ConcentrationAndPH/concentration-and-ph.ipynb | ###Markdown

###Code
%%html
<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>
import ipywidgets as widgets
import IPython
from IPython.display import HTML
%%javascript
require.config({
paths: {
d3: "https://d3js.org/d3.v3.min"
}
});
require(["d3"], function(d3) {
window.d3 = d3;
});
###Output
_____no_output_____
###Markdown
Concentration and pH Grade 9 curriculumThis is a jupyter notebook about how to measure the quantity of different substances in the environment. This includes measuring air and water quality. This notebook will focus on what the concentration and pH of different substances mean for the environment. You will be able to:* apply and interpret measures of chemical concentration* find the pH of a solution using an indicator solution and litmus paper* identify acids, bases, and neutral substances based on pH* apply this knowledge to help determine the health of an environment ConcentrationFor many of the products that you use or buy, the manufacturer tells us how much of a substance is present by giving the percentage (%) of weight or volume it represents. Take milk for example. The label 1% on a carton of milk tells us that 1% of the milk is milk fat.This means that in every 100 ml of milk, there is 1 ml of milk fat.So a 1 litre (1000 ml) carton contains 10 ml of milk fat. [source](https://www.healthyeating.org/Milk-Dairy/Dairy-Facts/Types-of-Milk)When we say how much of one substance is contained in a certain amount of another, we are giving a **concentration**.Percentage is actually describing "parts per hundred".The concentration 10% is the same as 10 parts per hundred or $\dfrac{10}{100}$.This is very helpful for many everyday substances, but many substances are in much smaller concentrations than 1%.We could give a concentration like 0.00001%, but there's a simpler way to express this.This percentage represents $\dfrac{1}{1,000,000}$ which has its own unit, called parts per million (ppm).Later in this notebook, we will talk a little bit about a pesticide called DDT. Its concentration is measured in parts per billion (ppb) because it can be deadly in even smaller concentrations. Just like how 1000 g is equal to 1 kg, 1 ppm is equal to 1 mg/kg (milligram per kilogram). There are 1000 milligrams in a gram and 1000 grams in a kilogram, so there's 1,000,000 milligrams in a kilogram, making one milligram per kilogram one part per million. When the substance is in water, 1 ppm is equal to 1 mg/L (milligram per litre). This only works for water because water's density is 1 kg/L. Other liquids have different densities, so the concentration needs to be calculated differently in other solutions. Let's look at an example of how to calculate ppm of a substance.Example:If the nutritional information label on a container of yogurt specifies that each 125 g serving contains 7 mg of cholesterol, what is the concentration of cholesterol in a serving of yogurt in parts per million (ppm)? [source](https://drive.google.com/file/d/1YJUpQYN2QlrzM0obSWKjdnMeFASNcKQb/view)Solution:1. First, state your information as a ratio. $\frac{7 \text{ mg cholesterol}}{125 \text{ g yogurt}} = 0.056 \text{ mg/g}$ 2. Second, express that ratio in the form of mg/kg $ 0.056 \text{ mg/g} \times 1000 \text{ g/kg} = 56 \text{ mg/kg}$Since mg/kg is equivalent to ppm, there are 56 ppm of cholesterol in each serving of this yogurt. 
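The same calculation can be done in a couple of lines of Python (a small illustration of the yogurt example above):
###Code
# Concentration of cholesterol in yogurt: 7 mg of cholesterol in a 125 g serving
mg_cholesterol = 7
g_yogurt = 125

ratio_mg_per_g = mg_cholesterol / g_yogurt   # 0.056 mg/g
ppm = ratio_mg_per_g * 1000                  # convert to mg/kg, which is the same as ppm
print(round(ppm), "ppm")                     # 56 ppm
###Output
_____no_output_____
###Markdown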
Why is this important?There are many substances that are toxic, meaning they are able to cause harm to organisms.They cause harm, not by how an organism is exposed to it, nor how long, but by how much enters the organism.But it's hard to determine the concentration of the substance which is toxic because many factors influence it.Body mass and metabolism are two factors that affect when the concentration of a substance is considered toxic.It is easier to say what concentration would likely kill 50% of the population to which it's applied.This is called the **lethal dose 50** or **LD50**.Many scientists have done studies on what concentrations are harmful to humans. One of the first to do this was Paracelsus during the Renaissance. One of his famous quotes is *"only the dose makes the poison"*. Here is a table of the LD50 of different chemicals for humans:| Chemical | Source | Concentration (ppm)||-----------------|--------|--------------------||botulinum toxin A|Clostridium botulinum bacterium|0.00000003|| dioxin |contaminant in some herbicides and in PCBs|0.03|| nicotine |cigarette smoke|0.86|| solanine |green parts of potatoes|6.0|| caffeine |coffee, tea, chocolate|150-200|| NaCl | table salt | 12,357 || glucose | sugar | 30,000 *(for rats)*|| H2O | water | 90,000 *(for rats)*|*Most of these are extrapolated from data of LD50 of rats or from observational data*You might notice that some things in this table you consume every day. Or you see other people consume these.It turns out that too much of anything can kill an organism. 90,000 ppm of water is around 6 litres for a healthy average adult. 200 ppm of caffeine is about 100 cups of coffee for that same adult. It's very difficult to get that kind of concentration within our bodies, as it must be all in the same sitting (before it starts being digested), so we consider that an **acceptable risk**. Case Study: DDT DDT, or dichlorodiphenyltrichloroethane, is a pesticide originally created to kill lice, which spread the disease typhus.It was so effective that it was also used to kill mosquitoes to reduce malaria.But what people didn't know when they were using it was that this pesticide is *persistent in the environment*, which means that it stays in the environment for really long periods of time.When DDT was used to control insect damage on crops, it got into water supplies and eventually into the ocean.In the ocean, the concentration of DDT is pretty low, but then small sea life such as zooplankton consume it.Instead of being digested, the pesticide stays in their system and accumulates as they consume it.Then the small fish that eat zooplankton and plankton consume the DDT as well, and accumulate an even higher concentration within their fat cells. This goes all the way up the food chain, increasing the concentration in each organism, which is called biomagnification. At each step in the food chain, some of these organisms die because they consumed too much DDT and couldn't digest it.Humans have even been found to have small concentrations of DDT in our systems too, as we eat animals which have ingested DDT. We track the spread of DDT by calculating its concentration in certain areas and species. *********** Acids and BasesThe main factor that defines an acid or a base is its pH. pH stands for "power of hydrogen", and it measures the concentration of hydrogen ions (H+) in a solution. The more hydrogen ions there are in a solution, the more acidic a solution is. 
The pH scale is from 0 to 14 to indicate how strong or weak an acid or base is.>A solution with a pH between 0 and 6 is considered an acid. >>A pH of 7 is considered neutral.>>A solution with a pH between 8 and 14 is considered a base. This scale is a logarithmic scale, meaning each number is 10 times stronger or weaker than the number next to it. For example: an acid with a pH of 3 is 10 times stronger than one with a pH of 4. AcidsAn acid is a compound that dissolves in water and forms a solution with pH less than 7. Acids are **sour** and react with bases, neutralizing both to a pH around 7. Acids can be strong like stomach acid, or weak like citric acid (found in sour fruits). You can turn a strong acidic solution into a weak acidic solution by diluting the solution with pure or distilled water (which is a neutral solution).
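A small sketch of why each step on the scale is a factor of 10: the hydrogen ion concentration is 10 raised to the power of minus the pH (the `hydrogen_ion_concentration` helper below is hypothetical, just for illustration):
###Code
# Sketch: the hydrogen ion concentration (in moles per litre) behind each pH value.
def hydrogen_ion_concentration(pH):
    return 10.0 ** (-pH)
for pH in [3, 4, 7]:
    print(pH, hydrogen_ion_concentration(pH))
# pH 3 gives 0.001 and pH 4 gives 0.0001, ten times fewer hydrogen ions,
# which is why an acid with a pH of 3 is 10 times stronger than one with a pH of 4.
###Output
_____no_output_____
###Markdown
Now see if you can pick out the acids from a list of household substances.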
###Code
answers1 = widgets.SelectMultiple(options=['Vinegar', 'Tums/Antacids', 'Pure Water', 'Tomatoes', 'Soap'],
value=[], description='Substances')
def display1():
print("Which of these household substances do you think contain an acid?")
print("You can select more than one by holding 'crtl' while selecting.")
IPython.display.display(answers1)
def check1(a):
IPython.display.clear_output()
display1()
if answers1.value == ('Vinegar', 'Tomatoes'):
print("Awesome! You found the acids!")
print("Vinegar has acetic acid in it, making it sour.")
print("Tomatoes have citric acid in them, also making them a little sour.")
else:
if answers1.value == ('Vinegar',) or answers1.value == ('Tomatoes',):
print("You're right, but there's one more that contains an acid. Look at the definition again.")
else:
print("You've selected at least one substance that is not an acid. Look at the definition again.")
display1()
answers1.observe(check1, 'value')
###Output
_____no_output_____
###Markdown
BasesA base is a compound that dissolves in water and forms a solution with pH greater than 7. Bases are **bitter and slippery** and react with acids, neutralizing both to a pH around 7. Bases can be strong like many household cleaners such as bleach, or weak like baking soda (used in baking and cleaning). You can turn a strong basic solution into a weak basic solution by diluting the solution with pure or distilled water (which is a neutral solution). A basic solution is also called an **alkaline** solution.
###Code
answers2 = widgets.SelectMultiple(options=['Vinegar', 'Tums/Antacids', 'Pure Water', 'Tomatoes', 'Soap'],
value=[], description='Substances')
def display2():
print("From the same list as above, which of these household substances do you think contain a base?")
print("Once again, you can select more than one by holding down 'crtl' while selecting.")
IPython.display.display(answers2)
def check2(a):
IPython.display.clear_output()
display2()
if answers2.value == ('Tums/Antacids','Soap'):
print("Well done! You found the substances that contain a base!")
print("When you eat an antacid, it neutralizes the stomach acid that's giving you heart burn.")
print("Soap contains a small amount of base to make it slippery and able to clean well.")
else:
if answers2.value == ('Tums/Antacids',) or answers2.value == ('Soap',):
print("You're right, but there's another substance that contains a base. Look at the definition again.")
else:
print("You've selected one or more substances that do not contain a base. Look at the definition again.")
display2()
answers2.observe(check2, 'value')
###Output
_____no_output_____
###Markdown
Apply your knowledgeAnswer the questions below about whether each substance is an acid, a base, or neutral based on their pH.
###Code
answers3 = widgets.RadioButtons(options=['An acid', 'A base', 'Neutral'],
value=None)
def display3():
print("Orange juice has a pH around 3. What is it?")
IPython.display.display(answers3)
def check3(a):
IPython.display.clear_output()
display3()
if answers3.value == 'An acid':
print("Great job! Because the pH is less than 7, it's an acid!")
else:
print("That's not right, remember what the definitions of acids and bases are.")
display3()
answers3.observe(check3, 'value')
answers5 = widgets.RadioButtons(options=['An acid', 'A base', 'Neutral'],
value=None)
def display5():
print("Baking soda has a pH around 9. What is it?")
IPython.display.display(answers5)
def check5(a):
IPython.display.clear_output()
display5()
if answers5.value == 'A base':
print("That's right! Because the pH is bigger than 7, it's a base!")
else:
print("That's not right, remember what the definitions of acids and bases are.")
display5()
answers5.observe(check5, 'value')
###Output
_____no_output_____
###Markdown
pH in the EnvironmentNow that we know a few acids and bases that you can find in your house, let's extend this to the environment. Normal rain has a pH of around 5.6, while pure water has a pH around 7, and ocean water has a pH around 8. Tap water has a pH around 7.5 to prevent corrosion of pipes, but varies because of human influence and natural processes. But sometimes air pollutants create more acid when they react with water in the air, creating acid rain. Acid rain has a pH less than 5.6 and is not healthy for the environment. This video from National Geographic explains acid rain, its effects, and what we can do to help.
###Code
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/1PDjVDIrFec" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
answers4 = widgets.RadioButtons(options=['Sea water', 'Fresh water', 'Pure/distilled water', 'Acid rain'],
value=None)
def display4():
print("Which type of water has a neutral pH? (A pH around 7)")
IPython.display.display(answers4)
def check4(a):
IPython.display.clear_output()
display4()
if answers4.value == 'Pure/distilled water':
print("That's right! pure water or distilled water has a pH close to 7. That makes it neither a base, nor an acid.")
else:
print("That's not right, look back at the last block of text.")
display4()
answers4.observe(check4, 'value')
###Output
_____no_output_____
###Markdown
How does changing the pH affect the environment?Every substance has a pH. If this pH is changed, it affects organisms that use or consume the substance by changing their pH. Changing an organism's pH will affect how it functions. For example, making soil more acidic destroys nutrients, making it harder for plants to grow healthily. Acidic water can poison fish as well as their eggs, which prevents them from hatching. There's a lot of balance that goes on in nature, and changing pH disturbs that balance, which affects much more than just the original change. The full scale How do we measure pH?There are lots of ways to measure pH. There's even a meter that records the pH of a solution digitally, but those are expensive. There are lots of cheaper indicators that change colour when the pH changes past a specific limit. If you want to know if something is acidic or basic, but not the exact pH, there is a test called litmus paper. Litmus paper comes in blue and red. The blue litmus paper turns red if it's dipped in an acid. The red litmus paper turns blue if it's dipped in a base. There's also a universal indicator solution which changes colour at different pHs between 2 and 10. There are many more indicators such as phenolphthalein, phenol red, bromothymol blue, and even cabbage juice. These all change colour at different points on the pH scale. LabLet's test a mystery solution with litmus paper to determine if it's an acid or a base. Drag the red and blue litmus paper strips into the liquid in the beaker one at a time. Remember, if the red litmus paper turns blue on the end that was dipped, it's a base. If the blue litmus paper turns red on the end that was dipped, it's an acid. If neither changes colour when dipped, then it's neutral.
###Code
%%html
<script type = "text/javascript" src="sources/animation2.js"></script>
%%html
<div id="animation"></div>
<script>
display(10);
</script>
<!-- 10 represents the pH of the solution in the beaker.
If this is changed then the question below the lab might not have the right answer. -->
answers6 = widgets.RadioButtons(options=['An acid', 'A base', 'Neutral'],
value=None)
def display6():
print("Is the solution in the beaker an acid, a base, or neutral?")
IPython.display.display(answers6)
def check6(a):
IPython.display.clear_output()
display6()
if answers6.value == 'A base': # Here's where it checks for the right answer
print("Great job! Because the red litmus paper turned blue on the end, it's a base!")
else:
print("That's not right. Look at what the litmus paper tells you again.")
display6()
answers6.observe(check6, 'value')
###Output
_____no_output_____
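###Markdown
The litmus test you just ran follows a very simple rule, sketched below in plain Python (the `litmus_test` helper is hypothetical, just to spell the logic out):
###Code
# Sketch of the litmus-paper logic: blue paper turns red in an acid,
# red paper turns blue in a base, and neither changes colour in a neutral solution.
def litmus_test(pH):
    if pH < 7:
        return "blue litmus turns red -> acid"
    elif pH > 7:
        return "red litmus turns blue -> base"
    return "neither strip changes colour -> neutral"
print(litmus_test(3))
print(litmus_test(9))
print(litmus_test(7))
###Output
_____no_output_____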
###Markdown
Universal indicator solutionUniversal indicator can come on paper like the litmus tests, or it can be in a solution that is put into the solution being tested. If you're using paper then the paper changes colour with respect to the scale below depending on the pH. If you use the solution in your tested solution using a dropper, then the whole tested solution changes colour with respect to the scale below depending on the solution's pH! Try solutions with different pHs InstructionsSlide the slider to change the pH of the solution in the beaker. Then add the universal indicator solution in the dropper over top the beaker. Then watch what colour the solution in the beaker changes to! If you want to try another pH, pick a new pH and drag the dropper over the beaker again!
###Code
%%html
<link rel="stylesheet" type="text/css" href="sources/dropperstyle.css">
<script type = "text/javascript" src="sources/dropperAnimation.js"></script>
%%html
<div id="slider"></div>
<div id="beaker">
</div>
<script>
display2(7);
</script>
###Output
_____no_output_____
###Markdown
How to measure water qualityThere are many factors that go into determining the quality of fresh and salt water, including acidity (pH), colour, concentration of dissolved oxygen, and turbidity (the amount of suspended particles in the water). Let's apply our knowledge of concentration and pH to measuring water quality.Good quality fresh water usually has a pH around 6-7, and a concentration of dissolved oxygen around 9-10 ppm. Good quality sea water has a pH around 7-8.5, and a concentration of dissolved oxygen around 7-8 ppm. These measurements are taken at room temperature, as the temperature of water affects the amount of dissolved oxygen that can be found in water. [source](https://www.engineeringtoolbox.com/oxygen-solubility-water-d_841.html)
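Here is a minimal sketch of how you could check a fresh water sample against those ranges (the `fresh_water_quality` helper and the sample values are hypothetical):
###Code
# Sketch: compare a fresh water sample to the ranges above
# (pH around 6-7 and dissolved oxygen around 9-10 ppm at room temperature).
def fresh_water_quality(pH, dissolved_oxygen_ppm):
    good_pH = 6 <= pH <= 7
    good_oxygen = 9 <= dissolved_oxygen_ppm <= 10
    if good_pH and good_oxygen:
        return "good quality fresh water"
    return "poor quality fresh water"
print(fresh_water_quality(6.5, 9.5))  # within both ranges
print(fresh_water_quality(4.5, 9.5))  # too acidic, like a pond hit by acid rain
###Output
_____no_output_____
###Markdown
With those ranges in mind, try the question below.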
###Code
answers7 = widgets.RadioButtons(options=['pH will increase towards 14',
'pH will decrease towards 1',
                                         'pH will become neutral (7)',
'pH will stay the same'],
value=None)
def display7():
print("What will happen to the pH of a pond if acid rain falls into it?")
IPython.display.display(answers7)
def check7(a):
IPython.display.clear_output()
display7()
if answers7.value == 'pH will decrease towards 1':
print("That's right! Acid rain makes the pond more acidic, meaning it's pH is closer to 1.")
else:
print("Sorry, try again. Remember what acid rain is.")
display7()
answers7.observe(check7, 'value')
###Output
_____no_output_____
###Markdown
Water in the Edmonton AreaDetermining the quality of drinking water is a little different from that of fresh or sea water. Drinking water doesn't need a specific concentration of dissolved oxygen, and is instead tested for things like the concentration of calcium carbonate (CaCO3) because it makes the water hard. However, pH is always used for determining the quality of water.Let's compare data from a water treatment plant in the Rossdale area of Edmonton to the average pH of distilled water, tap water, and salt/sea water that we learned in this notebook. Fill in the values of the average pH of each of these kinds of water by double clicking the appropriate box under Average pH. You can find this information earlier in the notebook.When you're done, make sure you're not on an editable cell, then press the "click to save data" button.
###Code
# same code from investigating electrical conductivity notebook
import pandas as pd
import numpy as np
import qgrid
df = pd.DataFrame(index=pd.Series(['Distilled Water', 'Tap Water', 'Salt Water']), columns=pd.Series(['Average pH']))
df_widget = qgrid.QgridWidget(df =df, show_toolbar=False)
df_widget
# same code from investigating electrical conductivity notebook
from IPython.display import Javascript, display
from ipywidgets import widgets
def run_all(ev):
display(Javascript('IPython.notebook.execute_cell_range(IPython.notebook.get_selected_index()+1,IPython.notebook.get_selected_index()+2)'))
button = widgets.Button(description="Click to save data")
button.on_click(run_all)
display(button)
# same code from investigating electrical conductivity notebook
output_data = df_widget.get_changed_df()
###Output
_____no_output_____
###Markdown
Now that we have the averages, here is the collected data from Rossdale in the past week. Notice that column 2 is labelled pH. That is what we will be comparing to our averages. Click on the button below to compare the data in a graph.
###Code
# same code from investigating electrical conductivity notebook
url_ELS = "http://apps.epcor.ca/DAilyWaterQuality/Default.aspx?zone=ELS"
url_RD = "http://apps.epcor.ca/DAilyWaterQuality/Default.aspx?zone=Rossdale"
table_RD = pd.read_html(url_RD, header=0)
table_ELS = pd.read_html(url_ELS, header=0)
qgrid.QgridWidget(df = table_RD[0])
# same code from investigating electrical conductivity notebook
button = widgets.Button(description="Update the plot")
button.on_click(run_all)
display(button)
# same code from investigating electrical conductivity notebook
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import matplotlib.cm as cm
import warnings
# ignoring runtime warning from taking the mean of a single number
# this also catches strings.
np.warnings.filterwarnings('ignore')
cmap = cm.Set1
norm=Normalize(vmin=0, vmax=len(output_data.index)+1)
for i in range(len(output_data.index)):
try:
sample_mean=float(output_data.iloc[i,0])
plt.axhline(y=sample_mean, c=cmap(norm(i)), ls='dashed', label='Average of ' + output_data.index[i], lw='3')
except ValueError as e:
# skip data that has strings in it.
continue
plt.plot(table_RD[0].iloc[2, 1:8], 'bo-', label='Rossdale readings')
#plt.plot(pd.DataFrame(table_RD[0].iloc[6, 1:8].apply(pd.to_numeric)))
plt.xlabel('Day of Measurement')
plt.ylabel('pH')
plt.title('pH of treated waste water over the last week')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2)
plt.show()
answers8 = widgets.RadioButtons(options=['An acid', 'A base', 'Neutral'],
value=None)
def display8():
print("What should be added to Rossdale's water to make it more like tap water?")
IPython.display.display(answers8)
def check8(a):
IPython.display.clear_output()
display8()
if table_RD[0].iloc[2, 7] >= 7.5 and answers8.value == 'An acid':
print("That's right! Adding an acid to Rossdale's water will make it more like tap water because it's too alkaline.")
else:
        if table_RD[0].iloc[2, 7] < 7.5 and answers8.value == 'A base':
print("That's right! Adding a base to Rossdale's water will make it more alkaline like tap water.")
else:
print("That's not right, remember what happens when acids and bases react and a pH of 7 is neutral.")
display8()
answers8.observe(check8, 'value')
###Output
_____no_output_____
###Markdown
How to measure air qualityThe video below is from a company in Victoria, Australia that measures the air quality. Try to find all the substances they measure the concentration of.
###Code
HTML('<iframe width="560" height="315" src="https://www.youtube.com/embed/mp3kztZy7ow?start=19" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
###Output
_____no_output_____ |
notebooks/real-data-processing.ipynb | ###Markdown
real-data-processing Processing real data for the experimentsIn this Notebook, let us process the two real datasets to be considered at the thesis experiments: **COIL-20** and **Olivetti Faces**. Let us save the image data in an easy to manipulate way. Tools & LibrariesWe use **`Python`**. The following modules are used:* **pandas:** reading, writing and manipulating data.* **numpy:** vectorized calculations and other relevant math functions.* **scipy:** functions for scientific purposes. Great statistics content.* **matplotlib & seaborn:** data visualization.* **sklearn:** comprehensive machine learning libraries.* **PyPNG:** pure Python library for reading and saving images.
###Code
# opening up a console as the notebook starts
%qtconsole
# making plots stay on the notebook (no extra windows!)
%matplotlib inline
# show figures with highest resolution
%config InlineBackend.figure_format = 'retina'
# changing working directory
import os
os.chdir('C:\\Users\\Guilherme\\Documents\\TCC\\tsne-optim')
# importing modules
import pandas as pd
import numpy as np
import scipy
import matplotlib.pyplot as plt
import seaborn as sns
import png, array
###Output
_____no_output_____
###Markdown
1. Olivetti Faces datasetThis dataset comes with sklearn `datasets` module. Let us fetch and process the data.
###Code
# fetching data #
# importing fetch function
from sklearn.datasets import fetch_olivetti_faces
# fetching
olivetti_df = fetch_olivetti_faces()
# let us check the dataset description
olivetti_df['DESCR']
# let us see one face
plt.imshow(olivetti_df['images'][0])
# let us check the data
olivetti_df['data']
# finally, convert it to a dataframe
olivetti_data_df = pd.DataFrame(olivetti_df['data'])
olivetti_data_df.columns = ['pixel_'+str(e) for e in olivetti_data_df.columns]
# adding target variable
olivetti_data_df.loc[:,'TARGET'] = olivetti_df['target']
# saving
olivetti_data_df.to_csv('data/final/olivetti-faces.csv', index=False)
###Output
_____no_output_____
###Markdown
2. COIL-20The dataset is available as a set of images. Let us read the images and convert it to a structured .csv file.
###Code
# directory where images are stored
coil_20_dir = 'data/raw/coil-20-proc'
# reading image files
img_files = [f for f in os.listdir(coil_20_dir)]
# reading each image and structuring data #
# dataframe to accumulate images
coil_20_df = pd.DataFrame()
# loop for each file
for img_file in img_files:
# reading image
reader = png.Reader(filename=os.path.join(coil_20_dir, img_file))
w, h, pixels, metadata = reader.read_flat()
# structuring in a dataframe
temp_df = pd.DataFrame(np.matrix(pixels))
temp_df.columns = ['pixel_' + str(e) for e in temp_df.columns]
temp_df.loc[:,'TARGET'] = int(img_file.split('_')[0][3:])
# saving to main df
coil_20_df = pd.concat([temp_df,coil_20_df])
# saving to .csv
coil_20_df.to_csv('data/final/coil-20.csv', index=False)
###Output
_____no_output_____ |
book_sample/4-4-4-scikit-learn-dimensionality-reduction.ipynb | ###Markdown
4.4.4 Dimensionality Reduction
###Code
import numpy as np
import matplotlib.pyplot as plt
# Fix the random seed
np.random.seed(123)
# Generate 50 uniform random numbers in [0, 1)
X = np.random.random(size=50)
# Double X, then add 0.5 times uniform random numbers in [0, 1)
Y = 2*X + 0.5*np.random.rand(50)
# Plot the scatter plot
fig, ax = plt.subplots()
ax.scatter(X, Y)
plt.show()
from sklearn.decomposition import PCA
# Instantiate the principal component analysis (PCA) class
pca = PCA(n_components=2)
# Run the principal component analysis
X_pca = pca.fit_transform(np.hstack((X[:, np.newaxis], Y[:, np.newaxis])))
# Plot the coordinates obtained from the PCA as a scatter plot
fig, ax = plt.subplots()
ax.scatter(X_pca[:, 0], X_pca[:, 1])
ax.set_xlabel('PCA1')
ax.set_ylabel('PCA2')
ax.set_xlim(-1.1, 1.1)
ax.set_ylim(-1.1, 1.1)
plt.show()
from sklearn.decomposition import PCA
#
###Output
_____no_output_____ |
example/points_example.ipynb | ###Markdown
Fill from Outflow Points Example
###Code
import heapq
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from osgeo import gdal
import floodfill
%matplotlib inline
matplotlib.rcParams['figure.figsize'] = (10, 10)
###Output
_____no_output_____
###Markdown
Initial test arrays
###Code
dem_adj = np.array([
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 0.1, 1.0, 1.0, 1.0, 1.0],
[0.1, 0.6, 0.2, 0.3, 1.0, 1.0],
[0.2, 0.7, 0.3, 0.4, 0.1, 1.0],
[1.0, 0.8, 0.4, 1.0, 1.0, 1.0],
[1.0, 1.0, 0.2, 1.0, 1.0, 1.0],
])
hru_type = np.array([
[0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 0],
])
outflow_pts = [[3, 4]]
print(dem_adj)
print(hru_type)
print(dem_adj[outflow_pts[0][0], outflow_pts[0][1]])
###Output
_____no_output_____
###Markdown
8-way fillingSet inactive cells to nodata and compute fill
###Code
dem_fill = dem_adj.copy()
print(dem_fill)
dem_mask = (hru_type > 0)
dem_fill[~dem_mask] = np.nan
dem_fill = floodfill.from_points(dem_fill, outflow_pts, four_way=False)
dem_fill[~dem_mask] = dem_adj[~dem_mask]
print(dem_fill)
###Output
_____no_output_____
###Markdown
4-way filling
###Code
dem_fill = dem_adj.copy()
print(dem_fill)
dem_mask = (hru_type > 0)
dem_fill[~dem_mask] = np.nan
dem_fill = floodfill.from_points(dem_fill, outflow_pts, four_way=True)
dem_fill[~dem_mask] = dem_adj[~dem_mask]
print(dem_fill)
###Output
_____no_output_____
###Markdown
Try with a couple of outflow points
###Code
dem_adj = np.array([
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 0.1, 1.0, 1.0, 1.0, 1.0],
[0.1, 0.6, 0.2, 0.3, 1.0, 1.0],
[0.2, 0.7, 0.3, 0.4, 0.1, 1.0],
[1.0, 0.8, 0.4, 0.4, 0.3, 1.0],
[1.0, 1.0, 0.2, 0.4, 0.2, 1.0],
])
outflow_pts = [[1, 1], [3, 4]]
# Mark the outflow points in a boolean mask
mask = np.zeros(dem_adj.shape, dtype=bool)
mask[tuple(zip(*outflow_pts))] = True
print(dem_adj)
print(mask)
for outflow_pt in outflow_pts:
print(dem_adj[outflow_pt[0], outflow_pt[1]])
dem_fill = dem_adj.copy()
print(dem_fill)
dem_fill = floodfill.from_points(dem_fill, outflow_pts, four_way=False)
print(dem_fill)
###Output
_____no_output_____ |
practicals/01.SummaryStatistics.ipynb | ###Markdown
Learning ObjectivesToday we learn how to clean real data and use summary statistics to answer real world questions about the dataset PracticalsThis is a series that can be watched to reinforce the material of data science foundations, and while I will try to make it as stand alone as possible, it will heavily lean on the foundations material.The point of these lectures will be to dive into how to use the knowledge we gained during data science fundamentals. What we know so farAs always we will start off by checking out our assumptions. In the data science foundations we will discuss why we need these assumptions and what we can use them for in theory, but here we will use them in practice.The assumptions that we begin with are simply: we have data.
###Code
import networkx as nx
from nxpd import draw
from nxpd import nxpdParams
nxpdParams['show'] = 'ipynb'
G = nx.DiGraph()
G.add_node('Inputs (x_1, x_2, ..., x_n)')
draw(G)
###Output
_____no_output_____
###Markdown
Our dataThe data that we will be looking at throughout this series will be the billionaires dataset ([full details](http://www.iie.com/publications/interstitial.cfm?ResearchID=2917))Researchers have compiled a multi-decade database of the super-rich. Building off the Forbes World's Billionaires lists from 1996-2014, scholars at Peterson Institute for International Economics have added a couple dozen more variables about each billionaire - including whether they were self-made or inherited their wealth.While we could focus on more than one data set and have quite some fun doing so, we will focus on just one to drill down instead on the methodology and the practice of data science. So let's begin by looking at our data.
###Code
import pandas as pd
# Pandas has many different wrappers to read data
# But read_csv tends to be the most commonly used one
df = pd.read_csv('../data/billionaires.csv')
# The first thing I always do it do .info()
df.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 2614 entries, 0 to 2613
Data columns (total 22 columns):
age 2614 non-null int64
category 2613 non-null object
citizenship 2614 non-null object
company.name 2576 non-null object
company.type 2578 non-null object
country code 2614 non-null object
founded 2614 non-null int64
from emerging 2614 non-null bool
gdp 2614 non-null float64
gender 2580 non-null object
industry 2613 non-null object
inherited 2614 non-null bool
name 2614 non-null object
rank 2614 non-null int64
region 2614 non-null object
relationship 2568 non-null object
sector 2591 non-null object
was founder 2614 non-null bool
was political 2614 non-null bool
wealth.type 2592 non-null object
worth in billions 2614 non-null float64
year 2614 non-null int64
dtypes: bool(4), float64(2), int64(4), object(12)
memory usage: 377.9+ KB
###Markdown
So here we get a ton of information. We have a data set with 22 columns and up to 2614 rows. Notice that some columns don't have 2614 filled rows and instead have some null rows. In addition we can get the types of the columns. Quantitative and QualitativeWe can see from above which columns are quantitative and qualitative. A good rule of thumb is that any float is a quantitative column and the rest are qualitative.That being said it is good to inspect the columns and ask the question: can I average this? A quick visual inspection using `.head()` can help:
###Code
df.head(2)
###Output
_____no_output_____
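###Markdown
One way to apply that rule of thumb programmatically is to split the columns by dtype; this is just a sketch using pandas' `select_dtypes` on the dataframe loaded above (the two list variable names are hypothetical):
###Code
# Sketch: split columns into (roughly) quantitative and qualitative by dtype.
quantitative_cols = df.select_dtypes(include=['float']).columns.tolist()
qualitative_cols = df.select_dtypes(exclude=['float']).columns.tolist()
print(quantitative_cols)
print(qualitative_cols)
###Output
_____no_output_____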
###Markdown
Describing qualitative featuresWe can't do too much to describe qualitative features, but one thing that we can do is to count and display the unique entries of these columns. For example, you could be interested in how most billionaires made their fortunes; the command below can tease this out (this will even give us the mode):
###Code
df.groupby('wealth.type').sector.count()
###Output
_____no_output_____
###Markdown
For an ordered qualitative column (like year) we can even take the median or the max and min:
###Code
print df.year.min()
print df.year.median()
print df.year.max()
###Output
1996
2014.0
2014
###Markdown
Describing quantitative featuresNow when it comes to describing quantitative features we have much more we can do, but generally it is always good to start off with a `.describe()` command:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
This will take all of the numeric features (even those that are not actually quantitative, like rank) and calculate some very relevant summary statistics. We get to see the worth in billions, its max, min, percentiles, mean and standard deviation. And with this information we can get a good understanding of how our data is distributed.We also get to see parts of the data we should munge: age has values less than 1 (which biases the mean age), and founded and gdp have values of 0. So let's clean the data and do this one more time.
###Code
import numpy as np
df.age.replace(-1, np.NaN, inplace=True)
df.founded.replace(0, np.NaN, inplace=True)
df.gdp.replace(0, np.NaN, inplace=True)
df.describe()
###Output
_____no_output_____
###Markdown
We can now see a much more reasonable series of numbers for the gdp and founded mins and means. In addition, notice how the average age jumped by 10 years! Without this cleaning the summary statistics would be highly inaccurate. Another way that we can check the quality of our data is to check out the number of unique entries in each column. Sometimes you may be surprised to see more than two values in what you thought was a binary column, or fewer values than you might expect.
###Code
df.nunique()
###Output
_____no_output_____
###Markdown
We actually see a number of surprises here. First, from emerging, inherited, and was founder are all incorrect. There is only one value in each and this carries very little information. Second, we might see that there are only 3 years. The years here represent when we surveyed the data. Finally we might see other things, like the number of names being more than the number of companies, showing that some people own the same company.Let's go ahead and delete the columns with only one unique value:
###Code
del df['was founder']
del df['inherited']
del df['from emerging']
###Output
_____no_output_____
###Markdown
Finally we can of course take more complex statistics like correlation with a single command:
###Code
df.corr().dropna((0, 1), how='all')
###Output
_____no_output_____ |
week-5/submission/EVA5_WK_5_4.ipynb | ###Markdown
Import Libraries Iteration - 4 1. Target* Reach target accuracy* Model accuracy should be consistent* Add learning rate scheduler 2. Result* Params - 7758* Best Test accuracy - 99.51 at Epoch - 8* Best Train accuracy - 99.20 at Epoch - 14 3. Analysis* Able to reach the desired accuracy of 99.4%* Accuracy is consistent: from Epoch 7 onward it stays above 99.4* As observed previously, applying the learning rate scheduler helped achieve the desired accuracy.
###Code
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# !pip install torchviz
!pip3 install graphviz
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Data TransformationsWe first start with defining our data transformations. We need to think about what our data is and how we can augment it to correctly represent images which it might not see otherwise.
###Code
# Train Phase transformations
train_transforms = transforms.Compose([
# transforms.Resize((28, 28)),
# transforms.ColorJitter(brightness=0.10, contrast=0.1, saturation=0.10, hue=0.1),
transforms.RandomRotation((-10.0, 10.0), fill=(1,)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)) # The mean and std have to be sequences (e.g., tuples), therefore you should add a comma after the values.
# Note the difference between (0.1307) and (0.1307,)
])
# Test Phase transformations
test_transforms = transforms.Compose([
# transforms.Resize((28, 28)),
# transforms.ColorJitter(brightness=0.10, contrast=0.1, saturation=0.10, hue=0.1),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
###Output
_____no_output_____
###Markdown
Dataset and Creating Train/Test Split
###Code
train = datasets.MNIST('./data', train=True, download=True, transform=train_transforms)
test = datasets.MNIST('./data', train=False, download=True, transform=test_transforms)
###Output
_____no_output_____
###Markdown
Dataloader Arguments & Test/Train Dataloaders
###Code
SEED = 1
# CUDA?
cuda = torch.cuda.is_available()
print("CUDA Available?", cuda)
# For reproducibility
torch.manual_seed(SEED)
if cuda:
torch.cuda.manual_seed(SEED)
# dataloader arguments - something you'll fetch these from cmdprmt
dataloader_args = dict(shuffle=True, batch_size=128, num_workers=4, pin_memory=True) if cuda else dict(shuffle=True, batch_size=64)
# train dataloader
train_loader = torch.utils.data.DataLoader(train, **dataloader_args)
# test dataloader
test_loader = torch.utils.data.DataLoader(test, **dataloader_args)
###Output
CUDA Available? True
###Markdown
Data StatisticsIt is important to know your data very well. Let's check some of the statistics around our data and what it actually looks like
###Code
# # We'd need to convert it into Numpy! Remember above we have converted it into tensors already
# train_data = train.train_data
# train_data = train.transform(train_data.numpy())
# print('[Train]')
# print(' - Numpy Shape:', train.train_data.cpu().numpy().shape)
# print(' - Tensor Shape:', train.train_data.size())
# print(' - min:', torch.min(train_data))
# print(' - max:', torch.max(train_data))
# print(' - mean:', torch.mean(train_data))
# print(' - std:', torch.std(train_data))
# print(' - var:', torch.var(train_data))
# dataiter = iter(train_loader)
# images, labels = dataiter.next()
# print(images.shape)
# print(labels.shape)
# # Let's visualize some of the images
# plt.imshow(images[0].numpy().squeeze(), cmap='gray_r')
###Output
_____no_output_____
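###Markdown
As a working alternative to the commented-out cell above, here is a minimal sketch that prints the same kind of statistics straight from the MNIST tensors (assuming a recent torchvision where the dataset exposes `.data`; note this looks at raw pixel values scaled to [0, 1], not the normalized values produced by the transforms):
###Code
# Sketch: basic statistics of the raw MNIST training images.
imgs = train.data.float() / 255.0
print(' - shape:', tuple(imgs.shape))
print(' - min :', imgs.min().item())
print(' - max :', imgs.max().item())
print(' - mean:', imgs.mean().item())
print(' - std :', imgs.std().item())
###Output
_____no_output_____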
###Markdown
MOREIt is important that we view as many images as possible. This is required to get some idea on image augmentation later on
###Code
# figure = plt.figure()
# num_of_images = 60
# for index in range(1, num_of_images + 1):
# plt.subplot(6, 10, index)
# plt.axis('off')
# plt.imshow(images[index].numpy().squeeze(), cmap='gray_r')
###Output
_____no_output_____
###Markdown
The modelLet's start with the model we first saw
###Code
dropout_value = 0.01
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
# Input Convolution Block
self.convblock1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=10, kernel_size=(3, 3), padding=1, bias=False),
nn.BatchNorm2d(10),
nn.Dropout(dropout_value),
nn.ReLU()
) # input_side = 28, output_size = 28, RF = 3
# CONVOLUTION BLOCK 1
self.convblock2 = nn.Sequential(
nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=1, bias=False),
nn.BatchNorm2d(10),
nn.Dropout(dropout_value),
nn.ReLU()
) # output_size = 28, RF = 5
# TRANSITION BLOCK 1
        self.pool1 = nn.MaxPool2d(2, 2) # output_size = 14, RF = 6
self.convblock3 = nn.Sequential(
nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(10),
nn.Dropout(dropout_value),
nn.ReLU()
) # output_size = 12, RF = 10
# CONVOLUTION BLOCK 2
self.convblock4 = nn.Sequential(
nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(10),
nn.ReLU()
) # output_size = 10, RF = 14
self.convblock5 = nn.Sequential(
nn.Conv2d(in_channels=10, out_channels=10, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(10),
nn.Dropout(dropout_value),
nn.ReLU()
) # output_size = 8, RF = 18
self.convblock6 = nn.Sequential(
nn.Conv2d(in_channels=10, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(16),
nn.Dropout(dropout_value),
nn.ReLU()
) # output_size = 6, RF = 22
self.convblock7 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=16, kernel_size=(3, 3), padding=0, bias=False),
nn.BatchNorm2d(16),
nn.Dropout(dropout_value),
nn.ReLU()
) # output_size = 4, RF = 26
# OUTPUT BLOCK
self.gap = nn.AvgPool2d(kernel_size=(4,4))
self.convblock8 = nn.Sequential(
nn.Conv2d(in_channels=16, out_channels=10, kernel_size=(1, 1), padding=0, bias=False),
# nn.ReLU() NEVER!
) # output_size = 1, RF = 26
def forward(self, x):
x = self.convblock1(x)
x = self.convblock2(x)
x = self.pool1(x)
x = self.convblock3(x)
x = self.convblock4(x)
x = self.convblock5(x)
x = self.convblock6(x)
x = self.convblock7(x)
x = self.gap(x)
x = self.convblock8(x)
x = x.view(-1, 10)
return F.log_softmax(x, dim=-1)
###Output
_____no_output_____
###Markdown
Model ParamsWe can't emphasize enough how important viewing the model summary is. Unfortunately, there is no in-built model visualizer, so we have to take external help
###Code
!pip install torchsummary
from torchsummary import summary
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(device)
model = Net().to(device)
summary(model, input_size=(1, 28, 28))
# !pip install hiddenlayer
# import hiddenlayer as hl
# # hl.build_graph(model, torch.zeros([1, 1, 28, 28]))
###Output
_____no_output_____
###Markdown
Training and TestingLooking at logs can be boring, so we'll introduce a **tqdm** progress bar to get cooler logs. Let's write the train and test functions
###Code
from tqdm import tqdm
train_losses = []
test_losses = []
train_acc = []
test_acc = []
def train(model, device, train_loader, optimizer, epoch):
model.train()
pbar = tqdm(train_loader)
correct = 0
processed = 0
for batch_idx, (data, target) in enumerate(pbar):
# get samples
data, target = data.to(device), target.to(device)
# Init
optimizer.zero_grad()
# In PyTorch, we need to set the gradients to zero before starting to do backpropragation because PyTorch accumulates the gradients on subsequent backward passes.
# Because of this, when you start your training loop, ideally you should zero out the gradients so that you do the parameter update correctly.
# Predict
y_pred = model(data)
# Calculate loss
loss = F.nll_loss(y_pred, target)
train_losses.append(loss)
# Backpropagation
loss.backward()
optimizer.step()
# Learning rate for onecycle LR # Vamsi - added
# scheduler.step()
# Update pbar-tqdm
pred = y_pred.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
processed += len(data)
pbar.set_description(desc= f'Train set: Loss={loss.item()} Batch_id={batch_idx} Accuracy={100*correct/processed:0.2f}')
train_acc.append(100*correct/processed)
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
test_losses.append(test_loss)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
test_acc.append(100. * correct / len(test_loader.dataset))
###Output
_____no_output_____
###Markdown
Let's Train and test our model
###Code
from torch.optim.lr_scheduler import StepLR,OneCycleLR
EPOCHS = 15
model = Net().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
scheduler = StepLR(optimizer, step_size=6, gamma=0.1)
# scheduler = OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=len(train_loader), epochs=EPOCHS)
for epoch in range(EPOCHS):
print("EPOCH:", epoch, "last LR=",scheduler.get_last_lr(), "LR = ", scheduler.get_lr())
train(model, device, train_loader, optimizer, epoch)
test(model, device, test_loader)
scheduler.step() # for StepLR
fig, axs = plt.subplots(2,2,figsize=(15,10))
axs[0, 0].plot(train_losses)
axs[0, 0].set_title("Training Loss")
axs[1, 0].plot(train_acc)
axs[1, 0].set_title("Training Accuracy")
axs[0, 1].plot(test_losses)
axs[0, 1].set_title("Test Loss")
axs[1, 1].plot(test_acc)
axs[1, 1].set_title("Test Accuracy")
test_acc
###Output
_____no_output_____ |
kaggle_environments/envs/connectx/connectx.ipynb | ###Markdown
ConnectX - Kaggle Environment
###Code
from kaggle_environments import make
env = make("connectx")
print(env.name, env.version)
print("Default Agents: ", *env.agents)
###Output
connectx 1.0.1
Default Agents: random negamax
###Markdown
TLDR;
###Code
def agent(observation, configuration):
board = observation.board
columns = configuration.columns
return [c for c in range(columns) if board[c] == 0][0]
env = make("connectx", debug=True)
# play agent above vs default random agent.
env.run([agent, "random"])
env.render(mode="ipython", width=600, height=500, header=False)
###Output
_____no_output_____
###Markdown
Specification
###Code
import json
print("Configuration:", json.dumps(env.specification.configuration, indent=4, sort_keys=True))
print("Observation:", json.dumps(env.specification.observation, indent=4, sort_keys=True))
print("Action:", json.dumps(env.specification.action, indent=4, sort_keys=True))
###Output
Configuration: {
"actTimeout": {
"default": 2,
"description": "Maximum runtime (seconds) to obtain an action from an agent.",
"minimum": 1,
"type": "integer"
},
"agentExec": {
"default": "PROCESS",
"description": "How the agent is executed alongside the running envionment.",
"enum": [
"LOCAL",
"PROCESS"
],
"type": "string"
},
"agentTimeout": {
"default": 10,
"description": "Maximum runtime (seconds) to initialize an agent.",
"minimum": 1,
"type": "integer"
},
"columns": {
"default": 7,
"description": "The number of columns on the board",
"minimum": 1,
"type": "integer"
},
"episodeSteps": {
"default": 1000,
"description": "Maximum number of steps in the episode.",
"minimum": 1,
"type": "integer"
},
"inarow": {
"default": 4,
"description": "The number of checkers in a row required to win.",
"minimum": 1,
"type": "integer"
},
"rows": {
"default": 6,
"description": "The number of rows on the board",
"minimum": 1,
"type": "integer"
},
"runTimeout": {
"default": 600,
"description": "Maximum runtime (seconds) of an episode (not necessarily DONE).",
"minimum": 1,
"type": "integer"
}
}
Observation: {
"board": {
"default": [],
"description": "Serialized grid (rows x columns). 0 = Empty, 1 = P1, 2 = P2",
"shared": true,
"type": "array"
},
"mark": {
"defaults": [
1,
2
],
"description": "Which checkers are the agents.",
"enum": [
1,
2
]
}
}
Action: {
"default": 0,
"description": "Column to drop a checker onto the board.",
"minimum": 0,
"type": "integer"
}
###Markdown
Manual Play
###Code
env = make("connectx")
# Play against the default negamax agent.
env.play([None, "negamax"], width=800, height=600)
###Output
_____no_output_____
###Markdown
Training using Gym
###Code
from kaggle_environments import make
env = make("connectx", debug=True)
# Training agent in first position (player 1) against the default random agent.
trainer = env.train([None, "random"])
obs = trainer.reset()
for _ in range(10):
env.render()
action = 0 # Action for the agent being trained.
obs, reward, done, info = trainer.step(action)
if done:
obs = trainer.reset()
###Output
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 2 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 2 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 2 | 0 | 2 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
###Markdown
ConnectX - Kaggle Environment
###Code
from kaggle_environments import make
env = make("connectx")
print(env.name, env.version)
print("Default Agents: ", *env.agents)
###Output
connectx 1.0.1
Default Agents: random negamax
###Markdown
TLDR;
###Code
def agent(observation, configuration):
board = observation.board
columns = configuration.columns
return [c for c in range(columns) if board[c] == 0][0]
env = make("connectx", debug=True)
# play agent above vs default random agent.
env.run([agent, "random"])
env.render(mode="ipython", width=600, height=500, header=False)
###Output
_____no_output_____
###Markdown
Specification
###Code
import json
print("Configuration:", json.dumps(env.specification.configuration, indent=4, sort_keys=True))
print("Observation:", json.dumps(env.specification.observation, indent=4, sort_keys=True))
print("Action:", json.dumps(env.specification.action, indent=4, sort_keys=True))
###Output
Configuration: {
"actTimeout": {
"default": 2,
"description": "Maximum runtime (seconds) to obtain an action from an agent.",
"minimum": 1,
"type": "integer"
},
"agentExec": {
"default": "PROCESS",
"description": "How the agent is executed alongside the running envionment.",
"enum": [
"LOCAL",
"PROCESS"
],
"type": "string"
},
"agentTimeout": {
"default": 10,
"description": "Maximum runtime (seconds) to initialize an agent.",
"minimum": 1,
"type": "integer"
},
"columns": {
"default": 7,
"description": "The number of columns on the board",
"minimum": 1,
"type": "integer"
},
"episodeSteps": {
"default": 1000,
"description": "Maximum number of steps in the episode.",
"minimum": 1,
"type": "integer"
},
"inarow": {
"default": 4,
"description": "The number of checkers in a row required to win.",
"minimum": 1,
"type": "integer"
},
"rows": {
"default": 6,
"description": "The number of rows on the board",
"minimum": 1,
"type": "integer"
},
"runTimeout": {
"default": 600,
"description": "Maximum runtime (seconds) of an episode (not necessarily DONE).",
"minimum": 1,
"type": "integer"
}
}
Observation: {
"board": {
"default": [],
"description": "Serialized grid (rows x columns). 0 = Empty, 1 = P1, 2 = P2",
"shared": true,
"type": "array"
},
"mark": {
"defaults": [
1,
2
],
"description": "Which checkers are the agents.",
"enum": [
1,
2
]
}
}
Action: {
"default": 0,
"description": "Column to drop a checker onto the board.",
"minimum": 0,
"type": "integer"
}
###Markdown
Manual Play
###Code
env = make("battlegeese", configuration={"agentExec": "LOCAL"})
# Play against 3 default shortest agents.
env.play([None, "negamax"], width=800, height=600)
###Output
_____no_output_____
###Markdown
Training using Gym
###Code
from kaggle_environments import make
env = make("connectx", debug=True)
# Training agent in first position (player 1) against the default random agent.
trainer = env.train([None, "random"])
obs = trainer.reset()
for _ in range(10):
env.render()
action = 0 # Action for the agent being trained.
obs, reward, done, info = trainer.step(action)
if done:
obs = trainer.reset()
###Output
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 2 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 2 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 2 | 2 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 1 | 0 | 2 | 0 | 0 | 2 | 0 |
+---+---+---+---+---+---+---+
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
| 0 | 0 | 0 | 0 | 0 | 0 | 0 |
+---+---+---+---+---+---+---+
|
notebooks/Read_SEGY_with_ObsPy.ipynb | ###Markdown
Read SEG-Y with `obspy`Before going any further, you might like to know, [What is SEG-Y?](http://www.agilegeoscience.com/blog/2014/3/26/what-is-seg-y.html). See also the articles in [SubSurfWiki](http://www.subsurfwiki.org/wiki/SEG_Y) and [Wikipedia](https://en.wikipedia.org/wiki/SEG_Y).We'll use the [obspy](https://github.com/obspy/obspy) seismology library to read and write SEGY data. Technical SEG-Y documentation:* [SEG-Y Rev 1](http://seg.org/Portals/0/SEG/News%20and%20Resources/Technical%20Standards/seg_y_rev1.pdf)* [SEG-Y Rev 2 proposal](https://www.dropbox.com/s/txrqsfuwo59fjea/SEG-Y%20Rev%202.0%20Draft%20August%202015.pdf?dl=0) and [draft repo](http://community.seg.org/web/technical-standards-committee/documents/-/document_library/view/6062543)
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
ls -l ../data/*.sgy
###Output
_____no_output_____
###Markdown
2D data
###Code
filename = '../data/HUN00-ALT-01_STK.sgy'
from obspy.io.segy.segy import _read_segy
section = _read_segy(filename)
# OPTIONS
# headonly=True — only reads the header info, then you can index in on-the-fly.
# unpack_headers=True — slows you down here and isn't really required.
data = np.vstack([t.data for t in section.traces])
plt.figure(figsize=(16,8))
plt.imshow(data.T, cmap="Greys")
plt.colorbar(shrink=0.5)
plt.show()
section.traces[0]
section.textual_file_header
###Output
_____no_output_____
###Markdown
Aargh... OK, fine, we'll reformat this.
###Code
def chunk(string, width=80):
try:
# Make sure we don't have a ``bytes`` object.
string = string.decode()
except:
# String is already a string, carry on.
pass
lines = int(np.ceil(len(string) / width))
result = ''
for i in range(lines):
line = string[i*width:i*width+width]
result += line + (width-len(line))*' ' + '\n'
return result
s = section.textual_file_header.decode()
print(chunk(s))
section.traces[0]
t = section.traces[0]
t.npts
t.header
###Output
_____no_output_____
###Markdown
3D dataEither use the small volume, or **[get the large dataset from Agile's S3 bucket](https://s3.amazonaws.com/agilegeo/Penobscot_0-1000ms.sgy.gz)**
###Code
#filename = '../data/F3_very_small.sgy'
filename = '../data/Penobscot_0-1000ms.sgy'
from obspy.io.segy.segy import _read_segy
raw = _read_segy(filename)
data = np.vstack([t.data for t in raw.traces])
###Output
_____no_output_____
###Markdown
I happen to know that the shape of this dataset is 601 × 481.
###Code
_, t = data.shape
seismic = data.reshape((601, 481, t))
###Output
_____no_output_____
###Markdown
Note that we don't actually need to know the last dimension, if we already have two of the three dimensions. `np.reshape()` can compute it for us on the fly:
###Code
seismic = data.reshape((601, 481, -1))
###Output
_____no_output_____
###Markdown
Plot the result...
###Code
clip = np.percentile(seismic, 99)
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
plt.imshow(seismic[100,:,:].T, cmap="Greys", vmin=-clip, vmax=clip)
plt.colorbar(label="Amplitude", shrink=0.8)
ax.set_xlabel("Trace number")
ax.set_ylabel("Time sample")
plt.show()
###Output
_____no_output_____
###Markdown
Read SEG-Y with `obspy`Before going any further, you might like to know, [What is SEG-Y?](http://www.agilegeoscience.com/blog/2014/3/26/what-is-seg-y.html). See also the articles in [SubSurfWiki](http://www.subsurfwiki.org/wiki/SEG_Y) and [Wikipedia](https://en.wikipedia.org/wiki/SEG_Y).We'll use the [obspy](https://github.com/obspy/obspy) seismology library to read and write SEGY data. Technical SEG-Y documentation:* [SEG-Y Rev 1](http://seg.org/Portals/0/SEG/News%20and%20Resources/Technical%20Standards/seg_y_rev1.pdf)* [SEG-Y Rev 2 proposal](https://www.dropbox.com/s/txrqsfuwo59fjea/SEG-Y%20Rev%202.0%20Draft%20August%202015.pdf?dl=0) and [draft repo](http://community.seg.org/web/technical-standards-committee/documents/-/document_library/view/6062543)
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
ls -l ../data/*.sgy
###Output
-rw-rw-r--@ 1 matt staff 3811416128 12 Jun 2015 ../data/3D_gathers_pstm_nmo_X1001.sgy
-rw-r--r--@ 1 matt staff 256732 27 Aug 2015 ../data/F3_very_small.sgy
-rw-r--r--@ 1 matt staff 48281760 28 Aug 2015 ../data/HUN00-ALT-01_STK.sgy
-rw-r--r--@ 1 matt staff 474438768 25 Aug 2015 ../data/Penobscot.sgy
-rw-r--r--@ 1 matt staff 359620364 12 Sep 2016 ../data/Penobscot_0-1000ms.sgy
###Markdown
2D data
###Code
filename = '../data/HUN00-ALT-01_STK.sgy'
from obspy.io.segy.segy import _read_segy
section = _read_segy(filename)
# OPTIONS
# headonly=True — only reads the header info, then you can index in on-the-fly.
# unpack_headers=True — slows you down here and isn't really required.
data = np.vstack([t.data for t in section.traces])
plt.figure(figsize=(16,8))
plt.imshow(data.T, cmap="Greys")
plt.colorbar(shrink=0.5)
plt.show()
section.traces[0]
section.textual_file_header
###Output
_____no_output_____
###Markdown
Aargh... OK, fine, we'll reformat this.
###Code
def chunk(string, width=80):
try:
# Make sure we don't have a ``bytes`` object.
string = string.decode()
except:
# String is already a string, carry on.
pass
lines = int(np.ceil(len(string) / width))
result = ''
for i in range(lines):
line = string[i*width:i*width+width]
result += line + (width-len(line))*' ' + '\n'
return result
s = section.textual_file_header.decode()
print(chunk(s))
section.traces[0]
t = section.traces[0]
t.npts
t.header
###Output
_____no_output_____
###Markdown
3D dataEither use the small volume, or **[get the large dataset from Agile's S3 bucket](https://s3.amazonaws.com/agilegeo/Penobscot_0-1000ms.sgy.gz)**
###Code
filename = '../data/F3_very_small.sgy'
# filename = '../data/Penobscot_0-1000ms.sgy'
from obspy.io.segy.segy import _read_segy
raw = _read_segy(filename)
data = np.vstack([t.data for t in raw.traces])
###Output
_____no_output_____
###Markdown
I happen to know that the shape of this dataset is 601 × 481.
###Code
_, t = data.shape
seismic = data.reshape((601, 481, t))
###Output
_____no_output_____
###Markdown
Note that we don't actually need to know the last dimension, if we already have two of the three dimensions. `np.reshape()` can compute it for us on the fly:
###Code
seismic = data.reshape((601, 481, -1))
###Output
_____no_output_____
###Markdown
Plot the result...
###Code
clip = np.percentile(seismic, 99)
fig = plt.figure(figsize=(12,6))
ax = fig.add_subplot(111)
plt.imshow(seismic[100,:,:].T, cmap="Greys", vmin=-clip, vmax=clip)
plt.colorbar(label="Amplitude", shrink=0.8)
ax.set_xlabel("Trace number")
ax.set_ylabel("Time sample")
plt.show()
###Output
_____no_output_____ |
labs/ex04/template/ex04.ipynb | ###Markdown
Cross-Validation and Bias-Variance decomposition Cross-ValidationImplementing 4-fold cross-validation below:
###Code
from __future__ import absolute_import
import helpers
from costs import compute_mse, compute_rmse
def ridge_regression(y, tx, lamb):
"""implement ridge regression."""
# ***************************************************
# INSERT YOUR CODE HERE
# ridge regression: TODO
# ***************************************************
# Hes = tx.T * tx + 2*N*lambda * I_m
G = np.eye(tx.shape[1])
G[0, 0] = 0
hes = np.dot(tx.T, tx) + lamb * G
weight = np.linalg.solve(hes, np.dot(tx.T, y))
mse = compute_mse(y, tx, weight)
return mse, weight
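# Note: the solve above computes the regularized normal equations
#     (tx.T @ tx + lamb * G) w = tx.T @ y,
# where G is the identity except G[0, 0] = 0, so the intercept (bias) column
# from build_poly is not penalized.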
from build_polynomial import build_poly
from plots import cross_validation_visualization
# load dataset
x, y = helpers.load_data()
def build_k_indices(y, k_fold, seed):
"""build k indices for k-fold."""
num_row = y.shape[0]
interval = int(num_row / k_fold)
np.random.seed(seed)
indices = np.random.permutation(num_row)
k_indices = [indices[k * interval: (k + 1) * interval]
for k in range(k_fold)]
return np.array(k_indices)
def cross_validation(y, x, k_indices, k, lamb, degree, rmse=False):
"""return the loss of ridge regression."""
# ***************************************************
# Split data into K groups according to indices
# get k'th subgroup in test, others in train:
# ***************************************************
x = np.array(x)
y = np.array(y)
train_ind = np.concatenate((k_indices[:k], k_indices[k+1:]), axis=0)
train_ind = np.reshape(train_ind, (train_ind.size,))
test_ind = k_indices[k]
# Note: different from np.ndarray, tuple is name[index,]
# ndarray is name[index,:]
train_x = x[train_ind,]
train_y = y[train_ind,]
test_x = x[test_ind,]
test_y = y[test_ind,]
# ***************************************************
# INSERT YOUR CODE HERE
# form data with polynomial degree:
# ***************************************************
train_x = build_poly(train_x, degree)
test_x = build_poly(test_x, degree)
# ***************************************************
# INSERT YOUR CODE HERE
# ridge regression:
# ***************************************************
loss_tr, weight = ridge_regression(train_y, train_x, lamb)
# Test with sklearn ridge solve.
# clf = linear_model.ridge_regression(train_x, train_y, alpha=lamb)
# weight = clf
# ***************************************************
# INSERT YOUR CODE HERE
# calculate the loss for train and test data: TODO
# ***************************************************
''' Compute MSE by ridge weights '''
loss_tr = compute_mse(train_y, train_x, weight)
loss_te = compute_mse(test_y, test_x, weight)
# loss_tr = compute_mse(train_y, train_x, weight)
# loss_te = compute_mse(test_y, test_x, weight)
if rmse is True:
loss_tr = compute_rmse(loss_tr)
loss_te = compute_rmse(loss_te)
return loss_tr, loss_te
def cross_validation_demo():
seed = 1
degree = 7
k_fold = 4
lambdas = np.logspace(-4, 2, 30)
# y,x = data_load()
# split data in k fold
k_indices = build_k_indices(y, k_fold, seed)
# define lists to store the loss of training data and test data
mse_tr = []
mse_te = []
# ***************************************************
# INSERT YOUR CODE HERE
# cross validation:
# ***************************************************
for lamb in lambdas:
_mse_tr = []
_mse_te = []
for k in range(k_fold):
loss_tr, loss_te = cross_validation(y,x,k_indices,k,lamb,degree, rmse=True)
_mse_tr += [loss_tr]
_mse_te += [loss_te]
avg_tr = np.average(_mse_tr)
avg_te = np.average(_mse_te)
mse_tr += [avg_tr]
mse_te += [avg_te]
cross_validation_visualization(lambdas, mse_tr, mse_te)
print(mse_tr, mse_te)
cross_validation_demo()
###Output
[0.2463628441522478, 0.24639438591167401, 0.24645941110276431, 0.24658005338575609, 0.24677578124864258, 0.24704739127521619, 0.24736716366662131, 0.24768951562092667, 0.24797475941004027, 0.24820503327586749, 0.24838503221556793, 0.24853562755481065, 0.24868802498781456, 0.24887783432222546, 0.2491344535723842, 0.24946908719898742, 0.24987875785040009, 0.25038021830066681, 0.25106323379008938, 0.25213449970220098, 0.25390868608827477, 0.25668538320877454, 0.26051333181597341, 0.26502756818150064, 0.26958300093355009, 0.27359572573669211, 0.27677542367488595, 0.27912117803359432, 0.28079614680366749, 0.28200952807641783] [0.30794677069151549, 0.30700329490908962, 0.30577806845866173, 0.30433857987019397, 0.30284965988225671, 0.30151720709125496, 0.30047667090943508, 0.29972396235081072, 0.29914500761080542, 0.29858948174603678, 0.29792086139559132, 0.29703668083981299, 0.29588598916694331, 0.29449858769737425, 0.29300998188696631, 0.29165011114761408, 0.29069675514776411, 0.2904510237695816, 0.29128497157399508, 0.29371898010988412, 0.29837731555957536, 0.30565767395539856, 0.31522032126465971, 0.32581596090022685, 0.33579258040094573, 0.34384700797237594, 0.34938040081229882, 0.35238428314196707, 0.35318557183957888, 0.35228291067717354]
###Markdown
Bias-Variance DecompositionVisualize bias-variance trade-off by implementing the function `bias_variance_demo()` below:
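For reference, the quantity visualized here decomposes (for squared error with irreducible noise variance $\sigma^2$) as $\mathbb{E}\big[(y - \hat{f}(x))^2\big] = \big(\mathbb{E}[\hat{f}(x)] - f(x)\big)^2 + \mathrm{Var}\big[\hat{f}(x)\big] + \sigma^2$, where the expectation runs over training sets (here, the different random seeds).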
###Code
def least_squares(y, tx):
"""calculate the least squares solution."""
# ***************************************************
# INSERT YOUR CODE HERE
# least squares: TODO
# returns mse, and optimal weights
# ***************************************************
weight = np.linalg.solve(np.dot(tx.T,tx), np.dot(tx.T,y))
return compute_mse(y,tx, weight),weight
from split_data import split_data
from plots import bias_variance_decomposition_visualization
def bias_variance(function, x, weight, variance_e):
'''
For linear model bias-variance calculation. The dimension is len(weight)
:param y:
:param x:
:param weight: beta of linear model
:param function:
:param variance_e:
:return:
'''
y = function(x[:,1])
# N = len(x)
# res = np.dot(x, weight)
# error = variance_e * (len(weight) / N) + np.sum( (y - np.dot(x, weight)) **2 )/ N
# return compute_rmse(error)
return compute_rmse(compute_mse(y,x,weight))
def bias_variance_demo():
"""The entry."""
# define parameters
seeds = range(100)
num_data = 10000
ratio_train = 0.005
degrees = range(1, 10)
# define list to store the variable
rmse_tr = np.empty((len(seeds), len(degrees)))
rmse_te = np.empty((len(seeds), len(degrees)))
for index_seed, seed in enumerate(seeds):
np.random.seed(seed)
x = np.linspace(0.1, 2 * np.pi, num_data)
y = np.sin(x) + 0.3 * np.random.randn(num_data).T
# ***************************************************
# INSERT YOUR CODE HERE
# split data with a specific seed: TODO
# ***************************************************
train_x, train_y, test_x, test_y = split_data(x,y,ratio_train,seed)
# ***************************************************
# INSERT YOUR CODE HERE
# bias_variance_decomposition: TODO
# ***************************************************
for ind_degree, degree in enumerate(degrees):
# Use least square
x_tr = build_poly(train_x, degree)
x_te = build_poly(test_x, degree)
mse, weight = least_squares(train_y, x_tr)
# rmse_tr[index_seed][ind_degree] = bias_variance(np.sin, x_tr, weight, 1)
# rmse_te[index_seed][ind_degree] = bias_variance(np.sin, x_te, weight, 1)
rmse_tr[index_seed][ind_degree] = compute_rmse(compute_mse(train_y, x_tr, weight))
rmse_te[index_seed][ind_degree] =compute_rmse(compute_mse(test_y, x_te, weight))
bias_variance_decomposition_visualization(degrees, rmse_tr, rmse_te)
bias_variance_demo()
###Output
_____no_output_____ |
docs/source/examples/notebooks/basic_example.ipynb | ###Markdown
Basic exampleThis example demonstrates some of the core functionality and export features provided by rabpro.Note: you will need to download HydroBasins to run this demo. See [this notebook](https://github.com/VeinsOfTheEarth/rabpro/blob/main/docs/source/examples/notebooks/downloading_data.ipynb) for download instructions.
###Code
import pandas as pd
import geopandas as gpd
from matplotlib import pyplot as plt
import rabpro
###Output
_____no_output_____
###Markdown
First, we need to specify a point for which we'd like a watershed delineated.
###Code
coords = (56.22659, -130.87974)
###Output
_____no_output_____
###Markdown
Now we can initialize the profiler. The rabpro profiler is the main entry point into the package - it provides wrapper functions for most of rabpro's core functionality.Note that we can optionally specify a drainage area (`da`) or set `force_merit` to `True`, to ensure that we use MERIT data rather than HydroBasins to perform basin delineation.
###Code
rpo = rabpro.profiler(coords, name='basic_test')
###Output
_____no_output_____
###Markdown
rabpro can now compute the watershed for this point. Since we are not providing a pre-known drainage area to the profiler or specifying `force_merit=True`, rabpro will use HydroBasins to delineate the watershed. Delineation may take a minute or two as rabpro has to identify the correct level-12 HydroBasins shapefile and load it into memory (these files are >100MB).
###Code
%%capture
rpo.delineate_basin()
###Output
_____no_output_____
###Markdown
The basin geometry is stored in a GeoPandas GeoDataFrame, and can be accessed through the `rpo` object.
###Code
rpo.watershed.plot()
###Output
_____no_output_____
###Markdown
Next, we try to compute the river elevation profile. This will fail because we have not yet downloaded MERIT data.
###Code
%%capture
rpo.elev_profile(dist_to_walk_km=5)
###Output
_____no_output_____
###Markdown
If you'd like to complete this task, you will need to download the MERIT tile `n30w150`.
###Code
%%capture
# we can use rabpro.utils.coords_to_merit_tile to identify the correct tile name
rabpro.utils.coords_to_merit_tile(coords[1], coords[0])
###Output
_____no_output_____
###Markdown
Detailed instructions, including how to get a username and password for MERIT-Hydro downloads, are [here](https://github.com/VeinsOfTheEarth/rabpro/blob/main/docs/source/examples/notebooks/downloading_data.ipynb). Note that the MERIT tile will consume ~1.6 GB of space when unzipped.`download_merit_hydro()` will automatically rebuild virtual rasters, which are how rabpro interacts with the individual geotiffs, after downloading a tile. ```pythonfrom rabpro import data_utilsdata_utils.download_merit_hydro('n30w150', username=your_merit_username, password=your_merit_password)``` Now we can try again:
###Code
rpo.elev_profile(dist_to_walk_km=5)
plt.plot(rpo.flowline['Distance (m)'], rpo.flowline['Elevation (m)'])
plt.xlabel('Along-stream distance, m')
plt.ylabel('Elevation')
###Output
_____no_output_____
###Markdown
The along-stream distance is with respect to the provided coordinate. You can use the `rpo.flowline` GeoDataFrame to compute slopes. You can export the `watershed` GeoDataFrame and/or the `flowline` GeoDataFrame using the `.export()` method.
###Code
%%capture
rpo.export("all")
###Output
_____no_output_____
###Markdown
Once the subbasins are delineated, rabpro can use Google Earth Engine (GEE) to compute statistics for each subbasin. Using Google Earth Engine reduces the need to store large datasets locally, and speeds up computation by using GEE's parallel distributed computing capabilities.Note: In order to use rabpro for basin statistics, you'll need to sign up for a GEE account. See rabpro's documentation for more information.
###Code
# Specify which statistics to calculate for the JRC/GSW1_3/GlobalSurfaceWater dataset's occurrence band
statlist = ['min', 'max', 'range', 'std', 'sum', 'pct50', 'pct3']
data = rabpro.basin_stats.Dataset("JRC/GSW1_3/GlobalSurfaceWater", "occurrence", stats=statlist)
d, t = rpo.basin_stats([data], folder="rabpro test")
###Output
Submitting basin stats task to GEE for JRC/GSW1_3/GlobalSurfaceWater...
###Markdown
The output data will be placed in the `rabpro test` folder in your Google Drive if it already exists. If not, GEE will create a new `rabpro test` folder at the root level of your Drive.`basin_stats` returns a url to the resulting csv data which can be read directly with `pandas`:
###Code
pd.read_csv(d[0])
###Output
_____no_output_____
###Markdown
Basic exampleThis example demonstrates some of the core functionality and export features provided by rabpro.Note: you will need to download HydroBasins to run this demo. See [this notebook](https://github.com/VeinsOfTheEarth/rabpro/blob/main/docs/source/examples/notebooks/downloading_data.ipynb) for download instructions.
###Code
import pandas as pd
import geopandas as gpd
from matplotlib import pyplot as plt
import rabpro
###Output
_____no_output_____
###Markdown
First, we need to specify a point for which we'd like a watershed delineated.
###Code
coords = (56.22659, -130.87974)
###Output
_____no_output_____
###Markdown
Now we can initialize the profiler. The rabpro profiler is the main entry point into the package - it provides wrapper functions for most of rabpro's core functionality.Note that we can optionally specify a drainage area (`da`) or set `force_merit` to `True`, to ensure that we use MERIT data rather than HydroBasins to perform basin delineation.
###Code
rpo = rabpro.profiler(coords, name='basic_test')
###Output
_____no_output_____
###Markdown
rabpro can now compute the watershed for this point. Since we are not providing a pre-known drainage area to the profiler or specifying `force_merit=True`, rabpro will use HydroBasins to delineate the watershed. Delineation may take a minute or two as rabpro has to identify the correct level-12 HydroBasins shapefile and load it into memory (these files are >100MB).
###Code
%%capture
rpo.delineate_basin()
###Output
_____no_output_____
###Markdown
The basin geometry is stored in a GeoPandas GeoDataFrame, and can be accessed through the `rpo` object.
###Code
rpo.watershed.plot()
###Output
_____no_output_____
###Markdown
Next, we try to compute the river elevation profile. This will fail because we have not yet downloaded MERIT data.
###Code
%%capture
rpo.elev_profile(dist_to_walk_km=5)
###Output
_____no_output_____
###Markdown
If you'd like to complete this task, you will need to download the MERIT tile `n30w150`. Detailed instructions, including how to get a username and password for MERIT-Hydro downloads, are [here](https://github.com/VeinsOfTheEarth/rabpro/blob/main/docs/source/examples/notebooks/downloading_data.ipynb). Note that the MERIT tile will consume ~1.6 GB of space when unzipped.`download_merit_hydro()` will automatically rebuild virtual rasters, which are how rabpro interacts with the individual geotiffs, after downloading a tile. ```pythonfrom rabpro import data_utilsdata_utils.download_merit_hydro('n30w150', username=your_merit_username, password=your_merit_password)``` Now we can try again:
###Code
rpo.elev_profile(dist_to_walk_km=5)
plt.plot(rpo.flowline['Distance (m)'], rpo.flowline['Elevation (m)'])
plt.xlabel('Along-stream distance, m')
plt.ylabel('Elevation')
###Output
Extracting flowpath from DEM...
###Markdown
The along-stream distance is with respect to the provided coordinate. You can use the `rpo.flowline` GeoDataFrame to compute slopes. You can export the `watershed` GeoDataFrame and/or the `flowline` GeoDataFrame using the `.export()` method.
###Code
%%capture
rpo.export("all")
###Output
_____no_output_____
###Markdown
Once the subbasins are delineated, rabpro can use Google Earth Engine (GEE) to compute statistics for each subbasin. Using Google Earth Engine reduces the need to store large datasets locally, and speeds up computation by using GEE's parallel distributed computing capabilities.Note: In order to use rabpro for basin statistics, you'll need to sign up for a GEE account. See rabpro's documentation for more information.
###Code
# Specify which statistics to calculate for the JRC/GSW1_3/GlobalSurfaceWater dataset's occurrence band
statlist = ['min', 'max', 'range', 'std', 'sum', 'pct50', 'pct3']
data = rabpro.basin_stats.Dataset("JRC/GSW1_3/GlobalSurfaceWater", "occurrence", stats=statlist)
d, t = rpo.basin_stats([data], folder="rabpro test")
###Output
Submitting basin stats task to GEE for JRC/GSW1_3/GlobalSurfaceWater...
###Markdown
The output data will be placed in the `rabpro test` folder in your Google Drive if it already exists. If not, GEE will create a new `rabpro test` folder at the root level of your Drive.`basin_stats` returns a url to the resulting csv data which can be read directly with `pandas`:
###Code
pd.read_csv(d[0])
###Output
_____no_output_____ |
ddsp/colab/experiments/02_initialize_gan.ipynb | ###Markdown
Timbre Painting in DDSP Basic Upsampler
###Code
%load_ext autoreload
%autoreload 2
# Extract some f0
from ddsp.synths import BasicUpsampler
import numpy as np
from matplotlib import pyplot as plt
from ddsp.colab.jupyter_utils import show_audio
sample_rate = 16000
n_samples = 4*sample_rate
synth = BasicUpsampler(n_samples)
f0_hz = np.linspace(400, 800, 100).reshape([1,-1,1])
amplitudes = np.abs(np.sin(np.linspace(0, 2*np.pi, 100))).reshape([1,-1,1])
wav = synth.get_signal(amplitudes, f0_hz)
show_audio(wav, focus_points=[0.45, 0.8], focus_windows=[2000, 2000])
###Output
_____no_output_____
###Markdown
Basic Upsampler + ParallelWaveGANUpsampler
###Code
from ddsp.training.decoders import TimbrePaintingDecoder
decoder = TimbrePaintingDecoder(name='tpd', input_keys=('amplitudes', 'f0_hz'))
batch = {
'f0_hz': f0_hz,
'amplitudes': amplitudes
}
controls = decoder(batch)
wav = controls['audio_tensor'].numpy().squeeze()
show_audio(wav, focus_points=[0.05, 0.95], focus_windows=[2000, 2000])
###Output
_____no_output_____
###Markdown
Discriminator
###Code
from ddsp.training import discriminator
critic = discriminator.ParallelWaveGANDiscriminator(input_keys=['audio_tensor', 'f0_hz', 'amplitudes'])
critic_score = critic(controls)
critic_score
###Output
_____no_output_____
###Markdown
GAN Autoencoder
###Code
import ddsp
from ddsp.training import models, preprocessing, decoders, discriminator
from ddsp import synths
dag = [(synths.TensorToAudio(), ['audio_tensor'])]
ae = models.Autoencoder(
preprocessor=None,
encoder=None,
decoder=decoders.TimbrePaintingDecoder(name='tpd', input_keys=('amplitudes', 'f0_hz')),
processor_group=ddsp.processors.ProcessorGroup(dag=dag, name='processor_group'),
discriminator=discriminator.ParallelWaveGANDiscriminator(input_keys=['discriminator_audio', 'f0_hz', 'amplitudes']),
losses=[]
)
batch = {
'f0_hz': f0_hz,
'amplitudes': amplitudes,
'audio': np.random.normal(0,1,size=n_samples)
}
outputs = ae(batch)
outputs.keys()
###Output
_____no_output_____ |
Project/6_multiindex.ipynb | ###Markdown
Parallelizing the computation, 2nd approachIn the last notebook we have created a table of track ids and offensiveness.But we have lost the detailed multi-index structure.The flattening is not necessary for pandas outer-join to work.We repeat the procedure and generate a second table which has a deeply nested multiindex.The structure of this notebook is basically identical to the previous one.
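As a minimal, self-contained sketch (with made-up toy frames, not the lyrics tables) of why the flattening is optional: the join happens on the `word` column, and the nested index can be built afterwards.

```python
import pandas as pd

# Toy stand-ins (assumption: not the real lyrics / rating tables).
counts = pd.DataFrame({"track_id": ["t1", "t1", "t2"],
                       "word": ["love", "hate", "love"],
                       "count": [3, 1, 2]})
ratings = pd.DataFrame({"category": ["pos", "neg"],
                        "strength": [1, 2]},
                       index=["love", "hate"])

# Join the 'word' column against the ratings index ...
joint = counts.join(ratings, on="word", how="left")
# ... and only then build the deeply nested index.
joint_indexed = joint.set_index(["track_id", "category", "strength"])
print(joint_indexed)
```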
###Code
import numpy as np
import pandas as pd
word_table = pd.read_pickle("../pickles/word_table_cleaned.pickle")
word_table.head()
import sqlite3
conn = sqlite3.connect("../datasets/mxm_dataset.db")
cursor = conn.cursor()
cursor.execute("SELECT track_id, word, count FROM lyrics ORDER BY track_id;")
track_word_count = cursor.fetchall()
cursor.close()
track_word_count[:5]
sqldb_frame = pd.DataFrame(track_word_count, columns=["track_id", "word", "count"])
del track_word_count
sqldb_frame["word"]=sqldb_frame["word"].astype(str)
###Output
_____no_output_____
###Markdown
Performing an outer join to match words between the lyrics and the offensiveness ratings
###Code
joint = sqldb_frame.join(word_table, on="word", how="left", lsuffix='_caller', rsuffix='_other')
print(joint.shape)
joint.head()
joint_indexed = joint.set_index(["track_id", "category", "strength", "target"])
joint_indexed.loc[("TRAADYI128E078FB38",),]
###Output
_____no_output_____
###Markdown
Aggregating the dataThe joint table uses track ids and offensiveness categories as indices. This is what we want, but we still have individual cells for every word.Now we aggregate the items for every index. We sum the entries. This gives us the total count of words in each category.
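A toy illustration of this reduction (hypothetical index levels, independent of the exact grouping call used in the next cell):

```python
import pandas as pd

idx = pd.MultiIndex.from_tuples(
    [("t1", "neg"), ("t1", "neg"), ("t1", "pos"), ("t2", "pos")],
    names=["track_id", "category"])
counts = pd.Series([1, 2, 3, 4], index=idx, name="count")

# Sum the word counts within every (track_id, category) group.
totals = counts.groupby(level=["track_id", "category"]).sum()
print(totals)
```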
###Code
joint_indexed.index.is_unique
joint_indexed_filtered = joint_indexed["count"]
joint_indexed_filtered.head()
joint_aggregated = joint_indexed_filtered.agg("sum")
offensiveness_rating = joint_indexed_filtered.groupby(str, axis=0).agg("sum")
offensiveness_rating.head()
offensiveness_rating.to_pickle("../pickles/offensiveness_rating_structured")
###Output
_____no_output_____ |
notebooks/exploratory/307_afox_trackendsandpaths_sumsandmeans_nonorth_final.ipynb | ###Markdown
OSNAP line Lagrangian particle tracking investigation of the cold/fresh blob Technical preamble
###Code
# import matplotlib.colors as colors
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
import xarray as xr
from datetime import datetime, timedelta
import seaborn as sns
# from matplotlib.colors import ListedColormap
import cmocean as co
import pandas as pd
import matplotlib.dates as mdates
import cartopy.crs as ccrs
import cartopy
import seawater as sw
from matplotlib import colors as c
from matplotlib import ticker
# from xhistogram.xarray import histogram
sns.set(style="darkgrid")
xr.set_options(keep_attrs=True)
np.warnings.filterwarnings('ignore')
sns.set_palette("colorblind")
xr.set_options(keep_attrs=True);
plt.rc('font', size=14) #controls default text size
plt.rc('axes', titlesize=14) #fontsize of the title
plt.rc('axes', labelsize=14) #fontsize of the x and y labels
plt.rc('xtick', labelsize=14) #fontsize of the x tick labels
plt.rc('ytick', labelsize=14) #fontsize of the y tick labels
plt.rc('legend', fontsize=14) #fontsize of the legend
plt.rc('savefig', dpi=300) # higher res outputs
###Output
_____no_output_____
###Markdown
Set up paths and read in trajectory data
###Code
# parameters
project_path = Path.cwd() / '..' / '..'
project_path = project_path.resolve()
interim_data_path = Path('/data/spg_fresh_blob_202104_data/2022-02-27_wr-final-runs/data/interim/endtracks/plusDist/')
outputPath = Path('data/processed/sumsAndMeans/noNorth/')
output_data_path = project_path / outputPath
sectionPath = Path('data/external/')
sectionFilename = 'osnap_pos_wp.txt'
sectionname = 'osnap'
# do this year-by-year because of filesizes
year = 2012
nsubsets = 32
# proportion of data in subset
subset = 1.0
yearstr = str(year)
# model mask file
data_path = Path("data/external/iAtlantic/")
experiment_name = "VIKING20X.L46-KKG36107B"
mesh_mask_file = project_path / data_path / "mask" / experiment_name / "1_mesh_mask.nc"
#section lonlat file
sectionPath = Path('data/external/')
sectionFilename = 'osnap_pos_wp.txt'
sectionname = 'osnap'
gsrsectionFilename = 'gsr_pos_wp.txt'
degree2km = 1.852*60.0
# some transport values specific to osnap runs
# randomly seeded 39995 particles, 19886 were in ocean points (the rest were land)
osnap_section_length = 3594572.87839 # m
osnap_subsection_length = 2375914.29783 # m
osnap_section_depth = 4000 # m over which particles launched
osnap_subsection_depth = 1000 # m over which particles launched
osnap_subsection_ocean_area = osnap_subsection_length * osnap_subsection_depth * 2100000 / 2643886
# this is to compensate for not using all the particles. 1 in 10 particles selected.
max_current = 2.0
particle_section_area = max_current * osnap_subsection_length * osnap_subsection_depth / (2643886 * subset)
###Output
_____no_output_____
###Markdown
Load data mesh and masks
###Code
mesh_mask = xr.open_dataset(mesh_mask_file)
mesh_mask = mesh_mask.squeeze()
mesh_mask = mesh_mask.set_coords(["nav_lon", "nav_lat", "nav_lev"])
bathy = mesh_mask.mbathy.rename("number of water filled points")
depth = (mesh_mask.e3t_0 * mesh_mask.tmask).sum("z")
# display(mesh_mask)
###Output
_____no_output_____
###Markdown
section position data
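The next cell converts the waypoints into per-segment geometry: with $111.12\ \mathrm{km} = 1.852 \cdot 60$ km per degree, each segment has $\Delta x \approx \Delta\mathrm{lon}\cdot 111.12\,\mathrm{km}\cdot\cos(\bar\varphi)$ and $\Delta y \approx \Delta\mathrm{lat}\cdot 111.12\,\mathrm{km}$, giving its length $L=\sqrt{\Delta x^2+\Delta y^2}$ and orientation $\cos\theta = \Delta x/L$, $\sin\theta = \Delta y/L$, which are used later to rotate velocities onto the section.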
###Code
lonlat = xr.Dataset(pd.read_csv(project_path / sectionPath / sectionFilename,delim_whitespace=True))
lonlat.lon.attrs['long_name']='Longitude'
lonlat.lat.attrs['long_name']='Latitude'
lonlat.lon.attrs['standard_name']='longitude'
lonlat.lat.attrs['standard_name']='latitude'
lonlat.lon.attrs['units']='degrees_east'
lonlat.lat.attrs['units']='degrees_north'
lonlat2mean= lonlat.rolling({'dim_0':2}).mean()
lonlatdiff = (lonlat.diff('dim_0'))
lonlatdiff = lonlatdiff.assign({'y':lonlatdiff['lat']*degree2km})
lonlatdiff = lonlatdiff.assign({'x':lonlatdiff['lon']*degree2km*np.cos(np.radians(lonlat2mean.lat.data[1:]))})
lonlatdiff=lonlatdiff.assign({'length':np.sqrt(lonlatdiff['x']**2+lonlatdiff['y']**2)})
lonlatdiff=lonlatdiff.assign({'costheta':lonlatdiff['x']/lonlatdiff['length']})
lonlatdiff=lonlatdiff.assign({'sintheta':lonlatdiff['y']/lonlatdiff['length']})
total_length = lonlatdiff.length.sum().data
total_osnap_length = lonlatdiff.length[0:12].sum().data; # exclude section across UK - just there for testing north/south
length_west = xr.concat((xr.DataArray([0],dims=("dim_0"),coords={"dim_0": [0]}),lonlatdiff.length.cumsum()),dim='dim_0')
lonlat
###Output
_____no_output_____
###Markdown
tracks Load VIKING20X dataWe'll first find all the relevant files and then open them as a virtual contiguous dataset.
###Code
# data_stores_subsets = list(sorted(Path(data_path).glob("*_????_subset.zarr/")))[:use_number_subset_years]
data_trackends_subsets = list(sorted(Path(interim_data_path).glob(f"*{yearstr}*.nc/")))
print(data_trackends_subsets)
# ds = xr.concat(
# [xr.open_dataset(store,chunks={
# "ends": 1, "traj": 1024
# }) for store in data_trackends_subsets],
# dim="traj",
# )
ds = xr.concat(
[xr.open_dataset(store) for store in data_trackends_subsets],
dim="traj",
)
display(ds)
print(ds.nbytes / 1e9, "GiB")
ds.isel(ends=0).time.dt.year == year
###Output
_____no_output_____
###Markdown
32 subsets, run separately
###Code
display(ds.time.isel(ends=1))
###Output
_____no_output_____
###Markdown
Subset tracks by OSNAP line cross longitude and depth range
###Code
lonRange=[-37,0]
depthRange=[0,500]
range_str = 'OsnapE_test'
ds = ds.where((ds.isel(ends=0).lon > lonRange[0]) & (ds.isel(ends=0).lon < lonRange[1]))
ds = ds.where((ds.isel(ends=0).z > depthRange[0]) & (ds.isel(ends=0).z < depthRange[1]))
ds = ds.where(ds.isel(ends=0).north_of_osnap == False)
ds = ds.where(ds.isel(ends=0).time.dt.year == year)
ds = ds.dropna('traj', how='all')
ds
ds.north_of_osnap.sum()
###Output
_____no_output_____
###Markdown
Add density (sigma0) to variables
###Code
ds = ds.assign({'rho0':xr.apply_ufunc(
sw.dens,
ds.salt,ds.temp,0,
dask="parallelized",
output_dtypes=[float, ])})
ds.rho0.attrs = {'units':'kg/m3','long_name':'potential density $\rho_0$'}
###Output
_____no_output_____
###Markdown
Velocity conversions from degrees lat/lon per second to m/s
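The particle velocities are stored in degrees per second, so the next cell applies $u\,[\mathrm{m\,s^{-1}}] = u\,[^{\circ}\mathrm{s^{-1}}]\cdot 111.12\times10^{3}\cdot\cos\varphi$ and $v\,[\mathrm{m\,s^{-1}}] = v\,[^{\circ}\mathrm{s^{-1}}]\cdot 111.12\times10^{3}$, then projects onto the section using each particle's segment angles: $u_\perp = v\cos\theta - u\sin\theta$ (normal component) and $u_\parallel = v\sin\theta + u\cos\theta$ (along-section component).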
###Code
ds=ds.assign({'uvel_ms':ds.uvel * degree2km * 1000.0 * np.cos(np.radians(ds.lat))})
ds=ds.assign({'vvel_ms':ds.vvel * degree2km * 1000.0})
ds = ds.assign({'section_index':(ds.isel(ends=0).lon > lonlat.lon).sum(dim='dim_0')-1})
costheta = lonlatdiff.costheta[ds.section_index]
sintheta = lonlatdiff.sintheta[ds.section_index]
ds = ds.assign({'u_normal':ds.isel(ends=0).vvel_ms * costheta -
ds.isel(ends=0).uvel_ms * sintheta})
ds = ds.assign({'u_along':ds.isel(ends=0).vvel_ms * sintheta +
ds.isel(ends=0).uvel_ms * costheta})
###Output
_____no_output_____
###Markdown
Find along-section distances of initial points
###Code
ds = ds.assign({'x':xr.DataArray(length_west[ds.section_index] + lonlatdiff.length[ds.section_index]*
(ds.isel(ends=0).lon - lonlat.lon[ds.section_index])/lonlatdiff.lon[ds.section_index],dims='traj')})
###Output
_____no_output_____
###Markdown
volume, temperature and salt transports along track
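As implemented below, each particle carries a signed volume transport of $\mathrm{sign}(u_\perp)\cdot$ `particle_section_area` $/10^{6}$, i.e. in Sverdrups ($1\,\mathrm{Sv}=10^{6}\,\mathrm{m^{3}\,s^{-1}}$); the temperature, salt, depth and longitude transports are simply that value weighted by the respective property.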
###Code
# at osnap line
ds = ds.assign({'vol_trans_normal':np.sign(ds.u_normal) * particle_section_area/1.0e06})
ds = ds.assign({'particle_vol':ds.vol_trans_normal/ds.u_normal})
# at osnap line
ds = ds.assign({'temp_transport':ds.temp * ds.vol_trans_normal})
ds = ds.assign({'salt_transport':ds.salt * ds.vol_trans_normal})
ds = ds.assign({'depth_transport':ds.z * ds.vol_trans_normal})
ds = ds.assign({'lon_transport':ds.lon * ds.vol_trans_normal})
ds = ds.assign({'tempxvol':ds.temp * ds.particle_vol})
ds = ds.assign({'saltxvol':ds.salt * ds.particle_vol})
ds = ds.assign({'depthxvol':ds.z * ds.particle_vol})
ds = ds.assign({'lonxvol':ds.lon * ds.particle_vol})
ds
###Output
_____no_output_____
###Markdown
check for particles crossing osnap southwards and remove.
###Code
ds = ds.where(ds.isel(ends=0).u_normal >= 0.0)
ds = ds.dropna('traj', how='all')
ds.vol_trans_normal.plot()
###Output
_____no_output_____
###Markdown
Calculate means and sums at osnap crossing
###Code
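# The trajectories are split into `nsubsets` interleaved subsets
# (traj=slice(subsetno, None, nsubsets)). Summing / averaging each subset
# separately builds an ensemble; its spread feeds the 1.96 * std uncertainty
# bands plotted further below.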
total_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
total_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
labcu_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
labcu_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
gulfs_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
gulfs_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
other_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
other_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lc60w_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lc60w_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lcdir_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lcdir_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
green_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Green_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
green_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Green_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
davis_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Davis_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
davis_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Davis_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
hudba_sum_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Hudba_is_source)
.groupby("time").sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
hudba_mean_0 = xr.concat(
[ds.isel(ends=0,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Hudba_is_source)
.groupby("time").mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
###Output
_____no_output_____
###Markdown
calculate means and sums at source. Grouped by osnap crossing time.
###Code
total_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
total_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
labcu_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
labcu_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
gulfs_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
gulfs_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
other_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
other_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lc60w_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lc60w_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lcdir_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lcdir_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
green_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Green_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
green_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Green_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
davis_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Davis_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
davis_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Davis_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
hudba_sum_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Hudba_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
hudba_mean_1 = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).Hudba_is_source)
.groupby(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).time).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
###Output
_____no_output_____
###Markdown
group by times at source
###Code
starttime = np.datetime64('1980-01-01T00:00')
deltat = np.timedelta64(5,'D')
times = np.array([starttime + i * deltat for i in range(2923)])
total_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
total_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
labcu_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
labcu_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LabCu_is_source)
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
gulfs_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
gulfs_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).GulfS_is_source)
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
other_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
other_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).other_is_source)
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno",
)
lc60w_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
lc60w_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LC60W_is_path)
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
lcdir_sum_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby_bins("time",times).sum()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
lcdir_mean_sourcetime = xr.concat(
[ds.isel(ends=1,traj=slice(subsetno,None,nsubsets))
.where(ds.isel(ends=0,traj=slice(subsetno,None,nsubsets)).LCdir_is_path)
.groupby_bins("time",times).mean()
for subsetno in range(0,nsubsets)],
dim="subsetno"
)
total_sum_sourcetime.time_bins.plot()
time_mid = [v.mid for v in total_sum_sourcetime.time_bins.values]
total_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in labcu_sum_sourcetime.time_bins.values]
labcu_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in gulfs_sum_sourcetime.time_bins.values]
gulfs_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in other_sum_sourcetime.time_bins.values]
other_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in lcdir_sum_sourcetime.time_bins.values]
lcdir_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in lc60w_sum_sourcetime.time_bins.values]
lc60w_sum_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in total_mean_sourcetime.time_bins.values]
total_mean_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in labcu_mean_sourcetime.time_bins.values]
labcu_mean_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in gulfs_mean_sourcetime.time_bins.values]
gulfs_mean_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in other_mean_sourcetime.time_bins.values]
other_mean_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in lcdir_mean_sourcetime.time_bins.values]
lcdir_mean_sourcetime["time_bins"]=time_mid
time_mid = [v.mid for v in lc60w_mean_sourcetime.time_bins.values]
lc60w_mean_sourcetime["time_bins"]=time_mid
fig,ax = plt.subplots(1,figsize = (9,7),sharex=True)
# for i in range(16):
# (16*total_sum_0.isel(subsetno=i).vol_trans_normal).plot(ax=ax,alpha=0.4,zorder=1)
total_sum_rolling = (total_sum_0.vol_trans_normal.sum(dim='subsetno')
.rolling(time=18,center=True).mean())
total_sum_0.vol_trans_normal.sum(dim='subsetno').plot(ax=ax,
color='C0',
zorder=10)
total_sum_rolling.plot(ax=ax,
color='C0',
zorder=10)
std = ((32*total_sum_0.vol_trans_normal.rolling(time=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(total_sum_0.time.data,
total_sum_rolling+1.96*std,
total_sum_rolling-1.96*std,
color='C0',
zorder=1,
alpha=0.3)
labcu_sum_rolling = (labcu_sum_0.vol_trans_normal.sum(dim='subsetno')
.rolling(time=18,center=True).mean())
labcu_sum_0.vol_trans_normal.sum(dim='subsetno').plot(ax=ax,
color='C1',
zorder=10)
labcu_sum_rolling.plot(ax=ax,
color='C1',
zorder=10)
std = ((32*labcu_sum_0.vol_trans_normal.rolling(time=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(labcu_sum_0.time.data,
labcu_sum_rolling+1.96*std,
labcu_sum_rolling-1.96*std,
color='C1',
zorder=1,
alpha=0.3)
gulfs_sum_rolling = (gulfs_sum_0.vol_trans_normal.sum(dim='subsetno')
.rolling(time=18,center=True).mean())
gulfs_sum_0.vol_trans_normal.sum(dim='subsetno').plot(ax=ax,
color='C2',
zorder=10)
gulfs_sum_rolling.plot(ax=ax,
color='C2',
zorder=10)
std = ((32*gulfs_sum_0.vol_trans_normal.rolling(time=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(gulfs_sum_0.time.data,
gulfs_sum_rolling+1.96*std,
gulfs_sum_rolling-1.96*std,
color='C2',
zorder=1,
alpha=0.3)
ax.set_ylim(0)
fig,ax = plt.subplots(1,figsize = (9,7),sharex=True)
total_sum_rolling = (total_sum_sourcetime.vol_trans_normal.sum(dim='subsetno')
.rolling(time_bins=1,center=True).mean())
total_sum_rolling.plot(ax=ax,color='C0',zorder=10)
std = ((32*total_sum_sourcetime.vol_trans_normal.rolling(time_bins=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(total_sum_sourcetime.time_bins.data,
total_sum_rolling+1.96*std,
total_sum_rolling-1.96*std,
color='C0',
zorder=1,
alpha=0.3)
labcu_sum_rolling = (labcu_sum_sourcetime.vol_trans_normal.sum(dim='subsetno')
.rolling(time_bins=18,center=True).mean())
labcu_sum_rolling.plot(ax=ax,color='C1',zorder=10)
std = ((32*labcu_sum_sourcetime.vol_trans_normal.rolling(time_bins=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(labcu_sum_sourcetime.time_bins.data,
labcu_sum_rolling+1.96*std,
labcu_sum_rolling-1.96*std,
color='C1',
zorder=1,
alpha=0.3)
gulfs_sum_rolling = (gulfs_sum_sourcetime.vol_trans_normal.sum(dim='subsetno')
.rolling(time_bins=18,center=True).mean())
gulfs_sum_rolling.plot(ax=ax,color='C2',zorder=10)
std = ((32*gulfs_sum_sourcetime.vol_trans_normal.rolling(time_bins=18,center=True).mean())
.std(dim='subsetno'))
ax.fill_between(gulfs_sum_sourcetime.time_bins.data,
gulfs_sum_rolling+1.96*std,
gulfs_sum_rolling-1.96*std,
color='C2',
zorder=1,
alpha=0.3)
ax.set_ylim(0)
total_sum_0.to_netcdf(output_data_path / str('total_sum_0_'+yearstr+'.nc'))
labcu_sum_0.to_netcdf(output_data_path / str('labcu_sum_0_'+yearstr+'.nc'))
gulfs_sum_0.to_netcdf(output_data_path / str('gulfs_sum_0_'+yearstr+'.nc'))
other_sum_0.to_netcdf(output_data_path / str('other_sum_0_'+yearstr+'.nc'))
lcdir_sum_0.to_netcdf(output_data_path / str('lcdir_sum_0_'+yearstr+'.nc'))
lc60w_sum_0.to_netcdf(output_data_path / str('lc60w_sum_0_'+yearstr+'.nc'))
green_sum_0.to_netcdf(output_data_path / str('green_sum_0_'+yearstr+'.nc'))
davis_sum_0.to_netcdf(output_data_path / str('davis_sum_0_'+yearstr+'.nc'))
hudba_sum_0.to_netcdf(output_data_path / str('hudba_sum_0_'+yearstr+'.nc'))
total_mean_0.to_netcdf(output_data_path / str('total_mean_0_'+yearstr+'.nc'))
labcu_mean_0.to_netcdf(output_data_path / str('labcu_mean_0_'+yearstr+'.nc'))
gulfs_mean_0.to_netcdf(output_data_path / str('gulfs_mean_0_'+yearstr+'.nc'))
other_mean_0.to_netcdf(output_data_path / str('other_mean_0_'+yearstr+'.nc'))
lcdir_mean_0.to_netcdf(output_data_path / str('lcdir_mean_0_'+yearstr+'.nc'))
lc60w_mean_0.to_netcdf(output_data_path / str('lc60w_mean_0_'+yearstr+'.nc'))
green_mean_0.to_netcdf(output_data_path / str('green_mean_0_'+yearstr+'.nc'))
davis_mean_0.to_netcdf(output_data_path / str('davis_mean_0_'+yearstr+'.nc'))
hudba_mean_0.to_netcdf(output_data_path / str('hudba_mean_0_'+yearstr+'.nc'))
total_sum_1.to_netcdf(output_data_path / str('total_sum_1_'+yearstr+'.nc'))
labcu_sum_1.to_netcdf(output_data_path / str('labcu_sum_1_'+yearstr+'.nc'))
gulfs_sum_1.to_netcdf(output_data_path / str('gulfs_sum_1_'+yearstr+'.nc'))
other_sum_1.to_netcdf(output_data_path / str('other_sum_1_'+yearstr+'.nc'))
lcdir_sum_1.to_netcdf(output_data_path / str('lcdir_sum_1_'+yearstr+'.nc'))
lc60w_sum_1.to_netcdf(output_data_path / str('lc60w_sum_1_'+yearstr+'.nc'))
green_sum_1.to_netcdf(output_data_path / str('green_sum_1_'+yearstr+'.nc'))
davis_sum_1.to_netcdf(output_data_path / str('davis_sum_1_'+yearstr+'.nc'))
hudba_sum_1.to_netcdf(output_data_path / str('hudba_sum_1_'+yearstr+'.nc'))
total_mean_1.to_netcdf(output_data_path / str('total_mean_1_'+yearstr+'.nc'))
labcu_mean_1.to_netcdf(output_data_path / str('labcu_mean_1_'+yearstr+'.nc'))
gulfs_mean_1.to_netcdf(output_data_path / str('gulfs_mean_1_'+yearstr+'.nc'))
other_mean_1.to_netcdf(output_data_path / str('other_mean_1_'+yearstr+'.nc'))
lcdir_mean_1.to_netcdf(output_data_path / str('lcdir_mean_1_'+yearstr+'.nc'))
lc60w_mean_1.to_netcdf(output_data_path / str('lc60w_mean_1_'+yearstr+'.nc'))
green_mean_1.to_netcdf(output_data_path / str('green_mean_1_'+yearstr+'.nc'))
davis_mean_1.to_netcdf(output_data_path / str('davis_mean_1_'+yearstr+'.nc'))
hudba_mean_1.to_netcdf(output_data_path / str('hudba_mean_1_'+yearstr+'.nc'))
total_sum_sourcetime.to_netcdf(output_data_path / str('total_sum_sourcetime_'+yearstr+'.nc'))
labcu_sum_sourcetime.to_netcdf(output_data_path / str('labcu_sum_sourcetime_'+yearstr+'.nc'))
gulfs_sum_sourcetime.to_netcdf(output_data_path / str('gulfs_sum_sourcetime_'+yearstr+'.nc'))
other_sum_sourcetime.to_netcdf(output_data_path / str('other_sum_sourcetime_'+yearstr+'.nc'))
lcdir_sum_sourcetime.to_netcdf(output_data_path / str('lcdir_sum_sourcetime_'+yearstr+'.nc'))
lc60w_sum_sourcetime.to_netcdf(output_data_path / str('lc60w_sum_sourcetime_'+yearstr+'.nc'))
total_mean_sourcetime.to_netcdf(output_data_path / str('total_mean_sourcetime_'+yearstr+'.nc'))
labcu_mean_sourcetime.to_netcdf(output_data_path / str('labcu_mean_sourcetime_'+yearstr+'.nc'))
gulfs_mean_sourcetime.to_netcdf(output_data_path / str('gulfs_mean_sourcetime_'+yearstr+'.nc'))
other_mean_sourcetime.to_netcdf(output_data_path / str('other_mean_sourcetime_'+yearstr+'.nc'))
lcdir_mean_sourcetime.to_netcdf(output_data_path / str('lcdir_mean_sourcetime_'+yearstr+'.nc'))
lc60w_mean_sourcetime.to_netcdf(output_data_path / str('lc60w_mean_sourcetime_'+yearstr+'.nc'))
conda list
###Output
_____no_output_____ |
docs/source/carnot.ipynb | ###Markdown
Scene Building With PlotsOne can distinguish between two kinds of plots: * Quantitative scientific plots with numbers and dimensions to describe data (e.g. made with matplotlib).* Qualitative explanatory plots that convey a message in the clearest way possible (e.g. made with manim) In this tutorial, I will take you on a journey from choosing a topic to making a scientific plot and then transforming it into an explanatory plot with manim:  
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import zero_Celsius
plt.rcParams['figure.dpi'] = 150
Tmax = zero_Celsius +300
Tmin = zero_Celsius +20
R = 8.314
kappa = 5/3
V1= 1
V2= 2
###Output
_____no_output_____
###Markdown
Now, let's have a look on the plot via matplotlib. As of now, implementing and debugging formulas is important, design is not.
###Code
p1 = R*Tmax/V1 # ideal gas equation
p2 = p1*V1/V2
V3 = (Tmax/Tmin * V2**(kappa-1))**(1/(kappa-1))
p3 = p2* V2**kappa / V3**kappa
V4 = (Tmax/Tmin * V1**(kappa-1))**(1/(kappa-1))
p4 = p3*V3/V4
V12 = np.linspace(V1,V2,100)
V23 = np.linspace(V2,V3,100)
V34 = np.linspace(V3,V4,100)
V41 = np.linspace(V4,V1,100)
def p_isotherm(V,T):
return (R*T)/V
def p_adiabatisch(V,p_start,v_start):
return (p_start*v_start**kappa)/V**kappa
plt.plot(V12, p_isotherm(V12,Tmax),label = "T$_{max}$" +f"= {Tmax-zero_Celsius:.0f}°C")
plt.plot(V23, p_adiabatisch(V23, p2,V2),label = f"adiabatic expansion")
plt.plot(V34, p_isotherm(V34,Tmin),label = "T$_{min}$" +f"= {Tmin-zero_Celsius:.0f}°C")
plt.plot(V41, p_adiabatisch(V41, p4,V4),label = f"adiabatic contraction")
plt.legend()
plt.scatter(V1,p1)
plt.scatter(V2,p2)
plt.scatter(V3,p3)
plt.scatter(V4,p4)
plt.ylabel("Pressure [Pa]")
plt.xlabel("Volume [m$^3$]")
plt.ticklabel_format(axis="x", style="sci", scilimits=(0,5))
###Output
_____no_output_____
###Markdown
Good! Now comes the second part: Building the explanatory plot! Extending to The Qualitative Concept Now we have a good basis to convert this idea into a visually appealing and explanatory graph that will make it easy for everyone to understand complex problems.
###Code
from manim import *
param = "-v WARNING -s -ql --disable_caching --progress_bar None Example"
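# (added note) CLI flags passed to %%manim below: -v WARNING sets the log level, -s renders
# only the last frame as a still image, -ql selects low render quality, --disable_caching and
# --progress_bar None keep the notebook output clean; "Example" is the name of the scene class.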
%%manim $param
class Example(Scene):
def construct(self):
my_ax = Axes()
labels = my_ax.get_axis_labels(x_label="V", y_label="p")
self.add(my_ax,labels)
# making some styling here
Axes.set_default(axis_config={"color": BLACK}, tips= False)
MathTex.set_default(color = BLACK)
config.background_color = WHITE
%%manim $param
ax = Axes(x_range=[0.9, 5.8, 4.9], y_range=[0, 5000, 5000],x_length=8, y_length=5,stroke_color=BLACK)
labels = ax.get_axis_labels(x_label="V", y_label="p")
labels[0].shift(.6*DOWN)
labels[1].shift(.6*LEFT)
isotherm12_graph = ax.plot(
lambda x: p_isotherm(x, Tmax), x_range=[V1, V2,0.01], color=BLACK
)
adiabatisch23_graph = ax.plot(
lambda x: p_adiabatisch(x, p2, V2) , x_range=[V2, V3,0.01], color=BLACK
)
isotherm34_graph = ax.plot(
lambda x: p_isotherm(x, Tmin), x_range=[V3, V4,-0.01], color=BLACK
)
adiabatisch41_graph = ax.plot(
lambda x: p_adiabatisch(x, p4, V4), x_range=[V4, V1,-0.01], color=BLACK
)
lines = VGroup(
isotherm12_graph, adiabatisch23_graph, isotherm34_graph, adiabatisch41_graph
)
ax.add(labels)
class Example(Scene):
def construct(self):
self.add(ax,lines)
%%manim $param
Dot.set_default(color=BLACK)
dots = VGroup()
dots += Dot().move_to(isotherm12_graph.get_start())
dots += Dot().move_to(isotherm12_graph.get_end())
dots += Dot().move_to(isotherm34_graph.get_start())
dots += Dot().move_to(isotherm34_graph.get_end())
class Example(Scene):
def construct(self):
self.add(ax,lines,dots)
%%manim $param
nums= VGroup()
nums+= MathTex(r"{\large \textcircled{\small 1}} ").scale(0.7).next_to(dots[0],RIGHT,buff=0.4*SMALL_BUFF)
nums+= MathTex(r"{\large \textcircled{\small 2}} ").scale(0.7).next_to(dots[1],UP, buff=0.4 * SMALL_BUFF)
nums+= MathTex(r"{\large \textcircled{\small 3}} ").scale(0.7).next_to(dots[2],UP,buff=0.4*SMALL_BUFF)
nums+= MathTex(r"{\large \textcircled{\small 4}} ").scale(0.7).next_to(dots[3],DL ,buff=0.4*SMALL_BUFF)
class Example(Scene):
def construct(self):
self.add(ax,lines, dots,nums)
%%manim $param
background_strokes = VGroup()
background_strokes += ax.plot(lambda x: p_isotherm(x, Tmax),x_range=[V1 - 0.1, V2 + 0.5, 0.01], color=RED, stroke_opacity=0.5)
background_strokes += ax.plot(lambda x: p_isotherm(x, Tmin), x_range=[V3 + 0.3,V4 - 0.5,-0.01], color=BLUE, stroke_opacity=0.5)
background_strokes.set_z_index(-1);
label = VGroup()
label += MathTex(r"\text{T}_{\text{min}}").scale(0.7).next_to(background_strokes[1],RIGHT,aligned_edge=DOWN, buff=0)
label += MathTex(r"\text{T}_{\text{max}}").scale(0.7).next_to(background_strokes[0],RIGHT,aligned_edge=DOWN, buff=0)
background_strokes += label
class Example(Scene):
def construct(self):
self.add(ax,lines, dots,nums,background_strokes)
%%manim $param
downstrokes = VGroup()
downstrokes += ax.get_vertical_line(ax.i2gp(V1, isotherm12_graph), color=BLACK).set_z_index(-2)
downstrokes += ax.get_vertical_line(ax.i2gp(V2, isotherm12_graph), color=BLACK).set_z_index(-2)
downstrokes += ax.get_vertical_line(ax.i2gp(V3, isotherm34_graph), color=BLACK).set_z_index(-2)
downstrokes += ax.get_vertical_line(ax.i2gp(V4, isotherm34_graph), color=BLACK).set_z_index(-2)
down_labels= VGroup()
down_labels += MathTex("{ V }_{ 1 }").next_to(downstrokes[0], DOWN)
down_labels += MathTex("{ V }_{ 2 }").next_to(downstrokes[1], DOWN)
down_labels += MathTex("{ V }_{ 3 }").next_to(downstrokes[2], DOWN)
down_labels += MathTex("{ V }_{ 4 }").next_to(downstrokes[3], DOWN)
class Example(Scene):
def construct(self):
self.add(ax,lines, dots,nums,background_strokes, downstrokes,down_labels)
%%manim $param
heat_annotation = VGroup()
deltaW = MathTex(r"\Delta W").next_to(dots[3], UL).scale(0.65).shift(0.15 * UP)
bg = deltaW.add_background_rectangle(color=WHITE)
heat_annotation += deltaW
point = isotherm12_graph.point_from_proportion(0.5)
arrow = Arrow(point + UR * 0.5, point, buff=0).set_color(BLACK)
deltaQa = MathTex(r"\Delta Q_a").scale(0.7).next_to(arrow, UR, buff=0)
heat_annotation += arrow
heat_annotation += deltaQa
point = isotherm34_graph.point_from_proportion(0.4)
arrow = Arrow(point, point + DL * 0.5, buff=0).set_color(BLACK)
deltaQb = MathTex(r"\Delta Q_b").scale(0.7).next_to(arrow, LEFT, buff=0.1).shift(0.1 * DOWN)
heat_annotation += arrow
heat_annotation += deltaQb
class Example(Scene):
def construct(self):
self.add(ax,lines, dots,nums,background_strokes, downstrokes,down_labels,heat_annotation)
%%manim $param
c1 = Cutout(lines[0].copy().reverse_points(),lines[3]).set_opacity(1).set_color(GREEN)
c2 = Cutout(lines[1],lines[2])
bg_grey = Union(c1,c2, color=GREY_A).set_opacity(1)
bg_grey.z_index=-1
class Example(Scene):
def construct(self):
#self.add(c1,c2)
self.add(ax,lines, dots,nums,background_strokes)
self.add(downstrokes,down_labels,heat_annotation,bg_grey)
carnot_graph= VGroup(ax,lines, dots,nums,background_strokes,downstrokes,down_labels,heat_annotation,bg_grey)
###Output
_____no_output_____
###Markdown
And here is the final plot:
###Code
%%manim $param
sourunding_dot = Dot().scale(1.3).set_fill(color=BLACK).set_z_index(-1)
innerdot = Dot().set_color(WHITE).scale(1)
moving_dot = VGroup(sourunding_dot, innerdot)
moving_dot.move_to(isotherm12_graph.point_from_proportion(0.3))
class Example(Scene):
def construct(self):
self.add(carnot_graph)
self.add(moving_dot)
###Output
_____no_output_____
###Markdown
OutlookWith this foundation of an explanatory plot in place, one can go on and animate it, as can be seen here. (A tutorial for this animation will follow!)
###Code
from IPython.display import YouTubeVideo
YouTubeVideo('_8RkZaiXP0E', width=800, height=600)
###Output
_____no_output_____ |
course-v3/nbs/dl1/lesson1-pets.ipynb | ###Markdown
Lesson 1 - What's your pet Welcome to lesson 1! For those of you who are using a Jupyter Notebook for the first time, you can learn about this useful tool in a tutorial we prepared specially for you; click `File`->`Open` now and click `00_notebook_tutorial.ipynb`. In this lesson we will build our first image classifier from scratch, and see if we can achieve world-class results. Let's dive in!Every notebook starts with the following three lines; they ensure that any edits to libraries you make are reloaded here automatically, and also that any charts or images displayed are shown in this notebook.
###Code
%reload_ext autoreload
%autoreload 2
%matplotlib inline
###Output
_____no_output_____
###Markdown
We import all the necessary packages. We are going to work with the [fastai V1 library](http://www.fast.ai/2018/10/02/fastai-ai/) which sits on top of [Pytorch 1.0](https://hackernoon.com/pytorch-1-0-468332ba5163). The fastai library provides many useful functions that enable us to quickly and easily build neural networks and train our models.
###Code
from fastai import *
from fastai.vision import *
from fastai.metrics import error_rate
###Output
_____no_output_____
###Markdown
If you're using a computer with an unusually small GPU, you may get an out of memory error when running this notebook. If this happens, click Kernel->Restart, uncomment the 2nd line below to use a smaller *batch size* (you'll learn all about what this means during the course), and try again.
###Code
bs = 64
# bs = 16 # uncomment this line if you run out of memory even after clicking Kernel->Restart
###Output
_____no_output_____
###Markdown
Looking at the data We are going to use the [Oxford-IIIT Pet Dataset](http://www.robots.ox.ac.uk/~vgg/data/pets/) by [O. M. Parkhi et al., 2012](http://www.robots.ox.ac.uk/~vgg/publications/2012/parkhi12a/parkhi12a.pdf) which features 12 cat breeds and 25 dog breeds. Our model will need to learn to differentiate between these 37 distinct categories. According to their paper, the best accuracy they could get in 2012 was 59.21%, using a complex model that was specific to pet detection, with separate "Image", "Head", and "Body" models for the pet photos. Let's see how accurate we can be using deep learning!We are going to use the `untar_data` function to which we must pass a URL as an argument and which will download and extract the data.
###Code
help(untar_data)
path = untar_data(URLs.PETS); path
path.ls()
path_anno = path/'annotations'
path_img = path/'images'
###Output
_____no_output_____
###Markdown
The first thing we do when we approach a problem is to take a look at the data. We _always_ need to understand very well what the problem is and what the data looks like before we can figure out how to solve it. Taking a look at the data means understanding how the data directories are structured, what the labels are and what some sample images look like.The main difference between the handling of image classification datasets is the way labels are stored. In this particular dataset, labels are stored in the filenames themselves. We will need to extract them to be able to classify the images into the correct categories. Fortunately, the fastai library has a handy function made exactly for this, `ImageDataBunch.from_name_re` gets the labels from the filenames using a [regular expression](https://docs.python.org/3.6/library/re.html).
###Code
fnames = get_image_files(path_img)
fnames[:5]
np.random.seed(2)
pat = r'\\([^\\]+)_\d+.jpg$'
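# Quick illustration (added, not part of the original lesson): the capture group in `pat`
# grabs the breed name between the last path separator and the trailing _<digits>.jpg.
import re
_example_fname = r'C:\data\oxford-iiit-pet\images\Abyssinian_12.jpg'  # hypothetical path
print(re.search(pat, _example_fname).group(1))  # -> 'Abyssinian'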
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(), size=224, bs=bs
).normalize(imagenet_stats)
data.show_batch(rows=3, figsize=(7,6))
print(data.classes)
len(data.classes),data.c
###Output
['Abyssinian', 'Bengal', 'Birman', 'Bombay', 'British_Shorthair', 'Egyptian_Mau', 'Maine_Coon', 'Persian', 'Ragdoll', 'Russian_Blue', 'Siamese', 'Sphynx', 'american_bulldog', 'american_pit_bull_terrier', 'basset_hound', 'beagle', 'boxer', 'chihuahua', 'english_cocker_spaniel', 'english_setter', 'german_shorthaired', 'great_pyrenees', 'havanese', 'japanese_chin', 'keeshond', 'leonberger', 'miniature_pinscher', 'newfoundland', 'pomeranian', 'pug', 'saint_bernard', 'samoyed', 'scottish_terrier', 'shiba_inu', 'staffordshire_bull_terrier', 'wheaten_terrier', 'yorkshire_terrier']
###Markdown
Training: resnet34 Now we will start training our model. We will use a [convolutional neural network](http://cs231n.github.io/convolutional-networks/) backbone and a fully connected head with a single hidden layer as a classifier. Don't know what these things mean? Not to worry, we will dive deeper in the coming lessons. For the moment you need to know that we are building a model which will take images as input and will output the predicted probability for each of the categories (in this case, it will have 37 outputs).We will train for 4 epochs (4 cycles through all our data).
###Code
learn = cnn_learner(data, models.resnet34, metrics=error_rate)
learn.model
learn.fit_one_cycle(4)
learn.save('stage-1')
###Output
_____no_output_____
###Markdown
Results Let's see what results we have got. We will first see which were the categories that the model most confused with one another. We will try to see if what the model predicted was reasonable or not. In this case the mistakes look reasonable (none of the mistakes seems obviously naive). This is an indicator that our classifier is working correctly. Furthermore, when we plot the confusion matrix, we can see that the distribution is heavily skewed: the model makes the same mistakes over and over again but it rarely confuses other categories. This suggests that it just finds it difficult to distinguish some specific categories between each other; this is normal behaviour.
###Code
interp = ClassificationInterpretation.from_learner(learn)
losses,idxs = interp.top_losses()
len(data.valid_ds)==len(losses)==len(idxs)
interp.plot_top_losses(9, figsize=(15,11))
doc(interp.plot_top_losses)
interp.plot_confusion_matrix(figsize=(12,12), dpi=60)
interp.most_confused(min_val=2)
###Output
_____no_output_____
###Markdown
Unfreezing, fine-tuning, and learning rates Since our model is working as we expect it to, we will *unfreeze* our model and train some more.
###Code
learn.unfreeze()
learn.fit_one_cycle(1)
learn.load('stage-1');
learn.lr_find()
learn.recorder.plot()
learn.unfreeze()
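# (added note) max_lr=slice(1e-6, 1e-4) below applies discriminative learning rates:
# the earliest layer group trains at 1e-6, the last group at 1e-4, and the groups in
# between are spread across that range.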
learn.fit_one_cycle(2, max_lr=slice(1e-6,1e-4))
###Output
_____no_output_____
###Markdown
That's a pretty accurate model! Training: resnet50 Now we will train in the same way as before but with one caveat: instead of using resnet34 as our backbone we will use resnet50 (resnet34 is a 34 layer residual network while resnet50 has 50 layers. It will be explained later in the course and you can learn the details in the [resnet paper](https://arxiv.org/pdf/1512.03385.pdf)).Basically, resnet50 usually performs better because it is a deeper network with more parameters. Let's see if we can achieve a higher performance here. To help it along, let's use larger images too, since that way the network can see more detail. We reduce the batch size a bit since otherwise this larger network will require more GPU memory.
###Code
data = ImageDataBunch.from_name_re(path_img, fnames, pat, ds_tfms=get_transforms(),
size=299, bs=bs//2).normalize(imagenet_stats)
learn = cnn_learner(data, models.resnet50, metrics=error_rate)
learn.lr_find()
learn.recorder.plot()
learn.fit_one_cycle(8)
learn.save('stage-1-50')
###Output
_____no_output_____
###Markdown
It's astonishing that it's possible to recognize pet breeds so accurately! Let's see if full fine-tuning helps:
###Code
learn.unfreeze()
learn.fit_one_cycle(3, max_lr=slice(1e-6,1e-4))
###Output
Total time: 03:27
epoch train_loss valid_loss error_rate
1 0.097319 0.155017 0.048038 (01:10)
2 0.074885 0.144853 0.044655 (01:08)
3 0.063509 0.144917 0.043978 (01:08)
###Markdown
If it doesn't, you can always go back to your previous model.
###Code
learn.load('stage-1-50');
interp = ClassificationInterpretation.from_learner(learn)
interp.most_confused(min_val=2)
###Output
_____no_output_____
###Markdown
Other data formats
###Code
path = untar_data(URLs.MNIST_SAMPLE); path
tfms = get_transforms(do_flip=False)
data = ImageDataBunch.from_folder(path, ds_tfms=tfms, size=26)
data.show_batch(rows=3, figsize=(5,5))
learn = cnn_learner(data, models.resnet18, metrics=accuracy)
learn.fit(2)
df = pd.read_csv(path/'labels.csv')
df.head()
data = ImageDataBunch.from_csv(path, ds_tfms=tfms, size=28)
data.show_batch(rows=3, figsize=(5,5))
data.classes
data = ImageDataBunch.from_df(path, df, ds_tfms=tfms, size=24)
data.classes
fn_paths = [path/name for name in df['name']]; fn_paths[:2]
pat = r"/(\d)/\d+\.png$"
data = ImageDataBunch.from_name_re(path, fn_paths, pat=pat, ds_tfms=tfms, size=24)
data.classes
data = ImageDataBunch.from_name_func(path, fn_paths, ds_tfms=tfms, size=24,
label_func = lambda x: '3' if '/3/' in str(x) else '7')
data.classes
labels = [('3' if '/3/' in str(x) else '7') for x in fn_paths]
labels[:5]
data = ImageDataBunch.from_lists(path, fn_paths, labels=labels, ds_tfms=tfms, size=24)
data.classes
###Output
_____no_output_____ |
SceneSplit30.ipynb | ###Markdown
Video Scene Detection based on Optimal Sequential Grouping
###Code
import time
from typing import Tuple, List
from multiprocessing import Pool
import os
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2
print(cv2.__version__)
from scipy.spatial import distance
from sklearn import preprocessing
import subprocess
from h_add import get_optimal_sequence_add
from h_nrm import get_optimal_sequence_nrm
from estimate_scenes_count import estimate_scenes_count
from evaluation import calculate_interval_metric
%matplotlib inline
class data_linewidth_plot():
"""
Draws lines that could scale along with figure size
Source: https://stackoverflow.com/questions/19394505/matplotlib-expand-the-line-with-specified-width-in-data-unit/42972469#42972469
"""
def __init__(self, x, y, **kwargs):
self.ax = kwargs.pop("ax", plt.gca())
self.fig = self.ax.get_figure()
self.lw_data = kwargs.pop("linewidth", 1)
self.lw = 1
self.fig.canvas.draw()
self.ppd = 72./self.fig.dpi
self.trans = self.ax.transData.transform
self.linehandle, = self.ax.plot([],[],**kwargs)
if "label" in kwargs: kwargs.pop("label")
self.line, = self.ax.plot(x, y, **kwargs)
self.line.set_color(self.linehandle.get_color())
self._resize()
self.cid = self.fig.canvas.mpl_connect('draw_event', self._resize)
def _resize(self, event=None):
lw = ((self.trans((1, self.lw_data))-self.trans((0, 0)))*self.ppd)[1]
if lw != self.lw:
self.line.set_linewidth(lw)
self.lw = lw
self._redraw_later()
def _redraw_later(self):
self.timer = self.fig.canvas.new_timer(interval=10)
self.timer.single_shot = True
self.timer.add_callback(lambda : self.fig.canvas.draw_idle())
self.timer.start()
def plot_distances_chart(distances: np.ndarray, scene_borders: np.ndarray, ax: matplotlib.axes.Axes) -> None:
"""
Plot scene borders on top of the pairwise distances matrix
:param distances: pairwise distances matrix
:param scene_borders:
"""
ax.imshow(distances, cmap='gray')
borders_from_zero = np.concatenate(([0], scene_borders))
for i in range(1, len(borders_from_zero)):
data_linewidth_plot(
x=[borders_from_zero[i-1], borders_from_zero[i-1]],
y=[borders_from_zero[i-1], borders_from_zero[i]],
ax=ax, linewidth=1,
color='red',
alpha=0.5
)
data_linewidth_plot(
x=[borders_from_zero[i-1], borders_from_zero[i]],
y=[borders_from_zero[i-1], borders_from_zero[i-1]],
ax=ax, linewidth=1,
color='red',
alpha=0.5
)
data_linewidth_plot(
x=[borders_from_zero[i-1], borders_from_zero[i]],
y=[borders_from_zero[i], borders_from_zero[i]],
ax=ax,
linewidth=1,
color='red',
alpha=0.5
)
data_linewidth_plot(
x=[borders_from_zero[i], borders_from_zero[i]],
y=[borders_from_zero[i-1], borders_from_zero[i]],
ax=ax,
linewidth=1,
color='red',
alpha=0.5
)
def get_intervals_from_borders(borders: np.ndarray) -> List[List[Tuple[int, int]]]:
"""
Convert scene borders to intervals
:param borders: list of borders
:return: list of interval tuples where first value - beginning of an interval, the second - end of an interval
"""
intervals = []
prev_border = 0
for cur_border in borders:
intervals.append((prev_border, cur_border))
prev_border = cur_border
return intervals
def get_synth_example(
features_count: int,
scenes_count: int,
random_seed: int = 42
) -> Tuple[np.ndarray, np.ndarray]:
"""
Generates synthetic pairwise distance matrix for features_count shots and scenes_count scenes
:param features_count: count of shots
:param scenes_count: count of diagonal clusters represented scenes
:param random_seed: value for random numbers generator initialization
:return: pairwise distance matrix and scene borders
"""
synth_distances = np.random.uniform(size=(features_count, features_count))
np.random.seed(random_seed)
random_t = np.random.choice(range(2, features_count - 1), size=scenes_count-1, replace=False)
random_t.sort()
for i, t in enumerate(random_t):
if i == 0:
synth_distances[0:t, 0:t] = np.clip(synth_distances[0:t, 0:t] - 0.4, 0., 1.)
else:
synth_distances[random_t[i - 1]:t, random_t[i - 1]:t] = \
np.clip(synth_distances[random_t[i - 1]:t, random_t[i - 1]:t] - 0.4, 0., 1.)
synth_distances[random_t[-1]:features_count, random_t[-1]:features_count] = \
np.clip(synth_distances[random_t[-1]:features_count, random_t[-1]:features_count] - 0.4, 0., 1.)
random_t = np.append(random_t, [features_count])
synth_distances = (synth_distances + synth_distances.T)/2
np.fill_diagonal(synth_distances, 0)
return synth_distances, random_t - 1
###Output
_____no_output_____
###Markdown
Sampling representative frames from each shot
###Code
#uniform sampling, rewrite to extract mid frame directly instead
def uniform_sampling(num_frames,num_shots):
for ind in range(1,num_shots):
shot = os.path.join(data_path, "Honey-final-180k-Scene-{0:03d}.mp4".format(ind))
vcap = cv2.VideoCapture(shot)
frame_count = int(vcap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = vcap.get(cv2.CAP_PROP_FPS)
vid_name=shot.split("/")[-1][:-4]
dur=float(frame_count/fps)
sample_frames= np.linspace(0, dur, num_frames+1)+dur/num_frames/2.
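        # (added note) linspace returns num_frames+1 segment boundaries over the shot duration;
        # shifting by half a segment width and keeping the first num_frames values samples the
        # temporal midpoint of each segment.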
for i in range(num_frames):
os.system(' '.join(('ffmpeg', '-loglevel', 'panic', '-ss', str(sample_frames[i]), '-i', shot, os.path.join(sampled_data_path, vid_name+'-'+str(i+1).zfill(2)+'.jpg'))))
def extract_key_frames(num_shots):
for ind in range(1,num_shots):
shot = os.path.join(data_path, "Honey-final-180k-Scene-{0:03d}.mp4".format(ind))
vid_name=shot.split("/")[-1][:-4]
out = os.path.join(keyframe_data_path, vid_name)
command = "ffmpeg -loglevel warning -i \"{}\" -q:v 2 -vf select=\"eq(pict_type\,PICT_TYPE_I)\" -vsync 0 {}-%03d.jpg".format(shot,out)
subprocess.call(command, shell=True)
data_path ="/Users/raksharamesh/Desktop/DVU_challenge/Dataset/Movies/Honey"
sampled_data_path = os.path.abspath('.')+ "/sampled_frames"
keyframe_data_path = os.path.abspath('.')+ "/keyframe_data_path"
num_shots = len([file for file in os.listdir(data_path)])
print(num_shots, "num shots")
if not os.path.isdir(sampled_data_path):
os.mkdir(os.path.join(sampled_data_path))
num_frames=10
uniform_sampling(num_frames,num_shots)
if not os.path.isdir(keyframe_data_path):
os.mkdir(os.path.join(keyframe_data_path))
extract_key_frames(num_shots)
# for ind in range(1,num_shots):
# shot = os.path.join(data_path, "Honey-final-180k-Scene-{0:03d}.mp4".format(ind))
# num_frames = 10
# uniform_sampling(shot,num_frames)
#list of all uniformly sampled frames from shots
img_name_list = []
mid_frame_list = []
for i,img_name in enumerate(sorted(os.listdir(sampled_data_path))):
img_name_list.append(img_name)
#append only mid frame
if img_name[-5]=="5":
mid_frame_list.append(img_name)
#prefix full path
mid_frame_list = sorted([os.path.join(sampled_data_path,img_name) for img_name in mid_frame_list])
img_name_list = sorted([os.path.join(sampled_data_path, img_name) for img_name in img_name_list])
keyframe_name_list = sorted([os.path.join(keyframe_data_path,img_name) for img_name in os.listdir(keyframe_data_path) if not img_name.startswith('.')])
###Output
253 num shots
###Markdown
Test feature extraction methods
###Code
from sklearn import preprocessing
from collections import defaultdict
def compute_feature_dist(np_array):
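    # (added note) np_array is a dict of flattened per-shot feature vectors keyed by shot
    # index; 252 is hard-coded to match the number of shots extracted above (shots 1..252).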
distance_np = np.full((252, 252), 0.0)
for ind1 in range(252):
for ind2 in range(252):
distance_np[ind1][ind2] = distance.hamming(np_array[ind1], np_array[ind2])
#print(distance_np)
return distance_np
#testing out different combinations of features
#color histograms from only mid-frame of a shot as features
def feature_set1(mid_frame_list):
np_array = {}
for ind in range(len(mid_frame_list)):
vid_name=img_name_list[ind].split("/")[-1][:-4]
img = cv2.imread(mid_frame_list[ind])
#hsv = cv2.cvtColor(img,cv2.COLOR_RGB2HSV)
#hist = cv2.calcHist([hsv], [0, 1], None, [180,256], [0, 180, 0, 256])
hist = cv2.calcHist([img], [0, 1, 2], None, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist = hist.flatten().reshape(-1,1)
min_max_scaler = preprocessing.MinMaxScaler()
hist = min_max_scaler.fit_transform(hist)
np_array[ind] = np.array(hist)
np_array[ind] = np_array[ind].reshape(-1, 1)
distance_np = compute_feature_dist(np_array)
return distance_np
#lets average HSV histograms of the keyframes
def feature_set2(keyframe_name_list):
#print(len(keyframe_name_list))
#grouping keyframes of shots together
shot_frames = defaultdict(list)
for img_path in keyframe_name_list:
shot_name=img_path.split('-')[-2]
shot_frames[shot_name].append(img_path)
np_array = {}
#orb = cv2.ORB_create()
for shot_name in shot_frames:
count=1
#hist_sum = np.zeros((180,256))
hist_sum = np.zeros((45,64))
for frame in shot_frames[shot_name]:
frame = cv2.imread(frame)
hsv = cv2.cvtColor(frame,cv2.COLOR_RGB2HSV)
hist = cv2.calcHist([frame], [0, 1, 2], None, [8,8,8],[0, 256, 0, 256, 0, 256])
hist = cv2.calcHist( [hsv], [0, 1], None, [45, 64], [0, 180, 0, 256] )
hist_sum += hist
# also compute ORB descriptors?
# gray= cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
# kp = orb.detect(gray,None)
# kp, des = orb.compute(gray, kp)
# des = des[1,:].reshape(-1,1)
count +=1
hist_avg = hist_sum/count
min_max_scaler = preprocessing.MinMaxScaler()
hist_avg = min_max_scaler.fit_transform(hist_avg)
#print(hist_avg, "hist avg")
ind = list(shot_frames.keys()).index(shot_name)
np_array[ind] = np.array(hist_avg.flatten())
np_array[ind] = np_array[ind].reshape(-1, 1)
#print(np.shape(np_array[ind]))
distance_np = compute_feature_dist(np_array)
return distance_np, shot_frames
#cnn features, using vector from midframe
def feature_set3(mid_frame_list):
np_array = {}
for ind in range(0,252):
#for ind in range(1, 253):
#shot = "/home/va/research/CYLin/scene/Honey/Honey-final-180k-Scene-{0:03d}.npy".format(ind)
shot = "/Users/raksharamesh/Desktop/DVU_challenge/code/video-scene-detection/feature/Honey-final-180k-Scene-{0:03d}.npy".format(ind+1)
np_array[ind] = np.load(shot)
#shot is sampled at 1FPS, dum: num of frames x 2048
#print(np.shape(np_array[ind]))
#np_array[ind] = np.average(np_array[ind], axis=0)
#try mid frame vector instead of averaging
mid_frame = int(np_array[ind].shape[0]/2)
np_array[ind] = np_array[ind][mid_frame]
#print(np_array[ind].shape)
np_array[ind] = np_array[ind].reshape(-1, 1)
#print(np_array[ind].shape)
#print(distance.hamming(np_array[ind], np_array[ind]))
#print(np_array[ind].shape)
distance_np = compute_feature_dist(np_array)
return distance_np
dist1 = feature_set1(mid_frame_list)
dist2, shot_frames = feature_set2(keyframe_name_list)
print(dist1)
#print(np.shape(dist2))
#dist3 = feature_set3(mid_frame_list)
#automate scene count estimation
distance_np = dist2
K = estimate_scenes_count(distance_np)
print(K)
##only use additive cost to evaluate features through plot
scenes_count = K
#scenes_count = 5
optimal_scene_borders_add = get_optimal_sequence_add(distance_np, scenes_count)
figs, axs = plt.subplots(1,1, figsize=(5,7))
figs.suptitle('Scene Borders Prediction Using Additive Cost Function', fontsize=14)
plot_distances_chart(distance_np, optimal_scene_borders_add, axs)
axs.set_title('Predicted Borders Add')
print(optimal_scene_borders_add.shape)
print(optimal_scene_borders_add)
scenes_count = 5
optimal_scene_borders_nrm = get_optimal_sequence_nrm(distance_np, scenes_count)
figs, axs = plt.subplots(1,1, figsize=(5,7))
figs.suptitle('Scene Borders Prediction Using Normalized Cost Function', fontsize=14)
plot_distances_chart(distance_np, optimal_scene_borders_nrm, axs)
axs.set_title('Predicted Borders NRM')
print(optimal_scene_borders_nrm.shape)
print(optimal_scene_borders_nrm)
def visualize(img_path):
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib import colors
img = cv2.imread(img_path)
img_RGB=cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img_RGB)
plt.show()
r, g, b = cv2.split(img_RGB)
fig = plt.figure()
axis = fig.add_subplot(1, 1, 1, projection="3d")
pixel_colors = img_RGB.reshape((np.shape(img_RGB)[0]*np.shape(img_RGB)[1], 3))
norm = colors.Normalize(vmin=-1.,vmax=1.)
norm.autoscale(pixel_colors)
pixel_colors = norm(pixel_colors).tolist()
axis.scatter(r.flatten(), g.flatten(), b.flatten(), facecolors=pixel_colors, marker=".")
axis.set_xlabel("Red")
axis.set_ylabel("Green")
axis.set_zlabel("Blue")
plt.show()
img_HSV = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)
h, s, v = cv2.split(img_HSV)
fig = plt.figure()
axis = fig.add_subplot(1, 1, 1, projection="3d")
axis.scatter(h.flatten(), s.flatten(), v.flatten(), facecolors=pixel_colors, marker=".")
axis.set_xlabel("Hue")
axis.set_ylabel("Saturation")
axis.set_zlabel("Value")
plt.show()
#VISUALIZE RGB & HSV values of key frames of shots
visualize(shot_frames["005"][0])
# Let's generate a random pairwise distance matrix, add some uniform noise and calculate the optimal borders according to the Additive Cost Function.
shots_count = 100
scenes_count = 10
distances, synth_scene_borders = get_synth_example(shots_count, scenes_count)
print(distances.shape, type(distances))
print(np.sum(distances[0]))
print(np.max(distances[1]))
optimal_scene_borders = get_optimal_sequence_add(distances, scenes_count)
# optimal_scene_borders = get_optimal_sequence_nrm(distances, scenes_count)
figs, axs = plt.subplots(1,1, figsize=(5,7))
figs.suptitle('Scene Borders Prediction Using Additive Cost Function', fontsize=14)
plot_distances_chart(distances, synth_scene_borders, axs)
axs.set_title('Ground Truth Borders')
# plot_distances_chart(distances, optimal_scene_borders, axs[1])
# axs[1].set_title('Predicted Borders')
shots_count = 100
scenes_count = 10
distances, synth_scene_borders = get_synth_example(shots_count, scenes_count)
optimal_scene_borders = get_optimal_sequence_add(distances, scenes_count)
figs, axs = plt.subplots(1,2, figsize=(15,7))
figs.suptitle('Scene Borders Prediction Using Additive Cost Function', fontsize=14)
plot_distances_chart(distances, synth_scene_borders, axs[0])
axs[0].set_title('Ground Truth Borders')
plot_distances_chart(distances, optimal_scene_borders, axs[1])
axs[1].set_title('Predicted Borders')
###Output
_____no_output_____
###Markdown
Normalized Cost Function Let's generate a random pairwise distance matrix, add some uniform noise and calculate the optimal borders according to the Normalized Cost Function.
###Code
shots_count = 100
scenes_count = 10
distances, synth_scene_borders = get_synth_example(shots_count, scenes_count)
optimal_scene_borders = get_optimal_sequence_nrm(distances, scenes_count)
figs, axs = plt.subplots(1,2, figsize=(15,7))
figs.suptitle('Scene Borders Prediction Using Normalized Cost Function', fontsize=14)
plot_distances_chart(distances, synth_scene_borders, axs[0])
axs[0].set_title('Ground Truth Borders')
plot_distances_chart(distances, optimal_scene_borders, axs[1])
axs[1].set_title('Predicted Borders')
###Output
_____no_output_____
###Markdown
As you can see, the Additive Cost Function works a little bit worse than the Normalized Cost Function. But one example is not enough. Quality TestsLet's generate 999 synthetic examples and check both algorithms' quality on several metrics.
###Code
test_examples = []
for N in range(12, 123):
for K in range(2,11):
test_examples.append(get_synth_example(N, K))
###Output
_____no_output_____
###Markdown
Find optimal borders for each example with Hadd optimisation:
###Code
predicted_examples_add = []
def predict_examples_add(args):
distances, synth_scene_borders = args
K = estimate_scenes_count(distances)
return get_optimal_sequence_add(distances, K)
with Pool(4) as pool:
predicted_examples_add = list(tqdm(pool.imap(predict_examples_add, test_examples), total=len(test_examples)))
###Output
_____no_output_____
###Markdown
Find optimal borders for each example with Hnrm optimisation:
###Code
predicted_examples_nrm = []
def predict_examples_nrm(args):
distances, synth_scene_borders = args
K = estimate_scenes_count(distances)
return get_optimal_sequence_nrm(distances, K)
with Pool(4) as pool:
predicted_examples_nrm = list(tqdm(pool.imap(predict_examples_nrm, test_examples), total=len(test_examples)))
###Output
_____no_output_____
###Markdown
Convert scene borders to intervals:
###Code
predicted_intervals_add = []
for example in predicted_examples_add:
predicted_intervals_add.append(get_intervals_from_borders(example))
predicted_intervals_nrm = []
for example in predicted_examples_nrm:
predicted_intervals_nrm.append(get_intervals_from_borders(example))
gt_intervals = []
for distances, synth_scene_borders in test_examples:
gt_intervals.append(get_intervals_from_borders(synth_scene_borders))
###Output
_____no_output_____
###Markdown
Get mean precision, recall, F1 and IoU for each result:
###Code
precision_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'precision')
recall_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'recall')
f1_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'f1')
iou_add = calculate_interval_metric(gt_intervals, predicted_intervals_add, 'iou')
precision_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'precision')
recall_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'recall')
f1_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'f1')
iou_nrm = calculate_interval_metric(gt_intervals, predicted_intervals_nrm, 'iou')
print('Precision add: {} Precision nrm: {}'.format(precision_add, precision_nrm))
print('Recall add: {} Recall nrm: {}'.format(recall_add, recall_nrm))
print('F1 add: {} F1 nrm: {}'.format(f1_add, f1_nrm))
print('IoU add: {} IoU nrm: {}'.format(iou_add, iou_nrm))
###Output
_____no_output_____
###Markdown
As we can see, optimisation of the Hnrm metric works better than Hadd according to every metric. Time Tests But what about speed?Let's fix the scenes count K and see how the optimization time depends on the shots count N:
###Code
test_data = []
K = 5
Ns = range(10, 100)
for N in Ns:
test_data.append(get_synth_example(N, K))
ns_add_times = []
ns_nrm_times = []
for distances, synth_scene_borders in test_data:
start_time = time.time()
optimal_scene_borders = get_optimal_sequence_add(distances, K)
ns_add_times.append(time.time() - start_time)
start_time = time.time()
optimal_scene_borders = get_optimal_sequence_nrm(distances, K)
ns_nrm_times.append(time.time() - start_time)
plt.figure(figsize=(15,7))
ax = plt.gca()
ax.plot(Ns, ns_add_times, label='H_add optimization time in seconds')
ax.plot(Ns, ns_nrm_times, label='H_nrm optimization time in seconds')
ax.set(title='Dependence of the Optimization Time from Shots Number', ylabel='Time (sec)', xlabel='Number of shots')
ax.legend(loc='best')
###Output
_____no_output_____
###Markdown
In one more test we'll fix the count of shots N and see how the optimization time depends on the scenes count K:
###Code
test_data = []
N = 70
Ks = range(2, 12)
for K in Ks:
test_data.append(get_synth_example(N, K))
ks_add_times = []
ks_nrm_times = []
for distances, synth_scene_borders in test_data:
start_time = time.time()
optimal_scene_borders = get_optimal_sequence_add(distances, len(synth_scene_borders))
ks_add_times.append(time.time() - start_time)
start_time = time.time()
optimal_scene_borders = get_optimal_sequence_nrm(distances, len(synth_scene_borders))
ks_nrm_times.append(time.time() - start_time)
plt.figure(figsize=(15,7))
ax = plt.gca()
ax.plot(Ks, ks_add_times, label='H_add optimization time in seconds')
ax.plot(Ks, ks_nrm_times, label='H_nrm optimization time in seconds')
ax.set(title='Dependence of the Optimization Time from Scenes Number', ylabel='Time (sec)', xlabel='Number of scenes')
ax.legend(loc='best')
###Output
_____no_output_____ |
src/notebooks/327-network-from-correlation-matrix.ipynb | ###Markdown
Welcome to the introductory template of the python graph gallery. Here is how to proceed to add a new `.ipynb` file that will be converted to a blogpost in the gallery! Notebook Metadata It is very important to add the following fields to your notebook. It helps build the page later on:- **slug**: the URL of the blogPost. It should be exactly the same as the file title. Example: `70-basic-density-plot-with-seaborn`- **chartType**: the chart type like density or heatmap. For a complete list see [here](https://github.com/holtzy/The-Python-Graph-Gallery/blob/master/src/util/sectionDescriptions.js), it must be one of the `id` options.- **title**: what will be written in big on top of the blogpost! use html syntax there.- **description**: what will be written just below the title, centered text.- **keyword**: list of keywords related to the blogpost- **seoDescription**: a description for the blogpost meta. Should be a bit shorter than the description and must not contain any html syntax. Add a chart description A chart example always comes with some explanation. It must: contain keywords, link to related pages like the parent page (graph section), and give explanations. In depth for complicated charts. High level for beginner level charts Add a chart
###Code
import seaborn as sns, numpy as np
np.random.seed(0)
x = np.random.randn(100)
ax = sns.distplot(x)
###Output
_____no_output_____
###Markdown
Suppose that you have 10 individuals, and know how closely related they are to each other. It is possible to represent these **relationships** in a network. Each individual will be a **node**. If 2 individuals are close enough (we set a **threshold**), then they are linked by an **edge**. That will show the structure of the population!In this example, we see that our population is clearly split into 2 groups!
###Code
# libraries
import pandas as pd
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
# Build a data set: 10 individuals (columns A to J), each described by 10 observations
ind1=[5,10,3,4,8,10,12,1,9,4]
ind5=[1,1,13,4,18,5,2,11,3,8]
df = pd.DataFrame({ 'A':ind1, 'B':ind1 + np.random.randint(10, size=(10)) , 'C':ind1 + np.random.randint(10, size=(10)) , 'D':ind1 + np.random.randint(5, size=(10)) , 'E':ind1 + np.random.randint(5, size=(10)), 'F':ind5, 'G':ind5 + np.random.randint(5, size=(10)) , 'H':ind5 + np.random.randint(5, size=(10)), 'I':ind5 + np.random.randint(5, size=(10)), 'J':ind5 + np.random.randint(5, size=(10))})
# Calculate the correlation between individuals: each column (A to J) is one individual, so df.corr(), which computes pairwise correlations between columns, gives the individual-to-individual correlations directly.
corr = df.corr()
# Transform it in a links data frame (3 columns only):
links = corr.stack().reset_index()
links.columns = ['var1', 'var2', 'value']
# Keep only correlation over a threshold and remove self correlation (cor(A,A)=1)
links_filtered=links.loc[ (links['value'] > 0.8) & (links['var1'] != links['var2']) ]
# Build your graph
G=nx.from_pandas_edgelist(links_filtered, 'var1', 'var2')
# Plot the network:
nx.draw(G, with_labels=True, node_color='orange', node_size=400, edge_color='black', linewidths=1, font_size=15)
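# Optional check (added): list the connected components to confirm the two groups
# suggested by the drawing.
print(list(nx.connected_components(G)))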
###Output
_____no_output_____ |
.ipynb_checkpoints/data-checkpoint.ipynb | ###Markdown
Read the Data from File
###Code
# with open("/home/tyagi/Downloads/Project/annotations_trainval2017/annotations/captions_val2017.json") as file:
# data_val=json.load(file)
with open("/home/tyagi/Downloads/project_data_bhavin/annotations_trainval2014/annotations/captions_val2014.json") as file:
data_val=json.load(file)
###Output
_____no_output_____
###Markdown
data_val holds all the data loaded from the json file - it is a dictionary with all the info as values
###Code
print(len(data_val))
for i in data_val.keys():
print(i,len(data_val[i]))
print(data_val["info"])
print(len(data_val['annotations']))
print(data_val['annotations'][1800])
for images in data_val['annotations']:
if images['image_id'] == 322831:
print(images['caption'])
#stemmer = SnowballStemmer('english')
stemmer = PorterStemmer()
def lemmatize_stemming(text):
return WordNetLemmatizer().lemmatize(text, pos='v')
def preprocess(text):
result = []
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(lemmatize_stemming(token)) > 3:
result.append(lemmatize_stemming(token))
return result
for images in data_val['annotations']:
if images['image_id'] == 322831:
result=preprocess(images['caption'])
print(result)
doc_set = []
data_dict={}
for image in data_val['annotations']:
if image["image_id"] in data_dict:
data_dict[image["image_id"]].append(image["caption"])
else:
data_dict.update({image["image_id"]:[image["caption"]]})
print(len(data_dict)) ### image_id(key) -- > text_corpus(value)
print(data_dict[322831])
df=pd.DataFrame(data_dict.items(),columns=['image_id','text_corpus'])
df.iloc[0]['text_corpus']
df.head()
df.shape
def lemmatize_stemm(text):
#return stemmer.stem(WordNetLemmatizer().lemmatize(text, pos='v'))
return WordNetLemmatizer().lemmatize(text, pos='v')
def preprocessing(Bigtext):
result = []
for text in Bigtext:
for token in gensim.utils.simple_preprocess(text):
if token not in gensim.parsing.preprocessing.STOPWORDS and len(lemmatize_stemm(token)) > 3:
#print(token)
result.append(lemmatize_stemm(token))
return result
dict_gensim={}
for i in range(df.shape[0]):
dict_gensim.update({i:preprocessing(df.iloc[i]["text_corpus"])})
print(dict_gensim[0])
dd=pd.Series(dict_gensim)
print(type(dd))
print(dd.shape)
dd.head()
dictionary = gensim.corpora.Dictionary(dd)
print(dictionary)
count = 0
for k, v in dictionary.iteritems():
print(k, v)
count += 1
if count > 10:
break
#dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=10000)
len(dictionary)
bow_corpus = [dictionary.doc2bow(doc) for doc in dd]
bow_doc_1 = bow_corpus[1]
for i in range(len(bow_doc_1)):
print("Word {} (\"{}\") appears {} time.".format(bow_doc_1[i][0],dictionary[bow_doc_1[i][0]], bow_doc_1[i][1]))
num_topics=10
lda_model = gensim.models.LdaMulticore(bow_corpus, num_topics, id2word=dictionary, passes=10)
for idx, topic in lda_model.print_topics(-1):
print('Topic: {} \nWords: {}'.format(idx, topic))
###Output
Topic: 0
Words: 0.120*"clock" + 0.070*"horse" + 0.069*"build" + 0.049*"tower" + 0.044*"frisbee" + 0.036*"large" + 0.035*"truck" + 0.022*"soccer" + 0.018*"tall" + 0.018*"white"
Topic: 1
Words: 0.097*"sign" + 0.044*"umbrella" + 0.040*"motorcycle" + 0.030*"scissor" + 0.029*"stop" + 0.024*"street" + 0.021*"pair" + 0.018*"airplane" + 0.017*"blue" + 0.017*"plane"
Topic: 2
Words: 0.107*"people" + 0.057*"baseball" + 0.051*"group" + 0.043*"play" + 0.042*"bear" + 0.037*"game" + 0.035*"stand" + 0.032*"snow" + 0.023*"teddy" + 0.020*"player"
Topic: 3
Words: 0.079*"street" + 0.061*"phone" + 0.050*"park" + 0.038*"cell" + 0.033*"city" + 0.031*"bench" + 0.026*"walk" + 0.024*"light" + 0.024*"road" + 0.021*"build"
Topic: 4
Words: 0.115*"train" + 0.090*"skateboard" + 0.045*"track" + 0.034*"board" + 0.032*"trick" + 0.030*"jump" + 0.029*"skate" + 0.026*"station" + 0.025*"person" + 0.017*"ramp"
Topic: 5
Words: 0.055*"stand" + 0.043*"water" + 0.037*"beach" + 0.032*"field" + 0.028*"kite" + 0.028*"grass" + 0.026*"surfboard" + 0.024*"tree" + 0.023*"wave" + 0.021*"walk"
Topic: 6
Words: 0.154*"tennis" + 0.051*"court" + 0.050*"ball" + 0.041*"bathroom" + 0.038*"racket" + 0.034*"player" + 0.030*"toilet" + 0.025*"hold" + 0.025*"play" + 0.024*"sink"
Topic: 7
Words: 0.075*"table" + 0.049*"plate" + 0.039*"pizza" + 0.038*"food" + 0.029*"laptop" + 0.023*"desk" + 0.022*"cake" + 0.016*"sandwich" + 0.015*"white" + 0.015*"glass"
Topic: 8
Words: 0.101*"woman" + 0.063*"hold" + 0.036*"stand" + 0.030*"kitchen" + 0.030*"girl" + 0.026*"young" + 0.023*"wear" + 0.023*"person" + 0.019*"boat" + 0.018*"look"
Topic: 9
Words: 0.058*"room" + 0.030*"table" + 0.029*"live" + 0.028*"flower" + 0.024*"vase" + 0.024*"chair" + 0.020*"white" + 0.020*"window" + 0.016*"wall" + 0.015*"couch"
###Markdown
This shows, for each document, the probability distribution over the different topics
###Code
lda_model[bow_corpus[500]]
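# For readability (added): pair each topic id returned above with its probability and
# the topic's top words.
for topic_id, prob in lda_model[bow_corpus[500]]:
    print(topic_id, round(prob, 3), lda_model.print_topic(topic_id, topn=5))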
###Output
_____no_output_____
###Markdown
Writing out $P(\mathrm{topic} \mid \mathrm{text})$, which works as y for our inception model
###Code
final_dict={}
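# (added note) lda_model[bow] only returns topics whose probability exceeds gensim's
# minimum_probability, so each list starts as zeros and only the returned entries are
# overwritten below.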
for j in range(len(bow_corpus)):
li=[0.0]*num_topics ### len == no of topics
probDist=lda_model[bow_corpus[j]]
for i in range(len(probDist)):
li[probDist[i][0]]=probDist[i][1]
final_dict[df.iloc[j]["image_id"]]=li
len(final_dict)
import pickle
with open("ldaResult","wb") as f:
    pickle.dump(final_dict,f)
###Output
_____no_output_____
###Markdown
Writing out $P(\mathrm{word} \mid \mathrm{topic})$, which works as y for our inception model
###Code
topic_word_prob={}
for i in range(num_topics):
word_prob={}
ss=lda_model.get_topic_terms(i,topn=75)
for x in ss:
word_prob[dictionary[x[0]]]=x[1]
topic_word_prob[i]=word_prob
import pickle
with open("topic_word_prob","wb") as f:
pickle.dump(topic_word_prob,f)
###Output
_____no_output_____ |
boards/Pynq-Z2/notebooks/filter2d.ipynb | ###Markdown
OpenCV Overlay: Filter2D This notebook illustrates the kinds of things you can do with accelerated openCV cores built as a PYNQ overlay. The overlay consists of a 2D filter, and this example notebook does the following: 1. Sets up the HDMI drivers; 2. Runs a software-only filter2D on the HDMI input and outputs the results to the HDMI output; 3. Sets up a widget for selecting different filter kernels; 4. Runs the hardware-accelerated filter2D function. NOTE: Rough FPS values are computed for each stage Program overlayHere we program the overlay and load the pynq python libraries for a memory manager and the accelerator drivers.NOTE: All overlay and python libraries should be loaded prior to assigning the HDMI input/outputs. This is necessary right now to ensure correct functionality but will be enhanced in future releases. For now, please copy this block as is when using it in your own designs.
###Code
# Load filter2D + dilate overlay
from pynq import Overlay
bareHDMI = Overlay("/usr/local/lib/python3.6/dist-packages/"
"pynq_cv/overlays/xv2Filter2DDilate.bit")
import pynq_cv.overlays.xv2Filter2DDilate as xv2
# Load xlnk memory mangager
from pynq import Xlnk
Xlnk.set_allocator_library("/usr/local/lib/python3.6/dist-packages/"
"pynq_cv/overlays/xv2Filter2DDilate.so")
mem_manager = Xlnk()
hdmi_in = bareHDMI.video.hdmi_in
hdmi_out = bareHDMI.video.hdmi_out
###Output
_____no_output_____
###Markdown
Setup and configure HDMI drivers ~15 seconds to initialize HDMI input/output
###Code
from pynq.lib.video import *
hdmi_in.configure(PIXEL_GRAY)
hdmi_out.configure(hdmi_in.mode)
hdmi_in.cacheable_frames = False
hdmi_out.cacheable_frames = False
hdmi_in.start()
hdmi_out.start()
###Output
_____no_output_____
###Markdown
Set up HDMI input/output parameters These parameters are referenced in later function calls
###Code
mymode = hdmi_in.mode
print("My mode: "+str(mymode))
height = hdmi_in.mode.height
width = hdmi_in.mode.width
bpp = hdmi_in.mode.bits_per_pixel
###Output
My mode: VideoMode: width=1920 height=1080 bpp=8
###Markdown
Run SW Filter2D ~10 seconds
###Code
import numpy as np
import time
import cv2
#Sobel Vertical filter
kernelF = np.array([[1.0,0.0,-1.0],[2.0,0.0,-2.0],[1.0,0.0,-1.0]],np.float32)
numframes = 20
start = time.time()
for _ in range(numframes):
inframe = hdmi_in.readframe()
outframe = hdmi_out.newframe()
cv2.filter2D(inframe, -1, kernelF, dst=outframe)
inframe.freebuffer()
hdmi_out.writeframe(outframe)
end = time.time()
print("Frames per second: " + str(numframes / (end - start)))
###Output
Frames per second: 3.3585366809661044
###Markdown
Show input frame in notebook
###Code
import PIL.Image
image = PIL.Image.fromarray(inframe)
image
###Output
_____no_output_____
###Markdown
Show output frame after filter2D in notebook.
###Code
import PIL.Image
image = PIL.Image.fromarray(outframe)
image
###Output
_____no_output_____
###Markdown
Setup control widgetsHere, we define some kernel configurations that will be used to change the functionality of the 2D filter on the fly. A pulldown menu will appear below this cell; it can be used to change the filter2D kernel used in subsequent cells.
###Code
from ipywidgets import interact, interactive, fixed, interact_manual
from ipywidgets import IntSlider, FloatSlider
import ipywidgets as widgets
#Sobel Vertical filter
kernel_g = np.array([[1.0,0.0,-1.0],[2.0,0.0,-2.0],[1.0,0.0,-1.0]],np.float32)
def setKernelAndFilter3x3(kernelName):
global kernel_g
kernel_g = {
'Laplacian high-pass': np.array([[0.0,1.0,0.0],[1.0,-4.0,1.0],
[0.0,1.0,0.0]],np.float32),
'Gaussian high-pass': np.array([[-0.0625,-0.125,-0.0625],
[-0.125,0.75,-0.125],
[-0.0625,-0.125,-0.0625]],np.float32),
'Average blur': np.ones((3,3),np.float32)/9.0,
'Gaussian blur': np.array([[0.0625,0.125,0.0625],
[0.125,0.25,0.125],
[0.0625,0.125,0.0625]],np.float32),
'Sobel ver': np.array([[1.0,0.0,-1.0],[2.0,0.0,-2.0],
[1.0,0.0,-1.0]],np.float32),
'Sobel hor': np.array([[1.0,2.0,1.0],[0.0,0.0,0.0],
[-1.0,-2.0,-1.0]],np.float32)
}.get(kernelName, np.ones((3,3),np.float32)/9.0)
interact(setKernelAndFilter3x3, kernelName
= ['Sobel ver','Sobel hor','Laplacian high-pass','Gaussian high-pass','Average blur',
'Gaussian blur',]);
###Output
_____no_output_____
###Markdown
Run HW filter2D~20 secondsRunning this kernel with a clock of 100 MHz gives a performance of about ~40 fps.NOTE: In order to allow kernel redefinition on the fly, subsequent function calls are run as threads. This means you will not know if the cell is finished based on the cell status on the left. Be sure to wait until FPS information is reported before running other cells. Also note that if you use the widget to change the kernel, the FPS info will show up underneath the widget cell rather than the function block cell.
###Code
import numpy as np
import cv2
from threading import Thread
def loop_hw2_app():
global kernel_g
numframes = 600
start=time.time()
for _ in range(numframes):
outframe = hdmi_out.newframe()
inframe = hdmi_in.readframe()
xv2.filter2D(inframe, -1, kernel_g, dst=outframe, borderType=cv2.BORDER_CONSTANT)
hdmi_out.writeframe(outframe)
inframe.freebuffer()
end=time.time()
print("Frames per second: " + str(numframes / (end - start)))
t = Thread(target=loop_hw2_app)
t.start()
###Output
Frames per second: 43.98824715869524
###Markdown
Show output frame after filter2D in notebook.
###Code
import PIL.Image
image = PIL.Image.fromarray(outframe)
image
###Output
_____no_output_____
###Markdown
Clean up hdmi driversNOTE: This is needed to reset the HDMI drivers to a clean state. If this is not run, subsequent executions of this notebook may show visual artifacts on the HDMI out (usually a shifted output image)
###Code
hdmi_out.close()
hdmi_in.close()
###Output
_____no_output_____ |
notebooks/object_detection.ipynb | ###Markdown
Object Detection Based on Renu Khandelwal's YOLOv3 demo provided [here](https://medium.com/datadriveninvestor/object-detection-using-yolov3-using-keras-80bf35e61ce1). Load dependencies
###Code
import os
import scipy.io
import scipy.misc
import numpy as np
from numpy import expand_dims
import pandas as pd
import PIL
import struct
import cv2
from numpy import expand_dims
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Lambda, Conv2D, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.layers import add, concatenate
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from matplotlib.patches import Rectangle
from skimage.transform import resize
%matplotlib inline
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.5, 0.45
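# (added note) obj_thresh discards detections whose class score falls below 0.5; nms_thresh
# is the IoU above which overlapping boxes of the same class are suppressed during
# non-max suppression (see do_nms below).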
# there are 80 class labels in the MS COCO dataset:
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
###Output
_____no_output_____
###Markdown
Design model architecture
###Code
# define block of conv layers:
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
# use _conv_block() to define model architecture:
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
yolov3 = make_yolov3_model()
# N.B.: the following line downloads the yolov3 model weights; skip it if you already have yolov3.h5:
! wget -c https://www.dropbox.com/s/88xnszqf7xkf70j/yolov3.h5
yolov3.load_weights('yolov3.h5')
###Output
_____no_output_____
###Markdown
Define object detection-specific functions
###Code
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = expand_dims(image, 0)
return image, width, height
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
# decode_netout() takes each one of the NumPy arrays, one at a time,
# and decodes the candidate bounding boxes and class predictions
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
        row = i // grid_w  # integer row index of this grid cell
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
            # skip boxes whose objectness is below the threshold
            # (the original `objectness.all()` call made this check a no-op)
            if objectness <= obj_thresh: continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
# to stretch bounding boxes back into the shape of the original image,
# enabling plotting of the original image with the bounding boxes overlain
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h  # use the network input height here (net_h == net_w in this notebook)
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def draw_boxes(filename, v_boxes, v_labels, v_scores):
# load the image
data = plt.imread(filename)
# plot the image
plt.imshow(data)
# get the context for drawing boxes
ax = plt.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='red')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
plt.text(x1, y1, label, color='red')
# show the plot
plt.show()
# get all of the results above a threshold
# takes the list of boxes, known labels,
# and our classification threshold as arguments
# and returns parallel lists of boxes, labels, and scores.
def get_boxes(boxes, labels, thresh):
v_boxes, v_labels, v_scores = list(), list(), list()
# enumerate all boxes
for box in boxes:
# enumerate all possible labels
for i in range(len(labels)):
# check if the threshold for this label is high enough
if box.classes[i] > thresh:
v_boxes.append(box)
v_labels.append(labels[i])
v_scores.append(box.classes[i]*100)
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores
###Output
_____no_output_____
###Markdown
Load sample image
###Code
! wget -c https://raw.githubusercontent.com/jonkrohn/DLTFpT/master/notebooks/oboe-with-book.jpg
# define the expected input shape for the model
input_w, input_h = 416, 416
# define our new photo
photo_filename = 'oboe-with-book.jpg'
# load and prepare image
image, image_w, image_h = load_image_pixels(photo_filename, (net_h, net_w))  # target_size is (height, width)
plt.imshow(plt.imread(photo_filename))
###Output
_____no_output_____
###Markdown
Perform inference
###Code
# make prediction
yolos = yolov3.predict(image)
# define the anchors
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
# define the probability threshold for detected objects
class_threshold = 0.6
boxes = list()
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# extract the details of the detected objects
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
# summarize what model found
for i in range(len(v_boxes)):
print(v_labels[i], v_scores[i])
# draw what model found
draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
###Output
_____no_output_____
###Markdown
Object Detection**Authors:**- [Angus Mackenzie](https://github.com/AngusTheMack) ([1106817](mailto:[email protected]))- [Nathan Michlo](https://github.com/nmichlo) ([1386161](mailto:[email protected]))**Achievement** Detecting the bounding box of snakes within images to better perform classification later on. IntroductionThis notebook is based on techniques from [this article](https://medium.com/@Stormblessed/2460292bcfb) and [this repo](https://github.com/GokulEpiphany/contests-final-code/tree/master/aicrowd-snake-species).----------------------
###Code
# Utilities
import sys
import os
from tqdm.notebook import tqdm
import imageio
import matplotlib.pyplot as plt
from fastai.vision.data import ImageList
import json
from pprint import pprint
# Add root of project to PYTHON_PATH so we can import correctly
if os.path.abspath('../') not in {os.path.abspath(path) for path in sys.path}:
sys.path.insert(0, os.path.abspath('../'))
# Import SSIC common stuffs
from ssic.ssic import SSIC
from ssic.util import set_random_seed, cache_data
# if you don't have a .env file, set it here
os.environ.setdefault('DATASET_DIR', '~/downloads/datasets/ssic')
# Initialise SSIC paths, data and other stuffs, searches for a .env file in the project with these variables specified, also checkpoints os.environ and sys.path
SSIC.init()
###Output
[[93mRESTORED[0m]: os.environ
[[92mLOADED[0m]:
[[93mRESTORED[0m]: sys.path
[[95mSTORAGE_DIR[0m]: [90m/home/nmichlo/workspace/snake-id/notebooks/out[0m
[[95mDATASET_DIR[0m]: [90m/home/nmichlo/downloads/datasets/ssic[0m
[[95mDATASET_CLASS_CSV[0m]: [90m/home/nmichlo/downloads/datasets/ssic/class_idx_mapping.csv[0m
[[95mDATASET_TRAIN_DIR[0m]: [90m/home/nmichlo/downloads/datasets/ssic/train[0m
[[95mDATASET_TEST_DIR[0m]: [90m/home/nmichlo/downloads/datasets/ssic/round1[0m
###Markdown
User Setup**DATASETS FROM**: https://medium.com/@Stormblessed/2460292bcfb**INSTRUCTIONS**: Download both of the following into the `DATASET_DIR` above, then extract the dataset into that same directory. **Take care**: the images are not inside a folder within the zip, so they will be extracted directly into that directory. - labeled dataset: https://drive.google.com/file/d/1q14CtkQ9r7rlxwLuksWAOduhDjUb-bBE/view - drive link: https://drive.google.com/file/d/18dx_5Ngmc56fDRZ6YZA_elX-0ehtV5U6/view Code
###Code
# LOAD IMAGES:
IMAGES_DIR = os.path.join(SSIC.DATASET_DIR, 'train-object-detect')
assert os.path.isdir(IMAGES_DIR)
imagelist = ImageList.from_folder(IMAGES_DIR)
# LOAD ANNOTATIONS:
ANNOTATIONS_PATH = os.path.join(SSIC.DATASET_DIR, 'annotations.json')
assert os.path.isfile(ANNOTATIONS_PATH)
with open(ANNOTATIONS_PATH, 'r') as file:
ANNOTATIONS = json.load(file)
# Show One Example
pprint(ANNOTATIONS[0])
plt.imshow(imageio.imread(SSIC.get_train_image_info()[ANNOTATIONS[0]['filename']]['path']))
# imports needed by the model and loss below; these import locations assume fastai v1 and PyTorch
import torch.nn as nn
from torch.nn import L1Loss
from fastai.vision import models
from fastai.vision.learner import create_body, create_head
from fastai.callbacks.hooks import num_features_model

class SnakeDetector(nn.Module):
def __init__(self, arch=models.resnet18):
super().__init__()
self.cnn = create_body(arch)
self.head = create_head(num_features_model(self.cnn) * 2, 4)
def forward(self, im):
x = self.cnn(im)
x = self.head(x)
return 2 * (x.sigmoid_() - 0.5)
def loss_fn(preds, targs, class_idxs):
return L1Loss()(preds, targs.squeeze())
###Output
_____no_output_____
###Markdown
Object Detection Based on Renu Khandelwal's YOLOv3 demo provided [here](https://medium.com/datadriveninvestor/object-detection-using-yolov3-using-keras-80bf35e61ce1). Load dependencies
###Code
import os
import scipy.io
import scipy.misc
import numpy as np
from numpy import expand_dims
import pandas as pd
import PIL
import struct
import cv2
from numpy import expand_dims
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Lambda, Conv2D, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.layers import add, concatenate
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from matplotlib.patches import Rectangle
from skimage.transform import resize
%matplotlib inline
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.5, 0.45
# there are 80 class labels in the MS COCO dataset:
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
###Output
_____no_output_____
###Markdown
Design model architecture
###Code
# define block of conv layers:
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
        if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefers left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
                   padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefers left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
# use _conv_block() to define model architecture:
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
yolov3 = make_yolov3_model()
# N.B.: the following line downloads the yolov3 model weights; skip it if you already have yolov3.h5:
! wget -c https://www.dropbox.com/s/88xnszqf7xkf70j/yolov3.h5
yolov3.load_weights('yolov3.h5')
###Output
_____no_output_____
###Markdown
Define object detection-specific functions
###Code
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = expand_dims(image, 0)
return image, width, height
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
# decode_netout() takes each one of the NumPy arrays, one at a time,
# and decodes the candidate bounding boxes and class predictions
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
        row = i // grid_w  # integer row index of this grid cell
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
            # skip boxes whose objectness is below the threshold
            # (the original `objectness.all()` call made this check a no-op)
            if objectness <= obj_thresh: continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
# to stretch bounding boxes back into the shape of the original image,
# enabling plotting of the original image with the bounding boxes overlain
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
        new_h = net_h  # use the network input height here (net_h == net_w in this notebook)
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def draw_boxes(filename, v_boxes, v_labels, v_scores):
# load the image
data = plt.imread(filename)
# plot the image
plt.imshow(data)
# get the context for drawing boxes
ax = plt.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='red')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
plt.text(x1, y1, label, color='red')
# show the plot
plt.show()
# get all of the results above a threshold
# takes the list of boxes, known labels,
# and our classification threshold as arguments
# and returns parallel lists of boxes, labels, and scores.
def get_boxes(boxes, labels, thresh):
v_boxes, v_labels, v_scores = list(), list(), list()
# enumerate all boxes
for box in boxes:
# enumerate all possible labels
for i in range(len(labels)):
# check if the threshold for this label is high enough
if box.classes[i] > thresh:
v_boxes.append(box)
v_labels.append(labels[i])
v_scores.append(box.classes[i]*100)
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores
###Output
_____no_output_____
###Markdown
Load sample image
###Code
! wget -c https://raw.githubusercontent.com/jonkrohn/DLTFpT/master/notebooks/oboe-with-book.jpg
# define the expected input shape for the model
input_w, input_h = 416, 416
# define our new photo
photo_filename = 'oboe-with-book.jpg'
# load and prepare image
image, image_w, image_h = load_image_pixels(photo_filename, (net_h, net_w))  # target_size is (height, width)
plt.imshow(plt.imread(photo_filename))
###Output
_____no_output_____
###Markdown
Perform inference
###Code
# make prediction
yolos = yolov3.predict(image)
# define the anchors
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
# define the probability threshold for detected objects
class_threshold = 0.6
boxes = list()
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# extract the details of the detected objects
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
# summarize what model found
for i in range(len(v_boxes)):
print(v_labels[i], v_scores[i])
# draw what model found
draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
###Output
dog 99.74095821380615
###Markdown
SSD7 Training TutorialThis tutorial explains how to train an SSD7 on the Udacity road traffic datasets, and just generally how to use this SSD implementation.Disclaimer about SSD7:As you will see below, training SSD7 on the aforementioned datasets yields alright results, but I'd like to emphasize that SSD7 is not a carefully optimized network architecture. The idea was just to build a low-complexity network that is fast (roughly 127 FPS or more than 3 times as fast as SSD300 on a GTX 1070) for testing purposes. Would slightly different anchor box scaling factors or a slightly different number of filters in individual convolution layers make SSD7 significantly better at similar complexity? I don't know, I haven't tried.
###Code
from google.colab import drive
drive.mount('/content/drive')
!unzip "/content/drive/MyDrive/GG_SSD7/Data_SSD_6Class.zip" -d "/content"
!git clone https://github.com/pierluigiferrari/ssd_keras.git
%cd /content/ssd_keras
!pip install keras==2.2.4
!pip install tensorflow-gpu==1.15
import tensorflow as tf
import keras
print(keras.__version__)
print(tf.__version__)
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
%matplotlib inline
###Output
_____no_output_____
###Markdown
1. Set the model configuration parametersThe cell below sets a number of parameters that define the model configuration. The parameters set here are being used both by the `build_model()` function that builds the model as well as further down by the constructor for the `SSDInputEncoder` object that is needed to match ground truth and anchor boxes during the training.Here are just some comments on a few of the parameters; read the documentation for more details:* Set the height, width, and number of color channels to whatever you want the model to accept as image input. If your input images have a different size than you define as the model input here, or if your images have non-uniform size, then you must use the data generator's image transformations (resizing and/or cropping) so that your images end up having the required input size before they are fed to the model. The SSD300 training tutorial uses the same image pre-processing and data augmentation as the original Caffe implementation, so take a look at that to see one possibility of how to deal with non-uniform-size images.* The number of classes is the number of positive classes in your dataset, e.g. 20 for Pascal VOC or 80 for MS COCO. Class ID 0 must always be reserved for the background class, i.e. your positive classes must have positive integers as their IDs in your dataset.* The `mode` argument in the `build_model()` function determines whether the model will be built with or without a `DecodeDetections` layer as its last layer. In 'training' mode, the model outputs the raw prediction tensor, while in 'inference' and 'inference_fast' modes, the raw predictions are being decoded into absolute coordinates and filtered via confidence thresholding, non-maximum suppression, and top-k filtering. The difference between the latter two modes is that 'inference' uses the decoding procedure of the original Caffe implementation, while 'inference_fast' uses a faster, but possibly less accurate decoding procedure.* The reason why the list of scaling factors has 5 elements even though there are only 4 predictor layers in SSD7 is that the last scaling factor is used for the second aspect-ratio-1 box of the last predictor layer. Refer to the documentation for details.* `build_model()` and `SSDInputEncoder` have two arguments for the anchor box aspect ratios: `aspect_ratios_global` and `aspect_ratios_per_layer`. You can use either of the two, you don't need to set both. If you use `aspect_ratios_global`, then you pass one list of aspect ratios and these aspect ratios will be used for all predictor layers. Every aspect ratio you want to include must be listed once and only once. If you use `aspect_ratios_per_layer`, then you pass a nested list containing lists of aspect ratios for each individual predictor layer (a short illustrative sketch of this nested form follows after this cell). This is what the SSD300 training tutorial does. It's your design choice whether all predictor layers should use the same aspect ratios or whether you think that for your dataset, certain aspect ratios are only necessary for some predictor layers but not for others. Of course more aspect ratios means more predicted boxes, which in turn means increased computational complexity.* If `two_boxes_for_ar1 == True`, then each predictor layer will predict two boxes with aspect ratio one, one a bit smaller, the other one a bit larger.* If `clip_boxes == True`, then the anchor boxes will be clipped so that they lie entirely within the image boundaries.
It is recommended not to clip the boxes. The anchor boxes form the reference frame for the localization prediction. This reference frame should be the same at every spatial position.* In the matching process during the training, the anchor box offsets are being divided by the variances. Leaving them at 1.0 for each of the four box coordinates means that they have no effect. Setting them to less than 1.0 spreads the imagined anchor box offset distribution for the respective box coordinate.* `normalize_coords` converts all coordinates from absolute coordinates to coordinates that are relative to the image height and width. This setting has no effect on the outcome of the training.
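As an illustration of the nested form mentioned above, here is a minimal sketch (with made-up values that are not the configuration used in this notebook) of what `aspect_ratios_per_layer` could look like for the four predictor layers of SSD7:
###Code
# Hypothetical example only: one list of aspect ratios per predictor layer.
# SSD7 has four predictor layers, so the outer list needs four entries.
aspect_ratios_per_layer_example = [[1.0, 2.0, 0.5],  # predictor layer 'classes4'/'boxes4'
                                   [1.0, 2.0, 0.5],  # predictor layer 'classes5'/'boxes5'
                                   [1.0, 2.0, 0.5],  # predictor layer 'classes6'/'boxes6'
                                   [1.0, 2.0, 0.5]]  # predictor layer 'classes7'/'boxes7'
# Such a nested list would be passed as `aspect_ratios_per_layer` to `build_model()` and
# `SSDInputEncoder` instead of the single global list used in this notebook.
###Output
_____no_output_____
###Markdown
This notebook sticks with a single global list of aspect ratios, which is set in the next cell.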
###Code
img_height = 180 # Height of the input images
img_width = 240 # Width of the input images
img_channels = 3 # Number of color channels of the input images
intensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
intensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
n_classes = 6 # Number of positive classes
scales = [0.08, 0.4, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
aspect_ratios = [0.9, 1.0, 1.1] # The list of aspect ratios for the anchor boxes
two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1
steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
offsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled
normalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size
###Output
_____no_output_____
###Markdown
2. Build or load the modelYou will want to execute either of the two code cells in the subsequent two sub-sections, not both. 2.1 Create a new modelIf you want to create a new model, this is the relevant section for you. If you want to load a previously saved model, skip ahead to section 2.2.The code cell below does the following things:1. It calls the function `build_model()` to build the model.2. It optionally loads some weights into the model.3. It then compiles the model for the training. In order to do so, we're defining an optimizer (Adam) and a loss function (SSDLoss) to be passed to the `compile()` method.`SSDLoss` is a custom Keras loss function that implements the multi-task log loss for classification and smooth L1 loss for localization. `neg_pos_ratio` and `alpha` are set as in the paper.
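For reference, the total loss that `SSDLoss` computes follows the SSD paper: $L(x, c, l, g) = \frac{1}{N}\big(L_{\text{conf}}(x, c) + \alpha \, L_{\text{loc}}(x, l, g)\big)$, where $N$ is the number of anchor boxes matched to ground truth (the loss is set to 0 if $N = 0$), $L_{\text{conf}}$ is the softmax log loss over the class confidences, $L_{\text{loc}}$ is the smooth L1 loss on the predicted box offsets, and $\alpha$ is the weighting factor passed as the `alpha` argument below.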
###Code
# 1: Build the Keras model
K.clear_session() # Clear previous models from memory.
model = build_model(image_size=(img_height, img_width, img_channels),
n_classes=n_classes,
mode='training',
l2_regularization=0.0005,
scales=scales,
aspect_ratios_global=aspect_ratios,
aspect_ratios_per_layer=None,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
normalize_coords=normalize_coords,
subtract_mean=intensity_mean,
divide_by_stddev=intensity_range)
# 2: Optional: Load some weights
# model.load_weights('/content/drive/MyDrive/GG_SSD7/Model_Final/Model_SSD7_1212_epoch-120_loss-0.0292_val_loss-0.0788_acc-0.2430.h5', by_name=True)
# 3: Instantiate an Adam optimizer and the SSD loss function and compile the model
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
'''
Arguments for SSDloss:
neg_pos_ratio (int, optional): The maximum ratio of negative (i.e. background)
to positive ground truth boxes to include in the loss computation.
There are no actual background ground truth boxes of course, but `y_true`
contains anchor boxes labeled with the background class. Since
the number of background boxes in `y_true` will usually exceed
the number of positive boxes by far, it is necessary to balance
their influence on the loss. Defaults to 3 following the paper.
n_neg_min (int, optional): The minimum number of negative ground truth boxes to
enter the loss computation *per batch*. This argument can be used to make
sure that the model learns from a minimum number of negatives in batches
in which there are very few, or even none at all, positive ground truth
boxes. It defaults to 0 and if used, it should be set to a value that
stands in reasonable proportion to the batch size used for training.
alpha (float, optional): A factor to weight the localization loss in the
computation of the total loss. Defaults to 1.0 following the paper.
'''
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
model.compile(optimizer=adam, loss=ssd_loss.compute_loss, metrics = ['acc'])
model.summary()
###Output
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 180, 240, 3) 0
__________________________________________________________________________________________________
identity_layer (Lambda) (None, 180, 240, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
input_mean_normalization (Lambd (None, 180, 240, 3) 0 identity_layer[0][0]
__________________________________________________________________________________________________
input_stddev_normalization (Lam (None, 180, 240, 3) 0 input_mean_normalization[0][0]
__________________________________________________________________________________________________
conv1 (Conv2D) (None, 180, 240, 32) 2432 input_stddev_normalization[0][0]
__________________________________________________________________________________________________
bn1 (BatchNormalization) (None, 180, 240, 32) 128 conv1[0][0]
__________________________________________________________________________________________________
elu1 (ELU) (None, 180, 240, 32) 0 bn1[0][0]
__________________________________________________________________________________________________
pool1 (MaxPooling2D) (None, 90, 120, 32) 0 elu1[0][0]
__________________________________________________________________________________________________
conv2 (Conv2D) (None, 90, 120, 48) 13872 pool1[0][0]
__________________________________________________________________________________________________
bn2 (BatchNormalization) (None, 90, 120, 48) 192 conv2[0][0]
__________________________________________________________________________________________________
elu2 (ELU) (None, 90, 120, 48) 0 bn2[0][0]
__________________________________________________________________________________________________
pool2 (MaxPooling2D) (None, 45, 60, 48) 0 elu2[0][0]
__________________________________________________________________________________________________
conv3 (Conv2D) (None, 45, 60, 64) 27712 pool2[0][0]
__________________________________________________________________________________________________
bn3 (BatchNormalization) (None, 45, 60, 64) 256 conv3[0][0]
__________________________________________________________________________________________________
elu3 (ELU) (None, 45, 60, 64) 0 bn3[0][0]
__________________________________________________________________________________________________
pool3 (MaxPooling2D) (None, 22, 30, 64) 0 elu3[0][0]
__________________________________________________________________________________________________
conv4 (Conv2D) (None, 22, 30, 64) 36928 pool3[0][0]
__________________________________________________________________________________________________
bn4 (BatchNormalization) (None, 22, 30, 64) 256 conv4[0][0]
__________________________________________________________________________________________________
elu4 (ELU) (None, 22, 30, 64) 0 bn4[0][0]
__________________________________________________________________________________________________
pool4 (MaxPooling2D) (None, 11, 15, 64) 0 elu4[0][0]
__________________________________________________________________________________________________
conv5 (Conv2D) (None, 11, 15, 48) 27696 pool4[0][0]
__________________________________________________________________________________________________
bn5 (BatchNormalization) (None, 11, 15, 48) 192 conv5[0][0]
__________________________________________________________________________________________________
elu5 (ELU) (None, 11, 15, 48) 0 bn5[0][0]
__________________________________________________________________________________________________
pool5 (MaxPooling2D) (None, 5, 7, 48) 0 elu5[0][0]
__________________________________________________________________________________________________
conv6 (Conv2D) (None, 5, 7, 48) 20784 pool5[0][0]
__________________________________________________________________________________________________
bn6 (BatchNormalization) (None, 5, 7, 48) 192 conv6[0][0]
__________________________________________________________________________________________________
elu6 (ELU) (None, 5, 7, 48) 0 bn6[0][0]
__________________________________________________________________________________________________
pool6 (MaxPooling2D) (None, 2, 3, 48) 0 elu6[0][0]
__________________________________________________________________________________________________
conv7 (Conv2D) (None, 2, 3, 32) 13856 pool6[0][0]
__________________________________________________________________________________________________
bn7 (BatchNormalization) (None, 2, 3, 32) 128 conv7[0][0]
__________________________________________________________________________________________________
elu7 (ELU) (None, 2, 3, 32) 0 bn7[0][0]
__________________________________________________________________________________________________
classes4 (Conv2D) (None, 22, 30, 28) 16156 elu4[0][0]
__________________________________________________________________________________________________
classes5 (Conv2D) (None, 11, 15, 28) 12124 elu5[0][0]
__________________________________________________________________________________________________
classes6 (Conv2D) (None, 5, 7, 28) 12124 elu6[0][0]
__________________________________________________________________________________________________
classes7 (Conv2D) (None, 2, 3, 28) 8092 elu7[0][0]
__________________________________________________________________________________________________
boxes4 (Conv2D) (None, 22, 30, 16) 9232 elu4[0][0]
__________________________________________________________________________________________________
boxes5 (Conv2D) (None, 11, 15, 16) 6928 elu5[0][0]
__________________________________________________________________________________________________
boxes6 (Conv2D) (None, 5, 7, 16) 6928 elu6[0][0]
__________________________________________________________________________________________________
boxes7 (Conv2D) (None, 2, 3, 16) 4624 elu7[0][0]
__________________________________________________________________________________________________
classes4_reshape (Reshape) (None, 2640, 7) 0 classes4[0][0]
__________________________________________________________________________________________________
classes5_reshape (Reshape) (None, 660, 7) 0 classes5[0][0]
__________________________________________________________________________________________________
classes6_reshape (Reshape) (None, 140, 7) 0 classes6[0][0]
__________________________________________________________________________________________________
classes7_reshape (Reshape) (None, 24, 7) 0 classes7[0][0]
__________________________________________________________________________________________________
anchors4 (AnchorBoxes) (None, 22, 30, 4, 8) 0 boxes4[0][0]
__________________________________________________________________________________________________
anchors5 (AnchorBoxes) (None, 11, 15, 4, 8) 0 boxes5[0][0]
__________________________________________________________________________________________________
anchors6 (AnchorBoxes) (None, 5, 7, 4, 8) 0 boxes6[0][0]
__________________________________________________________________________________________________
anchors7 (AnchorBoxes) (None, 2, 3, 4, 8) 0 boxes7[0][0]
__________________________________________________________________________________________________
classes_concat (Concatenate) (None, 3464, 7) 0 classes4_reshape[0][0]
classes5_reshape[0][0]
classes6_reshape[0][0]
classes7_reshape[0][0]
__________________________________________________________________________________________________
boxes4_reshape (Reshape) (None, 2640, 4) 0 boxes4[0][0]
__________________________________________________________________________________________________
boxes5_reshape (Reshape) (None, 660, 4) 0 boxes5[0][0]
__________________________________________________________________________________________________
boxes6_reshape (Reshape) (None, 140, 4) 0 boxes6[0][0]
__________________________________________________________________________________________________
boxes7_reshape (Reshape) (None, 24, 4) 0 boxes7[0][0]
__________________________________________________________________________________________________
anchors4_reshape (Reshape) (None, 2640, 8) 0 anchors4[0][0]
__________________________________________________________________________________________________
anchors5_reshape (Reshape) (None, 660, 8) 0 anchors5[0][0]
__________________________________________________________________________________________________
anchors6_reshape (Reshape) (None, 140, 8) 0 anchors6[0][0]
__________________________________________________________________________________________________
anchors7_reshape (Reshape) (None, 24, 8) 0 anchors7[0][0]
__________________________________________________________________________________________________
classes_softmax (Activation) (None, 3464, 7) 0 classes_concat[0][0]
__________________________________________________________________________________________________
boxes_concat (Concatenate) (None, 3464, 4) 0 boxes4_reshape[0][0]
boxes5_reshape[0][0]
boxes6_reshape[0][0]
boxes7_reshape[0][0]
__________________________________________________________________________________________________
anchors_concat (Concatenate) (None, 3464, 8) 0 anchors4_reshape[0][0]
anchors5_reshape[0][0]
anchors6_reshape[0][0]
anchors7_reshape[0][0]
__________________________________________________________________________________________________
predictions (Concatenate) (None, 3464, 19) 0 classes_softmax[0][0]
boxes_concat[0][0]
anchors_concat[0][0]
==================================================================================================
Total params: 220,832
Trainable params: 220,160
Non-trainable params: 672
__________________________________________________________________________________________________
###Markdown
2.2 Load a saved modelIf you have previously created and saved a model and would now like to load it, simply execute the next code cell. The only thing you need to do is to set the path to the saved model HDF5 file that you would like to load.The SSD model contains custom objects: Neither the loss function, nor the anchor box or detection decoding layer types are contained in the Keras core library, so we need to provide them to the model loader.This next code cell assumes that you want to load a model that was created in 'training' mode. If you want to load a model that was created in 'inference' or 'inference_fast' mode, you'll have to add the `DecodeDetections` or `DecodeDetectionsFast` layer type to the `custom_objects` dictionary below (a minimal sketch of such a loading call is given after this cell). 3. Set up the data generators for the trainingThe code cells below set up data generators for the training and validation datasets to train the model. You will have to set the file paths to your dataset. Depending on the annotations format of your dataset, you might also have to switch from the CSV parser to the XML or JSON parser, or you might have to write a new parser method in the `DataGenerator` class that can handle whatever format your annotations are in. The [README](https://github.com/pierluigiferrari/ssd_keras/blob/master/README.md) of this repository provides a summary of the design of the `DataGenerator`, which should help you in case you need to write a new parser or adapt one of the existing parsers to your needs.Note that the generator provides two options to speed up the training. By default, it loads the individual images for a batch from disk. This has two disadvantages. First, for compressed image formats like JPG, this is a huge computational waste, because every image needs to be decompressed again and again every time it is being loaded. Second, the images on disk are likely not stored in a contiguous block of memory, which may also slow down the loading process. The first option that `DataGenerator` provides to deal with this is to load the entire dataset into memory, which reduces the access time for any image to a negligible amount, but of course this is only an option if you have enough free memory to hold the whole dataset. As a second option, `DataGenerator` provides the possibility to convert the dataset into a single HDF5 file. This HDF5 file stores the images as uncompressed arrays in a contiguous block of memory, which dramatically speeds up the loading time. It's not as good as having the images in memory, but it's a lot better than the default option of loading them from their compressed JPG state every time they are needed. Of course such an HDF5 dataset may require significantly more disk space than the compressed images. You can later load these HDF5 datasets directly in the constructor.Set the batch size to your preference and to what your GPU memory allows; it's not the most important hyperparameter. The Caffe implementation uses a batch size of 32, but smaller batch sizes work fine, too.The `DataGenerator` itself is fairly generic. It doesn't contain any data augmentation or bounding box encoding logic. Instead, you pass a list of image transformations and an encoder for the bounding boxes in the `transformations` and `label_encoder` arguments of the data generator's `generate()` method, and the data generator will then apply those given transformations and the encoding to the data.
Everything here is preset already, but if you'd like to learn more about the data generator and its data augmentation capabilities, take a look at the detailed tutorial in [this](https://github.com/pierluigiferrari/data_generator_object_detection_2d) repository.The image processing chain defined further down in the object named `data_augmentation_chain` is just one possibility of what a data augmentation pipeline for uniform-size images could look like. Feel free to put together other image processing chains; you can use the `DataAugmentationConstantInputSize` class as a template. Or you could use the original SSD data augmentation pipeline by instantiating an `SSDDataAugmentation` object and passing that to the generator instead. This procedure is not exactly efficient, but it evidently produces good results on multiple datasets.An `SSDInputEncoder` object, `ssd_input_encoder`, is passed to both the training and validation generators. As explained above, it matches the ground truth labels to the model's anchor boxes and encodes the box coordinates into the format that the model needs. Note:The example setup below was used to train SSD7 on two road traffic datasets released by [Udacity](https://github.com/udacity/self-driving-car/tree/master/annotations) with around 20,000 images in total and 5 object classes (car, truck, pedestrian, bicyclist, traffic light), although the vast majority of the objects are cars. The original datasets have a constant image size of 1200x1920 RGB. I consolidated the two datasets, removed a few bad samples (although there are probably many more), and resized the images to 300x480 RGB, i.e. to one sixteenth of the original image size. In case you'd like to train a model on the same dataset, you can download the consolidated and resized dataset I used [here](https://drive.google.com/open?id=1tfBFavijh4UTG4cGqIKwhcklLXUDuY0D) (about 900 MB).
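Referring back to section 2.2 above, here is a minimal sketch, under the assumption of a model saved in 'training' mode and a placeholder file name `ssd7.h5`, of what loading a saved model with the required custom objects could look like:
###Code
# Minimal sketch only: 'ssd7.h5' is a placeholder path, not a file provided with this notebook.
# Uncomment and adapt the `load_model()` call if you actually want to load a saved model.
from keras.models import load_model
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_loss_function.keras_ssd_loss import SSDLoss

ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
# model = load_model('ssd7.h5',
#                    custom_objects={'AnchorBoxes': AnchorBoxes,
#                                    'compute_loss': ssd_loss.compute_loss})
###Output
_____no_output_____
###Markdown
For a model saved in 'inference' or 'inference_fast' mode, the `DecodeDetections` or `DecodeDetectionsFast` layer type would have to be added to the `custom_objects` dictionary as well. Similarly, an HDF5 dataset created with `create_hdf5_dataset()` can later be loaded by passing its file path as `hdf5_dataset_path` to the `DataGenerator` constructor.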
###Code
# 1: Instantiate two `DataGenerator` objects: One for training, one for validation.
# Optional: If you have enough memory, consider loading the images into memory for the reasons explained above.
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets.
# TODO: Set the paths to your dataset here.
# Images
train_images_dir = '/content/Data_SSD_6Class/Data'
val_images_dir = '/content/Data_SSD_6Class/Data'
# Ground truth
train_labels_filename = '/content/Data_SSD_6Class/train.csv'
val_labels_filename = '/content/Data_SSD_6Class/test.csv'
train_dataset.parse_csv(images_dir=train_images_dir,
labels_filename=train_labels_filename,
input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
include_classes='all')
val_dataset.parse_csv(images_dir=val_images_dir,
labels_filename=val_labels_filename,
input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'],
include_classes='all')
# Optional: Convert the dataset into an HDF5 dataset. This will require more disk space, but will
# speed up the training. Doing this is not relevant in case you activated the `load_images_into_memory`
# option in the constructor, because in that case the images are in memory already anyway. If you don't
# want to create HDF5 datasets, comment out the subsequent two function calls.
# train_dataset.create_hdf5_dataset(file_path='train_dataset.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
# val_dataset.create_hdf5_dataset(file_path='val_dataset.h5',
# resize=False,
# variable_image_size=True,
# verbose=True)
# Get the number of samples in the training and validations datasets.
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
print("Number of images in the training dataset:\t{:>6}".format(train_dataset_size))
print("Number of images in the validation dataset:\t{:>6}".format(val_dataset_size))
# 3: Set the batch size.
batch_size = 64
# 4: Define the image processing chain.
data_augmentation_chain = DataAugmentationConstantInputSize(#random_brightness=(-30, 30, 0.5),
# random_contrast=(0.5, 1.8, 0.5),
# random_saturation=(0.5, 1.8, 0.5),
# random_hue=(18, 0.5),
# random_translate=((0.03,0.5), (0.03,0.5), 0.5),
# random_scale=(0.5, 2.0, 0.5),
n_trials_max=3,
clip_boxes=True,
overlap_criterion='area',
bounds_box_filter=(0.75, 1.0),
bounds_validator=(0.75, 1.0),
n_boxes_min=1,
background=(0,0,0))
# 5: Instantiate an encoder that can encode ground truth labels into the format needed by the SSD loss function.
# The encoder constructor needs the spatial dimensions of the model's predictor layers to create the anchor boxes.
predictor_sizes = [model.get_layer('classes4').output_shape[1:3],
model.get_layer('classes5').output_shape[1:3],
model.get_layer('classes6').output_shape[1:3],
model.get_layer('classes7').output_shape[1:3]]
ssd_input_encoder = SSDInputEncoder(img_height=img_height,
img_width=img_width,
n_classes=n_classes,
predictor_sizes=predictor_sizes,
scales=scales,
aspect_ratios_global=aspect_ratios,
two_boxes_for_ar1=two_boxes_for_ar1,
steps=steps,
offsets=offsets,
clip_boxes=clip_boxes,
variances=variances,
matching_type='multi',
pos_iou_threshold=0.5,
neg_iou_limit=0.3,
normalize_coords=normalize_coords)
# 6: Create the generator handles that will be passed to Keras' `fit_generator()` function.
#    Note: with `transformations=[]` below, the `data_augmentation_chain` defined above is not applied;
#    pass it in the list (e.g. `transformations=[data_augmentation_chain]`) to enable data augmentation.
train_generator = train_dataset.generate(batch_size=batch_size,
shuffle=True,
transformations=[],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
val_generator = val_dataset.generate(batch_size=batch_size,
shuffle=False,
transformations=[],
label_encoder=ssd_input_encoder,
returns={'processed_images',
'encoded_labels'},
keep_images_without_gt=False)
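# (Optional) Sanity-check sketch: draw one batch from the training generator.
# With `returns={'processed_images', 'encoded_labels'}`, the generator yields a
# tuple of (batch of images, encoded ground-truth tensor), which is exactly what
# `fit_generator()` expects. Uncomment to inspect the shapes:
# batch_images, batch_labels = next(train_generator)
# print(batch_images.shape, batch_labels.shape)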
###Output
_____no_output_____
###Markdown
4. Set the remaining training parameters and train the model

We've already chosen an optimizer and a learning rate and set the batch size above; now let's set the remaining training parameters.

I'll set a few Keras callbacks below: one for early stopping, one to reduce the learning rate if the training stagnates, one to save the best models during the training, and one to continuously stream the training history to a CSV file after every epoch. Logging to a CSV file makes sense because, if the training terminates with an exception at some point, or the kernel of this Jupyter notebook dies, or anything else like that happens, we would otherwise lose the entire history of the trained epochs. Feel free to add more callbacks if you want TensorBoard summaries or whatever, as sketched below.
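For instance, a TensorBoard callback could be set up like this (a minimal sketch; the log directory is just a placeholder):

```python
# Minimal sketch: stream training summaries to TensorBoard.
# `./tensorboard_logs` is a placeholder log directory.
from keras.callbacks import TensorBoard

tensorboard = TensorBoard(log_dir='./tensorboard_logs')

# Append it to the `callbacks` list defined in the next cell, e.g.:
# callbacks.append(tensorboard)
```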
###Code
# Define model callbacks.
# TODO: Set the filepath under which you want to save the weights.
model_checkpoint = ModelCheckpoint(filepath='/content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-{epoch:02d}_loss-{loss:.4f}_val_loss-{val_loss:.4f}_acc-{acc:.4f}.h5',
monitor='loss',
verbose=1,
save_best_only=True,
save_weights_only=False,
mode='auto',
period=1)
csv_logger = CSVLogger(filename='ssd7_training_log.csv',
separator=',',
append=True)
early_stopping = EarlyStopping(monitor='loss',
min_delta=0.0,
patience=10,
verbose=1)
reduce_learning_rate = ReduceLROnPlateau(monitor='loss',
factor=0.2,
patience=5,
verbose=1,
epsilon=0.001,
cooldown=0,
min_lr=0.00001)
callbacks = [model_checkpoint,
csv_logger,
early_stopping,
reduce_learning_rate]
###Output
/usr/local/lib/python3.6/dist-packages/keras/callbacks.py:1065: UserWarning: `epsilon` argument is deprecated and will be removed, use `min_delta` instead.
warnings.warn('`epsilon` argument is deprecated and '
###Markdown
One epoch will consist of as many training steps as it takes to go through the full training dataset once at the chosen batch size (you could instead fix the number of steps per epoch, e.g. to 1,000), and the number of epochs is arbitrarily set to 200 below. This does not imply that this is the right amount of training. Depending on the model, the dataset, the learning rate, etc., you might have to train much longer to achieve convergence, or maybe less.

Instead of trying to train a model to convergence in one go, you might want to train only for a few epochs at a time.

In order to only run a partial training and resume smoothly later on, there are a few things you should note:

1. Always load the full model if you can, rather than building a new model and loading previously saved weights into it. Optimizers like SGD or Adam keep running averages of past gradient moments internally. If you always save and load full models when resuming a training, then the state of the optimizer is maintained and the training picks up exactly where it left off. If you build a new model and load weights into it, the optimizer is being initialized from scratch, which, especially in the case of Adam, leads to small but unnecessary setbacks every time you resume the training with previously saved weights.

2. You should tell `fit_generator()` which epoch to start from, otherwise it will start with epoch 0 every time you resume the training. Set `initial_epoch` to be the next epoch of your training. Note that this parameter is zero-based, i.e. the first epoch is epoch 0. If you had trained for 10 epochs previously and now you'd want to resume the training from there, you'd set `initial_epoch = 10` (since epoch 10 is the eleventh epoch). Furthermore, set `final_epoch` to the last epoch you want to run. To stick with the previous example, if you had trained for 10 epochs previously and now you'd want to train for another 10 epochs, you'd set `initial_epoch = 10` and `final_epoch = 20` (see the sketch below).

3. Callbacks like `ModelCheckpoint` or `ReduceLROnPlateau` are stateful, so you might want to save their state somehow if you want to pick up a training exactly where you left off.
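To make point 2 concrete, a resumed run could look roughly like this. It's only a sketch with the illustrative epoch numbers from the example above; it assumes that `model` has been re-loaded as a full model (e.g. with a `load_model()` call like the one sketched in section 2.2) and that the generators, callbacks, and size variables from the cells above are still defined.

```python
# Sketch of resuming a training run (illustrative numbers; assumes `model`,
# `train_generator`, `val_generator`, `callbacks`, `steps_per_epoch`,
# `val_dataset_size`, and `batch_size` are defined as in the cells above).
initial_epoch = 10  # the training previously ran for 10 epochs (epochs 0-9)
final_epoch   = 20  # run 10 more epochs, i.e. epochs 10-19

history = model.fit_generator(generator=train_generator,
                              steps_per_epoch=steps_per_epoch,
                              epochs=final_epoch,
                              callbacks=callbacks,
                              validation_data=val_generator,
                              validation_steps=ceil(val_dataset_size/batch_size),
                              initial_epoch=initial_epoch)
```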
###Code
# TODO: Set the epochs to train for.
# If you're resuming a previous training, set `initial_epoch` and `final_epoch` accordingly.
initial_epoch = 0
final_epoch = 200
steps_per_epoch = np.ceil(float(train_dataset_size) / float(batch_size))
# steps_per_epoch = 1000
history = model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=final_epoch,
callbacks=callbacks,
validation_data=val_generator,
validation_steps=ceil(val_dataset_size/batch_size),
initial_epoch=initial_epoch)
###Output
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:986: The name tf.assign_add is deprecated. Please use tf.compat.v1.assign_add instead.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:973: The name tf.assign is deprecated. Please use tf.compat.v1.assign instead.
Epoch 1/200
139/139 [==============================] - 42s 305ms/step - loss: 7.4824 - acc: 0.0033 - val_loss: 4.5273 - val_acc: 0.0035
Epoch 00001: loss improved from inf to 7.48945, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-01_loss-7.4895_val_loss-4.5273_acc-0.0033.h5
Epoch 2/200
139/139 [==============================] - 34s 246ms/step - loss: 3.0589 - acc: 0.0034 - val_loss: 2.8613 - val_acc: 0.0036
Epoch 00002: loss improved from 7.48945 to 3.06018, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-02_loss-3.0602_val_loss-2.8613_acc-0.0034.h5
Epoch 3/200
139/139 [==============================] - 36s 259ms/step - loss: 1.9952 - acc: 0.0035 - val_loss: 2.1890 - val_acc: 0.0036
Epoch 00003: loss improved from 3.06018 to 1.99600, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-03_loss-1.9960_val_loss-2.1890_acc-0.0035.h5
Epoch 4/200
139/139 [==============================] - 36s 259ms/step - loss: 1.3903 - acc: 0.0044 - val_loss: 1.4314 - val_acc: 0.0052
Epoch 00004: loss improved from 1.99600 to 1.39054, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-04_loss-1.3905_val_loss-1.4314_acc-0.0044.h5
Epoch 5/200
139/139 [==============================] - 36s 258ms/step - loss: 1.1123 - acc: 0.0062 - val_loss: 1.1189 - val_acc: 0.0069
Epoch 00005: loss improved from 1.39054 to 1.11246, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-05_loss-1.1125_val_loss-1.1189_acc-0.0062.h5
Epoch 6/200
139/139 [==============================] - 35s 255ms/step - loss: 0.9222 - acc: 0.0089 - val_loss: 0.9721 - val_acc: 0.0103
Epoch 00006: loss improved from 1.11246 to 0.92227, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-06_loss-0.9223_val_loss-0.9721_acc-0.0089.h5
Epoch 7/200
139/139 [==============================] - 35s 252ms/step - loss: 0.8234 - acc: 0.0132 - val_loss: 0.9681 - val_acc: 0.0167
Epoch 00007: loss improved from 0.92227 to 0.82371, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-07_loss-0.8237_val_loss-0.9681_acc-0.0132.h5
Epoch 8/200
139/139 [==============================] - 35s 253ms/step - loss: 0.7259 - acc: 0.0187 - val_loss: 0.7996 - val_acc: 0.0234
Epoch 00008: loss improved from 0.82371 to 0.72615, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-08_loss-0.7261_val_loss-0.7996_acc-0.0187.h5
Epoch 9/200
139/139 [==============================] - 35s 253ms/step - loss: 0.6657 - acc: 0.0268 - val_loss: 0.7697 - val_acc: 0.0338
Epoch 00009: loss improved from 0.72615 to 0.66586, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-09_loss-0.6659_val_loss-0.7697_acc-0.0268.h5
Epoch 10/200
139/139 [==============================] - 34s 247ms/step - loss: 0.6103 - acc: 0.0357 - val_loss: 0.7066 - val_acc: 0.0333
Epoch 00010: loss improved from 0.66586 to 0.61056, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-10_loss-0.6106_val_loss-0.7066_acc-0.0357.h5
Epoch 11/200
139/139 [==============================] - 35s 255ms/step - loss: 0.5663 - acc: 0.0467 - val_loss: 0.6731 - val_acc: 0.0546
Epoch 00011: loss improved from 0.61056 to 0.56641, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-11_loss-0.5664_val_loss-0.6731_acc-0.0467.h5
Epoch 12/200
139/139 [==============================] - 36s 255ms/step - loss: 0.5177 - acc: 0.0583 - val_loss: 0.6529 - val_acc: 0.0380
Epoch 00012: loss improved from 0.56641 to 0.51768, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-12_loss-0.5177_val_loss-0.6529_acc-0.0583.h5
Epoch 13/200
139/139 [==============================] - 36s 261ms/step - loss: 0.4827 - acc: 0.0734 - val_loss: 0.6354 - val_acc: 0.1008
Epoch 00013: loss improved from 0.51768 to 0.48272, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-13_loss-0.4827_val_loss-0.6354_acc-0.0734.h5
Epoch 14/200
139/139 [==============================] - 36s 260ms/step - loss: 0.4554 - acc: 0.0852 - val_loss: 0.7237 - val_acc: 0.1648
Epoch 00014: loss improved from 0.48272 to 0.45539, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-14_loss-0.4554_val_loss-0.7237_acc-0.0851.h5
Epoch 15/200
139/139 [==============================] - 36s 261ms/step - loss: 0.4328 - acc: 0.0969 - val_loss: 0.5936 - val_acc: 0.0619
Epoch 00015: loss improved from 0.45539 to 0.43256, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-15_loss-0.4326_val_loss-0.5936_acc-0.0969.h5
Epoch 16/200
139/139 [==============================] - 37s 263ms/step - loss: 0.4158 - acc: 0.1122 - val_loss: 0.5941 - val_acc: 0.1578
Epoch 00016: loss improved from 0.43256 to 0.41569, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-16_loss-0.4157_val_loss-0.5941_acc-0.1121.h5
Epoch 17/200
139/139 [==============================] - 36s 259ms/step - loss: 0.3962 - acc: 0.1216 - val_loss: 0.5484 - val_acc: 0.1330
Epoch 00017: loss improved from 0.41569 to 0.39631, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-17_loss-0.3963_val_loss-0.5484_acc-0.1216.h5
Epoch 18/200
139/139 [==============================] - 36s 262ms/step - loss: 0.3740 - acc: 0.1346 - val_loss: 0.5962 - val_acc: 0.1377
Epoch 00018: loss improved from 0.39631 to 0.37403, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-18_loss-0.3740_val_loss-0.5962_acc-0.1346.h5
Epoch 19/200
139/139 [==============================] - 37s 263ms/step - loss: 0.3533 - acc: 0.1505 - val_loss: 0.5617 - val_acc: 0.1583
Epoch 00019: loss improved from 0.37403 to 0.35309, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-19_loss-0.3531_val_loss-0.5617_acc-0.1504.h5
Epoch 20/200
139/139 [==============================] - 36s 261ms/step - loss: 0.3483 - acc: 0.1648 - val_loss: 0.5461 - val_acc: 0.2383
Epoch 00020: loss improved from 0.35309 to 0.34830, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-20_loss-0.3483_val_loss-0.5461_acc-0.1647.h5
Epoch 21/200
139/139 [==============================] - 36s 260ms/step - loss: 0.3359 - acc: 0.1758 - val_loss: 0.5152 - val_acc: 0.1642
Epoch 00021: loss improved from 0.34830 to 0.33591, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-21_loss-0.3359_val_loss-0.5152_acc-0.1758.h5
Epoch 22/200
139/139 [==============================] - 37s 263ms/step - loss: 0.3099 - acc: 0.1894 - val_loss: 0.4961 - val_acc: 0.1803
Epoch 00022: loss improved from 0.33591 to 0.30985, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-22_loss-0.3098_val_loss-0.4961_acc-0.1894.h5
Epoch 23/200
139/139 [==============================] - 36s 262ms/step - loss: 0.3003 - acc: 0.1964 - val_loss: 0.5666 - val_acc: 0.2061
Epoch 00023: loss improved from 0.30985 to 0.30032, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-23_loss-0.3003_val_loss-0.5666_acc-0.1964.h5
Epoch 24/200
139/139 [==============================] - 37s 264ms/step - loss: 0.3470 - acc: 0.2022 - val_loss: 0.5100 - val_acc: 0.2316
Epoch 00024: loss did not improve from 0.30032
Epoch 25/200
139/139 [==============================] - 36s 261ms/step - loss: 0.2965 - acc: 0.2181 - val_loss: 0.5090 - val_acc: 0.2466
Epoch 00025: loss improved from 0.30032 to 0.29650, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-25_loss-0.2965_val_loss-0.5090_acc-0.2181.h5
Epoch 26/200
139/139 [==============================] - 36s 261ms/step - loss: 0.2698 - acc: 0.2325 - val_loss: 0.4867 - val_acc: 0.1816
Epoch 00026: loss improved from 0.29650 to 0.26971, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-26_loss-0.2697_val_loss-0.4867_acc-0.2325.h5
Epoch 27/200
139/139 [==============================] - 36s 260ms/step - loss: 0.2605 - acc: 0.2320 - val_loss: 0.4681 - val_acc: 0.2731
Epoch 00027: loss improved from 0.26971 to 0.26039, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-27_loss-0.2604_val_loss-0.4681_acc-0.2319.h5
Epoch 28/200
139/139 [==============================] - 36s 257ms/step - loss: 0.2716 - acc: 0.2381 - val_loss: 0.5655 - val_acc: 0.2274
Epoch 00028: loss did not improve from 0.26039
Epoch 29/200
139/139 [==============================] - 36s 257ms/step - loss: 0.3067 - acc: 0.2529 - val_loss: 7.7941 - val_acc: 0.3654
Epoch 00029: loss did not improve from 0.26039
Epoch 30/200
139/139 [==============================] - 36s 260ms/step - loss: 0.3048 - acc: 0.2695 - val_loss: 0.4669 - val_acc: 0.2752
Epoch 00030: loss did not improve from 0.26039
Epoch 31/200
139/139 [==============================] - 36s 258ms/step - loss: 0.2570 - acc: 0.2865 - val_loss: 0.4488 - val_acc: 0.3247
Epoch 00031: loss improved from 0.26039 to 0.25712, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-31_loss-0.2571_val_loss-0.4488_acc-0.2865.h5
Epoch 32/200
139/139 [==============================] - 36s 261ms/step - loss: 0.2293 - acc: 0.2854 - val_loss: 0.4235 - val_acc: 0.3077
Epoch 00032: loss improved from 0.25712 to 0.22920, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-32_loss-0.2292_val_loss-0.4235_acc-0.2854.h5
Epoch 33/200
139/139 [==============================] - 35s 254ms/step - loss: 0.2177 - acc: 0.2845 - val_loss: 0.4702 - val_acc: 0.4216
Epoch 00033: loss improved from 0.22920 to 0.21766, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-33_loss-0.2177_val_loss-0.4702_acc-0.2845.h5
Epoch 34/200
139/139 [==============================] - 36s 256ms/step - loss: 0.2203 - acc: 0.2848 - val_loss: 0.4146 - val_acc: 0.2513
Epoch 00034: loss did not improve from 0.21766
Epoch 35/200
139/139 [==============================] - 36s 258ms/step - loss: 0.2101 - acc: 0.2874 - val_loss: 0.4657 - val_acc: 0.3319
Epoch 00035: loss improved from 0.21766 to 0.21010, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-35_loss-0.2101_val_loss-0.4657_acc-0.2874.h5
Epoch 36/200
139/139 [==============================] - 35s 252ms/step - loss: 0.2334 - acc: 0.3039 - val_loss: 0.5085 - val_acc: 0.2633
Epoch 00036: loss did not improve from 0.21010
Epoch 37/200
139/139 [==============================] - 36s 256ms/step - loss: 0.3184 - acc: 0.3254 - val_loss: 0.5725 - val_acc: 0.4479
Epoch 00037: loss did not improve from 0.21010
Epoch 38/200
139/139 [==============================] - 35s 249ms/step - loss: 0.2484 - acc: 0.3628 - val_loss: 0.4252 - val_acc: 0.3309
Epoch 00038: loss did not improve from 0.21010
Epoch 39/200
139/139 [==============================] - 35s 254ms/step - loss: 0.2231 - acc: 0.3545 - val_loss: 0.3821 - val_acc: 0.4686
Epoch 00039: loss did not improve from 0.21010
Epoch 40/200
139/139 [==============================] - 34s 247ms/step - loss: 0.2062 - acc: 0.3486 - val_loss: 0.3945 - val_acc: 0.3885
Epoch 00040: loss improved from 0.21010 to 0.20623, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-40_loss-0.2062_val_loss-0.3945_acc-0.3486.h5
Epoch 41/200
139/139 [==============================] - 35s 250ms/step - loss: 0.1986 - acc: 0.3526 - val_loss: 0.4171 - val_acc: 0.3456
Epoch 00041: loss improved from 0.20623 to 0.19865, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-41_loss-0.1986_val_loss-0.4171_acc-0.3526.h5
Epoch 42/200
139/139 [==============================] - 35s 249ms/step - loss: 0.1855 - acc: 0.3446 - val_loss: 0.4218 - val_acc: 0.1913
Epoch 00042: loss improved from 0.19865 to 0.18548, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-42_loss-0.1855_val_loss-0.4218_acc-0.3446.h5
Epoch 43/200
139/139 [==============================] - 34s 247ms/step - loss: 0.1883 - acc: 0.3432 - val_loss: 0.3729 - val_acc: 0.2832
Epoch 00043: loss did not improve from 0.18548
Epoch 44/200
139/139 [==============================] - 35s 249ms/step - loss: 0.1812 - acc: 0.3455 - val_loss: 0.4014 - val_acc: 0.4072
Epoch 00044: loss improved from 0.18548 to 0.18124, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-44_loss-0.1812_val_loss-0.4014_acc-0.3455.h5
Epoch 45/200
139/139 [==============================] - 34s 246ms/step - loss: 0.2588 - acc: 0.3837 - val_loss: 0.5093 - val_acc: 0.7619
Epoch 00045: loss did not improve from 0.18124
Epoch 46/200
139/139 [==============================] - 35s 249ms/step - loss: 0.2471 - acc: 0.4166 - val_loss: 0.4188 - val_acc: 0.3917
Epoch 00046: loss did not improve from 0.18124
Epoch 47/200
139/139 [==============================] - 34s 245ms/step - loss: 0.2122 - acc: 0.4448 - val_loss: 0.4543 - val_acc: 0.5889
Epoch 00047: loss did not improve from 0.18124
Epoch 48/200
139/139 [==============================] - 35s 249ms/step - loss: 0.1935 - acc: 0.4395 - val_loss: 0.4220 - val_acc: 0.5668
Epoch 00048: loss did not improve from 0.18124
Epoch 49/200
139/139 [==============================] - 35s 251ms/step - loss: 0.1833 - acc: 0.4261 - val_loss: 0.3721 - val_acc: 0.4478
Epoch 00049: loss did not improve from 0.18124
Epoch 00049: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026.
Epoch 50/200
139/139 [==============================] - 34s 246ms/step - loss: 0.1520 - acc: 0.4188 - val_loss: 0.3300 - val_acc: 0.4245
Epoch 00050: loss improved from 0.18124 to 0.15204, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-50_loss-0.1520_val_loss-0.3300_acc-0.4188.h5
Epoch 51/200
139/139 [==============================] - 35s 254ms/step - loss: 0.1422 - acc: 0.4052 - val_loss: 0.3280 - val_acc: 0.4115
Epoch 00051: loss improved from 0.15204 to 0.14220, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-51_loss-0.1422_val_loss-0.3280_acc-0.4053.h5
Epoch 52/200
139/139 [==============================] - 34s 244ms/step - loss: 0.1390 - acc: 0.3939 - val_loss: 0.3252 - val_acc: 0.4049
Epoch 00052: loss improved from 0.14220 to 0.13899, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-52_loss-0.1390_val_loss-0.3252_acc-0.3939.h5
Epoch 53/200
139/139 [==============================] - 34s 248ms/step - loss: 0.1360 - acc: 0.3836 - val_loss: 0.3227 - val_acc: 0.3917
Epoch 00053: loss improved from 0.13899 to 0.13597, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-53_loss-0.1360_val_loss-0.3227_acc-0.3836.h5
Epoch 54/200
139/139 [==============================] - 34s 246ms/step - loss: 0.1331 - acc: 0.3738 - val_loss: 0.3209 - val_acc: 0.3876
Epoch 00054: loss improved from 0.13597 to 0.13307, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-54_loss-0.1331_val_loss-0.3209_acc-0.3738.h5
Epoch 55/200
139/139 [==============================] - 35s 250ms/step - loss: 0.1306 - acc: 0.3644 - val_loss: 0.3187 - val_acc: 0.3784
Epoch 00055: loss improved from 0.13307 to 0.13056, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-55_loss-0.1306_val_loss-0.3187_acc-0.3644.h5
Epoch 56/200
139/139 [==============================] - 34s 246ms/step - loss: 0.1275 - acc: 0.3556 - val_loss: 0.3160 - val_acc: 0.3697
Epoch 00056: loss improved from 0.13056 to 0.12755, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-56_loss-0.1275_val_loss-0.3160_acc-0.3557.h5
Epoch 57/200
139/139 [==============================] - 34s 247ms/step - loss: 0.1247 - acc: 0.3471 - val_loss: 0.3142 - val_acc: 0.3648
Epoch 00057: loss improved from 0.12755 to 0.12474, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-57_loss-0.1247_val_loss-0.3142_acc-0.3471.h5
Epoch 58/200
139/139 [==============================] - 35s 248ms/step - loss: 0.1221 - acc: 0.3389 - val_loss: 0.3085 - val_acc: 0.3401
Epoch 00058: loss improved from 0.12474 to 0.12214, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-58_loss-0.1221_val_loss-0.3085_acc-0.3389.h5
Epoch 59/200
139/139 [==============================] - 35s 250ms/step - loss: 0.1193 - acc: 0.3301 - val_loss: 0.3093 - val_acc: 0.3363
Epoch 00059: loss improved from 0.12214 to 0.11927, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-59_loss-0.1193_val_loss-0.3093_acc-0.3301.h5
Epoch 60/200
139/139 [==============================] - 36s 256ms/step - loss: 0.1161 - acc: 0.3216 - val_loss: 0.3007 - val_acc: 0.3290
Epoch 00060: loss improved from 0.11927 to 0.11607, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-60_loss-0.1161_val_loss-0.3007_acc-0.3216.h5
Epoch 61/200
139/139 [==============================] - 35s 249ms/step - loss: 0.1131 - acc: 0.3132 - val_loss: 0.2993 - val_acc: 0.3112
Epoch 00061: loss improved from 0.11607 to 0.11312, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-61_loss-0.1131_val_loss-0.2993_acc-0.3132.h5
Epoch 62/200
139/139 [==============================] - 35s 250ms/step - loss: 0.1103 - acc: 0.3043 - val_loss: 0.3003 - val_acc: 0.3096
Epoch 00062: loss improved from 0.11312 to 0.11031, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-62_loss-0.1103_val_loss-0.3003_acc-0.3043.h5
Epoch 63/200
139/139 [==============================] - 35s 248ms/step - loss: 0.1074 - acc: 0.2966 - val_loss: 0.3017 - val_acc: 0.3075
Epoch 00063: loss improved from 0.11031 to 0.10738, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-63_loss-0.1074_val_loss-0.3017_acc-0.2965.h5
Epoch 64/200
139/139 [==============================] - 35s 251ms/step - loss: 0.1045 - acc: 0.2888 - val_loss: 0.2926 - val_acc: 0.3004
Epoch 00064: loss improved from 0.10738 to 0.10447, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-64_loss-0.1045_val_loss-0.2926_acc-0.2888.h5
Epoch 65/200
139/139 [==============================] - 35s 249ms/step - loss: 0.1014 - acc: 0.2802 - val_loss: 0.2900 - val_acc: 0.2871
Epoch 00065: loss improved from 0.10447 to 0.10140, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-65_loss-0.1014_val_loss-0.2900_acc-0.2802.h5
Epoch 66/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0984 - acc: 0.2733 - val_loss: 0.2857 - val_acc: 0.2854
Epoch 00066: loss improved from 0.10140 to 0.09844, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-66_loss-0.0984_val_loss-0.2857_acc-0.2733.h5
Epoch 67/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0956 - acc: 0.2669 - val_loss: 0.2813 - val_acc: 0.2716
Epoch 00067: loss improved from 0.09844 to 0.09558, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-67_loss-0.0956_val_loss-0.2813_acc-0.2669.h5
Epoch 68/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0924 - acc: 0.2607 - val_loss: 0.2729 - val_acc: 0.2516
Epoch 00068: loss improved from 0.09558 to 0.09238, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-68_loss-0.0924_val_loss-0.2729_acc-0.2607.h5
Epoch 69/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0910 - acc: 0.2548 - val_loss: 0.2736 - val_acc: 0.2358
Epoch 00069: loss improved from 0.09238 to 0.09101, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-69_loss-0.0910_val_loss-0.2736_acc-0.2548.h5
Epoch 70/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0876 - acc: 0.2503 - val_loss: 0.2707 - val_acc: 0.2516
Epoch 00070: loss improved from 0.09101 to 0.08758, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-70_loss-0.0876_val_loss-0.2707_acc-0.2503.h5
Epoch 71/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0857 - acc: 0.2453 - val_loss: 0.2733 - val_acc: 0.2497
Epoch 00071: loss improved from 0.08758 to 0.08570, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-71_loss-0.0857_val_loss-0.2733_acc-0.2453.h5
Epoch 72/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0825 - acc: 0.2446 - val_loss: 0.2718 - val_acc: 0.2333
Epoch 00072: loss improved from 0.08570 to 0.08248, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-72_loss-0.0825_val_loss-0.2718_acc-0.2446.h5
Epoch 73/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0845 - acc: 0.2368 - val_loss: 0.2965 - val_acc: 0.2295
Epoch 00073: loss did not improve from 0.08248
Epoch 74/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0806 - acc: 0.2402 - val_loss: 0.2991 - val_acc: 0.2904
Epoch 00074: loss improved from 0.08248 to 0.08056, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-74_loss-0.0806_val_loss-0.2991_acc-0.2402.h5
Epoch 75/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0802 - acc: 0.2445 - val_loss: 0.2917 - val_acc: 0.2429
Epoch 00075: loss improved from 0.08056 to 0.08019, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-75_loss-0.0802_val_loss-0.2917_acc-0.2445.h5
Epoch 76/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0908 - acc: 0.2594 - val_loss: 0.2842 - val_acc: 0.2277
Epoch 00076: loss did not improve from 0.08019
Epoch 77/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0903 - acc: 0.2796 - val_loss: 0.3613 - val_acc: 0.4488
Epoch 00077: loss did not improve from 0.08019
Epoch 78/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0845 - acc: 0.2973 - val_loss: 0.4472 - val_acc: 0.3449
Epoch 00078: loss did not improve from 0.08019
Epoch 79/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0806 - acc: 0.2953 - val_loss: 0.2584 - val_acc: 0.2734
Epoch 00079: loss did not improve from 0.08019
Epoch 00079: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05.
Epoch 80/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0724 - acc: 0.2947 - val_loss: 0.2571 - val_acc: 0.3193
Epoch 00080: loss improved from 0.08019 to 0.07245, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-80_loss-0.0724_val_loss-0.2571_acc-0.2947.h5
Epoch 81/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0710 - acc: 0.2933 - val_loss: 0.2574 - val_acc: 0.3170
Epoch 00081: loss improved from 0.07245 to 0.07100, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-81_loss-0.0710_val_loss-0.2574_acc-0.2933.h5
Epoch 82/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0703 - acc: 0.2903 - val_loss: 0.2579 - val_acc: 0.3146
Epoch 00082: loss improved from 0.07100 to 0.07026, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-82_loss-0.0703_val_loss-0.2579_acc-0.2903.h5
Epoch 83/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0697 - acc: 0.2879 - val_loss: 0.2565 - val_acc: 0.3053
Epoch 00083: loss improved from 0.07026 to 0.06970, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-83_loss-0.0697_val_loss-0.2565_acc-0.2879.h5
Epoch 84/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0692 - acc: 0.2846 - val_loss: 0.2567 - val_acc: 0.3035
Epoch 00084: loss improved from 0.06970 to 0.06913, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-84_loss-0.0691_val_loss-0.2567_acc-0.2846.h5
Epoch 85/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0686 - acc: 0.2820 - val_loss: 0.2564 - val_acc: 0.2991
Epoch 00085: loss improved from 0.06913 to 0.06861, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-85_loss-0.0686_val_loss-0.2564_acc-0.2820.h5
Epoch 86/200
139/139 [==============================] - 34s 244ms/step - loss: 0.0680 - acc: 0.2785 - val_loss: 0.2553 - val_acc: 0.2964
Epoch 00086: loss improved from 0.06861 to 0.06797, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-86_loss-0.0680_val_loss-0.2553_acc-0.2785.h5
Epoch 87/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0674 - acc: 0.2756 - val_loss: 0.2567 - val_acc: 0.2935
Epoch 00087: loss improved from 0.06797 to 0.06739, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-87_loss-0.0674_val_loss-0.2567_acc-0.2756.h5
Epoch 88/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0668 - acc: 0.2722 - val_loss: 0.2551 - val_acc: 0.2879
Epoch 00088: loss improved from 0.06739 to 0.06680, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-88_loss-0.0668_val_loss-0.2551_acc-0.2722.h5
Epoch 89/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0662 - acc: 0.2694 - val_loss: 0.2564 - val_acc: 0.2914
Epoch 00089: loss improved from 0.06680 to 0.06619, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-89_loss-0.0662_val_loss-0.2564_acc-0.2694.h5
Epoch 90/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0656 - acc: 0.2661 - val_loss: 0.2553 - val_acc: 0.2818
Epoch 00090: loss improved from 0.06619 to 0.06557, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-90_loss-0.0656_val_loss-0.2553_acc-0.2661.h5
Epoch 91/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0649 - acc: 0.2628 - val_loss: 0.2550 - val_acc: 0.2815
Epoch 00091: loss improved from 0.06557 to 0.06493, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-91_loss-0.0649_val_loss-0.2550_acc-0.2628.h5
Epoch 92/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0643 - acc: 0.2595 - val_loss: 0.2543 - val_acc: 0.2781
Epoch 00092: loss improved from 0.06493 to 0.06428, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-92_loss-0.0643_val_loss-0.2543_acc-0.2595.h5
Epoch 93/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0636 - acc: 0.2560 - val_loss: 0.2536 - val_acc: 0.2729
Epoch 00093: loss improved from 0.06428 to 0.06358, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-93_loss-0.0636_val_loss-0.2536_acc-0.2560.h5
Epoch 94/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0629 - acc: 0.2526 - val_loss: 0.2532 - val_acc: 0.2691
Epoch 00094: loss improved from 0.06358 to 0.06288, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-94_loss-0.0629_val_loss-0.2532_acc-0.2526.h5
Epoch 95/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0622 - acc: 0.2492 - val_loss: 0.2512 - val_acc: 0.2609
Epoch 00095: loss improved from 0.06288 to 0.06223, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-95_loss-0.0622_val_loss-0.2512_acc-0.2492.h5
Epoch 96/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0615 - acc: 0.2462 - val_loss: 0.2520 - val_acc: 0.2618
Epoch 00096: loss improved from 0.06223 to 0.06151, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-96_loss-0.0615_val_loss-0.2520_acc-0.2462.h5
Epoch 97/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0608 - acc: 0.2427 - val_loss: 0.2508 - val_acc: 0.2568
Epoch 00097: loss improved from 0.06151 to 0.06081, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-97_loss-0.0608_val_loss-0.2508_acc-0.2427.h5
Epoch 98/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0603 - acc: 0.2402 - val_loss: 0.2500 - val_acc: 0.2579
Epoch 00098: loss improved from 0.06081 to 0.06025, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-98_loss-0.0603_val_loss-0.2500_acc-0.2402.h5
Epoch 99/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0595 - acc: 0.2371 - val_loss: 0.2520 - val_acc: 0.2542
Epoch 00099: loss improved from 0.06025 to 0.05947, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-99_loss-0.0595_val_loss-0.2520_acc-0.2371.h5
Epoch 100/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0586 - acc: 0.2338 - val_loss: 0.2485 - val_acc: 0.2521
Epoch 00100: loss improved from 0.05947 to 0.05857, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-100_loss-0.0586_val_loss-0.2485_acc-0.2338.h5
Epoch 101/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0577 - acc: 0.2317 - val_loss: 0.2464 - val_acc: 0.2421
Epoch 00101: loss improved from 0.05857 to 0.05774, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-101_loss-0.0577_val_loss-0.2464_acc-0.2317.h5
Epoch 102/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0570 - acc: 0.2282 - val_loss: 0.2490 - val_acc: 0.2441
Epoch 00102: loss improved from 0.05774 to 0.05703, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-102_loss-0.0570_val_loss-0.2490_acc-0.2282.h5
Epoch 103/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0563 - acc: 0.2258 - val_loss: 0.2435 - val_acc: 0.2348
Epoch 00103: loss improved from 0.05703 to 0.05629, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-103_loss-0.0563_val_loss-0.2435_acc-0.2258.h5
Epoch 104/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0556 - acc: 0.2239 - val_loss: 0.2475 - val_acc: 0.2426
Epoch 00104: loss improved from 0.05629 to 0.05561, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-104_loss-0.0556_val_loss-0.2475_acc-0.2239.h5
Epoch 105/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0548 - acc: 0.2219 - val_loss: 0.2479 - val_acc: 0.2370
Epoch 00105: loss improved from 0.05561 to 0.05482, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-105_loss-0.0548_val_loss-0.2479_acc-0.2220.h5
Epoch 106/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0540 - acc: 0.2194 - val_loss: 0.2422 - val_acc: 0.2238
Epoch 00106: loss improved from 0.05482 to 0.05405, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-106_loss-0.0541_val_loss-0.2422_acc-0.2194.h5
Epoch 107/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0531 - acc: 0.2181 - val_loss: 0.2416 - val_acc: 0.2261
Epoch 00107: loss improved from 0.05405 to 0.05315, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-107_loss-0.0531_val_loss-0.2416_acc-0.2181.h5
Epoch 108/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0524 - acc: 0.2160 - val_loss: 0.2440 - val_acc: 0.2281
Epoch 00108: loss improved from 0.05315 to 0.05237, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-108_loss-0.0524_val_loss-0.2440_acc-0.2160.h5
Epoch 109/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0516 - acc: 0.2145 - val_loss: 0.2474 - val_acc: 0.2387
Epoch 00109: loss improved from 0.05237 to 0.05157, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-109_loss-0.0516_val_loss-0.2474_acc-0.2145.h5
Epoch 110/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0512 - acc: 0.2129 - val_loss: 0.2477 - val_acc: 0.2326
Epoch 00110: loss improved from 0.05157 to 0.05125, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-110_loss-0.0512_val_loss-0.2477_acc-0.2129.h5
Epoch 111/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0502 - acc: 0.2119 - val_loss: 0.2403 - val_acc: 0.2248
Epoch 00111: loss improved from 0.05125 to 0.05016, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-111_loss-0.0502_val_loss-0.2403_acc-0.2119.h5
Epoch 112/200
139/139 [==============================] - 35s 255ms/step - loss: 0.0494 - acc: 0.2109 - val_loss: 0.2404 - val_acc: 0.2164
Epoch 00112: loss improved from 0.05016 to 0.04936, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-112_loss-0.0494_val_loss-0.2404_acc-0.2109.h5
Epoch 113/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0487 - acc: 0.2099 - val_loss: 0.2396 - val_acc: 0.2226
Epoch 00113: loss improved from 0.04936 to 0.04869, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-113_loss-0.0487_val_loss-0.2396_acc-0.2099.h5
Epoch 114/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0482 - acc: 0.2077 - val_loss: 0.2390 - val_acc: 0.2211
Epoch 00114: loss improved from 0.04869 to 0.04820, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-114_loss-0.0482_val_loss-0.2390_acc-0.2077.h5
Epoch 115/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0476 - acc: 0.2076 - val_loss: 0.2481 - val_acc: 0.2297
Epoch 00115: loss improved from 0.04820 to 0.04758, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-115_loss-0.0476_val_loss-0.2481_acc-0.2076.h5
Epoch 116/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0477 - acc: 0.2065 - val_loss: 0.2368 - val_acc: 0.2179
Epoch 00116: loss did not improve from 0.04758
Epoch 117/200
139/139 [==============================] - 34s 244ms/step - loss: 0.0462 - acc: 0.2069 - val_loss: 0.2373 - val_acc: 0.2175
Epoch 00117: loss improved from 0.04758 to 0.04620, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-117_loss-0.0462_val_loss-0.2373_acc-0.2069.h5
Epoch 118/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0457 - acc: 0.2061 - val_loss: 0.2431 - val_acc: 0.2297
Epoch 00118: loss improved from 0.04620 to 0.04575, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-118_loss-0.0457_val_loss-0.2431_acc-0.2061.h5
Epoch 119/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0452 - acc: 0.2055 - val_loss: 0.2349 - val_acc: 0.2225
Epoch 00119: loss improved from 0.04575 to 0.04523, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-119_loss-0.0452_val_loss-0.2349_acc-0.2055.h5
Epoch 120/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0445 - acc: 0.2048 - val_loss: 0.2380 - val_acc: 0.2232
Epoch 00120: loss improved from 0.04523 to 0.04455, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-120_loss-0.0446_val_loss-0.2380_acc-0.2048.h5
Epoch 121/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0440 - acc: 0.2048 - val_loss: 0.2362 - val_acc: 0.2228
Epoch 00121: loss improved from 0.04455 to 0.04396, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-121_loss-0.0440_val_loss-0.2362_acc-0.2048.h5
Epoch 122/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0434 - acc: 0.2035 - val_loss: 0.2348 - val_acc: 0.2134
Epoch 00122: loss improved from 0.04396 to 0.04342, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-122_loss-0.0434_val_loss-0.2348_acc-0.2035.h5
Epoch 123/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0428 - acc: 0.2034 - val_loss: 0.2321 - val_acc: 0.2184
Epoch 00123: loss improved from 0.04342 to 0.04278, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-123_loss-0.0428_val_loss-0.2321_acc-0.2034.h5
Epoch 124/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0423 - acc: 0.2038 - val_loss: 0.2321 - val_acc: 0.2144
Epoch 00124: loss improved from 0.04278 to 0.04227, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-124_loss-0.0423_val_loss-0.2321_acc-0.2038.h5
Epoch 125/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0416 - acc: 0.2030 - val_loss: 0.2355 - val_acc: 0.2246
Epoch 00125: loss improved from 0.04227 to 0.04161, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-125_loss-0.0416_val_loss-0.2355_acc-0.2030.h5
Epoch 126/200
139/139 [==============================] - 35s 255ms/step - loss: 0.0413 - acc: 0.2028 - val_loss: 0.2378 - val_acc: 0.2256
Epoch 00126: loss improved from 0.04161 to 0.04131, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-126_loss-0.0413_val_loss-0.2378_acc-0.2028.h5
Epoch 127/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0411 - acc: 0.2028 - val_loss: 0.2381 - val_acc: 0.2240
Epoch 00127: loss improved from 0.04131 to 0.04106, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-127_loss-0.0411_val_loss-0.2381_acc-0.2028.h5
Epoch 128/200
139/139 [==============================] - 35s 255ms/step - loss: 0.0404 - acc: 0.2031 - val_loss: 0.2348 - val_acc: 0.2200
Epoch 00128: loss improved from 0.04106 to 0.04037, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-128_loss-0.0404_val_loss-0.2348_acc-0.2031.h5
Epoch 129/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0402 - acc: 0.2026 - val_loss: 0.2409 - val_acc: 0.2239
Epoch 00129: loss improved from 0.04037 to 0.04019, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-129_loss-0.0402_val_loss-0.2409_acc-0.2026.h5
Epoch 130/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0396 - acc: 0.2027 - val_loss: 0.2346 - val_acc: 0.2194
Epoch 00130: loss improved from 0.04019 to 0.03963, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-130_loss-0.0396_val_loss-0.2346_acc-0.2027.h5
Epoch 131/200
139/139 [==============================] - 36s 255ms/step - loss: 0.0390 - acc: 0.2034 - val_loss: 0.2311 - val_acc: 0.2211
Epoch 00131: loss improved from 0.03963 to 0.03900, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-131_loss-0.0390_val_loss-0.2311_acc-0.2034.h5
Epoch 132/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0384 - acc: 0.2033 - val_loss: 0.2291 - val_acc: 0.2022
Epoch 00132: loss improved from 0.03900 to 0.03838, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-132_loss-0.0384_val_loss-0.2291_acc-0.2033.h5
Epoch 133/200
139/139 [==============================] - 35s 249ms/step - loss: 0.0378 - acc: 0.2039 - val_loss: 0.2333 - val_acc: 0.2189
Epoch 00133: loss improved from 0.03838 to 0.03783, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-133_loss-0.0378_val_loss-0.2333_acc-0.2039.h5
Epoch 134/200
139/139 [==============================] - 35s 255ms/step - loss: 0.0374 - acc: 0.2024 - val_loss: 0.2317 - val_acc: 0.2069
Epoch 00134: loss improved from 0.03783 to 0.03740, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-134_loss-0.0374_val_loss-0.2317_acc-0.2024.h5
Epoch 135/200
139/139 [==============================] - 36s 256ms/step - loss: 0.0372 - acc: 0.2028 - val_loss: 0.2402 - val_acc: 0.2348
Epoch 00135: loss improved from 0.03740 to 0.03716, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-135_loss-0.0372_val_loss-0.2402_acc-0.2028.h5
Epoch 136/200
139/139 [==============================] - 35s 253ms/step - loss: 0.0366 - acc: 0.2027 - val_loss: 0.2305 - val_acc: 0.2126
Epoch 00136: loss improved from 0.03716 to 0.03656, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-136_loss-0.0366_val_loss-0.2305_acc-0.2027.h5
Epoch 137/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0361 - acc: 0.2033 - val_loss: 0.2441 - val_acc: 0.2318
Epoch 00137: loss improved from 0.03656 to 0.03612, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-137_loss-0.0361_val_loss-0.2441_acc-0.2033.h5
Epoch 138/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0363 - acc: 0.2032 - val_loss: 0.2360 - val_acc: 0.2135
Epoch 00138: loss did not improve from 0.03612
Epoch 139/200
139/139 [==============================] - 34s 242ms/step - loss: 0.0367 - acc: 0.2051 - val_loss: 0.2368 - val_acc: 0.2255
Epoch 00139: loss did not improve from 0.03612
Epoch 140/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0355 - acc: 0.2050 - val_loss: 0.2351 - val_acc: 0.2223
Epoch 00140: loss improved from 0.03612 to 0.03553, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-140_loss-0.0355_val_loss-0.2351_acc-0.2050.h5
Epoch 141/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0351 - acc: 0.2054 - val_loss: 0.2355 - val_acc: 0.2263
Epoch 00141: loss improved from 0.03553 to 0.03511, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-141_loss-0.0351_val_loss-0.2355_acc-0.2054.h5
Epoch 142/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0344 - acc: 0.2058 - val_loss: 0.2316 - val_acc: 0.2232
Epoch 00142: loss improved from 0.03511 to 0.03440, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-142_loss-0.0344_val_loss-0.2316_acc-0.2058.h5
Epoch 143/200
139/139 [==============================] - 34s 248ms/step - loss: 0.0341 - acc: 0.2048 - val_loss: 0.2298 - val_acc: 0.2198
Epoch 00143: loss improved from 0.03440 to 0.03411, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-143_loss-0.0341_val_loss-0.2298_acc-0.2049.h5
Epoch 144/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0339 - acc: 0.2039 - val_loss: 0.2286 - val_acc: 0.2215
Epoch 00144: loss improved from 0.03411 to 0.03394, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-144_loss-0.0339_val_loss-0.2286_acc-0.2039.h5
Epoch 145/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0337 - acc: 0.2057 - val_loss: 0.2323 - val_acc: 0.2204
Epoch 00145: loss improved from 0.03394 to 0.03370, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-145_loss-0.0337_val_loss-0.2323_acc-0.2057.h5
Epoch 146/200
139/139 [==============================] - 34s 242ms/step - loss: 0.0332 - acc: 0.2058 - val_loss: 0.2329 - val_acc: 0.2166
Epoch 00146: loss improved from 0.03370 to 0.03324, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-146_loss-0.0332_val_loss-0.2329_acc-0.2058.h5
Epoch 147/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0329 - acc: 0.2060 - val_loss: 0.2315 - val_acc: 0.2254
Epoch 00147: loss improved from 0.03324 to 0.03292, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-147_loss-0.0329_val_loss-0.2315_acc-0.2060.h5
Epoch 148/200
139/139 [==============================] - 35s 250ms/step - loss: 0.0325 - acc: 0.2059 - val_loss: 0.2324 - val_acc: 0.2286
Epoch 00148: loss improved from 0.03292 to 0.03247, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-148_loss-0.0325_val_loss-0.2324_acc-0.2059.h5
Epoch 149/200
139/139 [==============================] - 35s 254ms/step - loss: 0.0323 - acc: 0.2054 - val_loss: 0.2322 - val_acc: 0.2204
Epoch 00149: loss improved from 0.03247 to 0.03228, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-149_loss-0.0323_val_loss-0.2322_acc-0.2054.h5
Epoch 150/200
139/139 [==============================] - 35s 252ms/step - loss: 0.0318 - acc: 0.2052 - val_loss: 0.2280 - val_acc: 0.2147
Epoch 00150: loss improved from 0.03228 to 0.03177, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-150_loss-0.0318_val_loss-0.2280_acc-0.2052.h5
Epoch 151/200
139/139 [==============================] - 38s 274ms/step - loss: 0.0317 - acc: 0.2048 - val_loss: 0.2300 - val_acc: 0.2191
Epoch 00151: loss improved from 0.03177 to 0.03172, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-151_loss-0.0317_val_loss-0.2300_acc-0.2048.h5
Epoch 152/200
139/139 [==============================] - 38s 276ms/step - loss: 0.0315 - acc: 0.2056 - val_loss: 0.2289 - val_acc: 0.2204
Epoch 00152: loss improved from 0.03172 to 0.03148, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-152_loss-0.0315_val_loss-0.2289_acc-0.2057.h5
Epoch 153/200
139/139 [==============================] - 39s 281ms/step - loss: 0.0311 - acc: 0.2069 - val_loss: 0.2288 - val_acc: 0.2323
Epoch 00153: loss improved from 0.03148 to 0.03109, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-153_loss-0.0311_val_loss-0.2288_acc-0.2069.h5
Epoch 154/200
139/139 [==============================] - 39s 281ms/step - loss: 0.0305 - acc: 0.2076 - val_loss: 0.2297 - val_acc: 0.2247
Epoch 00154: loss improved from 0.03109 to 0.03046, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-154_loss-0.0305_val_loss-0.2297_acc-0.2076.h5
Epoch 155/200
139/139 [==============================] - 38s 273ms/step - loss: 0.0303 - acc: 0.2066 - val_loss: 0.2323 - val_acc: 0.2334
Epoch 00155: loss improved from 0.03046 to 0.03028, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-155_loss-0.0303_val_loss-0.2323_acc-0.2066.h5
Epoch 156/200
139/139 [==============================] - 37s 269ms/step - loss: 0.0299 - acc: 0.2070 - val_loss: 0.2335 - val_acc: 0.2297
Epoch 00156: loss improved from 0.03028 to 0.02992, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-156_loss-0.0299_val_loss-0.2335_acc-0.2070.h5
Epoch 157/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0295 - acc: 0.2070 - val_loss: 0.2326 - val_acc: 0.2242
Epoch 00157: loss improved from 0.02992 to 0.02952, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-157_loss-0.0295_val_loss-0.2326_acc-0.2070.h5
Epoch 158/200
139/139 [==============================] - 38s 270ms/step - loss: 0.0294 - acc: 0.2064 - val_loss: 0.2433 - val_acc: 0.2503
Epoch 00158: loss improved from 0.02952 to 0.02936, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-158_loss-0.0294_val_loss-0.2433_acc-0.2064.h5
Epoch 159/200
139/139 [==============================] - 36s 262ms/step - loss: 0.0303 - acc: 0.2069 - val_loss: 0.2223 - val_acc: 0.2105
Epoch 00159: loss did not improve from 0.02936
Epoch 160/200
139/139 [==============================] - 38s 271ms/step - loss: 0.0297 - acc: 0.2096 - val_loss: 0.2289 - val_acc: 0.2308
Epoch 00160: loss did not improve from 0.02936
Epoch 161/200
139/139 [==============================] - 37s 268ms/step - loss: 0.0287 - acc: 0.2103 - val_loss: 0.2339 - val_acc: 0.2455
Epoch 00161: loss improved from 0.02936 to 0.02868, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-161_loss-0.0287_val_loss-0.2339_acc-0.2103.h5
Epoch 162/200
139/139 [==============================] - 37s 264ms/step - loss: 0.0284 - acc: 0.2096 - val_loss: 0.2260 - val_acc: 0.2144
Epoch 00162: loss improved from 0.02868 to 0.02835, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-162_loss-0.0284_val_loss-0.2260_acc-0.2096.h5
Epoch 163/200
139/139 [==============================] - 37s 264ms/step - loss: 0.0329 - acc: 0.2027 - val_loss: 0.2307 - val_acc: 0.2426
Epoch 00163: loss did not improve from 0.02835
Epoch 164/200
139/139 [==============================] - 37s 269ms/step - loss: 0.0288 - acc: 0.2150 - val_loss: 0.2317 - val_acc: 0.2416
Epoch 00164: loss did not improve from 0.02835
Epoch 165/200
139/139 [==============================] - 36s 261ms/step - loss: 0.0283 - acc: 0.2151 - val_loss: 0.2272 - val_acc: 0.2355
Epoch 00165: loss improved from 0.02835 to 0.02828, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-165_loss-0.0283_val_loss-0.2272_acc-0.2151.h5
Epoch 166/200
139/139 [==============================] - 38s 275ms/step - loss: 0.0278 - acc: 0.2157 - val_loss: 0.2267 - val_acc: 0.2415
Epoch 00166: loss improved from 0.02828 to 0.02779, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-166_loss-0.0278_val_loss-0.2267_acc-0.2157.h5
Epoch 167/200
139/139 [==============================] - 38s 275ms/step - loss: 0.0278 - acc: 0.2141 - val_loss: 0.2261 - val_acc: 0.2262
Epoch 00167: loss did not improve from 0.02779
Epoch 00167: ReduceLROnPlateau reducing learning rate to 1e-05.
Epoch 168/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0273 - acc: 0.2136 - val_loss: 0.2289 - val_acc: 0.2353
Epoch 00168: loss improved from 0.02779 to 0.02728, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-168_loss-0.0273_val_loss-0.2289_acc-0.2136.h5
Epoch 169/200
139/139 [==============================] - 37s 267ms/step - loss: 0.0269 - acc: 0.2138 - val_loss: 0.2299 - val_acc: 0.2383
Epoch 00169: loss improved from 0.02728 to 0.02689, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-169_loss-0.0269_val_loss-0.2299_acc-0.2138.h5
Epoch 170/200
139/139 [==============================] - 37s 264ms/step - loss: 0.0268 - acc: 0.2127 - val_loss: 0.2317 - val_acc: 0.2412
Epoch 00170: loss improved from 0.02689 to 0.02678, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-170_loss-0.0268_val_loss-0.2317_acc-0.2127.h5
Epoch 171/200
139/139 [==============================] - 37s 264ms/step - loss: 0.0267 - acc: 0.2121 - val_loss: 0.2296 - val_acc: 0.2350
Epoch 00171: loss improved from 0.02678 to 0.02668, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-171_loss-0.0267_val_loss-0.2296_acc-0.2121.h5
Epoch 172/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0266 - acc: 0.2115 - val_loss: 0.2298 - val_acc: 0.2347
Epoch 00172: loss improved from 0.02668 to 0.02662, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-172_loss-0.0266_val_loss-0.2298_acc-0.2115.h5
Epoch 173/200
139/139 [==============================] - 37s 263ms/step - loss: 0.0266 - acc: 0.2109 - val_loss: 0.2292 - val_acc: 0.2330
Epoch 00173: loss improved from 0.02662 to 0.02658, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-173_loss-0.0266_val_loss-0.2292_acc-0.2109.h5
Epoch 174/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0265 - acc: 0.2099 - val_loss: 0.2301 - val_acc: 0.2336
Epoch 00174: loss improved from 0.02658 to 0.02646, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-174_loss-0.0265_val_loss-0.2301_acc-0.2099.h5
Epoch 175/200
139/139 [==============================] - 38s 271ms/step - loss: 0.0264 - acc: 0.2096 - val_loss: 0.2295 - val_acc: 0.2322
Epoch 00175: loss improved from 0.02646 to 0.02642, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-175_loss-0.0264_val_loss-0.2295_acc-0.2096.h5
Epoch 176/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0263 - acc: 0.2087 - val_loss: 0.2308 - val_acc: 0.2341
Epoch 00176: loss improved from 0.02642 to 0.02635, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-176_loss-0.0263_val_loss-0.2308_acc-0.2087.h5
Epoch 177/200
139/139 [==============================] - 38s 271ms/step - loss: 0.0263 - acc: 0.2082 - val_loss: 0.2283 - val_acc: 0.2295
Epoch 00177: loss improved from 0.02635 to 0.02630, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-177_loss-0.0263_val_loss-0.2283_acc-0.2082.h5
Epoch 178/200
139/139 [==============================] - 37s 266ms/step - loss: 0.0262 - acc: 0.2078 - val_loss: 0.2293 - val_acc: 0.2315
Epoch 00178: loss improved from 0.02630 to 0.02621, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-178_loss-0.0262_val_loss-0.2293_acc-0.2078.h5
Epoch 179/200
139/139 [==============================] - 38s 271ms/step - loss: 0.0261 - acc: 0.2075 - val_loss: 0.2293 - val_acc: 0.2332
Epoch 00179: loss improved from 0.02621 to 0.02614, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-179_loss-0.0261_val_loss-0.2293_acc-0.2075.h5
Epoch 180/200
139/139 [==============================] - 37s 269ms/step - loss: 0.0261 - acc: 0.2072 - val_loss: 0.2288 - val_acc: 0.2302
Epoch 00180: loss improved from 0.02614 to 0.02608, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-180_loss-0.0261_val_loss-0.2288_acc-0.2072.h5
Epoch 181/200
139/139 [==============================] - 38s 274ms/step - loss: 0.0260 - acc: 0.2068 - val_loss: 0.2295 - val_acc: 0.2276
Epoch 00181: loss improved from 0.02608 to 0.02598, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-181_loss-0.0260_val_loss-0.2295_acc-0.2068.h5
Epoch 182/200
139/139 [==============================] - 40s 286ms/step - loss: 0.0260 - acc: 0.2059 - val_loss: 0.2264 - val_acc: 0.2230
Epoch 00182: loss improved from 0.02598 to 0.02596, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-182_loss-0.0260_val_loss-0.2264_acc-0.2059.h5
Epoch 183/200
139/139 [==============================] - 38s 272ms/step - loss: 0.0259 - acc: 0.2058 - val_loss: 0.2295 - val_acc: 0.2280
Epoch 00183: loss improved from 0.02596 to 0.02586, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-183_loss-0.0259_val_loss-0.2295_acc-0.2058.h5
Epoch 184/200
139/139 [==============================] - 38s 273ms/step - loss: 0.0258 - acc: 0.2057 - val_loss: 0.2274 - val_acc: 0.2264
Epoch 00184: loss improved from 0.02586 to 0.02576, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-184_loss-0.0258_val_loss-0.2274_acc-0.2057.h5
Epoch 185/200
139/139 [==============================] - 39s 278ms/step - loss: 0.0257 - acc: 0.2051 - val_loss: 0.2306 - val_acc: 0.2301
Epoch 00185: loss improved from 0.02576 to 0.02566, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-185_loss-0.0257_val_loss-0.2306_acc-0.2051.h5
Epoch 186/200
139/139 [==============================] - 39s 280ms/step - loss: 0.0256 - acc: 0.2047 - val_loss: 0.2307 - val_acc: 0.2319
Epoch 00186: loss improved from 0.02566 to 0.02561, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-186_loss-0.0256_val_loss-0.2307_acc-0.2047.h5
Epoch 187/200
139/139 [==============================] - 38s 276ms/step - loss: 0.0255 - acc: 0.2049 - val_loss: 0.2289 - val_acc: 0.2271
Epoch 00187: loss improved from 0.02561 to 0.02552, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-187_loss-0.0255_val_loss-0.2289_acc-0.2049.h5
Epoch 188/200
139/139 [==============================] - 35s 255ms/step - loss: 0.0254 - acc: 0.2043 - val_loss: 0.2286 - val_acc: 0.2247
Epoch 00188: loss improved from 0.02552 to 0.02544, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-188_loss-0.0254_val_loss-0.2286_acc-0.2043.h5
Epoch 189/200
139/139 [==============================] - 35s 248ms/step - loss: 0.0253 - acc: 0.2043 - val_loss: 0.2286 - val_acc: 0.2292
Epoch 00189: loss improved from 0.02544 to 0.02535, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-189_loss-0.0253_val_loss-0.2286_acc-0.2043.h5
Epoch 190/200
139/139 [==============================] - 34s 243ms/step - loss: 0.0253 - acc: 0.2038 - val_loss: 0.2297 - val_acc: 0.2286
Epoch 00190: loss improved from 0.02535 to 0.02525, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-190_loss-0.0253_val_loss-0.2297_acc-0.2038.h5
Epoch 191/200
139/139 [==============================] - 34s 246ms/step - loss: 0.0252 - acc: 0.2040 - val_loss: 0.2296 - val_acc: 0.2303
Epoch 00191: loss improved from 0.02525 to 0.02525, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-191_loss-0.0252_val_loss-0.2296_acc-0.2040.h5
Epoch 192/200
139/139 [==============================] - 34s 242ms/step - loss: 0.0251 - acc: 0.2039 - val_loss: 0.2279 - val_acc: 0.2248
Epoch 00192: loss improved from 0.02525 to 0.02514, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-192_loss-0.0251_val_loss-0.2279_acc-0.2039.h5
Epoch 193/200
139/139 [==============================] - 34s 245ms/step - loss: 0.0250 - acc: 0.2035 - val_loss: 0.2259 - val_acc: 0.2209
Epoch 00193: loss improved from 0.02514 to 0.02503, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-193_loss-0.0250_val_loss-0.2259_acc-0.2035.h5
Epoch 194/200
139/139 [==============================] - 34s 242ms/step - loss: 0.0250 - acc: 0.2032 - val_loss: 0.2286 - val_acc: 0.2260
Epoch 00194: loss improved from 0.02503 to 0.02498, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-194_loss-0.0250_val_loss-0.2286_acc-0.2032.h5
Epoch 195/200
139/139 [==============================] - 36s 257ms/step - loss: 0.0249 - acc: 0.2032 - val_loss: 0.2269 - val_acc: 0.2220
Epoch 00195: loss improved from 0.02498 to 0.02487, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-195_loss-0.0249_val_loss-0.2269_acc-0.2032.h5
Epoch 196/200
139/139 [==============================] - 36s 258ms/step - loss: 0.0248 - acc: 0.2033 - val_loss: 0.2295 - val_acc: 0.2282
Epoch 00196: loss improved from 0.02487 to 0.02480, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-196_loss-0.0248_val_loss-0.2295_acc-0.2033.h5
Epoch 197/200
139/139 [==============================] - 34s 247ms/step - loss: 0.0247 - acc: 0.2031 - val_loss: 0.2292 - val_acc: 0.2257
Epoch 00197: loss improved from 0.02480 to 0.02470, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-197_loss-0.0247_val_loss-0.2292_acc-0.2031.h5
Epoch 198/200
139/139 [==============================] - 35s 251ms/step - loss: 0.0246 - acc: 0.2030 - val_loss: 0.2294 - val_acc: 0.2304
Epoch 00198: loss improved from 0.02470 to 0.02460, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-198_loss-0.0246_val_loss-0.2294_acc-0.2030.h5
Epoch 199/200
139/139 [==============================] - 36s 259ms/step - loss: 0.0245 - acc: 0.2029 - val_loss: 0.2285 - val_acc: 0.2257
Epoch 00199: loss improved from 0.02460 to 0.02453, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-199_loss-0.0245_val_loss-0.2285_acc-0.2029.h5
Epoch 200/200
139/139 [==============================] - 36s 261ms/step - loss: 0.0245 - acc: 0.2025 - val_loss: 0.2310 - val_acc: 0.2294
Epoch 00200: loss improved from 0.02453 to 0.02448, saving model to /content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_ver2_epoch-200_loss-0.0245_val_loss-0.2310_acc-0.2025.h5
###Markdown
Let's look at how the training and validation loss evolved to check whether our training is going in the right direction:
###Code
plt.figure(figsize=(20,12))
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.legend(loc='upper right', prop={'size': 24});
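# The log above also tracks 'acc' and 'val_acc'; to inspect those curves as well,
# uncomment the sketch below (assuming the history keys match the metric names in the log):
# plt.figure(figsize=(20,12))
# plt.plot(history.history['acc'], label='acc')
# plt.plot(history.history['val_acc'], label='val_acc')
# plt.legend(loc='lower right', prop={'size': 24});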
###Output
_____no_output_____
###Markdown
Looking at the log above, the training loss kept decreasing (from roughly 0.030 to 0.024 over the final epochs shown) while the validation loss plateaued around 0.23, so training longer is unlikely to reduce the validation loss much further and starts to look like overfitting. Once the validation loss stops decreasing for a couple of epochs in a row, that's when we want to stop training. Our final weights will then be the weights of the epoch that had the lowest validation loss (a minimal callback sketch for automating this follows the section intro below).
5. Make predictions
Now let's make some predictions with the trained model. Rather than the validation generator, the cells below load a single test image with OpenCV, resize it to the model's 240x180 input, run the forward pass, decode the raw predictions with `decode_detections`, and draw the resulting boxes.
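As referenced above, if you want the training run itself to stop once `val_loss` stalls and to keep the weights of the best epoch, the standard Keras callbacks can do that. A minimal sketch, not part of the run above: the generator names and checkpoint path are placeholders, and the import may need to be `tensorflow.keras.callbacks` depending on which Keras flavor the rest of this notebook uses.

```python
from keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # Stop once val_loss has not improved for a few epochs in a row,
    # and roll the model back to the best weights seen so far.
    EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True),
    # Keep only the checkpoint with the lowest validation loss (placeholder path).
    ModelCheckpoint('/content/drive/MyDrive/GG_SSD7/Model/best_val_loss.h5',
                    monitor='val_loss', save_best_only=True),
]

# history = model.fit_generator(train_generator,        # placeholder generator names
#                               validation_data=val_generator,
#                               epochs=200,
#                               callbacks=callbacks)
```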
###Code
!unzip '/content/drive/MyDrive/Colab_GG/0212.zip' -d '/content/Folder'
import cv2
import numpy as np
model.load_weights('/content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_6class_epoch-20_loss-0.0316_val_loss-1.1837_acc-0.3576.h5')
# 3: Make a prediction on a single test image
img_predict = cv2.imread('/content/SSD_103.png')
# img_predict = cv2.flip(img_predict,1)
img = img_predict
img = cv2.resize(img,(240,180))             # resize to the model's 240x180 input
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)   # convert OpenCV's BGR to RGB
img = np.reshape(img,(1,180,240,3))         # add the batch dimension
y_pred = model.predict(img)
# 4: Decode the raw prediction `y_pred`
i=0
y_pred_decoded = decode_detections(y_pred,
confidence_thresh=0.5,
iou_threshold=0.01,
top_k=200,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded[i])
# 5: Draw the predicted boxes onto the image
img = img_predict
img = cv2.resize(img,(240,180))
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
plt.figure(figsize=(20,12))
plt.imshow(img)
current_axis = plt.gca()
colors = plt.cm.hsv(np.linspace(0, 1, n_classes+1)).tolist() # Set the colors for the bounding boxes
classes = ['background','stop','left','right','straight','noleft','noright'] # Just so we can print class names onto the image instead of IDs
# Draw the predicted boxes, color-coded by class
for box in y_pred_decoded[i]:
xmin = box[-4]
ymin = box[-3]
xmax = box[-2]
ymax = box[-1]
color = colors[int(box[0])]
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
current_axis.add_patch(plt.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin, color=color, fill=False, linewidth=2))
current_axis.text(xmin, ymin, label, size='x-large', color='white', bbox={'facecolor':color, 'alpha':1.0})
###Output
Predicted boxes:
class conf xmin ymin xmax ymax
[[ 5. 0.95 189.4 52.07 213.36 74.34]]
###Markdown
Predict Video
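Each row of the decoded prediction above is `[class, conf, xmin, ymin, xmax, ymax]`: the class index maps into the `classes` list (so class 5 with confidence 0.95 is a `noleft` sign), and the box coordinates are given in the resized 240x180 frame. The cell below applies the same pipeline frame by frame: read a frame from the input video, resize it to 240x180, convert BGR to RGB, run `model.predict`, decode the detections, draw the boxes with OpenCV, and write the annotated frame to an output AVI.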
###Code
import cv2
import numpy as np
from google.colab.patches import cv2_imshow
model.load_weights('/content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1812_MapCT_epoch-200_loss-0.0332_val_loss-1.4684_acc-0.2957.h5')
capture = cv2.VideoCapture('/content/drive/MyDrive/Video/logVideobien.avi')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('/content/ouput_1.avi',fourcc, 30.0, (240,180))
i=0
while(True):
    ret = capture.grab()
    if not ret:                      # no more frames to read -> stop
        break
    ret, frame = capture.retrieve()
    if not ret:
        break
img = frame
img = cv2.resize(img,(240,180))
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = np.reshape(img,(1,180,240,3))
y_pred = model.predict(img)
y_pred_decoded = decode_detections(y_pred,
confidence_thresh=0.9,
iou_threshold=0.01,
top_k=200,
normalize_coords=normalize_coords,
img_height=img_height,
img_width=img_width)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
img = frame
img = cv2.resize(img,(240,180))
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
classes = ['background','stop','left','right','straight','noleft','noright'] # Just so we can print class names onto the image instead of IDs
    # Draw the predicted boxes in green
for box in y_pred_decoded[0]:
xmin = int(box[-4])
ymin = int(box[-3])
xmax = int(box[-2])
ymax = int(box[-1])
label = '{}: {:.2f}'.format(classes[int(box[0])], box[1])
cv2.rectangle(img,(xmin,ymin),(xmax,ymax),(0,255,0),1)
cv2.putText(img,label,(xmin,ymin-5), cv2.FONT_HERSHEY_SIMPLEX, 0.3,(255,255,255),1,cv2.LINE_AA)
    img = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)   # back to BGR so the VideoWriter stores correct colors
    i += 1
    print("Frame Processing: ",i)
    # cv2_imshow(img)
    out.write(img)
out.release()
capture.release()
###Output
Frame Proccessing: 1
Frame Proccessing: 2
Frame Proccessing: 3
Frame Proccessing: 4
Frame Proccessing: 5
Frame Proccessing: 6
Frame Proccessing: 7
Frame Proccessing: 8
Frame Proccessing: 9
Frame Proccessing: 10
Frame Proccessing: 11
Frame Proccessing: 12
Frame Proccessing: 13
Frame Proccessing: 14
Frame Proccessing: 15
Frame Proccessing: 16
Frame Proccessing: 17
Frame Proccessing: 18
Frame Proccessing: 19
Frame Proccessing: 20
Frame Proccessing: 21
Frame Proccessing: 22
Frame Proccessing: 23
Frame Proccessing: 24
Frame Proccessing: 25
Frame Proccessing: 26
Frame Proccessing: 27
Frame Proccessing: 28
Frame Proccessing: 29
Frame Proccessing: 30
Frame Proccessing: 31
Frame Proccessing: 32
Frame Proccessing: 33
Frame Proccessing: 34
Frame Proccessing: 35
Frame Proccessing: 36
Frame Proccessing: 37
Frame Proccessing: 38
Frame Proccessing: 39
Frame Proccessing: 40
Frame Proccessing: 41
Frame Proccessing: 42
Frame Proccessing: 43
Frame Proccessing: 44
Frame Proccessing: 45
Frame Proccessing: 46
Frame Proccessing: 47
Frame Proccessing: 48
Frame Proccessing: 49
Frame Proccessing: 50
Frame Proccessing: 51
Frame Proccessing: 52
Frame Proccessing: 53
Frame Proccessing: 54
Frame Proccessing: 55
Frame Proccessing: 56
Frame Proccessing: 57
Frame Proccessing: 58
Frame Proccessing: 59
Frame Proccessing: 60
Frame Proccessing: 61
Frame Proccessing: 62
Frame Proccessing: 63
Frame Proccessing: 64
Frame Proccessing: 65
Frame Proccessing: 66
Frame Proccessing: 67
Frame Proccessing: 68
Frame Proccessing: 69
Frame Proccessing: 70
Frame Proccessing: 71
Frame Proccessing: 72
Frame Proccessing: 73
Frame Proccessing: 74
Frame Proccessing: 75
Frame Proccessing: 76
Frame Proccessing: 77
Frame Proccessing: 78
Frame Proccessing: 79
Frame Proccessing: 80
Frame Proccessing: 81
Frame Proccessing: 82
Frame Proccessing: 83
Frame Proccessing: 84
Frame Proccessing: 85
Frame Proccessing: 86
Frame Proccessing: 87
Frame Proccessing: 88
Frame Proccessing: 89
Frame Proccessing: 90
Frame Proccessing: 91
Frame Proccessing: 92
Frame Proccessing: 93
Frame Proccessing: 94
Frame Proccessing: 95
Frame Proccessing: 96
Frame Proccessing: 97
Frame Proccessing: 98
Frame Proccessing: 99
Frame Proccessing: 100
Frame Proccessing: 101
Frame Proccessing: 102
Frame Proccessing: 103
Frame Proccessing: 104
Frame Proccessing: 105
Frame Proccessing: 106
Frame Proccessing: 107
Frame Proccessing: 108
Frame Proccessing: 109
Frame Proccessing: 110
Frame Proccessing: 111
Frame Proccessing: 112
Frame Proccessing: 113
Frame Proccessing: 114
Frame Proccessing: 115
Frame Proccessing: 116
Frame Proccessing: 117
Frame Proccessing: 118
Frame Proccessing: 119
Frame Proccessing: 120
Frame Proccessing: 121
Frame Proccessing: 122
Frame Proccessing: 123
Frame Proccessing: 124
Frame Proccessing: 125
Frame Proccessing: 126
Frame Proccessing: 127
Frame Proccessing: 128
Frame Proccessing: 129
Frame Proccessing: 130
Frame Proccessing: 131
Frame Proccessing: 132
Frame Proccessing: 133
Frame Proccessing: 134
Frame Proccessing: 135
Frame Proccessing: 136
Frame Proccessing: 137
Frame Proccessing: 138
Frame Proccessing: 139
Frame Proccessing: 140
Frame Proccessing: 141
Frame Proccessing: 142
Frame Proccessing: 143
Frame Proccessing: 144
Frame Proccessing: 145
Frame Proccessing: 146
Frame Proccessing: 147
Frame Proccessing: 148
Frame Proccessing: 149
Frame Proccessing: 150
Frame Proccessing: 151
Frame Proccessing: 152
Frame Proccessing: 153
Frame Proccessing: 154
Frame Proccessing: 155
Frame Proccessing: 156
Frame Proccessing: 157
Frame Proccessing: 158
Frame Proccessing: 159
Frame Proccessing: 160
Frame Proccessing: 161
Frame Proccessing: 162
Frame Proccessing: 163
Frame Proccessing: 164
Frame Proccessing: 165
Frame Proccessing: 166
Frame Proccessing: 167
Frame Proccessing: 168
Frame Proccessing: 169
Frame Proccessing: 170
Frame Proccessing: 171
Frame Proccessing: 172
Frame Proccessing: 173
Frame Proccessing: 174
Frame Proccessing: 175
Frame Proccessing: 176
Frame Proccessing: 177
Frame Proccessing: 178
Frame Proccessing: 179
Frame Proccessing: 180
Frame Proccessing: 181
Frame Proccessing: 182
Frame Proccessing: 183
Frame Proccessing: 184
Frame Proccessing: 185
Frame Proccessing: 186
Frame Proccessing: 187
Frame Proccessing: 188
Frame Proccessing: 189
Frame Proccessing: 190
Frame Proccessing: 191
Frame Proccessing: 192
Frame Proccessing: 193
Frame Proccessing: 194
Frame Proccessing: 195
Frame Proccessing: 196
Frame Proccessing: 197
Frame Proccessing: 198
Frame Proccessing: 199
Frame Proccessing: 200
Frame Proccessing: 201
Frame Proccessing: 202
Frame Proccessing: 203
Frame Proccessing: 204
Frame Proccessing: 205
Frame Proccessing: 206
Frame Proccessing: 207
Frame Proccessing: 208
Frame Proccessing: 209
Frame Proccessing: 210
Frame Proccessing: 211
Frame Proccessing: 212
Frame Proccessing: 213
Frame Proccessing: 214
Frame Proccessing: 215
Frame Proccessing: 216
Frame Proccessing: 217
Frame Proccessing: 218
Frame Proccessing: 219
Frame Proccessing: 220
Frame Proccessing: 221
Frame Proccessing: 222
Frame Proccessing: 223
Frame Proccessing: 224
Frame Proccessing: 225
Frame Proccessing: 226
Frame Proccessing: 227
Frame Proccessing: 228
Frame Proccessing: 229
Frame Proccessing: 230
Frame Proccessing: 231
Frame Proccessing: 232
Frame Proccessing: 233
Frame Proccessing: 234
Frame Proccessing: 235
Frame Proccessing: 236
Frame Proccessing: 237
Frame Proccessing: 238
Frame Proccessing: 239
Frame Proccessing: 240
Frame Proccessing: 241
Frame Proccessing: 242
Frame Proccessing: 243
Frame Proccessing: 244
Frame Proccessing: 245
Frame Proccessing: 246
Frame Proccessing: 247
Frame Proccessing: 248
Frame Proccessing: 249
Frame Proccessing: 250
Frame Proccessing: 251
Frame Proccessing: 252
Frame Proccessing: 253
Frame Proccessing: 254
Frame Proccessing: 255
Frame Proccessing: 256
Frame Proccessing: 257
Frame Proccessing: 258
Frame Proccessing: 259
Frame Proccessing: 260
Frame Proccessing: 261
Frame Proccessing: 262
Frame Proccessing: 263
Frame Proccessing: 264
Frame Proccessing: 265
Frame Proccessing: 266
Frame Proccessing: 267
Frame Proccessing: 268
Frame Proccessing: 269
Frame Proccessing: 270
Frame Proccessing: 271
Frame Proccessing: 272
Frame Proccessing: 273
Frame Proccessing: 274
Frame Proccessing: 275
Frame Proccessing: 276
Frame Proccessing: 277
Frame Proccessing: 278
Frame Proccessing: 279
Frame Proccessing: 280
Frame Proccessing: 281
Frame Proccessing: 282
Frame Proccessing: 283
Frame Proccessing: 284
Frame Proccessing: 285
Frame Proccessing: 286
Frame Proccessing: 287
Frame Proccessing: 288
Frame Proccessing: 289
Frame Proccessing: 290
Frame Proccessing: 291
Frame Proccessing: 292
Frame Proccessing: 293
Frame Proccessing: 294
Frame Proccessing: 295
Frame Proccessing: 296
Frame Proccessing: 297
Frame Proccessing: 298
Frame Proccessing: 299
Frame Proccessing: 300
Frame Proccessing: 301
Frame Proccessing: 302
Frame Proccessing: 303
Frame Proccessing: 304
Frame Proccessing: 305
Frame Proccessing: 306
Frame Proccessing: 307
Frame Proccessing: 308
Frame Proccessing: 309
Frame Proccessing: 310
Frame Proccessing: 311
Frame Proccessing: 312
Frame Proccessing: 313
Frame Proccessing: 314
Frame Proccessing: 315
Frame Proccessing: 316
Frame Proccessing: 317
Frame Proccessing: 318
Frame Proccessing: 319
Frame Proccessing: 320
Frame Proccessing: 321
Frame Proccessing: 322
Frame Proccessing: 323
Frame Proccessing: 324
Frame Proccessing: 325
Frame Proccessing: 326
Frame Proccessing: 327
Frame Proccessing: 328
Frame Proccessing: 329
Frame Proccessing: 330
Frame Proccessing: 331
Frame Proccessing: 332
Frame Proccessing: 333
Frame Proccessing: 334
Frame Proccessing: 335
Frame Proccessing: 336
Frame Proccessing: 337
Frame Proccessing: 338
Frame Proccessing: 339
Frame Proccessing: 340
Frame Proccessing: 341
Frame Proccessing: 342
Frame Proccessing: 343
Frame Proccessing: 344
Frame Proccessing: 345
Frame Proccessing: 346
Frame Proccessing: 347
Frame Proccessing: 348
Frame Proccessing: 349
Frame Proccessing: 350
Frame Proccessing: 351
Frame Proccessing: 352
Frame Proccessing: 353
Frame Proccessing: 354
Frame Proccessing: 355
Frame Proccessing: 356
Frame Proccessing: 357
Frame Proccessing: 358
Frame Proccessing: 359
Frame Proccessing: 360
Frame Proccessing: 361
Frame Proccessing: 362
Frame Proccessing: 363
Frame Proccessing: 364
Frame Proccessing: 365
Frame Proccessing: 366
Frame Proccessing: 367
Frame Proccessing: 368
Frame Proccessing: 369
Frame Proccessing: 370
Frame Proccessing: 371
Frame Proccessing: 372
Frame Proccessing: 373
Frame Proccessing: 374
Frame Proccessing: 375
Frame Proccessing: 376
Frame Proccessing: 377
Frame Proccessing: 378
Frame Proccessing: 379
Frame Proccessing: 380
Frame Proccessing: 381
Frame Proccessing: 382
Frame Proccessing: 383
Frame Proccessing: 384
Frame Proccessing: 385
Frame Proccessing: 386
Frame Proccessing: 387
Frame Proccessing: 388
Frame Proccessing: 389
Frame Proccessing: 390
Frame Proccessing: 391
Frame Proccessing: 392
Frame Proccessing: 393
Frame Proccessing: 394
Frame Proccessing: 395
Frame Proccessing: 396
Frame Proccessing: 397
Frame Proccessing: 398
Frame Proccessing: 399
Frame Proccessing: 400
Frame Proccessing: 401
Frame Proccessing: 402
Frame Proccessing: 403
Frame Proccessing: 404
Frame Proccessing: 405
Frame Proccessing: 406
Frame Proccessing: 407
Frame Proccessing: 408
Frame Proccessing: 409
Frame Proccessing: 410
Frame Proccessing: 411
Frame Proccessing: 412
Frame Proccessing: 413
Frame Proccessing: 414
Frame Proccessing: 415
Frame Proccessing: 416
Frame Proccessing: 417
Frame Proccessing: 418
Frame Proccessing: 419
Frame Proccessing: 420
Frame Proccessing: 421
Frame Proccessing: 422
Frame Proccessing: 423
Frame Proccessing: 424
Frame Proccessing: 425
Frame Proccessing: 426
Frame Proccessing: 427
Frame Proccessing: 428
Frame Proccessing: 429
Frame Proccessing: 430
Frame Proccessing: 431
Frame Proccessing: 432
Frame Proccessing: 433
Frame Proccessing: 434
Frame Proccessing: 435
Frame Proccessing: 436
Frame Proccessing: 437
Frame Proccessing: 438
Frame Proccessing: 439
Frame Proccessing: 440
Frame Proccessing: 441
Frame Proccessing: 442
Frame Proccessing: 443
Frame Proccessing: 444
Frame Proccessing: 445
Frame Proccessing: 446
Frame Proccessing: 447
Frame Proccessing: 448
Frame Proccessing: 449
Frame Proccessing: 450
Frame Proccessing: 451
Frame Proccessing: 452
Frame Proccessing: 453
Frame Proccessing: 454
Frame Proccessing: 455
Frame Proccessing: 456
Frame Proccessing: 457
Frame Proccessing: 458
Frame Proccessing: 459
Frame Proccessing: 460
Frame Proccessing: 461
Frame Proccessing: 462
Frame Proccessing: 463
Frame Proccessing: 464
Frame Proccessing: 465
Frame Proccessing: 466
Frame Proccessing: 467
Frame Proccessing: 468
Frame Proccessing: 469
Frame Proccessing: 470
Frame Proccessing: 471
Frame Proccessing: 472
Frame Proccessing: 473
Frame Proccessing: 474
Frame Proccessing: 475
Frame Proccessing: 476
Frame Proccessing: 477
Frame Proccessing: 478
Frame Proccessing: 479
Frame Proccessing: 480
Frame Proccessing: 481
Frame Proccessing: 482
Frame Proccessing: 483
Frame Proccessing: 484
Frame Proccessing: 485
Frame Proccessing: 486
Frame Proccessing: 487
Frame Proccessing: 488
Frame Proccessing: 489
Frame Proccessing: 490
Frame Proccessing: 491
Frame Proccessing: 492
Frame Proccessing: 493
Frame Proccessing: 494
Frame Proccessing: 495
Frame Proccessing: 496
Frame Proccessing: 497
Frame Proccessing: 498
Frame Proccessing: 499
Frame Proccessing: 500
Frame Proccessing: 501
Frame Proccessing: 502
Frame Proccessing: 503
Frame Proccessing: 504
Frame Proccessing: 505
Frame Proccessing: 506
Frame Proccessing: 507
Frame Proccessing: 508
Frame Proccessing: 509
Frame Proccessing: 510
Frame Proccessing: 511
Frame Proccessing: 512
Frame Proccessing: 513
Frame Proccessing: 514
Frame Proccessing: 515
Frame Proccessing: 516
Frame Proccessing: 517
Frame Proccessing: 518
Frame Proccessing: 519
Frame Proccessing: 520
Frame Proccessing: 521
Frame Proccessing: 522
Frame Proccessing: 523
Frame Proccessing: 524
Frame Proccessing: 525
Frame Proccessing: 526
Frame Proccessing: 527
Frame Proccessing: 528
Frame Proccessing: 529
Frame Proccessing: 530
Frame Proccessing: 531
Frame Proccessing: 532
Frame Proccessing: 533
Frame Proccessing: 534
Frame Proccessing: 535
Frame Proccessing: 536
Frame Proccessing: 537
Frame Proccessing: 538
Frame Proccessing: 539
Frame Proccessing: 540
Frame Proccessing: 541
Frame Proccessing: 542
Frame Proccessing: 543
Frame Proccessing: 544
Frame Proccessing: 545
Frame Proccessing: 546
Frame Proccessing: 547
Frame Proccessing: 548
Frame Proccessing: 549
Frame Proccessing: 550
Frame Proccessing: 551
Frame Proccessing: 552
Frame Proccessing: 553
Frame Proccessing: 554
Frame Proccessing: 555
Frame Proccessing: 556
Frame Proccessing: 557
Frame Proccessing: 558
Frame Proccessing: 559
Frame Proccessing: 560
Frame Proccessing: 561
Frame Proccessing: 562
Frame Proccessing: 563
Frame Proccessing: 564
Frame Proccessing: 565
Frame Proccessing: 566
Frame Proccessing: 567
Frame Proccessing: 568
Frame Proccessing: 569
Frame Proccessing: 570
Frame Proccessing: 571
Frame Proccessing: 572
Frame Proccessing: 573
Frame Proccessing: 574
Frame Proccessing: 575
Frame Proccessing: 576
Frame Proccessing: 577
Frame Proccessing: 578
Frame Proccessing: 579
Frame Proccessing: 580
Frame Proccessing: 581
Frame Proccessing: 582
Frame Proccessing: 583
Frame Proccessing: 584
Frame Proccessing: 585
Frame Proccessing: 586
Frame Proccessing: 587
Frame Proccessing: 588
Frame Proccessing: 589
Frame Proccessing: 590
Frame Proccessing: 591
Frame Proccessing: 592
Frame Proccessing: 593
Frame Proccessing: 594
Frame Proccessing: 595
Frame Proccessing: 596
Frame Proccessing: 597
Frame Proccessing: 598
Frame Proccessing: 599
Frame Proccessing: 600
Frame Proccessing: 601
Frame Proccessing: 602
Frame Proccessing: 603
Frame Proccessing: 604
Frame Proccessing: 605
Frame Proccessing: 606
Frame Proccessing: 607
Frame Proccessing: 608
Frame Proccessing: 609
Frame Proccessing: 610
Frame Proccessing: 611
Frame Proccessing: 612
Frame Proccessing: 613
Frame Proccessing: 614
Frame Proccessing: 615
Frame Proccessing: 616
Frame Proccessing: 617
Frame Proccessing: 618
Frame Proccessing: 619
Frame Proccessing: 620
Frame Proccessing: 621
Frame Proccessing: 622
Frame Proccessing: 623
Frame Proccessing: 624
Frame Proccessing: 625
Frame Proccessing: 626
Frame Proccessing: 627
Frame Proccessing: 628
Frame Proccessing: 629
Frame Proccessing: 630
Frame Proccessing: 631
Frame Proccessing: 632
Frame Proccessing: 633
Frame Proccessing: 634
Frame Proccessing: 635
Frame Proccessing: 636
Frame Proccessing: 637
Frame Proccessing: 638
Frame Proccessing: 639
Frame Proccessing: 640
Frame Proccessing: 641
Frame Proccessing: 642
Frame Proccessing: 643
Frame Proccessing: 644
Frame Proccessing: 645
Frame Proccessing: 646
Frame Proccessing: 647
Frame Proccessing: 648
Frame Proccessing: 649
Frame Proccessing: 650
Frame Proccessing: 651
Frame Proccessing: 652
Frame Proccessing: 653
Frame Proccessing: 654
Frame Proccessing: 655
Frame Proccessing: 656
Frame Proccessing: 657
Frame Proccessing: 658
Frame Proccessing: 659
Frame Proccessing: 660
Frame Proccessing: 661
Frame Proccessing: 662
Frame Proccessing: 663
Frame Proccessing: 664
Frame Proccessing: 665
Frame Proccessing: 666
Frame Proccessing: 667
Frame Proccessing: 668
Frame Proccessing: 669
Frame Proccessing: 670
Frame Proccessing: 671
Frame Proccessing: 672
Frame Proccessing: 673
Frame Proccessing: 674
Frame Proccessing: 675
Frame Proccessing: 676
Frame Proccessing: 677
Frame Proccessing: 678
Frame Proccessing: 679
Frame Proccessing: 680
Frame Proccessing: 681
Frame Proccessing: 682
Frame Proccessing: 683
Frame Proccessing: 684
Frame Proccessing: 685
Frame Proccessing: 686
Frame Proccessing: 687
Frame Proccessing: 688
Frame Proccessing: 689
Frame Proccessing: 690
Frame Proccessing: 691
Frame Proccessing: 692
Frame Proccessing: 693
Frame Proccessing: 694
Frame Proccessing: 695
Frame Proccessing: 696
Frame Proccessing: 697
Frame Proccessing: 698
Frame Proccessing: 699
Frame Proccessing: 700
Frame Proccessing: 701
Frame Proccessing: 702
Frame Proccessing: 703
Frame Proccessing: 704
Frame Proccessing: 705
Frame Proccessing: 706
Frame Proccessing: 707
Frame Proccessing: 708
Frame Proccessing: 709
Frame Proccessing: 710
Frame Proccessing: 711
Frame Proccessing: 712
Frame Proccessing: 713
Frame Proccessing: 714
Frame Proccessing: 715
Frame Proccessing: 716
Frame Proccessing: 717
Frame Proccessing: 718
Frame Proccessing: 719
Frame Proccessing: 720
Frame Proccessing: 721
Frame Proccessing: 722
Frame Proccessing: 723
Frame Proccessing: 724
Frame Proccessing: 725
Frame Proccessing: 726
Frame Proccessing: 727
Frame Proccessing: 728
Frame Proccessing: 729
Frame Proccessing: 730
Frame Proccessing: 731
Frame Proccessing: 732
Frame Proccessing: 733
Frame Proccessing: 734
Frame Proccessing: 735
Frame Proccessing: 736
Frame Proccessing: 737
Frame Proccessing: 738
Frame Proccessing: 739
Frame Proccessing: 740
Frame Proccessing: 741
Frame Proccessing: 742
Frame Proccessing: 743
Frame Proccessing: 744
Frame Proccessing: 745
Frame Proccessing: 746
Frame Proccessing: 747
Frame Proccessing: 748
Frame Proccessing: 749
Frame Proccessing: 750
Frame Proccessing: 751
Frame Proccessing: 752
Frame Proccessing: 753
Frame Proccessing: 754
Frame Proccessing: 755
Frame Proccessing: 756
Frame Proccessing: 757
Frame Proccessing: 758
Frame Proccessing: 759
Frame Proccessing: 760
Frame Proccessing: 761
Frame Proccessing: 762
Frame Proccessing: 763
Frame Proccessing: 764
Frame Proccessing: 765
Frame Proccessing: 766
Frame Proccessing: 767
Frame Proccessing: 768
Frame Proccessing: 769
Frame Proccessing: 770
Frame Proccessing: 771
Frame Proccessing: 772
Frame Proccessing: 773
Frame Proccessing: 774
Frame Proccessing: 775
Frame Proccessing: 776
Frame Proccessing: 777
Frame Proccessing: 778
Frame Proccessing: 779
Frame Proccessing: 780
Frame Proccessing: 781
Frame Proccessing: 782
Frame Proccessing: 783
Frame Proccessing: 784
Frame Proccessing: 785
Frame Proccessing: 786
Frame Proccessing: 787
Frame Proccessing: 788
Frame Proccessing: 789
Frame Proccessing: 790
Frame Proccessing: 791
Frame Proccessing: 792
Frame Proccessing: 793
Frame Proccessing: 794
Frame Proccessing: 795
Frame Proccessing: 796
Frame Proccessing: 797
Frame Proccessing: 798
Frame Proccessing: 799
Frame Proccessing: 800
Frame Proccessing: 801
Frame Proccessing: 802
Frame Proccessing: 803
Frame Proccessing: 804
Frame Proccessing: 805
Frame Proccessing: 806
Frame Proccessing: 807
Frame Proccessing: 808
Frame Proccessing: 809
Frame Proccessing: 810
Frame Proccessing: 811
Frame Proccessing: 812
Frame Proccessing: 813
Frame Proccessing: 814
Frame Proccessing: 815
Frame Proccessing: 816
Frame Proccessing: 817
Frame Proccessing: 818
Frame Proccessing: 819
Frame Proccessing: 820
Frame Proccessing: 821
Frame Proccessing: 822
Frame Proccessing: 823
Frame Proccessing: 824
Frame Proccessing: 825
Frame Proccessing: 826
Frame Proccessing: 827
Frame Proccessing: 828
Frame Proccessing: 829
Frame Proccessing: 830
Frame Proccessing: 831
Frame Proccessing: 832
Frame Proccessing: 833
Frame Proccessing: 834
Frame Proccessing: 835
Frame Proccessing: 836
Frame Proccessing: 837
Frame Proccessing: 838
Frame Proccessing: 839
Frame Proccessing: 840
Frame Proccessing: 841
Frame Proccessing: 842
Frame Proccessing: 843
Frame Proccessing: 844
Frame Proccessing: 845
Frame Proccessing: 846
Frame Proccessing: 847
Frame Proccessing: 848
Frame Proccessing: 849
Frame Proccessing: 850
Frame Proccessing: 851
Frame Proccessing: 852
Frame Proccessing: 853
Frame Proccessing: 854
Frame Proccessing: 855
Frame Proccessing: 856
Frame Proccessing: 857
Frame Proccessing: 858
Frame Proccessing: 859
Frame Proccessing: 860
Frame Proccessing: 861
Frame Proccessing: 862
Frame Proccessing: 863
Frame Proccessing: 864
Frame Proccessing: 865
Frame Proccessing: 866
Frame Proccessing: 867
Frame Proccessing: 868
Frame Proccessing: 869
Frame Proccessing: 870
Frame Proccessing: 871
Frame Proccessing: 872
Frame Proccessing: 873
Frame Proccessing: 874
Frame Proccessing: 875
Frame Proccessing: 876
Frame Proccessing: 877
Frame Proccessing: 878
Frame Proccessing: 879
Frame Proccessing: 880
Frame Proccessing: 881
Frame Proccessing: 882
Frame Proccessing: 883
Frame Proccessing: 884
Frame Proccessing: 885
Frame Proccessing: 886
Frame Proccessing: 887
Frame Proccessing: 888
Frame Proccessing: 889
Frame Proccessing: 890
Frame Proccessing: 891
Frame Proccessing: 892
Frame Proccessing: 893
Frame Proccessing: 894
Frame Proccessing: 895
Frame Proccessing: 896
Frame Proccessing: 897
Frame Proccessing: 898
Frame Proccessing: 899
Frame Proccessing: 900
Frame Proccessing: 901
Frame Proccessing: 902
Frame Proccessing: 903
Frame Proccessing: 904
Frame Proccessing: 905
Frame Proccessing: 906
Frame Proccessing: 907
Frame Proccessing: 908
Frame Proccessing: 909
Frame Proccessing: 910
Frame Proccessing: 911
Frame Proccessing: 912
Frame Proccessing: 913
Frame Proccessing: 914
Frame Proccessing: 915
Frame Proccessing: 916
Frame Proccessing: 917
Frame Proccessing: 918
Frame Proccessing: 919
Frame Proccessing: 920
Frame Proccessing: 921
Frame Proccessing: 922
Frame Proccessing: 923
Frame Proccessing: 924
Frame Proccessing: 925
Frame Proccessing: 926
Frame Proccessing: 927
Frame Proccessing: 928
Frame Proccessing: 929
Frame Proccessing: 930
Frame Proccessing: 931
Frame Proccessing: 932
Frame Proccessing: 933
Frame Proccessing: 934
Frame Proccessing: 935
Frame Proccessing: 936
Frame Proccessing: 937
Frame Proccessing: 938
Frame Proccessing: 939
Frame Proccessing: 940
Frame Proccessing: 941
Frame Proccessing: 942
Frame Proccessing: 943
Frame Proccessing: 944
Frame Proccessing: 945
Frame Proccessing: 946
Frame Proccessing: 947
Frame Proccessing: 948
Frame Proccessing: 949
Frame Proccessing: 950
Frame Proccessing: 951
Frame Proccessing: 952
Frame Proccessing: 953
Frame Proccessing: 954
Frame Proccessing: 955
Frame Proccessing: 956
Frame Proccessing: 957
Frame Proccessing: 958
Frame Proccessing: 959
Frame Proccessing: 960
Frame Proccessing: 961
Frame Proccessing: 962
Frame Proccessing: 963
Frame Proccessing: 964
Frame Proccessing: 965
Frame Proccessing: 966
Frame Proccessing: 967
Frame Proccessing: 968
Frame Proccessing: 969
Frame Proccessing: 970
Frame Proccessing: 971
Frame Proccessing: 972
Frame Proccessing: 973
Frame Proccessing: 974
Frame Proccessing: 975
Frame Proccessing: 976
Frame Proccessing: 977
Frame Proccessing: 978
Frame Proccessing: 979
Frame Proccessing: 980
Frame Proccessing: 981
Frame Proccessing: 982
Frame Proccessing: 983
Frame Proccessing: 984
Frame Proccessing: 985
Frame Proccessing: 986
Frame Proccessing: 987
Frame Proccessing: 988
Frame Proccessing: 989
Frame Proccessing: 990
Frame Proccessing: 991
Frame Proccessing: 992
Frame Proccessing: 993
Frame Proccessing: 994
Frame Proccessing: 995
Frame Proccessing: 996
Frame Proccessing: 997
Frame Proccessing: 998
Frame Proccessing: 999
Frame Proccessing: 1000
Frame Proccessing: 1001
Frame Proccessing: 1002
Frame Proccessing: 1003
Frame Proccessing: 1004
Frame Proccessing: 1005
Frame Proccessing: 1006
Frame Proccessing: 1007
Frame Proccessing: 1008
Frame Proccessing: 1009
Frame Proccessing: 1010
Frame Proccessing: 1011
Frame Proccessing: 1012
Frame Proccessing: 1013
Frame Proccessing: 1014
Frame Proccessing: 1015
Frame Proccessing: 1016
Frame Proccessing: 1017
Frame Proccessing: 1018
Frame Proccessing: 1019
Frame Proccessing: 1020
Frame Proccessing: 1021
Frame Proccessing: 1022
Frame Proccessing: 1023
Frame Proccessing: 1024
Frame Proccessing: 1025
Frame Proccessing: 1026
Frame Proccessing: 1027
Frame Proccessing: 1028
Frame Proccessing: 1029
Frame Proccessing: 1030
Frame Proccessing: 1031
Frame Proccessing: 1032
Frame Proccessing: 1033
Frame Proccessing: 1034
Frame Proccessing: 1035
Frame Proccessing: 1036
Frame Proccessing: 1037
Frame Proccessing: 1038
Frame Proccessing: 1039
Frame Proccessing: 1040
Frame Proccessing: 1041
Frame Proccessing: 1042
Frame Proccessing: 1043
Frame Proccessing: 1044
Frame Proccessing: 1045
Frame Proccessing: 1046
Frame Proccessing: 1047
Frame Proccessing: 1048
Frame Proccessing: 1049
Frame Proccessing: 1050
Frame Proccessing: 1051
Frame Proccessing: 1052
Frame Proccessing: 1053
Frame Proccessing: 1054
Frame Proccessing: 1055
Frame Proccessing: 1056
Frame Proccessing: 1057
Frame Proccessing: 1058
Frame Proccessing: 1059
Frame Proccessing: 1060
Frame Proccessing: 1061
Frame Proccessing: 1062
Frame Proccessing: 1063
Frame Proccessing: 1064
Frame Proccessing: 1065
Frame Proccessing: 1066
Frame Proccessing: 1067
Frame Proccessing: 1068
Frame Proccessing: 1069
Frame Proccessing: 1070
Frame Proccessing: 1071
Frame Proccessing: 1072
Frame Proccessing: 1073
Frame Proccessing: 1074
Frame Proccessing: 1075
Frame Proccessing: 1076
Frame Proccessing: 1077
Frame Proccessing: 1078
Frame Proccessing: 1079
Frame Proccessing: 1080
Frame Proccessing: 1081
Frame Proccessing: 1082
Frame Proccessing: 1083
Frame Proccessing: 1084
Frame Proccessing: 1085
Frame Proccessing: 1086
Frame Proccessing: 1087
Frame Proccessing: 1088
Frame Proccessing: 1089
Frame Proccessing: 1090
Frame Proccessing: 1091
Frame Proccessing: 1092
Frame Proccessing: 1093
Frame Proccessing: 1094
Frame Proccessing: 1095
Frame Proccessing: 1096
Frame Proccessing: 1097
Frame Proccessing: 1098
Frame Proccessing: 1099
Frame Proccessing: 1100
Frame Proccessing: 1101
Frame Proccessing: 1102
Frame Proccessing: 1103
Frame Proccessing: 1104
Frame Proccessing: 1105
Frame Proccessing: 1106
Frame Proccessing: 1107
Frame Proccessing: 1108
Frame Proccessing: 1109
Frame Proccessing: 1110
Frame Proccessing: 1111
Frame Proccessing: 1112
Frame Proccessing: 1113
Frame Proccessing: 1114
Frame Proccessing: 1115
Frame Proccessing: 1116
Frame Proccessing: 1117
Frame Proccessing: 1118
Frame Proccessing: 1119
Frame Proccessing: 1120
Frame Proccessing: 1121
Frame Proccessing: 1122
Frame Proccessing: 1123
Frame Proccessing: 1124
Frame Proccessing: 1125
Frame Proccessing: 1126
Frame Proccessing: 1127
Frame Proccessing: 1128
Frame Proccessing: 1129
Frame Proccessing: 1130
Frame Proccessing: 1131
Frame Proccessing: 1132
Frame Proccessing: 1133
Frame Proccessing: 1134
Frame Proccessing: 1135
Frame Proccessing: 1136
Frame Proccessing: 1137
Frame Proccessing: 1138
Frame Proccessing: 1139
Frame Proccessing: 1140
Frame Proccessing: 1141
Frame Proccessing: 1142
Frame Proccessing: 1143
Frame Proccessing: 1144
Frame Proccessing: 1145
Frame Proccessing: 1146
Frame Proccessing: 1147
Frame Proccessing: 1148
Frame Proccessing: 1149
Frame Proccessing: 1150
Frame Proccessing: 1151
Frame Proccessing: 1152
Frame Proccessing: 1153
Frame Proccessing: 1154
Frame Proccessing: 1155
Frame Proccessing: 1156
Frame Proccessing: 1157
Frame Proccessing: 1158
Frame Proccessing: 1159
Frame Proccessing: 1160
Frame Proccessing: 1161
Frame Proccessing: 1162
Frame Proccessing: 1163
Frame Proccessing: 1164
Frame Proccessing: 1165
Frame Proccessing: 1166
Frame Proccessing: 1167
Frame Proccessing: 1168
Frame Proccessing: 1169
Frame Proccessing: 1170
Frame Proccessing: 1171
Frame Proccessing: 1172
Frame Proccessing: 1173
Frame Proccessing: 1174
Frame Proccessing: 1175
Frame Proccessing: 1176
Frame Proccessing: 1177
Frame Proccessing: 1178
Frame Proccessing: 1179
Frame Proccessing: 1180
Frame Proccessing: 1181
Frame Proccessing: 1182
Frame Proccessing: 1183
Frame Proccessing: 1184
Frame Proccessing: 1185
Frame Proccessing: 1186
Frame Proccessing: 1187
Frame Proccessing: 1188
Frame Proccessing: 1189
Frame Proccessing: 1190
Frame Proccessing: 1191
Frame Proccessing: 1192
Frame Proccessing: 1193
Frame Proccessing: 1194
Frame Proccessing: 1195
Frame Proccessing: 1196
Frame Proccessing: 1197
Frame Proccessing: 1198
Frame Proccessing: 1199
Frame Proccessing: 1200
Frame Proccessing: 1201
Frame Proccessing: 1202
Frame Proccessing: 1203
Frame Proccessing: 1204
Frame Proccessing: 1205
Frame Proccessing: 1206
Frame Proccessing: 1207
Frame Proccessing: 1208
Frame Proccessing: 1209
Frame Proccessing: 1210
Frame Proccessing: 1211
Frame Proccessing: 1212
Frame Proccessing: 1213
Frame Proccessing: 1214
Frame Proccessing: 1215
Frame Proccessing: 1216
Frame Proccessing: 1217
Frame Proccessing: 1218
Frame Proccessing: 1219
Frame Proccessing: 1220
Frame Proccessing: 1221
Frame Proccessing: 1222
Frame Proccessing: 1223
Frame Proccessing: 1224
Frame Proccessing: 1225
Frame Proccessing: 1226
Frame Proccessing: 1227
Frame Proccessing: 1228
Frame Proccessing: 1229
Frame Proccessing: 1230
Frame Proccessing: 1231
Frame Proccessing: 1232
Frame Proccessing: 1233
Frame Proccessing: 1234
Frame Proccessing: 1235
Frame Proccessing: 1236
Frame Proccessing: 1237
Frame Proccessing: 1238
Frame Proccessing: 1239
Frame Proccessing: 1240
Frame Proccessing: 1241
Frame Proccessing: 1242
Frame Proccessing: 1243
Frame Proccessing: 1244
Frame Proccessing: 1245
Frame Proccessing: 1246
Frame Proccessing: 1247
Frame Proccessing: 1248
Frame Proccessing: 1249
Frame Proccessing: 1250
Frame Proccessing: 1251
Frame Proccessing: 1252
Frame Proccessing: 1253
Frame Proccessing: 1254
Frame Proccessing: 1255
Frame Proccessing: 1256
Frame Proccessing: 1257
Frame Proccessing: 1258
Frame Proccessing: 1259
Frame Proccessing: 1260
Frame Proccessing: 1261
Frame Proccessing: 1262
Frame Proccessing: 1263
Frame Proccessing: 1264
Frame Proccessing: 1265
Frame Proccessing: 1266
Frame Proccessing: 1267
Frame Proccessing: 1268
Frame Proccessing: 1269
Frame Proccessing: 1270
Frame Proccessing: 1271
Frame Proccessing: 1272
Frame Proccessing: 1273
Frame Proccessing: 1274
Frame Proccessing: 1275
Frame Proccessing: 1276
Frame Proccessing: 1277
Frame Proccessing: 1278
Frame Proccessing: 1279
Frame Proccessing: 1280
Frame Proccessing: 1281
Frame Proccessing: 1282
Frame Proccessing: 1283
Frame Proccessing: 1284
Frame Proccessing: 1285
Frame Proccessing: 1286
Frame Proccessing: 1287
Frame Proccessing: 1288
Frame Proccessing: 1289
Frame Proccessing: 1290
Frame Proccessing: 1291
Frame Proccessing: 1292
Frame Proccessing: 1293
Frame Proccessing: 1294
Frame Proccessing: 1295
Frame Proccessing: 1296
Frame Proccessing: 1297
Frame Proccessing: 1298
Frame Proccessing: 1299
Frame Proccessing: 1300
Frame Proccessing: 1301
Frame Proccessing: 1302
Frame Proccessing: 1303
Frame Proccessing: 1304
Frame Proccessing: 1305
Frame Proccessing: 1306
Frame Proccessing: 1307
Frame Proccessing: 1308
Frame Proccessing: 1309
Frame Proccessing: 1310
Frame Proccessing: 1311
Frame Proccessing: 1312
Frame Proccessing: 1313
Frame Proccessing: 1314
Frame Proccessing: 1315
Frame Proccessing: 1316
Frame Proccessing: 1317
Frame Proccessing: 1318
Frame Proccessing: 1319
Frame Proccessing: 1320
Frame Proccessing: 1321
Frame Proccessing: 1322
Frame Proccessing: 1323
Frame Proccessing: 1324
Frame Proccessing: 1325
Frame Proccessing: 1326
Frame Proccessing: 1327
Frame Proccessing: 1328
Frame Proccessing: 1329
Frame Proccessing: 1330
Frame Proccessing: 1331
Frame Proccessing: 1332
Frame Proccessing: 1333
Frame Proccessing: 1334
Frame Proccessing: 1335
Frame Proccessing: 1336
Frame Proccessing: 1337
Frame Proccessing: 1338
Frame Proccessing: 1339
Frame Proccessing: 1340
Frame Proccessing: 1341
Frame Proccessing: 1342
Frame Proccessing: 1343
Frame Proccessing: 1344
Frame Proccessing: 1345
Frame Proccessing: 1346
Frame Proccessing: 1347
Frame Proccessing: 1348
Frame Proccessing: 1349
Frame Proccessing: 1350
Frame Proccessing: 1351
Frame Proccessing: 1352
Frame Proccessing: 1353
Frame Proccessing: 1354
Frame Proccessing: 1355
Frame Proccessing: 1356
Frame Proccessing: 1357
Frame Proccessing: 1358
Frame Proccessing: 1359
Frame Proccessing: 1360
Frame Proccessing: 1361
Frame Proccessing: 1362
Frame Proccessing: 1363
Frame Proccessing: 1364
Frame Proccessing: 1365
Frame Proccessing: 1366
Frame Proccessing: 1367
Frame Proccessing: 1368
Frame Proccessing: 1369
Frame Proccessing: 1370
Frame Proccessing: 1371
Frame Proccessing: 1372
Frame Proccessing: 1373
Frame Proccessing: 1374
Frame Proccessing: 1375
Frame Proccessing: 1376
Frame Proccessing: 1377
Frame Proccessing: 1378
Frame Proccessing: 1379
Frame Proccessing: 1380
Frame Proccessing: 1381
Frame Proccessing: 1382
Frame Proccessing: 1383
Frame Proccessing: 1384
Frame Proccessing: 1385
Frame Proccessing: 1386
Frame Proccessing: 1387
Frame Proccessing: 1388
Frame Proccessing: 1389
Frame Proccessing: 1390
Frame Proccessing: 1391
Frame Proccessing: 1392
Frame Proccessing: 1393
Frame Proccessing: 1394
Frame Proccessing: 1395
Frame Proccessing: 1396
Frame Proccessing: 1397
Frame Proccessing: 1398
Frame Proccessing: 1399
Frame Proccessing: 1400
Frame Proccessing: 1401
Frame Proccessing: 1402
Frame Proccessing: 1403
Frame Proccessing: 1404
Frame Proccessing: 1405
Frame Proccessing: 1406
Frame Proccessing: 1407
Frame Proccessing: 1408
Frame Proccessing: 1409
Frame Proccessing: 1410
Frame Proccessing: 1411
Frame Proccessing: 1412
Frame Proccessing: 1413
Frame Proccessing: 1414
Frame Proccessing: 1415
Frame Proccessing: 1416
Frame Proccessing: 1417
Frame Proccessing: 1418
Frame Proccessing: 1419
Frame Proccessing: 1420
Frame Proccessing: 1421
Frame Proccessing: 1422
Frame Proccessing: 1423
Frame Proccessing: 1424
Frame Proccessing: 1425
Frame Proccessing: 1426
Frame Proccessing: 1427
Frame Proccessing: 1428
Frame Proccessing: 1429
Frame Proccessing: 1430
Frame Proccessing: 1431
Frame Proccessing: 1432
Frame Proccessing: 1433
Frame Proccessing: 1434
Frame Proccessing: 1435
Frame Proccessing: 1436
Frame Proccessing: 1437
Frame Proccessing: 1438
Frame Proccessing: 1439
Frame Proccessing: 1440
Frame Proccessing: 1441
Frame Proccessing: 1442
Frame Proccessing: 1443
Frame Proccessing: 1444
Frame Proccessing: 1445
Frame Proccessing: 1446
Frame Proccessing: 1447
Frame Proccessing: 1448
Frame Proccessing: 1449
Frame Proccessing: 1450
Frame Proccessing: 1451
Frame Proccessing: 1452
Frame Proccessing: 1453
Frame Proccessing: 1454
Frame Proccessing: 1455
Frame Proccessing: 1456
Frame Proccessing: 1457
Frame Proccessing: 1458
Frame Proccessing: 1459
Frame Proccessing: 1460
Frame Proccessing: 1461
Frame Proccessing: 1462
Frame Proccessing: 1463
Frame Proccessing: 1464
Frame Proccessing: 1465
Frame Proccessing: 1466
Frame Proccessing: 1467
Frame Proccessing: 1468
Frame Proccessing: 1469
Frame Proccessing: 1470
Frame Proccessing: 1471
Frame Proccessing: 1472
Frame Proccessing: 1473
Frame Proccessing: 1474
Frame Proccessing: 1475
Frame Proccessing: 1476
Frame Proccessing: 1477
Frame Proccessing: 1478
Frame Proccessing: 1479
Frame Proccessing: 1480
Frame Proccessing: 1481
Frame Proccessing: 1482
Frame Proccessing: 1483
Frame Proccessing: 1484
Frame Proccessing: 1485
Frame Proccessing: 1486
Frame Proccessing: 1487
Frame Proccessing: 1488
Frame Proccessing: 1489
Frame Proccessing: 1490
Frame Proccessing: 1491
Frame Proccessing: 1492
Frame Proccessing: 1493
Frame Proccessing: 1494
Frame Proccessing: 1495
Frame Proccessing: 1496
Frame Proccessing: 1497
Frame Proccessing: 1498
Frame Proccessing: 1499
Frame Proccessing: 1500
Frame Proccessing: 1501
Frame Proccessing: 1502
Frame Proccessing: 1503
Frame Proccessing: 1504
Frame Proccessing: 1505
Frame Proccessing: 1506
Frame Proccessing: 1507
Frame Proccessing: 1508
Frame Proccessing: 1509
Frame Proccessing: 1510
Frame Proccessing: 1511
Frame Proccessing: 1512
Frame Proccessing: 1513
Frame Proccessing: 1514
Frame Proccessing: 1515
Frame Proccessing: 1516
Frame Proccessing: 1517
Frame Proccessing: 1518
Frame Proccessing: 1519
Frame Proccessing: 1520
Frame Proccessing: 1521
Frame Proccessing: 1522
Frame Proccessing: 1523
Frame Proccessing: 1524
Frame Proccessing: 1525
Frame Proccessing: 1526
Frame Proccessing: 1527
Frame Proccessing: 1528
Frame Proccessing: 1529
Frame Proccessing: 1530
Frame Proccessing: 1531
Frame Proccessing: 1532
Frame Proccessing: 1533
Frame Proccessing: 1534
Frame Proccessing: 1535
Frame Proccessing: 1536
Frame Proccessing: 1537
Frame Proccessing: 1538
Frame Proccessing: 1539
Frame Proccessing: 1540
Frame Proccessing: 1541
Frame Proccessing: 1542
Frame Proccessing: 1543
Frame Proccessing: 1544
Frame Proccessing: 1545
Frame Proccessing: 1546
Frame Proccessing: 1547
Frame Proccessing: 1548
Frame Proccessing: 1549
Frame Proccessing: 1550
Frame Proccessing: 1551
Frame Proccessing: 1552
Frame Proccessing: 1553
Frame Proccessing: 1554
Frame Proccessing: 1555
Frame Proccessing: 1556
Frame Proccessing: 1557
Frame Proccessing: 1558
Frame Proccessing: 1559
Frame Proccessing: 1560
Frame Proccessing: 1561
Frame Proccessing: 1562
Frame Proccessing: 1563
Frame Proccessing: 1564
Frame Proccessing: 1565
Frame Proccessing: 1566
Frame Proccessing: 1567
Frame Proccessing: 1568
Frame Proccessing: 1569
Frame Proccessing: 1570
Frame Proccessing: 1571
Frame Proccessing: 1572
Frame Proccessing: 1573
Frame Proccessing: 1574
Frame Proccessing: 1575
Frame Proccessing: 1576
Frame Proccessing: 1577
Frame Proccessing: 1578
Frame Proccessing: 1579
Frame Proccessing: 1580
Frame Proccessing: 1581
Frame Proccessing: 1582
Frame Proccessing: 1583
Frame Proccessing: 1584
Frame Proccessing: 1585
Frame Proccessing: 1586
Frame Proccessing: 1587
Frame Proccessing: 1588
Frame Proccessing: 1589
Frame Proccessing: 1590
Frame Proccessing: 1591
Frame Proccessing: 1592
Frame Proccessing: 1593
Frame Proccessing: 1594
Frame Proccessing: 1595
Frame Proccessing: 1596
Frame Proccessing: 1597
Frame Proccessing: 1598
Frame Proccessing: 1599
Frame Proccessing: 1600
Frame Proccessing: 1601
Frame Proccessing: 1602
Frame Proccessing: 1603
Frame Proccessing: 1604
Frame Proccessing: 1605
Frame Proccessing: 1606
Frame Proccessing: 1607
Frame Proccessing: 1608
Frame Proccessing: 1609
Frame Proccessing: 1610
Frame Proccessing: 1611
Frame Proccessing: 1612
Frame Proccessing: 1613
Frame Proccessing: 1614
Frame Proccessing: 1615
Frame Proccessing: 1616
Frame Proccessing: 1617
Frame Proccessing: 1618
Frame Proccessing: 1619
Frame Proccessing: 1620
Frame Proccessing: 1621
Frame Proccessing: 1622
Frame Proccessing: 1623
Frame Proccessing: 1624
Frame Proccessing: 1625
Frame Proccessing: 1626
Frame Proccessing: 1627
Frame Proccessing: 1628
Frame Proccessing: 1629
Frame Proccessing: 1630
Frame Proccessing: 1631
Frame Proccessing: 1632
Frame Proccessing: 1633
Frame Proccessing: 1634
Frame Proccessing: 1635
Frame Proccessing: 1636
Frame Proccessing: 1637
Frame Proccessing: 1638
Frame Proccessing: 1639
Frame Proccessing: 1640
Frame Proccessing: 1641
Frame Proccessing: 1642
Frame Proccessing: 1643
Frame Proccessing: 1644
Frame Proccessing: 1645
Frame Proccessing: 1646
Frame Proccessing: 1647
Frame Proccessing: 1648
Frame Proccessing: 1649
Frame Proccessing: 1650
Frame Proccessing: 1651
Frame Proccessing: 1652
Frame Proccessing: 1653
Frame Proccessing: 1654
Frame Proccessing: 1655
Frame Proccessing: 1656
Frame Proccessing: 1657
Frame Proccessing: 1658
Frame Proccessing: 1659
Frame Proccessing: 1660
Frame Proccessing: 1661
...
Frame Proccessing: 2414
Frame Proccessing: 2415
###Markdown
Convert H5 to Pb
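The cell below freezes the trained Keras model into a single TensorFlow 1.x protobuf graph. Once the `.pb` file has been written, it can be reloaded for inference along the lines of the following sketch, which assumes TF 1.x graph mode and uses hypothetical tensor names and input shape (use the `print(model.inputs)` / `print(model.outputs)` calls below to look up the real names):

```python
import numpy as np
import tensorflow as tf  # TF 1.x graph-mode APIs, matching the freezing code below

# Sketch: reload the frozen graph written by the cell below and push one batch through it.
with tf.gfile.GFile('/content/models/SSD_model_1912_6class.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # Hypothetical tensor names -- replace with the names printed from model.inputs / model.outputs.
    input_tensor = graph.get_tensor_by_name('input_1:0')
    output_tensor = graph.get_tensor_by_name('predictions/concat:0')
    images = np.zeros((1, 300, 300, 3), dtype=np.float32)  # hypothetical SSD input shape
    with tf.Session(graph=graph) as sess:
        preds = sess.run(output_tensor, feed_dict={input_tensor: images})
print(preds.shape)
```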
###Code
model.load_weights('/content/drive/MyDrive/GG_SSD7/Model/Model_SSD7_1912_All_epoch-200_loss-0.0268_val_loss-0.2245_acc-0.1870.h5')
from keras import backend as K
import tensorflow as tf
# This line must be executed before loading the Keras model.
K.set_learning_phase(0)
print('Output',model.outputs)
# [<tf.Tensor 'dense_2/Softmax:0' shape=(?, 10) dtype=float32>]
print('Input',model.inputs)
# [<tf.Tensor 'conv2d_1_input:0' shape=(?, 28, 28, 1) dtype=float32>]
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
"""
Freezes the state of a session into a pruned computation graph.
Creates a new computation graph where variable nodes are replaced by
constants taking their current value in the session. The new graph will be
pruned so subgraphs that are not necessary to compute the requested
outputs are removed.
@param session The TensorFlow session to be frozen.
@param keep_var_names A list of variable names that should not be frozen,
or None to freeze all the variables in the graph.
@param output_names Names of the relevant graph outputs.
@param clear_devices Remove the device directives from the graph for better portability.
@return The frozen graph definition.
"""
from tensorflow.python.framework.graph_util import convert_variables_to_constants
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
# Graph -> GraphDef ProtoBuf
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = convert_variables_to_constants(session, input_graph_def,
output_names, freeze_var_names)
return frozen_graph
frozen_graph = freeze_session(K.get_session(),
output_names=[out.op.name for out in model.outputs])
tf.train.write_graph(frozen_graph, "/content/models", "SSD_model_1912_6class.pb", as_text=False)
###Output
_____no_output_____
###Markdown
Object Detection Based on Renu Khandelwal's YOLOv3 demo provided [here](https://medium.com/datadriveninvestor/object-detection-using-yolov3-using-keras-80bf35e61ce1). Load dependencies
###Code
import os
import scipy.io
import scipy.misc
import numpy as np
from numpy import expand_dims
import pandas as pd
import PIL
import struct
import cv2
from numpy import expand_dims
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Lambda, Conv2D, BatchNormalization, LeakyReLU, ZeroPadding2D, UpSampling2D
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.layers import add, concatenate
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.preprocessing.image import img_to_array
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from matplotlib.patches import Rectangle
from skimage.transform import resize
%matplotlib inline
###Output
_____no_output_____
###Markdown
Set hyperparameters
###Code
net_h, net_w = 416, 416
obj_thresh, nms_thresh = 0.5, 0.45
# there are 80 class labels in the MS COCO dataset:
labels = ["person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", "truck", \
"boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", \
"bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", \
"backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", \
"sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", \
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", \
"apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", \
"chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", "laptop", "mouse", \
"remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", \
"book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush"]
###Output
_____no_output_____
###Markdown
Design model architecture
###Code
# define block of conv layers:
def _conv_block(inp, convs, skip=True):
x = inp
count = 0
for conv in convs:
if count == (len(convs) - 2) and skip:
skip_connection = x
count += 1
if conv['stride'] > 1: x = ZeroPadding2D(((1,0),(1,0)))(x) # peculiar padding as darknet prefer left and top
x = Conv2D(conv['filter'],
conv['kernel'],
strides=conv['stride'],
padding='valid' if conv['stride'] > 1 else 'same', # peculiar padding as darknet prefer left and top
name='conv_' + str(conv['layer_idx']),
use_bias=False if conv['bnorm'] else True)(x)
if conv['bnorm']: x = BatchNormalization(epsilon=0.001, name='bnorm_' + str(conv['layer_idx']))(x)
if conv['leaky']: x = LeakyReLU(alpha=0.1, name='leaky_' + str(conv['layer_idx']))(x)
return add([skip_connection, x]) if skip else x
# use _conv_block() to define model architecture:
def make_yolov3_model():
input_image = Input(shape=(None, None, 3))
# Layer 0 => 4
x = _conv_block(input_image, [{'filter': 32, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 0},
{'filter': 64, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 1},
{'filter': 32, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 2},
{'filter': 64, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 3}])
# Layer 5 => 8
x = _conv_block(x, [{'filter': 128, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 5},
{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 6},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 7}])
# Layer 9 => 11
x = _conv_block(x, [{'filter': 64, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 9},
{'filter': 128, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 10}])
# Layer 12 => 15
x = _conv_block(x, [{'filter': 256, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 12},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 13},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 14}])
# Layer 16 => 36
for i in range(7):
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 16+i*3},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 17+i*3}])
skip_36 = x
# Layer 37 => 40
x = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 37},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 38},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 39}])
# Layer 41 => 61
for i in range(7):
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 41+i*3},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 42+i*3}])
skip_61 = x
# Layer 62 => 65
x = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 2, 'bnorm': True, 'leaky': True, 'layer_idx': 62},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 63},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 64}])
# Layer 66 => 74
for i in range(3):
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 66+i*3},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 67+i*3}])
# Layer 75 => 79
x = _conv_block(x, [{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 75},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 76},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 77},
{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 78},
{'filter': 512, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 79}], skip=False)
# Layer 80 => 82
yolo_82 = _conv_block(x, [{'filter': 1024, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 80},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 81}], skip=False)
# Layer 83 => 86
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 84}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_61])
# Layer 87 => 91
x = _conv_block(x, [{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 87},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 88},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 89},
{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 90},
{'filter': 256, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 91}], skip=False)
# Layer 92 => 94
yolo_94 = _conv_block(x, [{'filter': 512, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 92},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 93}], skip=False)
# Layer 95 => 98
x = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 96}], skip=False)
x = UpSampling2D(2)(x)
x = concatenate([x, skip_36])
# Layer 99 => 106
yolo_106 = _conv_block(x, [{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 99},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 100},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 101},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 102},
{'filter': 128, 'kernel': 1, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 103},
{'filter': 256, 'kernel': 3, 'stride': 1, 'bnorm': True, 'leaky': True, 'layer_idx': 104},
{'filter': 255, 'kernel': 1, 'stride': 1, 'bnorm': False, 'leaky': False, 'layer_idx': 105}], skip=False)
model = Model(input_image, [yolo_82, yolo_94, yolo_106])
return model
yolov3 = make_yolov3_model()
# N.B.: uncomment the following line of code to download yolov3 model weights:
# ! wget -c https://www.dropbox.com/s/88xnszqf7xkf70j/yolov3.h5
yolov3.load_weights('yolov3.h5')
###Output
_____no_output_____
###Markdown
Define object detection-specific functions
###Code
def load_image_pixels(filename, shape):
# load the image to get its shape
image = load_img(filename)
width, height = image.size
# load the image with the required size
image = load_img(filename, target_size=shape)
# convert to numpy array
image = img_to_array(image)
# scale pixel values to [0, 1]
image = image.astype('float32')
image /= 255.0
# add a dimension so that we have one sample
image = expand_dims(image, 0)
return image, width, height
class BoundBox:
def __init__(self, xmin, ymin, xmax, ymax, objness = None, classes = None):
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
self.objness = objness
self.classes = classes
self.label = -1
self.score = -1
def get_label(self):
if self.label == -1:
self.label = np.argmax(self.classes)
return self.label
def get_score(self):
if self.score == -1:
self.score = self.classes[self.get_label()]
return self.score
def _sigmoid(x):
return 1. / (1. + np.exp(-x))
def _interval_overlap(interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2,x4) - x1
else:
if x2 < x3:
return 0
else:
return min(x2,x4) - x3
def bbox_iou(box1, box2):
intersect_w = _interval_overlap([box1.xmin, box1.xmax], [box2.xmin, box2.xmax])
intersect_h = _interval_overlap([box1.ymin, box1.ymax], [box2.ymin, box2.ymax])
intersect = intersect_w * intersect_h
w1, h1 = box1.xmax-box1.xmin, box1.ymax-box1.ymin
w2, h2 = box2.xmax-box2.xmin, box2.ymax-box2.ymin
union = w1*h1 + w2*h2 - intersect
return float(intersect) / union
def do_nms(boxes, nms_thresh):
if len(boxes) > 0:
nb_class = len(boxes[0].classes)
else:
return
for c in range(nb_class):
sorted_indices = np.argsort([-box.classes[c] for box in boxes])
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].classes[c] == 0: continue
for j in range(i+1, len(sorted_indices)):
index_j = sorted_indices[j]
if bbox_iou(boxes[index_i], boxes[index_j]) >= nms_thresh:
boxes[index_j].classes[c] = 0
# decode_netout() takes each one of the NumPy arrays, one at a time,
# and decodes the candidate bounding boxes and class predictions
def decode_netout(netout, anchors, obj_thresh, net_h, net_w):
grid_h, grid_w = netout.shape[:2]
nb_box = 3
netout = netout.reshape((grid_h, grid_w, nb_box, -1))
nb_class = netout.shape[-1] - 5
boxes = []
netout[..., :2] = _sigmoid(netout[..., :2])
netout[..., 4:] = _sigmoid(netout[..., 4:])
netout[..., 5:] = netout[..., 4][..., np.newaxis] * netout[..., 5:]
netout[..., 5:] *= netout[..., 5:] > obj_thresh
for i in range(grid_h*grid_w):
row = i / grid_w
col = i % grid_w
for b in range(nb_box):
# 4th element is objectness score
objectness = netout[int(row)][int(col)][b][4]
#objectness = netout[..., :4]
if(objectness.all() <= obj_thresh): continue
# first 4 elements are x, y, w, and h
x, y, w, h = netout[int(row)][int(col)][b][:4]
x = (col + x) / grid_w # center position, unit: image width
y = (row + y) / grid_h # center position, unit: image height
w = anchors[2 * b + 0] * np.exp(w) / net_w # unit: image width
h = anchors[2 * b + 1] * np.exp(h) / net_h # unit: image height
# last elements are class probabilities
classes = netout[int(row)][col][b][5:]
box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, objectness, classes)
#box = BoundBox(x-w/2, y-h/2, x+w/2, y+h/2, None, classes)
boxes.append(box)
return boxes
# to stretch bounding boxes back into the shape of the original image,
# enabling plotting of the original image and with bounding boxes overlain
def correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w):
if (float(net_w)/image_w) < (float(net_h)/image_h):
new_w = net_w
new_h = (image_h*net_w)/image_w
else:
new_h = net_w
new_w = (image_w*net_h)/image_h
for i in range(len(boxes)):
x_offset, x_scale = (net_w - new_w)/2./net_w, float(new_w)/net_w
y_offset, y_scale = (net_h - new_h)/2./net_h, float(new_h)/net_h
boxes[i].xmin = int((boxes[i].xmin - x_offset) / x_scale * image_w)
boxes[i].xmax = int((boxes[i].xmax - x_offset) / x_scale * image_w)
boxes[i].ymin = int((boxes[i].ymin - y_offset) / y_scale * image_h)
boxes[i].ymax = int((boxes[i].ymax - y_offset) / y_scale * image_h)
def draw_boxes(filename, v_boxes, v_labels, v_scores):
# load the image
data = plt.imread(filename)
# plot the image
plt.imshow(data)
# get the context for drawing boxes
ax = plt.gca()
# plot each box
for i in range(len(v_boxes)):
box = v_boxes[i]
# get coordinates
y1, x1, y2, x2 = box.ymin, box.xmin, box.ymax, box.xmax
# calculate width and height of the box
width, height = x2 - x1, y2 - y1
# create the shape
rect = Rectangle((x1, y1), width, height, fill=False, color='red')
# draw the box
ax.add_patch(rect)
# draw text and score in top left corner
label = "%s (%.3f)" % (v_labels[i], v_scores[i])
plt.text(x1, y1, label, color='red')
# show the plot
plt.show()
# get all of the results above a threshold
# takes the list of boxes, known labels,
# and our classification threshold as arguments
# and returns parallel lists of boxes, labels, and scores.
def get_boxes(boxes, labels, thresh):
v_boxes, v_labels, v_scores = list(), list(), list()
# enumerate all boxes
for box in boxes:
# enumerate all possible labels
for i in range(len(labels)):
# check if the threshold for this label is high enough
if box.classes[i] > thresh:
v_boxes.append(box)
v_labels.append(labels[i])
v_scores.append(box.classes[i]*100)
# don't break, many labels may trigger for one box
return v_boxes, v_labels, v_scores
###Output
_____no_output_____
###Markdown
Load sample image
###Code
# define the expected input shape for the model
input_w, input_h = 416, 416
# define our new photo
photo_filename = 'oboe-with-book.jpg'
# load and prepare image
image, image_w, image_h = load_image_pixels(photo_filename, (net_w, net_w))
plt.imshow(plt.imread(photo_filename))
###Output
_____no_output_____
###Markdown
Perform inference
###Code
# make prediction
yolos = yolov3.predict(image)
# define the anchors
anchors = [[116,90, 156,198, 373,326], [30,61, 62,45, 59,119], [10,13, 16,30, 33,23]]
# define the probability threshold for detected objects
class_threshold = 0.6
boxes = list()
for i in range(len(yolos)):
# decode the output of the network
boxes += decode_netout(yolos[i][0], anchors[i], obj_thresh, net_h, net_w)
# correct the sizes of the bounding boxes
correct_yolo_boxes(boxes, image_h, image_w, net_h, net_w)
# suppress non-maximal boxes
do_nms(boxes, nms_thresh)
# extract the details of the detected objects
v_boxes, v_labels, v_scores = get_boxes(boxes, labels, class_threshold)
# summarize what model found
for i in range(len(v_boxes)):
print(v_labels[i], v_scores[i])
# draw what model found
draw_boxes(photo_filename, v_boxes, v_labels, v_scores)
###Output
dog 99.74095821380615
|
docs/common_cards_coins_dice.ipynb | ###Markdown
Symbulate Documentation Cards, coins, and dice Many probabilistic situations involving physical objects like cards, coins, and dice can be specified with [BoxModel](probspace.html#boxmodel). Be sure to import Symbulate using the following commands.
###Code
from symbulate import *
%matplotlib inline
###Output
_____no_output_____
###Markdown
*Example*. Rolling a fair n-sided die (with n=6).
###Code
n = 6
die = list(range(1, n+1))
P = BoxModel(die)
RV(P).sim(10000).plot()
###Output
_____no_output_____
###Markdown
*Example.* Flipping a fair coin twice and recording the results in sequence.
###Code
P = BoxModel(['H', 'T'], size=2, order_matters=True)
P.sim(10000).tabulate(normalize=True)
###Output
_____no_output_____
###Markdown
*Example.* Unequally likely outcomes on a colored "spinner".
###Code
P = BoxModel(['orange', 'brown', 'yellow'], probs=[0.5, 0.25, 0.25])
P.sim(10000).tabulate(normalize = True)
###Output
_____no_output_____
###Markdown
`DeckOfCards()` is a special case of BoxModel for drawing from a standard deck of 52 cards. By default `replace=False`. *Example.* Simulated hands of 5 cards each.
###Code
DeckOfCards(size=5).sim(3)
###Output
_____no_output_____ |
dmu1/dmu1_ml_Herschel-Stripe-82/1.4_UKIDSS-LAS.ipynb | ###Markdown
Herschel Stripe 82 master catalogue Preparation of UKIRT Infrared Deep Sky Survey / Large Area Survey (UKIDSS/LAS)
Information about UKIDSS can be found at http://www.ukidss.org/surveys/surveys.html
The catalogue comes from `dmu0_UKIDSS-LAS`. In the catalogue, we keep:
- The identifier (it's unique in the catalogue);
- The position;
- The stellarity;
- The magnitude for each band in aperture 3 (2 arcsec).
- The Hall magnitude is described as the total magnitude.

J band magnitudes are available in two epochs. We take the first arbitrarily.
The magnitudes are “*Vega like*”. The AB offsets are given by Hewett *et al.* (2016):

| Band | AB offset |
|------|-----------|
| Y    | 0.634     |
| J    | 0.938     |
| H    | 1.379     |
| K    | 1.900     |

Each source is associated with an epoch. These range between 2005 and 2007. We take 2006 for the epoch.
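As a quick sanity check of these offsets, a hypothetical J-band Vega magnitude can be converted to AB and then to a flux density in µJy by hand (the notebook itself uses `mag_to_flux` from `herschelhelp_internal`; 23.9 below is the standard AB magnitude corresponding to 1 µJy):

```python
# Illustrative only: a hypothetical J-band measurement, not taken from the catalogue.
m_j_vega = 18.0
m_j_ab = m_j_vega + 0.938                # Vega -> AB offset for J (Hewett et al. 2016)
f_j_ujy = 10 ** ((23.9 - m_j_ab) / 2.5)  # AB magnitude -> flux density in µJy
print(m_j_ab, f_j_ujy)                   # 18.938, ~96.6 µJy
```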
###Code
from herschelhelp_internal import git_version
print("This notebook was run with herschelhelp_internal version: \n{}".format(git_version()))
%matplotlib inline
#%config InlineBackend.figure_format = 'svg'
import matplotlib.pyplot as plt
plt.rc('figure', figsize=(10, 6))
from collections import OrderedDict
import os
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.table import Column, Table
import numpy as np
from herschelhelp_internal.flagging import gaia_flag_column
from herschelhelp_internal.masterlist import nb_astcor_diag_plot, remove_duplicates
from herschelhelp_internal.utils import astrometric_correction, mag_to_flux
OUT_DIR = os.environ.get('TMP_DIR', "./data_tmp")
try:
os.makedirs(OUT_DIR)
except FileExistsError:
pass
RA_COL = "las_ra"
DEC_COL = "las_dec"
###Output
_____no_output_____
###Markdown
I - Column selection
###Code
#Is the following standard (different names for radec vs mag)?
imported_columns = OrderedDict({
'SOURCEID': 'las_id',
'RA': 'las_ra',
'Dec': 'las_dec',
'YHALLMAG': 'm_ukidss_y',
'YHALLMAGERR': 'merr_ukidss_y',
'YAPERMAG3': 'm_ap_ukidss_y',
'YAPERMAG3ERR': 'merr_ap_ukidss_y',
'J_1HALLMAG': 'm_ukidss_j',
'J_1HALLMAGERR': 'merr_ukidss_j',
'J_1APERMAG3': 'm_ap_ukidss_j',
'J_1APERMAG3ERR': 'merr_ap_ukidss_j',
'HAPERMAG3': 'm_ap_ukidss_h',
'HAPERMAG3ERR': 'merr_ap_ukidss_h',
'HHALLMAG': 'm_ukidss_h',
'HHALLMAGERR': 'merr_ukidss_h',
'KAPERMAG3': 'm_ap_ukidss_k',
'KAPERMAG3ERR': 'merr_ap_ukidss_k',
'KHALLMAG': 'm_ukidss_k',
'KHALLMAGERR': 'merr_ukidss_k',
'PSTAR': 'las_stellarity'
})
catalogue = Table.read(
"../../dmu0/dmu0_UKIDSS-LAS/data/UKIDSS-LAS_Herschel-Stripe-82.fits")[list(imported_columns)]
for column in imported_columns:
catalogue[column].name = imported_columns[column]
#Epochs between 2005 and 2007. Rough average:
epoch = 2006
# Clean table metadata
catalogue.meta = None
# Adding flux and band-flag columns
for col in catalogue.colnames:
if col.startswith('m_'):
errcol = "merr{}".format(col[1:])
# LAS uses a huge negative number for missing values
catalogue[col][catalogue[col] < -100] = np.nan
catalogue[errcol][catalogue[errcol] < -100] = np.nan
# Vega to AB correction
if col.endswith('y'):
catalogue[col] += 0.634
elif col.endswith('j'):
catalogue[col] += 0.938
elif col.endswith('h'):
catalogue[col] += 1.379
elif col.endswith('k'):
catalogue[col] += 1.900
else:
print("{} column has wrong band...".format(col))
flux, error = mag_to_flux(np.array(catalogue[col]), np.array(catalogue[errcol]))
# Fluxes are added in µJy
catalogue.add_column(Column(flux * 1.e6, name="f{}".format(col[1:])))
catalogue.add_column(Column(error * 1.e6, name="f{}".format(errcol[1:])))
# Band-flag column
if "ap" not in col:
catalogue.add_column(Column(np.zeros(len(catalogue), dtype=bool), name="flag{}".format(col[1:])))
# TODO: Set to True the flag columns for fluxes that should not be used for SED fitting.
catalogue[:10].show_in_notebook()
###Output
_____no_output_____
###Markdown
II - Removal of duplicated sources We remove duplicated objects from the input catalogues.
###Code
SORT_COLS = ['merr_ap_ukidss_j', 'merr_ap_ukidss_k']
FLAG_NAME = 'las_flag_cleaned'
nb_orig_sources = len(catalogue)
catalogue = remove_duplicates(catalogue, RA_COL, DEC_COL, sort_col=SORT_COLS, flag_name=FLAG_NAME)
nb_sources = len(catalogue)
print("The initial catalogue had {} sources.".format(nb_orig_sources))
print("The cleaned catalogue has {} sources ({} removed).".format(nb_sources, nb_orig_sources - nb_sources))
print("The cleaned catalogue has {} sources flagged as having been cleaned".format(np.sum(catalogue[FLAG_NAME])))
###Output
/opt/anaconda3/envs/herschelhelp_internal/lib/python3.6/site-packages/astropy/table/column.py:1096: MaskedArrayFutureWarning: setting an item on a masked array which has a shared mask will not copy the mask and also change the original mask array in the future.
Check the NumPy 1.11 release notes for more information.
ma.MaskedArray.__setitem__(self, index, value)
###Markdown
III - Astrometry correction We match the astrometry to the Gaia one. We limit the Gaia catalogue to sources with a g-band flux between the 30th and the 70th percentiles. Some quick tests show that this gives the lowest dispersion in the results.
###Code
gaia = Table.read("../../dmu0/dmu0_GAIA/data/GAIA_Herschel-Stripe-82.fits")
gaia_coords = SkyCoord(gaia['ra'], gaia['dec'])
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec, near_ra0=True)
delta_ra, delta_dec = astrometric_correction(
SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]),
gaia_coords, near_ra0=True
)
print("RA correction: {}".format(delta_ra))
print("Dec correction: {}".format(delta_dec))
catalogue[RA_COL] += delta_ra.to(u.deg)
catalogue[DEC_COL] += delta_dec.to(u.deg)
nb_astcor_diag_plot(catalogue[RA_COL], catalogue[DEC_COL],
gaia_coords.ra, gaia_coords.dec, near_ra0=True)
###Output
_____no_output_____
###Markdown
IV - Flagging Gaia objects
###Code
catalogue.add_column(
gaia_flag_column(SkyCoord(catalogue[RA_COL], catalogue[DEC_COL]), epoch, gaia)
)
GAIA_FLAG_NAME = "las_flag_gaia"
catalogue['flag_gaia'].name = GAIA_FLAG_NAME
print("{} sources flagged.".format(np.sum(catalogue[GAIA_FLAG_NAME] > 0)))
###Output
438850 sources flagged.
###Markdown
V - Flagging objects near bright stars VI - Saving to disk
###Code
catalogue.write("{}/UKIDSS-LAS.fits".format(OUT_DIR), overwrite=True)
###Output
_____no_output_____ |
Inference C++ models using SageMaker Processing.ipynb | ###Markdown
Machine learning has existed for decades. Before Python became the dominant language for machine learning, many models were built in other languages such as Java and C++. Refactoring legacy models written in C++ or Java can be prohibitively expensive and time consuming. In this notebook, we are going to demonstrate running inference with a C++ model by first building a custom container and then running a ScriptProcessor job. C++ model We use a simple C++ test file for demonstration purposes. This C++ program accepts its input data as a single string of comma-separated values. For example, “2,3” represents a row of input data with the values 2 and 3 in two separate columns. We use a simple linear regression model y = x1 + x2 for demonstration purposes. Customers can modify the C++ inference code to serve more realistic and sophisticated models.
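Before looking at the source, a tiny Python illustration of the contract the executable works to — the comma-separated input string and the toy model's expected output (illustrative only; the real parsing happens inside `test.cpp`):

```python
# One row of input data becomes a comma-separated string with no spaces.
row = [2, 3]
input_string = ",".join(str(v) for v in row)  # -> "2,3"

# The toy model described above: y = x1 + x2.
expected_output = row[0] + row[1]             # -> 5
print(input_string, expected_output)
```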
###Code
!pygmentize test.cpp
###Output
_____no_output_____
###Markdown
We use `g++` to compile the `test.cpp` file into an executable file `a.out`
###Code
!g++ -std=c++11 test.cpp
###Output
_____no_output_____
###Markdown
We run a quick test on `a.out` to make sure it works as expected.
###Code
%%sh
./a.out '9,8'
###Output
_____no_output_____
###Markdown
SageMaker Processing Amazon SageMaker Processing is a new capability of Amazon SageMaker (https://aws.amazon.com/sagemaker/) for running processing and model evaluation workloads with a fully managed experience. Amazon SageMaker Processing lets customers run analytics jobs for data engineering and model evaluation on Amazon SageMaker easily and at scale. SageMaker Processing allows customers to enjoy the benefits of a fully managed environment with all the security and compliance guarantees built into Amazon SageMaker. With Amazon SageMaker Processing, customers have the flexibility of using the built-in data processing containers or bringing their own containers and submitting custom jobs to run on managed infrastructure. Once submitted, Amazon SageMaker launches the compute instances, processes and analyzes the input data and releases the resources upon completion. The processing container is defined as shown below. We have Anaconda and Pandas installed into the container. `a.out` is the C++ executable that contains the model inference logic. `process_script.py` is the Python script we use to call the C++ executable and save results. We build the Docker container and push it to Amazon Elastic Container Registry. Build container
###Code
%%sh
# The name of our algorithm
algorithm_name=cpp_processing
#cd container
chmod +x process_script.py
chmod +x a.out
account=$(aws sts get-caller-identity --query Account --output text)
# Get the region defined in the current configuration (default to us-west-2 if none defined)
region=$(aws configure get region)
fullname="${account}.dkr.ecr.${region}.amazonaws.com/${algorithm_name}:latest"
# If the repository doesn't exist in ECR, create it.
aws ecr describe-repositories --repository-names "${algorithm_name}" > /dev/null 2>&1
if [ $? -ne 0 ]
then
aws ecr create-repository --repository-name "${algorithm_name}" > /dev/null
fi
# Get the login command from ECR and execute it directly
aws ecr get-login-password --region ${region} | docker login --username AWS --password-stdin ${fullname}
# Build the docker image locally with the image name and then push it to ECR
# with the full name.
docker build -t ${algorithm_name} .
docker tag ${algorithm_name} ${fullname}
docker push ${fullname}
###Output
_____no_output_____
###Markdown
SageMaker Processing script Next, use the Amazon SageMaker Python SDK to submit a processing job. We use the container that was just built and the `process_script.py` script for calling the C++ model. `process_script.py` first finds all data files under `/opt/ml/processing/input/`. These data files are downloaded by SageMaker from S3 to a designated local directory in the container. By default, when you use multiple instances, the data from S3 are duplicated to each container instance, meaning every instance gets the full dataset. By setting `s3_data_distribution_type='ShardedByS3Key'`, each instance instead gets approximately 1/n of the total number of input data files. We read each data file into memory and convert it into a long string ready for the C++ executable to consume. The `subprocess` module from Python allows us to run the C++ executable and connect to its output and error pipes. Output is saved as a CSV file to `/opt/ml/processing/output`. Upon completion, SageMaker Processing uploads the files in this directory to S3. The main script looks like this:

```python
def call_one_exe(a):
    p = subprocess.Popen(["./a.out", a], stdout=subprocess.PIPE)
    p_out, err = p.communicate()
    output = p_out.decode("utf-8")
    return output.split(',')

if __name__ == '__main__':
    # parse is only needed if we want to pass arg
    parser = argparse.ArgumentParser()
    args, _ = parser.parse_known_args()
    print('Received arguments {}'.format(args))

    files = glob('/opt/ml/processing/input/*.csv')
    for i, f in enumerate(files):
        try:
            data = pd.read_csv(f, header=None, engine='python')
            # string looks like 2,3,5,6,7,8. Space is removed. '[' and ']' are also removed.
            string = str(list(data.values.flat)).replace(' ', '')[1:-1]
            predictions = call_one_exe(string)
            output_path = os.path.join('/opt/ml/processing/output', str(i) + '_out.csv')
            print('Saving training features to {}'.format(output_path))
            pd.DataFrame({'results': predictions}).to_csv(output_path, header=False, index=False)
        except Exception as e:
            print(str(e))
```

Run a processing job The next step would be to configure a processing job using the ScriptProcessor object.
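If you do scale out to multiple instances and want each one to receive only its shard of the input files, the distribution type is set on the `ProcessingInput` — a sketch with a placeholder S3 prefix (the job configured below keeps the default `FullyReplicated` behaviour on a single instance):

```python
from sagemaker.processing import ProcessingInput

# Placeholder S3 prefix; in this notebook the data files are uploaded to S3 in a later cell.
sharded_input = ProcessingInput(
    source='s3://your-bucket/data_for_inference_with_cpp_model',
    destination='/opt/ml/processing/input',
    s3_data_distribution_type='ShardedByS3Key',  # each instance gets ~1/n of the input files
)
```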
###Code
import boto3, os, sagemaker
from sagemaker.processing import ScriptProcessor, ProcessingInput, ProcessingOutput
from sagemaker import get_execution_role
sagemaker_session = sagemaker.Session()
default_s3_bucket = sagemaker_session.default_bucket()
client = boto3.client('sts')
Account_number = client.get_caller_identity()['Account']
###Output
_____no_output_____
###Markdown
10 sample data files are included in this demo. Each file contains 5000 rows of arbitrarily generated data. We first upload these files to S3.
###Code
input_data = sagemaker_session.upload_data(path='./data_files',
bucket=default_s3_bucket,
key_prefix='data_for_inference_with_cpp_model')
###Output
_____no_output_____
###Markdown
Now let us run a processing job using the Docker image and preprocessing script you just created. We pass the Amazon S3 input and output paths, which are required by our preprocessing script. Here, we also specify the number of instances and instance type for the processing job.
###Code
role = get_execution_role()
script_processor = ScriptProcessor(command=['python3'],
image_uri=Account_number + '.dkr.ecr.us-east-1.amazonaws.com/cpp_processing:latest',
role=role,
instance_count=1,
base_job_name = 'run-exe-processing',
instance_type='ml.c5.xlarge')
output_location = os.path.join('s3://',default_s3_bucket, 'processing_output')
script_processor.run(code='process_script.py',
inputs=[ProcessingInput(
source=input_data,
destination='/opt/ml/processing/input')],
outputs=[ProcessingOutput(source='/opt/ml/processing/output',
destination=output_location)]
)
###Output
_____no_output_____
###Markdown
Inspect the preprocessed dataset Take a look at a few rows of one dataset to make sure the preprocessing was successful.
###Code
print('Top 5 rows from 0_out.csv')
!aws s3 cp $output_location/0_out.csv - | head -n5
###Output
_____no_output_____ |
_posts/matplotlib/static-image/static-image.ipynb | ###Markdown
New to Plotly? Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/). You can set up Plotly to work in [online](https://plot.ly/python/getting-started/initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/start-plotting-online). We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started! Version Check Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
###Code
import plotly
plotly.__version__
# Import modules
import plotly.plotly as py
import plotly.tools as tls
import matplotlib.pyplot as plt
# Create a plot
x = [1, 2]
y = [2, 4]
plt.plot(x, y)
# Get Matplotlib Figure
mpl_fig = plt.gcf()
# Convert to plotly figure
plotly_fig = tls.mpl_to_plotly(mpl_fig)
# Save image
py.image.save_as(plotly_fig, 'your_image_filename.png')
###Output
_____no_output_____
###Markdown
In addition, you can specify the size of the matplotlib/plotly figure to be saved by using the following method:
###Code
import plotly.plotly as py
py.image.save_as(plotly_fig, 'your_image_filename.png', height=desired_height, width=desired_width)
###Output
_____no_output_____
###Markdown
You can also display inline static images in IPython:
###Code
import plotly.plotly as py
py.image.ishow(plotly_fig)
###Output
_____no_output_____
###Markdown
You can view the static version of any Plotly graph by appending `.png`, `.pdf`, `.eps`, or `.svg` to the end of the URL. For example, view the static image of https://plot.ly/~chris/1638 at https://plot.ly/~chris/1638.png. Combine this with the requests package and download the latest version of your Plotly graph:
###Code
import requests
image_bytes = requests.get('https://plot.ly/~chris/1638.png').content
###Output
_____no_output_____
###Markdown
Reference See [https://plot.ly/python/static-image-export/](https://plot.ly/python/static-image-export/) for more information and chart attribute options!
###Code
help(py.image)
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'static-image.ipynb', 'matplotlib/static-image/', 'Matplotlib Static Image Export',
'How to export plotly matplotlib graphs as static images in Python. Plotly supports png, svg, jpg, and pdf image export.',
title = 'Matplotlib Static Image Export | Plotly',
has_thumbnail='true',
thumbnail='thumbnail/png-export.png',
language='matplotlib',
page_type='example_index',
display_as='basic', order=5,
ipynb='~notebook_demo/232')
###Output
_____no_output_____ |
docs/notebooks/tensorflow_binary_classification.ipynb | ###Markdown
TensorFlow2: Training Loop. Although Keras is suitable for the vast majority of use cases, in the following scenarios it may make sense to forgo `model.fit()` and manually define a training loop:
- Maintaining legacy code and retraining old models.
- Custom batch/epoch operations like gradients and backpropagation.

> Disclaimer: This notebook demonstrates how to manually define a training loop for queued tuning of a binary classification model. However, it is only included to prove that AIQC technically supports TensorFlow out-of-the-box with `analysis_type='keras'`, and to demonstrate how expert practitioners can continue to use their favorite tools. We neither claim to be experts on the inner workings of TensorFlow, nor do we intend to troubleshoot advanced methodologies for users that are in over their heads.

Reference this repository for more TensorFlow cookbooks: https://github.com/IvanBongiorni/TensorFlow2.0_Notebooks
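The core of such a loop is only a few lines. Below is a minimal, self-contained sketch of one manual gradient step on toy data; the full `fn_train` later in this notebook applies the same pattern to the sonar dataset, batch by batch and epoch by epoch:

```python
import tensorflow as tf

# Toy data and model, just to show the tape -> gradients -> apply_gradients pattern.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation='sigmoid')])
loser = tf.losses.BinaryCrossentropy()
optimizer = tf.optimizers.Adamax()

x = tf.random.normal((8, 4))
y = tf.cast(tf.random.uniform((8, 1)) > 0.5, tf.float32)

with tf.GradientTape() as tape:
    loss = loser(y, model(x))
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
```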
###Code
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn.preprocessing import LabelBinarizer, PowerTransformer
import aiqc
from aiqc import datum
###Output
_____no_output_____
###Markdown
--- Example Data Reference [Example Datasets](example_datasets.ipynb) for more information.
###Code
df = datum.to_pandas('sonar.csv')
df.head()
###Output
_____no_output_____
###Markdown
--- a) High-Level API Reference [High-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data.
###Code
splitset = aiqc.Pipeline.Tabular.make(
dataFrame_or_filePath = df
, label_column = 'object'
, size_test = 0.22
, size_validation = 0.12
, label_encoder = LabelBinarizer(sparse_output=False)
, feature_encoders = [{
"sklearn_preprocess": PowerTransformer(method='yeo-johnson', copy=False)
, "dtypes": ['float64']
}]
, dtype = None
, features_excluded = None
, fold_count = None
, bin_count = None
)
def fn_build(features_shape, label_shape, **hp):
model = Sequential(name='Sonar')
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.30))
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.30))
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform'))
return model
def fn_lose(**hp):
loser = tf.losses.BinaryCrossentropy()
return loser
def fn_optimize(**hp):
optimizer = tf.optimizers.Adamax()
return optimizer
def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
batched_train_features, batched_train_labels = aiqc.tf_batcher(
features = samples_train['features']
, labels = samples_train['labels']
, batch_size = 5
)
# Still necessary for saving entire model.
model.compile(loss=loser, optimizer=optimizer)
## --- Metrics ---
acc = tf.metrics.BinaryAccuracy()
# Mirrors `keras.model.History.history` object.
history = {
'loss':list(), 'accuracy': list(),
'val_loss':list(), 'val_accuracy':list()
}
## --- Training loop ---
for epoch in range(hp['epochs']):
# --- Batch training ---
for i, batch in enumerate(batched_train_features):
with tf.GradientTape() as tape:
batch_loss = loser(
batched_train_labels[i],
model(batched_train_features[i])
)
# Update weights based on the gradient of the loss function.
gradients = tape.gradient(batch_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
## --- Epoch metrics ---
# Overall performance on training data.
train_probability = model.predict(samples_train['features'])
train_loss = loser(samples_train['labels'], train_probability)
train_acc = acc(samples_train['labels'], train_probability)
history['loss'].append(float(train_loss))
history['accuracy'].append(float(train_acc))
# Performance on evaluation data.
eval_probability = model.predict(samples_evaluate['features'])
eval_loss = loser(samples_evaluate['labels'], eval_probability)
eval_acc = acc(samples_evaluate['labels'], eval_probability)
history['val_loss'].append(float(eval_loss))
history['val_accuracy'].append(float(eval_acc))
# Attach history to the model so we can return a single object.
model.history.history = history
return model
hyperparameters = {
"neuron_count": [25, 50]
, "epochs": [75, 150]
}
queue = aiqc.Experiment.make(
library = "keras"
, analysis_type = "classification_binary"
, fn_build = fn_build
, fn_train = fn_train
, fn_lose = fn_lose
, fn_optimize = fn_optimize
, splitset_id = splitset.id
, repeat_count = 1
, hide_test = False
, hyperparameters = hyperparameters
, fn_predict = None #automated
, foldset_id = None
)
queue.run_jobs()
###Output
🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 4/4 [02:12<00:00, 33.19s/it]
###Markdown
For more information on visualization of performance metrics, reference the [Visualization & Metrics](visualization.html) documentation. --- b) Low-Level API Reference [Low-Level API Docs](api_high_level.ipynb) for more information including how to work with non-tabular data and defining optimizers.
###Code
dataset = aiqc.Dataset.Tabular.from_pandas(df)
label_column = 'object'
label = dataset.make_label(columns=[label_column])
labelcoder = label.make_labelcoder(
sklearn_preprocess = LabelBinarizer(sparse_output=False)
)
feature = dataset.make_feature(exclude_columns=[label_column])
encoderset = feature.make_encoderset()
featurecoder_0 = encoderset.make_featurecoder(
sklearn_preprocess = PowerTransformer(method='yeo-johnson', copy=False)
, dtypes = ['float64']
)
splitset = aiqc.Splitset.make(
feature_ids = [feature.id]
, label_id = label.id
, size_test = 0.22
, size_validation = 0.12
)
def fn_build(features_shape, label_shape, **hp):
model = Sequential(name='Sonar')
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.30))
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dropout(0.30))
model.add(Dense(hp['neuron_count'], activation='relu', kernel_initializer='he_uniform'))
model.add(Dense(units=label_shape[0], activation='sigmoid', kernel_initializer='glorot_uniform'))
return model
def fn_lose(**hp):
loser = tf.losses.BinaryCrossentropy()
return loser
def fn_optimize(**hp):
optimizer = tf.optimizers.Adamax()
return optimizer
def fn_train(model, loser, optimizer, samples_train, samples_evaluate, **hp):
batched_train_features, batched_train_labels = aiqc.tf_batcher(
features = samples_train['features']
, labels = samples_train['labels']
, batch_size = 5
)
# Still necessary for saving entire model.
model.compile(loss=loser, optimizer=optimizer)
## --- Metrics ---
acc = tf.metrics.BinaryAccuracy()
# Mirrors `keras.model.History.history` object.
history = {
'loss':list(), 'accuracy': list(),
'val_loss':list(), 'val_accuracy':list()
}
## --- Training loop ---
for epoch in range(hp['epochs']):
# --- Batch training ---
for i, batch in enumerate(batched_train_features):
with tf.GradientTape() as tape:
batch_loss = loser(
batched_train_labels[i],
model(batched_train_features[i])
)
# Update weights based on the gradient of the loss function.
gradients = tape.gradient(batch_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
## --- Epoch metrics ---
# Overall performance on training data.
train_probability = model.predict(samples_train['features'])
train_loss = loser(samples_train['labels'], train_probability)
train_acc = acc(samples_train['labels'], train_probability)
history['loss'].append(float(train_loss))
history['accuracy'].append(float(train_acc))
# Performance on evaluation data.
eval_probability = model.predict(samples_evaluate['features'])
eval_loss = loser(samples_evaluate['labels'], eval_probability)
eval_acc = acc(samples_evaluate['labels'], eval_probability)
history['val_loss'].append(float(eval_loss))
history['val_accuracy'].append(float(eval_acc))
# Attach history to the model so we can return a single object.
model.history.history = history
return model
algorithm = aiqc.Algorithm.make(
library = "keras"
, analysis_type = "classification_binary"
, fn_build = fn_build
, fn_train = fn_train
, fn_lose = fn_lose
, fn_optimize = fn_optimize
)
hyperparameters = {
"neuron_count": [25, 50]
, "epochs": [75, 150]
}
hyperparamset = algorithm.make_hyperparamset(
hyperparameters = hyperparameters
)
queue = algorithm.make_queue(
splitset_id = splitset.id
, hyperparamset_id = hyperparamset.id
, repeat_count = 2
)
queue.run_jobs()
###Output
🔮 Training Models 🔮: 100%|██████████████████████████████████████████| 8/8 [04:19<00:00, 32.46s/it]
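###Markdown
For intuition about the manual training loop in `fn_train` above, here is a rough sketch of what a batching helper along the lines of `aiqc.tf_batcher` could do: split index-aligned features and labels into batches of roughly the requested size. This is an illustrative assumption, not AIQC's actual implementation, and `simple_batcher` is a hypothetical name.
###Code
import numpy as np

def simple_batcher(features, labels, batch_size=5):
    """Hypothetical sketch: split aligned features/labels into index-aligned batches."""
    n_batches = int(np.ceil(len(features) / batch_size))
    batched_features = np.array_split(np.asarray(features), n_batches)
    batched_labels = np.array_split(np.asarray(labels), n_batches)
    return batched_features, batched_labels

# The training loop above iterates these two lists in lockstep (same index -> same samples).
f_batches, l_batches = simple_batcher(np.arange(20).reshape(10, 2), np.arange(10), batch_size=4)
print(len(f_batches), f_batches[0].shape, l_batches[0].shape)
###Output
_____no_output_____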
|
03_Project_1_Analyzing TV Data/Analyzing TV Data.ipynb | ###Markdown
###Code
!git clone https://github.com/mohd-faizy/DataScience-With-Python.git
###Output
Cloning into 'DataScience-With-Python'...
remote: Enumerating objects: 179, done.
remote: Counting objects: 100% (179/179), done.
remote: Compressing objects: 100% (167/167), done.
remote: Total 179 (delta 16), reused 173 (delta 10), pack-reused 0
Receiving objects: 100% (179/179), 14.70 MiB | 10.09 MiB/s, done.
Resolving deltas: 100% (16/16), done.
###Markdown
1. TV, halftime shows, and the Big Game Whether or not you like football, the Super Bowl is a spectacle. There's a little something for everyone at your Super Bowl party. Drama in the form of blowouts, comebacks, and controversy for the sports fan. There are the ridiculously expensive ads, some hilarious, others gut-wrenching, thought-provoking, and weird. The half-time shows with the biggest musicians in the world, sometimes riding giant mechanical tigers or leaping from the roof of the stadium. It's a show, baby. And in this notebook, we're going to find out how some of the elements of this show interact with each other. After exploring and cleaning our data a little, we're going to answer questions like: What are the most extreme game outcomes? How does the game affect television viewership? How have viewership, TV ratings, and ad cost evolved over time? Who are the most prolific musicians in terms of halftime show performances? (Image: Left Shark Steals The Show. Katy Perry performing at halftime of Super Bowl XLIX. Photo by Huntley Paton. Attribution-ShareAlike 2.0 Generic (CC BY-SA 2.0).) The dataset we'll use was scraped and polished from Wikipedia. It is made up of three CSV files, one with game data, one with TV data, and one with halftime musician data for all 52 Super Bowls through 2018. Let's take a look, using display() instead of print() since its output is much prettier in Jupyter Notebooks.
###Code
# Import pandas
import pandas as pd
# Load the CSV data into DataFrames
super_bowls = pd.read_csv('/content/DataScience-With-Python/03__Project__/datasets/super_bowls.csv')
tv = pd.read_csv('/content/DataScience-With-Python/03__Project__/datasets/tv.csv')
halftime_musicians = pd.read_csv('/content/DataScience-With-Python/03__Project__/datasets/halftime_musicians.csv')
# Display the first five rows of each DataFrame
display(super_bowls.head())
display(tv.head())
display(halftime_musicians.head())
###Output
_____no_output_____
###Markdown
2. Taking note of dataset issues For the Super Bowl game data, we can see the dataset appears whole except for missing values in the backup quarterback columns (qb_winner_2 and qb_loser_2), which makes sense given most starting QBs in the Super Bowl (qb_winner_1 and qb_loser_1) play the entire game. From the visual inspection of TV and halftime musicians data, there is only one missing value displayed, but I've got a hunch there are more. The Super Bowl goes all the way back to 1967, and the more granular columns (e.g. the number of songs for halftime musicians) probably weren't tracked reliably over time. Wikipedia is great but not perfect. An inspection of the .info() output for tv and halftime_musicians shows us that there are multiple columns with null values.
###Code
# Summary of the TV data to inspect
tv.info()
print('\n')
# Summary of the halftime musician data to inspect
halftime_musicians.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 53 entries, 0 to 52
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 super_bowl 53 non-null int64
1 network 53 non-null object
2 avg_us_viewers 53 non-null int64
3 total_us_viewers 15 non-null float64
4 rating_household 53 non-null float64
5 share_household 53 non-null int64
6 rating_18_49 15 non-null float64
7 share_18_49 6 non-null float64
8 ad_cost 53 non-null int64
dtypes: float64(4), int64(4), object(1)
memory usage: 3.9+ KB
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 134 entries, 0 to 133
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 super_bowl 134 non-null int64
1 musician 134 non-null object
2 num_songs 88 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 3.3+ KB
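###Markdown
As a quick numeric cross-check on the `.info()` summaries above (this cell is an addition to the original walkthrough), we can also count the missing values per column directly:
###Code
# Count missing values per column in the TV and halftime musician data
display(tv.isnull().sum())
display(halftime_musicians.isnull().sum())
###Output
_____no_output_____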
###Markdown
3. Combined points distribution For the TV data, the following columns have missing values, and a lot of them: total_us_viewers (amount of U.S. viewers who watched at least some part of the broadcast); rating_18_49 (average % of U.S. adults 18-49 who live in a household with a TV that were watching for the entire broadcast); share_18_49 (average % of U.S. adults 18-49 who live in a household with a TV in use that were watching for the entire broadcast). For the halftime musician data, there are missing numbers of songs performed (num_songs) for about a third of the performances. There are a lot of potential reasons for these missing values. Was the data ever tracked? Was it lost in history? Is the research effort to make this data whole worth it? Maybe. Watching every Super Bowl halftime show to get song counts would be pretty fun. But we don't have the time to do that kind of stuff now! Let's take note of where the dataset isn't perfect and start uncovering some insights. Let's start by looking at combined points for each Super Bowl by visualizing the distribution. Let's also pinpoint the Super Bowls with the highest and lowest scores.
###Code
# Import matplotlib and set plotting style
from matplotlib import pyplot as plt
%matplotlib inline
plt.style.use('seaborn')
# Plot a histogram of combined points
plt.hist(super_bowls.combined_pts)
plt.xlabel('Combined Points')
plt.ylabel('Number of Super Bowls')
plt.show()
# Display the highest- and lowest-scoring Super Bowls
display(super_bowls[super_bowls['combined_pts'] > 70])
display(super_bowls[super_bowls['combined_pts'] < 25])
###Output
_____no_output_____
###Markdown
4. Point difference distributionMost combined scores are around 40-50 points, with the extremes being roughly equal distance away in opposite directions. Going up to the highest combined scores at 74 and 75, we find two games featuring dominant quarterback performances. One even happened recently in 2018's Super Bowl LII where Tom Brady's Patriots lost to Nick Foles' underdog Eagles 41-33 for a combined score of 74.Going down to the lowest combined scores, we have Super Bowl III and VII, which featured tough defenses that dominated. We also have Super Bowl IX in New Orleans in 1975, whose 16-6 score can be attributed to inclement weather. The field was slick from overnight rain, and it was cold at 46 °F (8 °C), making it hard for the Steelers and Vikings to do much offensively. This was the second-coldest Super Bowl ever and the last to be played in inclement weather for over 30 years. The NFL realized people like points, I guess.UPDATE: In Super Bowl LIII in 2019, the Patriots and Rams broke the record for the lowest-scoring Super Bowl with a combined score of 16 points (13-3 for the Patriots).Let's take a look at point difference now.
###Code
# Plot a histogram of point differences
plt.hist(super_bowls.difference_pts)
plt.xlabel('Point Difference')
plt.ylabel('Number of Super Bowls')
plt.show()
# Display the closest game(s) and biggest blowouts
display(super_bowls[super_bowls['difference_pts'] == 1])
display(super_bowls[super_bowls['difference_pts'] >= 35])
###Output
_____no_output_____
###Markdown
5. Do blowouts translate to lost viewers? The vast majority of Super Bowls are close games. Makes sense. Both teams are likely to be deserving if they've made it this far. The closest game ever was when the Buffalo Bills lost to the New York Giants by 1 point in 1991, which was best remembered for Scott Norwood's last-second missed field goal attempt that went wide right, kicking off four Bills Super Bowl losses in a row. Poor Scott. The biggest point discrepancy ever was 45 points (!) where Hall of Famer Joe Montana led the San Francisco 49ers to victory in 1990, one year before the closest game ever. I remember watching the Seahawks crush the Broncos by 35 points (43-8) in 2014, which was a boring experience in my opinion. The game was never really close. I'm pretty sure we changed the channel at the end of the third quarter. Let's combine our game data and TV data to see if this is a universal phenomenon. Do large point differences translate to lost viewers? We can plot household share (average percentage of U.S. households with a TV in use that were watching for the entire broadcast) vs. point difference to find out.
###Code
# Join game and TV data, filtering out SB I because it was split over two networks
games_tv = pd.merge(tv[tv['super_bowl'] > 1], super_bowls, on='super_bowl')
# Import seaborn
import seaborn as sns
# Create a scatter plot with a linear regression model fit
sns.regplot(x='difference_pts', y='share_household', data=games_tv)
###Output
_____no_output_____
###Markdown
6. Viewership and the ad industry over time The downward sloping regression line and the 95% confidence interval for that regression suggest that bailing on the game if it is a blowout is common. Though it matches our intuition, we must take it with a grain of salt because the linear relationship in the data is weak due to our small sample size of 52 games. Regardless of the score though, I bet most people stick it out for the halftime show, which is good news for the TV networks and advertisers. A 30-second spot costs a pretty \$5 million now, but has it always been that way? And how have the number of viewers and household ratings trended alongside ad cost? We can find out using line plots that share a "Super Bowl" x-axis.
###Code
# Create a figure with 3x1 subplot and activate the top subplot
plt.subplot(3, 1, 1)
plt.plot(tv.super_bowl, tv.avg_us_viewers, color='#648FFF')
plt.title('Average Number of US Viewers')
# Activate the middle subplot
plt.subplot(3, 1, 2)
plt.plot(tv.super_bowl, tv.rating_household, color='#DC267F')
plt.title('Household Rating')
# Activate the bottom subplot
plt.subplot(3, 1, 3)
plt.plot(tv.super_bowl, tv.ad_cost, color='#FFB000')
plt.title('Ad Cost')
plt.xlabel('SUPER BOWL')
# Improve the spacing between subplots
plt.tight_layout()
###Output
_____no_output_____
###Markdown
7. Halftime shows weren't always this great We can see viewers increased before ad costs did. Maybe the networks weren't very data savvy and were slow to react? Makes sense since DataCamp didn't exist back then. Another hypothesis: maybe halftime shows weren't that good in the earlier years? The modern spectacle of the Super Bowl has a lot to do with the cultural prestige of big halftime acts. I went down a YouTube rabbit hole and it turns out the old ones weren't up to today's standards. Some offenders: Super Bowl XXVI in 1992: A Frosty The Snowman rap performed by children. Super Bowl XXIII in 1989: An Elvis impersonator that did magic tricks and didn't even sing one Elvis song. Super Bowl XXI in 1987: Tap dancing ponies. (Okay, that's pretty awesome actually.) It turns out Michael Jackson's Super Bowl XXVII performance, one of the most watched events in American TV history, was when the NFL realized the value of Super Bowl airtime and decided they needed to sign big name acts from then on out. The halftime shows before MJ indeed weren't that impressive, which we can see by filtering our halftime_musician data.
###Code
# Display all halftime musicians for Super Bowls up to and including Super Bowl XXVII
halftime_musicians[halftime_musicians.super_bowl <= 27]
###Output
_____no_output_____
###Markdown
8. Who has the most halftime show appearances?Lots of marching bands. American jazz clarinetist Pete Fountain. Miss Texas 1973 playing a violin. Nothing against those performers, they're just simply not Beyoncé. To be fair, no one is.Let's see all of the musicians that have done more than one halftime show, including their performance counts.
###Code
# Count halftime show appearances for each musician and sort them from most to least
halftime_appearances = halftime_musicians.groupby('musician').count()['super_bowl'].reset_index()
halftime_appearances = halftime_appearances.sort_values('super_bowl', ascending=False)
halftime_appearances.head()
# Display musicians with more than one halftime show appearance
halftime_appearances[halftime_appearances['super_bowl'] > 1]
###Output
_____no_output_____
###Markdown
9. Who performed the most songs in a halftime show? The world famous Grambling State University Tiger Marching Band takes the crown with six appearances. Beyoncé, Justin Timberlake, Nelly, and Bruno Mars are the only post-Y2K musicians with multiple appearances (two each). From our previous inspections, the num_songs column has lots of missing values: a lot of the marching bands don't have num_songs entries, and for non-marching bands, missing data starts occurring at Super Bowl XX. Let's filter out marching bands by filtering out musicians with the word "Marching" in them and the word "Spirit" (a common naming convention for marching bands is "Spirit of [something]"). Then we'll filter for Super Bowls after Super Bowl XX to address the missing data issue, and see who performed the most songs.
###Code
# Filter out most marching bands
no_bands = halftime_musicians[~halftime_musicians.musician.str.contains('Marching')]
no_bands = no_bands[~no_bands.musician.str.contains('Spirit')]
# Plot a histogram of number of songs per performance
# Highest number of songs in a single performance (pandas .max() skips the missing values)
most_songs = int(no_bands['num_songs'].max())
plt.hist(no_bands.num_songs.dropna(), bins=10)
plt.xlabel('Number of Songs Per Halftime Show Performance')
plt.ylabel('Number of Musicians')
plt.show()
# Sort the non-band musicians by number of songs per appearance...
no_bands = no_bands.sort_values('num_songs', ascending=False)
# ...and display the top 15
display(no_bands.head(15))
###Output
_____no_output_____
###Markdown
10. ConclusionSo most non-band musicians do 1-3 songs per halftime show. It's important to note that the duration of the halftime show is fixed (roughly 12 minutes) so songs per performance is more a measure of how many hit songs you have. JT went off in 2018, wow. 11 songs! Diana Ross comes in second with 10 in her medley in 1996.In this notebook, we loaded, cleaned, then explored Super Bowl game, television, and halftime show data. We visualized the distributions of combined points, point differences, and halftime show performances using histograms. We used line plots to see how ad cost increases lagged behind viewership increases. And we discovered that blowouts do appear to lead to a drop in viewers.This year's Big Game will be here before you know it. Who do you think will win Super Bowl LIII?UPDATE: Spoiler alert.
###Code
# 2018-2019 conference champions
patriots = 'New England Patriots'
rams = 'Los Angeles Rams'
# Who will win Super Bowl LIII?
super_bowl_LIII_winner = patriots
print('The winner of Super Bowl LIII will be the', super_bowl_LIII_winner)
###Output
The winner of Super Bowl LIII will be the New England Patriots
|
import_ase_molecule.ipynb | ###Markdown
Import Molecule from ASE into AiiDA
###Code
from __future__ import print_function
from aiida import load_dbenv, is_dbenv_loaded
from aiida.backends import settings
if not is_dbenv_loaded():
    load_dbenv(profile=settings.AIIDADB_PROFILE)
from aiida.orm.data.structure import StructureData
import ase.build
from ase.collections import g2
import ipywidgets as ipw
from IPython.display import display, clear_output
import nglview
import StringIO
viewer = nglview.NGLWidget()
coords = ipw.HTML()
display(ipw.VBox([viewer, coords]))
def update_view():
global atoms
if hasattr(viewer, "component_0"):
viewer.component_0.remove_ball_and_stick()
viewer.component_0.remove_unitcell()
cid = viewer.component_0.id
viewer.remove_component(cid)
viewer.add_component(nglview.ASEStructure(atoms)) # adds ball+stick
viewer.add_unitcell()
viewer.center_view()
tmp = StringIO.StringIO()
atoms.write(tmp, format="xyz")
coords.value = "<pre>"+tmp.getvalue()+"</pre>"
tmp.close()
###Output
_____no_output_____
###Markdown
Step 1: Select Molecule
###Code
def on_mol_change(c):
global atoms
atoms = ase.build.molecule(inp_mol.value)
update_view()
inp_mol = ipw.Dropdown(options=g2.names, value="H2O")
inp_mol.observe(on_mol_change, names='value')
display(inp_mol)
on_mol_change(None)
###Output
_____no_output_____
###Markdown
Step 2: Define Cell
###Code
def on_click_vac(b):
global atoms
atoms.center(vacuum=inp_vac.value)
atoms.pbc = (True,True,True)
update_view()
inp_vac = ipw.FloatText(value=2.5)
lab_vac = ipw.Label(u'Vacuum [Å]:')
btn_vac = ipw.Button(description="Add Vacuum")
btn_vac.on_click(on_click_vac)
display(ipw.HBox([lab_vac, inp_vac, btn_vac]))
###Output
_____no_output_____
###Markdown
Step 3: Store in AiiDA Database
###Code
def on_click_store(b):
global atoms
with store_out:
clear_output()
s = StructureData(ase=atoms)
s.description = inp_descr.value
s.store()
print("Stored in AiiDA: "+repr(s))
store_out = ipw.Output()
inp_descr = ipw.Text(placeholder="Description (optional)")
btn_store = ipw.Button(description='Store in AiiDA')
btn_store.on_click(on_click_store)
display(ipw.HBox([btn_store, inp_descr]), store_out)
###Output
_____no_output_____ |
docs/source/example_with_the_list_of_inputs.ipynb | ###Markdown
Example : k times repetition with the list of k input files The DeepBiome package takes microbiome abundance data as input and uses the phylogenetic taxonomy to guide the decision of the optimal number of layers and neurons in the deep learning architecture. To use DeepBiome, you can experiment with (1) __k times repetition__ or (2) __k fold cross-validation__. For each experiment, we assume that the dataset is given by - __A list of k input files for k times repetition.__ - __One input file for k fold cross-validation.__ This notebook contains an example of (1) __k times repetition__ for the deep neural network using deepbiome. 1. Load library First, we have to load the deepbiome package. The deepbiome package is built on the tensorflow and keras libraries.
###Code
import os
import logging
import json
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from deepbiome import deepbiome
###Output
Using TensorFlow backend.
###Markdown
2. Prepare the dataset In this example, we assume that we have __a list of k input files for k times repetition.__ DeepBiome needs 4 data files as follows: 1. __the tree information__ 1. __the lists of the input files__ (each file has all samples' information for one repetition) 1. __the list of the names of input files__ 1. __y__ In addition, we can set __the training index for each repetition__. If we set the index file, DeepBiome builds the training set for each repetition based on each fold index in the index file. If not, DeepBiome will generate the index file locally. Each data file should have the csv format. Below is an example of each file. Example of the tree information First we need a file about the phylogenetic tree information. This tree information file should have the format below:
###Code
tree_information = pd.read_csv(resource_filename('deepbiome', 'tests/data/genus48_dic.csv'))
tree_information
###Output
_____no_output_____
###Markdown
Example of the list of the names of input files In this example, we assume that the input is given by a list of files. Each file has all samples' information for one repetition. If we want to use the list of the input files, we need to make a list of the names of each input file. Below is an example file for `k=1000` repetitions.
###Code
list_of_input_files = pd.read_csv(resource_filename('deepbiome', 'tests/data/gcount_list.csv'), header=None)
list_of_input_files.head()
list_of_input_files.tail()
###Output
_____no_output_____
###Markdown
Example of the lists of the input files Below is an example of each input file. This example has 1000 samples as rows, and the abundance of each microbiome as columns. Below is an example file for `k=1000` repetitions. This example is `gcount_0001.csv` for the first repetition in the list of the names of input files above. This file has the 1000 samples' microbiome abundances.
###Code
x_1 = pd.read_csv(resource_filename('deepbiome', 'tests/data/count/%s' % list_of_input_files.iloc[0,0]))
x_1.head()
x_1.tail()
###Output
_____no_output_____
###Markdown
Example of the Y (regression) This is an example of the output file for a regression problem. Each column contains the y samples for one repetition. The example file below has 1000 samples in rows and `k=1000` repetitions in columns.
###Code
y = pd.read_csv(resource_filename('deepbiome', 'tests/data/regression_y.csv'))
y.head()
y.tail()
###Output
_____no_output_____
###Markdown
For one repetition, DeepBiome will use one column.
###Code
y.iloc[:,0].head()
y.iloc[:,0].tail()
###Output
_____no_output_____
###Markdown
Example of the Y (classification) This is an example of the output file for a classification problem. The example file below has 1000 samples in rows and 1000 repetitions in columns.
###Code
y = pd.read_csv(resource_filename('deepbiome', 'tests/data/classification_y.csv'))
y.head()
y.tail()
###Output
_____no_output_____
###Markdown
For one repetition, DeepBiome will use one column.
###Code
y.iloc[:,0].head()
y.iloc[:,0].tail()
###Output
_____no_output_____
###Markdown
Example of the training index file for repetition For each repetition, we have to set the training and test set. If the index file is given, the deepbiome library sets the training set and test set based on the index file. Below is an example of the index file. Each column has the training indexes for one repetition. DeepBiome will only use the samples in this index set for training.
###Code
idxs = pd.read_csv(resource_filename('deepbiome', 'tests/data/regression_idx.csv'), dtype=int)
idxs.head()
idxs.tail()
###Output
_____no_output_____
###Markdown
Below is the index set for 1st repetition. From 1000 samples above, it uses 750 samples for training.
###Code
idxs.iloc[:,0].head()
idxs.iloc[:,0].tail()
###Output
_____no_output_____
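###Markdown
As a rough illustration (DeepBiome derives this split internally from the index file), the held-out test set for the 1st repetition is simply whatever is not listed in this training-index column:
###Code
# Illustration only: count training vs. held-out samples for the 1st repetition
train_idx_1 = idxs.iloc[:, 0].values
print('training samples for repetition 1:', len(train_idx_1))
print('held-out samples for repetition 1:', len(y) - len(train_idx_1))
###Output
_____no_output_____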
###Markdown
3. Prepare the configuration For detailed configuration, we use python dictionaries as inputs for the main training function. You can build the configuration information for the network training by: 1. the python dictionary format 1. the configuration file (.cfg). In this notebook, we show the python dictionary format configuration. Please check the detailed information about each option in the [documentation](https://young-won.github.io/deepbiome/prerequisites.html#configuration) For preparing the configuration about the network information (`network_info`) For giving the information about the training hyper-parameters, you have to provide the dictionary for configuration to the `network_info` field. Your configuration for the network training should include the information about:
###Code
network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.01',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'lr': '0.01',
'decay': '0.001',
'loss': 'binary_crossentropy',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'texa_selection_metrics': 'accuracy, sensitivity, specificity, gmeasure',
'network_class': 'DeepBiomeNetwork',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'normalizer': 'normalize_minmax',
},
'training_info': {
'batch_size': '50',
'epochs': '100'
},
'validation_info': {
'batch_size': 'None',
'validation_size': '0.2'
},
'test_info': {
'batch_size': 'None'
}
}
###Output
_____no_output_____
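###Markdown
The `.cfg` route mentioned above is not used in this notebook. Purely as an illustration of how a nested dictionary of settings maps onto an `.ini`-style file (the exact `.cfg` schema that deepbiome expects is described in the linked documentation), the same values could be written out with Python's standard `configparser`:
###Code
# Illustration only: serialize the nested settings dictionary to an .ini-style file
import configparser

config = configparser.ConfigParser()
config.read_dict(network_info)  # outer keys become sections, inner keys become options
with open('example_network_info.cfg', 'w') as f:
    config.write(f)
###Output
_____no_output_____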
###Markdown
For preparing the configuration about the path information (`path_info`) To give the information about the path of the dataset and the paths for saving the trained weights and the evaluation results, we provide the dictionary for configuration to the `path_info` field. Your configuration for the path information should include the information about:
###Code
path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'idx_path': resource_filename('deepbiome', 'tests/data/classification_idx.csv'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
'y_path': 'classification_y.csv'
},
'model_info': {
'evaluation': 'eval.npy',
'history': 'hist.json',
'model_dir': './example_result/',
'weight': 'weight.h5'
}
}
###Output
_____no_output_____
###Markdown
4. DeepBiome TrainingNow we can train the DeepBiome network based on the configurations. For logging, we use the python logging library.
###Code
logging.basicConfig(format = '[%(name)-8s|%(levelname)s|%(filename)s:%(lineno)s] %(message)s',
level=logging.DEBUG)
log = logging.getLogger()
###Output
_____no_output_____
###Markdown
The `deepbiome_train` function provides the test evaluation, the train evaluation, and the deepbiome network instance. If we set `number_of_fold`, then the deepbiome package performs cross-validation based on that value. If not, the deepbiome package performs cross-validation based on the index file. If neither the `number_of_fold` option nor the index file is given, then the library performs leave-one-out cross-validation (LOOCV).
###Code
test_evaluation, train_evaluation, network = deepbiome.deepbiome_train(log, network_info, path_info, number_of_fold=3)
###Output
[root |INFO|deepbiome.py:100] -----------------------------------------------------------------
[root |INFO|deepbiome.py:137] -------1 simulation start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:147] -----------------------------------------------------------------
[root |INFO|deepbiome.py:148] Build network for 1 simulation
[root |INFO|build_network.py:513] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:514] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:518] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:519] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:526] Genus: 48
[root |INFO|build_network.py:526] Family: 40
[root |INFO|build_network.py:526] Order: 23
[root |INFO|build_network.py:526] Class: 17
[root |INFO|build_network.py:526] Phylum: 9
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:530] Phylogenetic_tree_dict info: ['Phylum', 'Family', 'Order', 'Class', 'Genus', 'Number']
[root |INFO|build_network.py:531] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:541] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:541] Build edge weights between [Family, Order]
[root |INFO|build_network.py:541] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:541] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:554] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:570] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:571] Build network based on phylogenetic tree information
[root |INFO|build_network.py:572] ------------------------------------------------------------------------------------------
###Markdown
`deepbiome_train` saves the trained model weights, evaluation results and history based on the path information from the configuration.From the example above, we can check that `hist_*.json`, `weight_*.h5`, `test_eval.npy`, `train_eval.npy` files were saved.
###Code
os.listdir(path_info['model_info']['model_dir'])
###Output
_____no_output_____
###Markdown
Let's check the history files.
###Code
with open('./%s/hist_0.json' % path_info['model_info']['model_dir'], 'r') as f:
history = json.load(f)
plt.plot(history['val_loss'], label='Validation')
plt.plot(history['loss'], label='Training')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
###Output
_____no_output_____
###Markdown
Test evaluation and train evaluation are numpy arrays of shape (number of folds, number of evaluation measures).
###Code
test_evaluation
train_evaluation
###Output
_____no_output_____
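###Markdown
To make these arrays easier to read (a convenience step on top of the returned values, not part of the DeepBiome API), we can label the columns with the loss and metric names from `network_info`:
###Code
# Label the evaluation columns: loss first, then the metrics in the configured order
metric_names = ['loss'] + [m.strip() for m in network_info['model_info']['metrics'].split(',')]
display(pd.DataFrame(test_evaluation, columns=metric_names))
display(pd.DataFrame(train_evaluation, columns=metric_names))
###Output
_____no_output_____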
###Markdown
5. Load the pre-trained network for training If you have a pre-trained model, you can use the pre-trained weights for the next training. To use pre-trained weights, set the `warm_start` option in `training_info` and add the file path of the pre-trained weights in the `warm_start_model` option. Below is the example:
###Code
warm_start_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.01',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'decay': '0.001',
'loss': 'binary_crossentropy',
'lr': '0.01',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'network_class': 'DeepBiomeNetwork',
'normalizer': 'normalize_minmax',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'texa_selection_metrics': 'accuracy, sensitivity, specificity, gmeasure'
},
'training_info': {
'warm_start':'True',
'warm_start_model':'./example_result/weight.h5',
'batch_size': '200',
'epochs': '100'
},
'validation_info': {
'batch_size': 'None',
'validation_size': '0.2'
},
'test_info': {
'batch_size': 'None'
}
}
test_evaluation, train_evaluation, network = deepbiome.deepbiome_train(log, warm_start_network_info, path_info,
number_of_fold=3)
###Output
[root |INFO|deepbiome.py:100] -----------------------------------------------------------------
[root |INFO|deepbiome.py:137] -------1 simulation start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:147] -----------------------------------------------------------------
[root |INFO|deepbiome.py:148] Build network for 1 simulation
[root |INFO|build_network.py:513] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:514] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:518] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:519] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:526] Genus: 48
[root |INFO|build_network.py:526] Family: 40
[root |INFO|build_network.py:526] Order: 23
[root |INFO|build_network.py:526] Class: 17
[root |INFO|build_network.py:526] Phylum: 9
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:530] Phylogenetic_tree_dict info: ['Phylum', 'Family', 'Order', 'Class', 'Genus', 'Number']
[root |INFO|build_network.py:531] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:541] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:541] Build edge weights between [Family, Order]
[root |INFO|build_network.py:541] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:541] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:554] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:570] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:571] Build network based on phylogenetic tree information
[root |INFO|build_network.py:572] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:648] ------------------------------------------------------------------------------------------
###Markdown
Let's check the history plot again.
###Code
with open('./%s/hist_0.json' % path_info['model_info']['model_dir'], 'r') as f:
history = json.load(f)
plt.plot(history['val_loss'], label='Validation')
plt.plot(history['loss'], label='Training')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
###Output
_____no_output_____
###Markdown
6. Load the pre-trained network for testing If you want to test the trained model, you can use the `deepbiome_test` function. If you use the index file, this function provides the evaluation using the test index (the index set not included in the index file) for each fold. If not, this function provides the evaluation using the whole samples. If `number_of_fold` is set to `k`, the function will test the model only with the first `k` folds.
###Code
test_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.01',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'lr': '0.01',
'decay': '0.001',
'loss': 'binary_crossentropy',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'texa_selection_metrics': 'accuracy, sensitivity, specificity, gmeasure',
'network_class': 'DeepBiomeNetwork',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'normalizer': 'normalize_minmax',
},
'test_info': {
'batch_size': 'None'
}
}
test_path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'idx_path': resource_filename('deepbiome', 'tests/data/classification_idx.csv'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
'y_path': 'classification_y.csv'
},
'model_info': {
'evaluation': 'eval.npy',
'model_dir': './example_result/',
'weight': 'weight.h5'
}
}
evaluation = deepbiome.deepbiome_test(log, test_network_info, test_path_info, number_of_fold=3)
###Output
[root |INFO|deepbiome.py:262] -----------------------------------------------------------------
[root |INFO|deepbiome.py:294] Test Evaluation : ['loss' 'binary_accuracy' 'sensitivity' 'specificity' 'gmeasure' 'auc']
[root |INFO|deepbiome.py:296] -------1 fold test start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:306] -----------------------------------------------------------------
[root |INFO|deepbiome.py:307] Build network for 1 fold testing
[root |INFO|build_network.py:513] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:514] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:518] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:519] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:526] Genus: 48
[root |INFO|build_network.py:526] Family: 40
[root |INFO|build_network.py:526] Order: 23
[root |INFO|build_network.py:526] Class: 17
[root |INFO|build_network.py:526] Phylum: 9
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:530] Phylogenetic_tree_dict info: ['Phylum', 'Family', 'Order', 'Class', 'Genus', 'Number']
[root |INFO|build_network.py:531] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:541] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:541] Build edge weights between [Family, Order]
[root |INFO|build_network.py:541] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:541] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:554] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:570] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:571] Build network based on phylogenetic tree information
[root |INFO|build_network.py:572] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:648] ------------------------------------------------------------------------------------------
###Markdown
This function provides the evaluation result as a numpy array of shape (number of folds, number of evaluation measures).
###Code
print(' %s' % ''.join(['%16s'%'loss']+ ['%16s'%s.strip() for s in network_info['model_info']['metrics'].split(',')]))
print('Mean: %s' % ''.join(['%16.4f'%v for v in np.mean(evaluation, axis=0)]))
print('Std : %s' % ''.join(['%16.4f'%v for v in np.std(evaluation, axis=0)]))
###Output
loss binary_accuracy sensitivity specificity gmeasure auc
Mean: 0.2632 0.9040 0.9597 0.7801 0.8646 0.9561
Std : 0.0265 0.0214 0.0009 0.0590 0.0333 0.0067
###Markdown
7. Load the pre-trained network for prediction For prediction using the pre-trained model, we can use the `deepbiome_prediction` function. If `number_of_fold` is set to `k`, the function will predict the outputs only for the samples of the first `k` folds.
###Code
prediction_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.01',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'decay': '0.001',
'loss': 'binary_crossentropy',
'lr': '0.01',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'network_class': 'DeepBiomeNetwork',
'normalizer': 'normalize_minmax',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'texa_selection_metrics': 'accuracy, sensitivity, specificity, gmeasure'
},
'test_info': {
'batch_size': 'None'
}
}
prediction_path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
},
'model_info': {
'model_dir': './example_result/',
'weight': 'weight_0.h5'
}
}
prediction = deepbiome.deepbiome_prediction(log, prediction_network_info, prediction_path_info,
num_classes = 1, number_of_fold=3)
prediction.shape
prediction[0,:10]
###Output
_____no_output_____
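###Markdown
The model here is a binary classifier, so the returned values are predicted probabilities. As a small follow-up (not part of the original example), they can be turned into hard 0/1 class labels with a 0.5 threshold:
###Code
# Threshold the predicted probabilities of the 1st repetition at 0.5 (illustration only)
predicted_labels = (prediction[0] > 0.5).astype(int)
predicted_labels[:10]
###Output
_____no_output_____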
###Markdown
Example : k times repetition with the list of k input files The DeepBiome package takes microbiome abundance data as input and uses the phylogenetic taxonomy to guide the decision of the optimal number of layers and neurons in the deep learning architecture. To use DeepBiome, you can experiment with (1) __k times repetition__ or (2) __k fold cross-validation__. For each experiment, we assume that the dataset is given by - __A list of k input files for k times repetition.__ - __One input file for k fold cross-validation.__ This notebook contains an example of (1) __k times repetition__ for the deep neural network using deepbiome. 1. Load library First, we load the DeepBiome package. The DeepBiome package is built on the tensorflow and keras libraries.
###Code
import os
import logging
import json
from pkg_resources import resource_filename
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pd.set_option('display.float_format', lambda x: '%.03f' % x)
np.set_printoptions(formatter={'float_kind':lambda x: '%.03f' % x})
from deepbiome import deepbiome
###Output
Using TensorFlow backend.
###Markdown
2. Prepare the dataset In this example, we assume that we have __a list of k input files for k times repetition.__ DeepBiome needs 4 data files as follows: 1. __the tree information__ 1. __the lists of the input files__ (each file has all samples' information for one repetition) 1. __the list of the names of input files__ 1. __y__ In addition, we can set __the training index for each repetition__. If we set the index file, DeepBiome builds the training set for each repetition based on each column of the index file. If not, DeepBiome will generate the index file locally. Each data file should have the csv format as follows: Example of the tree information First we need a file about the phylogenetic tree information. This tree information file should have the format below:
###Code
tree_information = pd.read_csv(resource_filename('deepbiome', 'tests/data/genus48_dic.csv'))
tree_information
###Output
_____no_output_____
###Markdown
This file has `.csv` format below:
###Code
with open(resource_filename('deepbiome', 'tests/data/genus48_dic.csv')) as f:
print(f.read())
###Output
Genus,Family,Order,Class,Phylum
Streptococcus,Streptococcaceae,Lactobacillales,Bacilli,Firmicutes
Tropheryma,Cellulomonadaceae,Actinomycetales,Actinobacteria,Actinobacteria
Veillonella,Veillonellaceae,Selenomonadales,Negativicutes,Firmicutes
Actinomyces,Actinomycetaceae,Actinomycetales,Actinobacteria,Actinobacteria
Flavobacterium,Flavobacteriaceae,Flavobacteriales,Flavobacteria,Bacteroidetes
Prevotella,Prevotellaceae,Bacteroidales,Bacteroidia,Bacteroidetes
Porphyromonas,Porphyromonadaceae,Bacteroidales,Bacteroidia,Bacteroidetes
Parvimonas,Clostridiales_Incertae_Sedis_XI,Clostridiales,Clostridia,Firmicutes
Fusobacterium,Fusobacteriaceae,Fusobacteriales,Fusobacteria,Fusobacteria
Propionibacterium,Propionibacteriaceae,Actinomycetales,Actinobacteria,Actinobacteria
Gemella,Bacillales_Incertae_Sedis_XI,Bacillales,Bacilli,Firmicutes
Rothia,Micrococcaceae,Actinomycetales,Actinobacteria,Actinobacteria
Granulicatella,Carnobacteriaceae,Lactobacillales,Bacilli,Firmicutes
Neisseria,Neisseriaceae,Neisseriales,Betaproteobacteria,Proteobacteria
Lactobacillus,Lactobacillaceae,Lactobacillales,Bacilli,Firmicutes
Megasphaera,Veillonellaceae,Selenomonadales,Negativicutes,Firmicutes
Catonella,Lachnospiraceae,Clostridiales,Clostridia,Firmicutes
Atopobium,Coriobacteriaceae,Coriobacteriales,Actinobacteria,Actinobacteria
Campylobacter,Campylobacteraceae,Campylobacterales,Epsilonproteobacteria,Proteobacteria
Capnocytophaga,Flavobacteriaceae,Flavobacteriales,Flavobacteria,Bacteroidetes
Solobacterium,Erysipelotrichaceae,Erysipelotrichales,Erysipelotrichia,Firmicutes
Moryella,Lachnospiraceae,Clostridiales,Clostridia,Firmicutes
TM7_genera_incertae_sedis,TM7_genera_incertae_sedis,TM7_genera_incertae_sedis,TM7_genera_incertae_sedis,TM7
Staphylococcus,Staphylococcaceae,Bacillales,Bacilli,Firmicutes
Filifactor,Peptostreptococcaceae,Clostridiales,Clostridia,Firmicutes
Oribacterium,Lachnospiraceae,Clostridiales,Clostridia,Firmicutes
Burkholderia,Burkholderiaceae,Burkholderiales,Betaproteobacteria,Proteobacteria
Sneathia,Leptotrichiaceae,Fusobacteriales,Fusobacteria,Fusobacteria
Treponema,Spirochaetaceae,Spirochaetales,Spirochaetes,Spirochaetes
Moraxella,Moraxellaceae,Pseudomonadales,Gammaproteobacteria,Proteobacteria
Haemophilus,Pasteurellaceae,Pasteurellales,Gammaproteobacteria,Proteobacteria
Selenomonas,Veillonellaceae,Selenomonadales,Negativicutes,Firmicutes
Corynebacterium,Corynebacteriaceae,Actinomycetales,Actinobacteria,Actinobacteria
Rhizobium,Rhizobiaceae,Rhizobiales,Alphaproteobacteria,Proteobacteria
Bradyrhizobium,Bradyrhizobiaceae,Rhizobiales,Alphaproteobacteria,Proteobacteria
Methylobacterium,Methylobacteriaceae,Rhizobiales,Alphaproteobacteria,Proteobacteria
OD1_genera_incertae_sedis,OD1_genera_incertae_sedis,OD1_genera_incertae_sedis,OD1_genera_incertae_sedis,OD1
Finegoldia,Clostridiales_Incertae_Sedis_XI,Clostridiales,Clostridia,Firmicutes
Microbacterium,Microbacteriaceae,Actinomycetales,Actinobacteria,Actinobacteria
Sphingomonas,Sphingomonadaceae,Sphingomonadales,Alphaproteobacteria,Proteobacteria
Chryseobacterium,Flavobacteriaceae,Flavobacteriales,Flavobacteria,Bacteroidetes
Bacteroides,Bacteroidaceae,Bacteroidales,Bacteroidia,Bacteroidetes
Bdellovibrio,Bdellovibrionaceae,Bdellovibrionales,Deltaproteobacteria,Proteobacteria
Streptophyta,Chloroplast,Chloroplast,Chloroplast,Cyanobacteria_Chloroplast
Lachnospiracea_incertae_sedis,Lachnospiraceae,Clostridiales,Clostridia,Firmicutes
Paracoccus,Rhodobacteraceae,Rhodobacterales,Alphaproteobacteria,Proteobacteria
Fastidiosipila,Ruminococcaceae,Clostridiales,Clostridia,Firmicutes
Pseudonocardia,Pseudonocardiaceae,Actinomycetales,Actinobacteria,Actinobacteria
###Markdown
Example of the list of the names of input files In this example, we assume that the input is given by a list of files. Each file has all samples' information for one repetition. If we want to use the list of the input files, we need to make a list of the names of each input file. Below is an example file for `k=3` repetitions.
###Code
list_of_input_files = pd.read_csv(resource_filename('deepbiome', 'tests/data/gcount_list.csv'))
list_of_input_files
###Output
_____no_output_____
###Markdown
Example of the lists of the input files Below is an example of each input file. This example has 1000 samples as rows, and the abundances of each microbiome as columns. Below is an example file for `k=3` repetitions. This example is `gcount_0001.csv` for the first repetition in the list of the names of input files above. This file has the 1000 samples' microbiome abundances. __The order of the microbiomes should be the same as the order of the microbiomes at the Genus level in the tree information above.__
###Code
x_1 = pd.read_csv(resource_filename('deepbiome', 'tests/data/count/%s' % list_of_input_files.iloc[0,0]))
x_1.head()
x_1.tail()
###Output
_____no_output_____
###Markdown
This file has .csv format below:
###Code
with open(resource_filename('deepbiome', 'tests/data/count/%s' % list_of_input_files.iloc[0,0])) as f:
x_csv = f.readlines()
_ = [print(l) for l in x_csv[:10]]
###Output
"Streptococcus","Tropheryma","Veillonella","Actinomyces","Flavobacterium","Prevotella","Porphyromonas","Parvimonas","Fusobacterium","Propionibacterium","Gemella","Rothia","Granulicatella","Neisseria","Lactobacillus","Megasphaera","Catonella","Atopobium","Campylobacter","Capnocytophaga","Solobacterium","Moryella","TM7_genera_incertae_sedis","Staphylococcus","Filifactor","Oribacterium","Burkholderia","Sneathia","Treponema","Moraxella","Haemophilus","Selenomonas","Corynebacterium","Rhizobium","Bradyrhizobium","Methylobacterium","OD1_genera_incertae_sedis","Finegoldia","Microbacterium","Sphingomonas","Chryseobacterium","Bacteroides","Bdellovibrio","Streptophyta","Lachnospiracea_incertae_sedis","Paracoccus","Fastidiosipila","Pseudonocardia"
841,0,813,505,5,3224,0,362,11,65,156,1,55,0,1,20,382,1,333,24,80,43,309,2,3,4,0,1,32,0,2,4,382,0,0,96,23,0,0,87,0,0,0,0,0,0,0,2133
1445,0,1,573,0,1278,82,85,69,154,436,3,0,61,440,0,394,83,33,123,0,49,414,0,0,37,0,0,42,0,0,384,27,0,0,0,146,0,0,1,2,0,0,0,0,0,0,3638
1259,0,805,650,0,1088,0,0,74,0,155,228,430,765,0,0,11,102,68,90,77,83,322,10,0,7,0,122,76,0,1,25,0,0,0,44,13,0,0,2,8,1,39,0,0,0,0,3445
982,0,327,594,0,960,81,19,9,0,45,457,1049,0,3,450,19,170,388,147,0,0,41,63,0,1,0,0,121,0,0,1,0,0,0,0,344,0,157,1,0,4,60,0,0,0,0,3507
1162,0,130,969,163,1515,167,4,162,3,12,0,48,73,93,259,52,0,201,85,14,14,434,2,0,0,0,0,187,0,0,188,45,0,0,0,4,0,0,9,0,0,0,0,60,0,0,3945
1956,37,41,661,47,1555,374,7,142,19,61,226,0,30,52,0,6,480,142,148,9,575,12,0,0,0,0,3,168,0,56,50,0,0,0,98,989,0,0,12,0,0,0,0,0,0,0,2044
1037,14,83,1595,132,305,103,174,1195,0,410,224,1,320,26,0,476,0,7,37,46,61,20,0,0,0,0,0,226,0,239,8,1,0,0,0,0,188,0,20,4,0,4,0,0,0,0,3044
641,0,172,179,0,1312,84,9,81,376,128,223,160,0,532,155,89,355,1,282,0,0,25,0,0,43,0,9,311,0,0,0,0,0,0,0,845,0,0,8,0,0,0,0,0,0,0,3980
852,146,504,99,2,376,116,152,67,0,120,3,23,2,34,0,127,75,240,60,42,0,9,0,15,0,62,0,13,0,197,187,396,0,0,20,51,0,0,3,0,0,0,0,0,0,0,6007
###Markdown
Example of the Y (regression)This is an example of the output file for regression problem. One column contains outputs of the samples for one repetition. Below example file has 1000 samples in rows, `k=3` repetitions in columns.
###Code
y = pd.read_csv(resource_filename('deepbiome', 'tests/data/regression_y.csv'))
y.head()
y.tail()
###Output
_____no_output_____
###Markdown
For one repetition, DeepBiome will use one column.
###Code
y.iloc[:,0].head()
y.iloc[:,0].tail()
###Output
_____no_output_____
###Markdown
Example of the Y (classification)This is an example of the output file for classification problem. Below example file has 1000 samples in rows, 3 repetitions in columns.
###Code
y = pd.read_csv(resource_filename('deepbiome', 'tests/data/classification_y.csv'))
y.head()
y.tail()
###Output
_____no_output_____
###Markdown
For one repetition, DeepBiome will use one column.
###Code
y.iloc[:,0].head()
y.iloc[:,0].tail()
###Output
_____no_output_____
###Markdown
Example of the training index file for repetition For each repetition, we have to set the training and test set. If the index file is given, DeepBiome sets the training set and test set based on the index file. Below is an example of the index file. Each column has the training indexes for one repetition. DeepBiome will only use the samples in this index set for training.
###Code
idxs = pd.read_csv(resource_filename('deepbiome', 'tests/data/regression_idx.csv'), dtype=int)
idxs.head()
idxs.tail()
###Output
_____no_output_____
###Markdown
Below is the index set for 1st repetition. From 1000 samples above, it uses 750 samples for training.
###Code
idxs.iloc[:,0].head()
idxs.iloc[:,0].tail()
###Output
_____no_output_____
###Markdown
3. Prepare the configuration For detailed configuration, we can build the configuration information for the network training by: 1. the python dictionary format 1. the configuration file (.cfg). In this notebook, we show the python dictionary format configuration. Please check the detailed information about each option in the [documentation](https://young-won.github.io/deepbiome/prerequisites.html#configuration) For preparing the configuration about the network information (`network_info`) To give the information about the training process, we provide a dictionary of configurations to the `network_info` field. Your configuration for the network training should include the information about:
###Code
network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'lr': '0.01',
'decay': '0.001',
'loss': 'binary_crossentropy',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'taxa_selection_metrics': 'sensitivity, specificity, gmeasure, accuracy',
'network_class': 'DeepBiomeNetwork',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'normalizer': 'normalize_minmax',
},
'training_info': {
'batch_size': '50',
'epochs': '10',
'callbacks': 'ModelCheckpoint',
'monitor': 'val_loss',
'mode' : 'min',
'min_delta': '1e-7',
},
'validation_info': {
'batch_size': 'None',
'validation_size': '0.2'
},
'test_info': {
'batch_size': 'None'
}
}
###Output
_____no_output_____
###Markdown
For preparing the configuration about the path information (`path_info`) To give the information about the path of the dataset and the paths for saving the trained weights and the evaluation results, we provide a dictionary of configurations to the `path_info` field. Your configuration for the path information should include the information about:
###Code
path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'idx_path': resource_filename('deepbiome', 'tests/data/classification_idx.csv'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
'y_path': 'classification_y.csv'
},
'model_info': {
'evaluation': 'eval.npy',
'history': 'hist.json',
'model_dir': './example_result/',
'weight': 'weight.h5'
}
}
###Output
_____no_output_____
###Markdown
4. DeepBiome TrainingNow we can train the DeepBiome network based on the configurations. For logging, we use the python logging library.
###Code
logging.basicConfig(format = '[%(name)-8s|%(levelname)s|%(filename)s:%(lineno)s] %(message)s',
level=logging.DEBUG)
log = logging.getLogger()
###Output
_____no_output_____
###Markdown
The `deepbiome_train` function provides the test evaluation, the train evaluation, and the deepbiome network instance. If we set `number_of_fold`, then DeepBiome performs cross-validation based on that value. If not, the DeepBiome package performs cross-validation based on the index file. If both the `number_of_fold` option and the index file are missing, then the library performs leave-one-out cross-validation (LOOCV).
###Code
test_evaluation, train_evaluation, network = deepbiome.deepbiome_train(log, network_info, path_info, number_of_fold=None)
###Output
[root |INFO|deepbiome.py:115] -----------------------------------------------------------------
[root |INFO|deepbiome.py:153] -------1 simulation start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:164] -----------------------------------------------------------------
[root |INFO|deepbiome.py:165] Build network for 1 simulation
[root |INFO|build_network.py:521] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:522] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:528] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:537] Genus: 48
[root |INFO|build_network.py:537] Family: 40
[root |INFO|build_network.py:537] Order: 23
[root |INFO|build_network.py:537] Class: 17
[root |INFO|build_network.py:537] Phylum: 9
[root |INFO|build_network.py:546] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:547] Phylogenetic_tree_dict info: ['Phylum', 'Order', 'Family', 'Class', 'Number', 'Genus']
[root |INFO|build_network.py:548] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:558] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:558] Build edge weights between [Family, Order]
[root |INFO|build_network.py:558] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:558] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:571] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:586] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:587] Build network based on phylogenetic tree information
[root |INFO|build_network.py:588] ------------------------------------------------------------------------------------------
###Markdown
`deepbiome_train` saves the trained model weights, the evaluation results and the training history based on the path information from the configuration. Let's check the history files.
###Code
with open('./%s/hist_0.json' % path_info['model_info']['model_dir'], 'r') as f:
history = json.load(f)
plt.plot(history['val_loss'], label='Validation')
plt.plot(history['loss'], label='Training')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
###Output
_____no_output_____
###Markdown
The test evaluation and the train evaluation are numpy arrays of shape (number of repetitions, number of evaluation measures).
###Code
test_evaluation
train_evaluation
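# A minimal summary sketch (assumption: the columns follow the order used later in this notebook,
# i.e. the loss first, then the metrics listed in network_info['model_info']['metrics']).
metric_names = ['loss'] + [m.strip() for m in network_info['model_info']['metrics'].split(',')]
print('      %s' % ''.join(['%16s' % n for n in metric_names]))
print('Test: %s' % ''.join(['%16.3f' % v for v in np.mean(test_evaluation, axis=0)]))
print('Train:%s' % ''.join(['%16.3f' % v for v in np.mean(train_evaluation, axis=0)]))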
###Output
_____no_output_____
###Markdown
5. Load the pre-trained network for training. If you have a pre-trained model, you can warm-start the next training from the pre-trained weights by setting the `warm_start` option in `training_info` to `True`. The file path of the pre-trained weights is passed in the `warm_start_model` option. Below is an example:
###Code
warm_start_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'decay': '0.001',
'loss': 'binary_crossentropy',
'lr': '0.01',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure',
'network_class': 'DeepBiomeNetwork',
'normalizer': 'normalize_minmax',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'taxa_selection_metrics': 'sensitivity, specificity, gmeasure, accuracy',
},
'training_info': {
'warm_start':'True',
'warm_start_model':'./example_result/weight.h5',
'batch_size': '50',
'epochs': '10',
'callbacks': 'ModelCheckpoint',
'monitor': 'val_loss',
'mode' : 'min',
'min_delta': '1e-7',
},
'validation_info': {
'batch_size': 'None',
'validation_size': '0.2'
},
'test_info': {
'batch_size': 'None'
}
}
test_evaluation, train_evaluation, network = deepbiome.deepbiome_train(log, warm_start_network_info, path_info,
number_of_fold=None)
###Output
[root |INFO|deepbiome.py:115] -----------------------------------------------------------------
[root |INFO|deepbiome.py:153] -------1 simulation start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:164] -----------------------------------------------------------------
[root |INFO|deepbiome.py:165] Build network for 1 simulation
[root |INFO|build_network.py:521] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:522] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:528] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:537] Genus: 48
[root |INFO|build_network.py:537] Family: 40
[root |INFO|build_network.py:537] Order: 23
[root |INFO|build_network.py:537] Class: 17
[root |INFO|build_network.py:537] Phylum: 9
[root |INFO|build_network.py:546] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:547] Phylogenetic_tree_dict info: ['Phylum', 'Order', 'Family', 'Class', 'Number', 'Genus']
[root |INFO|build_network.py:548] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:558] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:558] Build edge weights between [Family, Order]
[root |INFO|build_network.py:558] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:558] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:571] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:586] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:587] Build network based on phylogenetic tree information
[root |INFO|build_network.py:588] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:670] ------------------------------------------------------------------------------------------
###Markdown
Let's check the history plot again.
###Code
with open('./%s/hist_0.json' % path_info['model_info']['model_dir'], 'r') as f:
history = json.load(f)
plt.plot(history['val_loss'], label='Validation')
plt.plot(history['loss'], label='Training')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.show()
###Output
_____no_output_____
###Markdown
6. Load the pre-trained network for testing. To test the trained model, we can use the `deepbiome_test` function. If you use the index file (`idx_path`), this function provides the evaluation using the test indices (the index set not included in the index file) for each fold. If not, it evaluates on the whole sample set. If `number_of_fold` is set to `k`, the function tests the model only with the first `k` repetitions. We can also use testing metrics that differ from the training metrics; in the example below, we additionally use the `AUC` metric.
###Code
test_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'lr': '0.01',
'decay': '0.001',
'loss': 'binary_crossentropy',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'taxa_selection_metrics': 'sensitivity, specificity, gmeasure, accuracy',
'network_class': 'DeepBiomeNetwork',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'normalizer': 'normalize_minmax',
},
'test_info': {
'batch_size': 'None'
}
}
test_path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'idx_path': resource_filename('deepbiome', 'tests/data/classification_idx.csv'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
'y_path': 'classification_y.csv'
},
'model_info': {
'evaluation': 'eval.npy',
'model_dir': './example_result/',
'weight': 'weight.h5'
}
}
evaluation = deepbiome.deepbiome_test(log, test_network_info, test_path_info, number_of_fold=None)
###Output
[root |INFO|deepbiome.py:293] -----------------------------------------------------------------
[root |INFO|deepbiome.py:325] Test Evaluation : ['loss' 'binary_accuracy' 'sensitivity' 'specificity' 'gmeasure' 'auc']
[root |INFO|deepbiome.py:327] -------1 fold test start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:338] -----------------------------------------------------------------
[root |INFO|deepbiome.py:339] Build network for 1 fold testing
[root |INFO|build_network.py:521] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:522] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:528] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:537] Genus: 48
[root |INFO|build_network.py:537] Family: 40
[root |INFO|build_network.py:537] Order: 23
[root |INFO|build_network.py:537] Class: 17
[root |INFO|build_network.py:537] Phylum: 9
[root |INFO|build_network.py:546] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:547] Phylogenetic_tree_dict info: ['Phylum', 'Order', 'Family', 'Class', 'Number', 'Genus']
[root |INFO|build_network.py:548] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:558] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:558] Build edge weights between [Family, Order]
[root |INFO|build_network.py:558] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:558] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:571] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:586] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:587] Build network based on phylogenetic tree information
[root |INFO|build_network.py:588] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:670] ------------------------------------------------------------------------------------------
###Markdown
This function provides the evaluation result as a numpy array with shape (number of repetitions, number of evaluation measures).
###Code
print(' %s' % ''.join(['%16s'%'loss']+ ['%16s'%s.strip() for s in network_info['model_info']['metrics'].split(',')]))
print('Mean: %s' % ''.join(['%16.3f'%v for v in np.mean(evaluation, axis=0)]))
print('Std : %s' % ''.join(['%16.3f'%v for v in np.std(evaluation, axis=0)]))
###Output
loss binary_accuracy sensitivity specificity gmeasure auc
Mean: 0.573 0.719 0.986 0.103 0.181 0.750
Std : 0.052 0.022 0.020 0.145 0.256 0.170
###Markdown
7. Load the pre-trained network for prediction. If you want to predict using a pre-trained model, you can use the `deepbiome_prediction` function. If `number_of_fold` is set to `k`, the function predicts outputs only for the first `k` repetitions. If `change_weight_for_each_fold` is set to `False`, the function predicts the output of every repetition with the same weight file from the given path. If `change_weight_for_each_fold` is set to `True`, the function predicts each repetition's output with that repetition's own weight file. If `get_y=True`, the function provides pairs of (prediction, true output) as the output, with shape `(n_samples, 2, n_classes)`. If `get_y=False`, the function provides predictions only, with shape `(n_samples, n_classes)`.
7.1 Prediction with a fixed weight. If we want to predict new data from one pre-trained model, we can use the options below. We fix the weight `weight_0.h5` and predict on the whole sample set (without using the index file).
###Code
prediction_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'decay': '0.001',
'loss': 'binary_crossentropy',
'lr': '0.01',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'network_class': 'DeepBiomeNetwork',
'normalizer': 'normalize_minmax',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'taxa_selection_metrics': 'sensitivity, specificity, gmeasure, accuracy',
},
'test_info': {
'batch_size': 'None'
}
}
prediction_path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
},
'model_info': {
'model_dir': './example_result/',
'weight': 'weight_0.h5'
}
}
prediction = deepbiome.deepbiome_prediction(log, prediction_network_info, prediction_path_info,
num_classes = 1, number_of_fold=None)
prediction.shape
prediction[0,:10]
###Output
_____no_output_____
###Markdown
7.2 Prediction with each fold's weight. If we want to predict the test outputs of each repetition, we can use the options below. The example below shows how to predict the outputs of the 5 repetitions. We set `idx_path` to the index file `classification_idx.csv` so that only the test set of each repetition is predicted.
###Code
prediction_network_info = {
'architecture_info': {
'batch_normalization': 'False',
'drop_out': '0',
'weight_initial': 'glorot_uniform',
'weight_l1_penalty':'0.',
'weight_decay': 'phylogenetic_tree',
},
'model_info': {
'decay': '0.001',
'loss': 'binary_crossentropy',
'lr': '0.01',
'metrics': 'binary_accuracy, sensitivity, specificity, gmeasure, auc',
'network_class': 'DeepBiomeNetwork',
'normalizer': 'normalize_minmax',
'optimizer': 'adam',
'reader_class': 'MicroBiomeClassificationReader',
'taxa_selection_metrics': 'sensitivity, specificity, gmeasure, accuracy',
},
'test_info': {
'batch_size': 'None'
}
}
prediction_path_info = {
'data_info': {
'count_list_path': resource_filename('deepbiome', 'tests/data/gcount_list.csv'),
'count_path': resource_filename('deepbiome', 'tests/data/count'),
'data_path': resource_filename('deepbiome', 'tests/data'),
'idx_path': resource_filename('deepbiome', 'tests/data/classification_idx.csv'),
'tree_info_path': resource_filename('deepbiome', 'tests/data/genus48_dic.csv'),
'x_path': '',
'y_path': 'classification_y.csv'
},
'model_info': {
'model_dir': './example_result/',
'weight': 'weight.h5'
}
}
###Output
_____no_output_____
###Markdown
To predict the CV outputs from each fold, we set `change_weight_for_each_fold=True`. We also set `get_y=True` to get the true output paired with each prediction.
###Code
prediction = deepbiome.deepbiome_prediction(log, prediction_network_info, prediction_path_info,
num_classes = 1, number_of_fold=None,
change_weight_for_each_fold = True,
get_y=True)
###Output
[root |INFO|deepbiome.py:450] -----------------------------------------------------------------
[root |INFO|deepbiome.py:480] -------1 th repeatition prediction start!----------------------------------
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|deepbiome.py:498] -----------------------------------------------------------------
[root |INFO|build_network.py:521] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:522] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:528] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:537] Genus: 48
[root |INFO|build_network.py:537] Family: 40
[root |INFO|build_network.py:537] Order: 23
[root |INFO|build_network.py:537] Class: 17
[root |INFO|build_network.py:537] Phylum: 9
[root |INFO|build_network.py:546] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:547] Phylogenetic_tree_dict info: ['Phylum', 'Order', 'Family', 'Class', 'Number', 'Genus']
[root |INFO|build_network.py:548] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:558] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:558] Build edge weights between [Family, Order]
[root |INFO|build_network.py:558] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:558] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:571] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:586] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:587] Build network based on phylogenetic tree information
[root |INFO|build_network.py:588] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:670] ------------------------------------------------------------------------------------------
###Markdown
We gather the outputs from each fold into a single array.
###Code
prediction = np.vstack(prediction)
###Output
_____no_output_____
###Markdown
Since we set the option `get_y=True`, the output has shape `(n_samples, 2, n_classes)`. With this option, we get both the test-set predictions and the true output for each prediction. Now we can calculate the test performance from the test predictions.
###Code
predict_output = prediction[:,0]
true_output = prediction[:,1]
log.info('Shape of the predict function ourput: %s' % str(prediction.shape))
log.info('Shape of the prediction: %s' % str(predict_output.shape))
log.info('Shape of the true_output for each prediction: %s' % str(true_output.shape))
log.info('CV accuracy: %6.3f' % np.mean((predict_output >= 0.5) == true_output))
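# (Illustrative sketch) The same pooled predictions can give other CV metrics with plain numpy;
# this assumes a 0.5 threshold and binary 0/1 labels, matching the accuracy line above.
pred_label = (predict_output >= 0.5).astype(int)
tp = np.sum((pred_label == 1) & (true_output == 1))
tn = np.sum((pred_label == 0) & (true_output == 0))
fp = np.sum((pred_label == 1) & (true_output == 0))
fn = np.sum((pred_label == 0) & (true_output == 1))
log.info('CV sensitivity: %6.3f' % (tp / (tp + fn)))
log.info('CV specificity: %6.3f' % (tn / (tn + fp)))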
###Output
[root |INFO|<ipython-input-44-ecaee2413087>:1] CV accuracy: 0.719
###Markdown
8. Load the trained weight matrices. The `deepbiome_get_trained_weight` function converts the trained weight file `*.h5` saved by `deepbiome_train` into a list of numpy arrays. In this example, the list holds the weight matrices connecting the layers (`[genus to family, family to order, order to class, class to phylum, phylum to output]`).
###Code
weight_path = '%s/%s' % (path_info['model_info']['model_dir'], 'weight_0.h5')
trained_weight_list = deepbiome.deepbiome_get_trained_weight(log, network_info, path_info, num_classes=1, weight_path=weight_path)
log.info(len(trained_weight_list))
###Output
[root |INFO|readers.py:58] -----------------------------------------------------------------------
[root |INFO|readers.py:59] Construct Dataset
[root |INFO|readers.py:60] -----------------------------------------------------------------------
[root |INFO|readers.py:61] Load data
[root |INFO|build_network.py:521] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:522] Read phylogenetic tree information from /DATA/home/muha/github_repos/deepbiome/deepbiome/tests/data/genus48_dic.csv
[root |INFO|build_network.py:528] Phylogenetic tree level list: ['Genus', 'Family', 'Order', 'Class', 'Phylum']
[root |INFO|build_network.py:529] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:537] Genus: 48
[root |INFO|build_network.py:537] Family: 40
[root |INFO|build_network.py:537] Order: 23
[root |INFO|build_network.py:537] Class: 17
[root |INFO|build_network.py:537] Phylum: 9
[root |INFO|build_network.py:546] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:547] Phylogenetic_tree_dict info: ['Phylum', 'Order', 'Family', 'Class', 'Number', 'Genus']
[root |INFO|build_network.py:548] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:558] Build edge weights between [ Genus, Family]
[root |INFO|build_network.py:558] Build edge weights between [Family, Order]
[root |INFO|build_network.py:558] Build edge weights between [ Order, Class]
[root |INFO|build_network.py:558] Build edge weights between [ Class, Phylum]
[root |INFO|build_network.py:571] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:586] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:587] Build network based on phylogenetic tree information
[root |INFO|build_network.py:588] ------------------------------------------------------------------------------------------
[root |INFO|build_network.py:670] ------------------------------------------------------------------------------------------
###Markdown
The first weight matrix, between the `genus` and `family` layers, has the shape `(number of genus = 48, number of family = 40)`.
###Code
log.info(trained_weight_list[0].shape)
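# (Optional visualization sketch, assuming matplotlib's plt imported earlier in this notebook)
# Show the learned genus-to-family connection weights as a heatmap.
plt.imshow(trained_weight_list[0], aspect='auto', cmap='viridis')
plt.xlabel('Family nodes')
plt.ylabel('Genus nodes')
plt.colorbar()
plt.show()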
###Output
[root |INFO|<ipython-input-46-c71fa46ab178>:1] (48, 40)
###Markdown
9. Taxa selection performance. If we know the true disease path, we can calculate the taxa selection performance with the `deepbiome_taxa_selection_performance` function. First, we prepare the true weight list based on the true disease path. For each fold, we prepare 4 weight matrices from the 5 layers (`[genus to family, family to order, order to class, class to phylum]`). An example of the list of true weights for each fold is as follows:
###Code
true_tree_weight_list = np.load(resource_filename('deepbiome', 'tests/data/true_weight_list.npy'), allow_pickle=True)
log.info(true_tree_weight_list.shape)
###Output
[root |INFO|<ipython-input-47-7f16305fbcb7>:2] (5, 4)
###Markdown
The first weight matrix, between the genus and family layers for the first fold, has the shape below:
###Code
log.info(true_tree_weight_list[0][0].shape)
###Output
[root |INFO|<ipython-input-48-7f1406e7d9a7>:1] (48, 40)
###Markdown
We will calculate the taxa selection performance of the trained weights below:
###Code
trained_weight_path_list = ['%s/weight_%d.h5' % (path_info['model_info']['model_dir'], i) for i in range(3)]
trained_weight_path_list
###Output
_____no_output_____
###Markdown
This is the summary of the taxa selection accuracy of trained weights from each fold.
###Code
summary = deepbiome.deepbiome_taxa_selection_performance(log, network_info, path_info, num_classes=1,
true_tree_weight_list=true_tree_weight_list,
trained_weight_path_list = trained_weight_path_list)
summary
###Output
_____no_output_____ |
notebooks/violations_dob.ipynb | ###Markdown
1. Data retrieval
###Code
sql = 'select * from violations_dob limit 1;'
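# Note: `get_query_as_df` is assumed to be a project helper (defined outside this excerpt)
# that runs a SQL query against the violations database and returns a pandas DataFrame.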
columns = get_query_as_df(sql).T.index
sorted(columns)
[c for c in columns if "pena" in c]
sql = 'select isn_dob_bis_viol, issue_date_year, issue_date_month from violations_dob'
violations_dob= get_query_as_df(sql)
violations_dob.head()
###Output
_____no_output_____
###Markdown
`ISN_DOB_BIS_VIOL`: an internal code that serves as a unique value. 2. High-level Analysis
###Code
violations_to_plot = violations_dob[
["issue_date_year",
"issue_date_month"]
].groupby(
["issue_date_year",
"issue_date_month"]
).size()
violations_to_plot= violations_to_plot.loc[2000:2019].to_frame()
violations_to_plot= convert_year_month_to_labels(table=violations_to_plot,
month_col='issue_date_month',
year_col='issue_date_year')
violations_to_plot.columns = ['issue_date_year','issue_date_month','number_of_violations']
peak_inds, trough_inds = features.get_peak_and_trough_indices_simple(violations_to_plot.number_of_violations)
peaks = violations_to_plot.number_of_violations.iloc[peak_inds]
peaks_important = peaks.loc[peaks > 10**4]
violations_to_plot.number_of_violations = violations_to_plot.number_of_violations / 1000
fig, ax = plt.subplots()
ax.xaxis.set_major_locator(plt.MaxNLocator(10))
violations_to_plot.number_of_violations.plot(color=nt_blue)
y_ticks = ax.get_yticks()
max_y_val = y_ticks[-1]
for date, val in peaks_important.iteritems():
x = violations_to_plot.index.get_loc(date)
y = violations_to_plot.loc[date, "number_of_violations"]
ax.axvline(x, ymax=y / max_y_val, color=nt_black, linestyle="--", linewidth=".5")
ax.title.set_text("DoB Violation Issuances, Monthly")
ylab = ax.set_ylabel("Number of Violations (k)")
xlab = ax.set_xlabel(None)
savefig("violation_spikes.png", fig, bottom=.2)
pd.options.display.max_rows = 250
first_half = violations_to_plot.loc[
slice("2000-1", "2009-12"), :
].number_of_violations.to_frame(
).reset_index()
first_half.columns = ["date_1", "num_1"]
second_half = violations_to_plot.loc[
slice("2010-1", "2019-12"), :
].number_of_violations.to_frame(
).reset_index()
second_half.columns = ["date_2", "num_2"]
joined = first_half.join(second_half, how="outer")
joined
###Output
_____no_output_____ |
01BERT_WordPred.ipynb | ###Markdown
###Code
pip install -U pytorch-pretrained-bert
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
logging.basicConfig(level=logging.INFO)
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
text = '[CLS] I want to [MASK] the car because it is cheap . [SEP]'
tokenized_text = tokenizer.tokenize(text)
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Create the segments tensors.
segments_ids = [0] * len(tokenized_text)
#Masked index
masked_index = 4
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
model.eval()
# Predict all tokens
with torch.no_grad():
predictions = model(tokens_tensor, segments_tensors)
predicted_index = torch.argmax(predictions[0, masked_index]).item()
predicted_token = tokenizer.convert_ids_to_tokens([predicted_index])[0]
print("Predicted Token is :\t",predicted_token)
###Output
INFO:pytorch_pretrained_bert.modeling:loading archive file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz from cache at /root/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba
INFO:pytorch_pretrained_bert.modeling:extracting archive file /root/.pytorch_pretrained_bert/9c41111e2de84547a463fd39217199738d1e3deb72d4fec4399e6e241983c6f0.ae3cef932725ca7a30cdcb93fc6e09150a55e2a130ec7af63975a16c153ae2ba to temp dir /tmp/tmp97q6hpqi
INFO:pytorch_pretrained_bert.modeling:Model config {
"attention_probs_dropout_prob": 0.1,
"hidden_act": "gelu",
"hidden_dropout_prob": 0.1,
"hidden_size": 768,
"initializer_range": 0.02,
"intermediate_size": 3072,
"max_position_embeddings": 512,
"num_attention_heads": 12,
"num_hidden_layers": 12,
"type_vocab_size": 2,
"vocab_size": 30522
}
INFO:pytorch_pretrained_bert.modeling:Weights from pretrained model not used in BertForMaskedLM: ['cls.seq_relationship.weight', 'cls.seq_relationship.bias']
|
end-to-end-heart-disease-classification.ipynb | ###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
# Let's tune KNN
train_scores = []
test_scores = []
# Create a list of different values for n_neighbors
neighbors = range(1, 21)
# Setup KNN instance
knn = KNeighborsClassifier()
# Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# Fit the algorithm
knn.fit(X_train, y_train)
# Update the training scores list
train_scores.append(knn.score(X_train, y_train))
# Update the test scores list
test_scores.append(knn.score(X_test, y_test))
plt.plot(neighbors, train_scores, label="Train score")
plt.plot(neighbors, test_scores, label="Test score")
plt.xticks(np.arange(1, 21, 1))
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
print(f"Maximum KNN score on the test data: {max(test_scores)*100:.2f}%")
# Create a hyperparameter grid for LogisticRegression# Create
log_reg_grid = {"C": np.logspace(-4, 4, 20),
"solver": ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10, 1000, 50),
"max_depth": [None, 3, 5, 10],
"min_samples_split": np.arange(2, 20, 2),
"min_samples_leaf": np.arange(1, 20, 2)}
# Tune LogisticRegression
np.random.seed(42)
# Setup random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for LogisticRegression
rs_log_reg.fit(X_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
# Setup random seed
np.random.seed(42)
# Setup random hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for RandomForestClassifier()
rs_rf.fit(X_train, y_train)
# Find the best hyperparameters
rs_rf.best_params_
# Evaluate the randomized search RandomForestClassifier model
rs_rf.score(X_test, y_test)
# Different hyperparameters for our LogisticRegression model
log_reg_grid = {"C": np.logspace(-4, 4, 30),
"solver": ["liblinear"]}
# Setup grid hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameter search model
gs_log_reg.fit(X_train, y_train);
# Check the best hyperparmaters
gs_log_reg.best_params_
# Evaluate the grid search LogisticRegression model
gs_log_reg.score(X_test, y_test)
# Make predictions with tuned model
y_preds = gs_log_reg.predict(X_test)
y_preds
y_test
# Plot ROC curve and calculate and calculate AUC metric
plot_roc_curve(gs_log_reg, X_test, y_test);
# Confusion matrix
print(confusion_matrix(y_test, y_preds))
sns.set(font_scale=1.5)
def plot_conf_mat(y_test, y_preds):
"""
Plots a nice looking confusion matrix using Seaborn's heatmap()
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True,
cbar=False)
plt.xlabel("True label")
plt.ylabel("Predicted label")
plot_conf_mat(y_test, y_preds)
print(classification_report(y_test, y_preds))
# Check best hyperparameters
gs_log_reg.best_params_
# Create a new classifier with best parameters
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
# Cross-validated accuracy
cv_acc = cross_val_score(clf,
X,
y,
cv=5,
scoring="accuracy")
cv_acc
cv_acc = np.mean(cv_acc)
cv_acc
# Cross-validated precision
cv_precision = cross_val_score(clf,
X,
y,
cv=5,
scoring="precision")
cv_precision = np.mean(cv_precision)
cv_precision
# Cross-validated recall
cv_recall = cross_val_score(clf,
X,
y,
cv=5,
scoring="recall")
cv_recall = np.mean(cv_recall)
cv_recall
# Cross-validated f1-score
cv_f1 = cross_val_score(clf,
X,
y,
cv=5,
scoring="f1")
cv_f1 = np.mean(cv_f1)
cv_f1
# Visualize cross-validated metrics
cv_metrics = pd.DataFrame({"Accuracy": cv_acc,
"Precision": cv_precision,
"Recall": cv_recall,
"F1": cv_f1},
index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics",
legend=False);
# Fit an instance of LogisticRegression
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
clf.fit(X_train, y_train);
gs_log_reg.best_params_
# Check coef_
clf.coef_
# Match coef's of features to columns
feature_dict = dict(zip(df.columns, list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title="Feature Importance", legend=False);
pd.crosstab(df["sex"], df["target"])
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____
###Markdown
Predicting Heart Disease using Machine Learning. This notebook gives an insight into various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes. We're going to take the following approach:
1. Problem definition
2. Data
3. Evaluation
4. Features
5. Modelling
6. Experimentation

1. Problem Definition. In a statement:
> Given clinical parameters about a patient, can we predict whether or not they have heart disease?

2. Data. The original data came from the Cleveland database of the UCI Machine Learning Repository. There is also a version of it available on Kaggle.

3. Evaluation.
> If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue the project.

4. Features. **This is where you'll get different information about each of the features in your data. You can do this via doing your own research (such as looking at the links above) or by talking to a subject matter expert.**
**Create data dictionary**
1. age - age in years
2. sex - (1 = male; 0 = female)
3. cp - chest pain type
 * 0: Typical angina: chest pain related decrease blood supply to the heart
 * 1: Atypical angina: chest pain not related to heart
 * 2: Non-anginal pain: typically esophageal spasms (non heart related)
 * 3: Asymptomatic: chest pain not showing signs of disease
4. trestbps - resting blood pressure (in mm Hg on admission to the hospital)
 * anything above 130-140 is typically cause for concern
5. chol - serum cholesterol in mg/dl
 * serum = LDL + HDL + .2 * triglycerides
 * above 200 is cause for concern
6. fbs - (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
 * '>126' mg/dL signals diabetes
7. restecg - resting electrocardiographic results
 * 0: Nothing to note
 * 1: ST-T Wave abnormality - can range from mild symptoms to severe problems - signals non-normal heart beat
 * 2: Possible or definite left ventricular hypertrophy - enlarged heart's main pumping chamber
8. thalach - maximum heart rate achieved
9. exang - exercise induced angina (1 = yes; 0 = no)
10. oldpeak - ST depression induced by exercise relative to rest
 * looks at stress of heart during exercise
 * unhealthy heart will stress more
11. slope - the slope of the peak exercise ST segment
 * 0: Upsloping: better heart rate with exercise (uncommon)
 * 1: Flatsloping: minimal change (typical healthy heart)
 * 2: Downsloping: signs of unhealthy heart
12. ca - number of major vessels (0-3) colored by fluoroscopy
 * colored vessel means the doctor can see the blood passing through
 * the more blood movement the better (no clots)
13. thal - thallium stress result
 * 1,3: normal
 * 6: fixed defect: used to be a defect but ok now
 * 7: reversible defect: no proper blood movement when exercising
14. target - have disease or not (1 = yes, 0 = no) (= the predicted attribute)
**Note:** No personally identifiable information (PII) can be found in the dataset.

Preparing the tools. We're going to use Pandas, NumPy and Matplotlib for data analysis and manipulation.
###Code
# Import all the tools
# Regualar EDA(Exploratory Data Analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# We want our plots to appear inside the notebook.
%matplotlib inline
# Jupyter themes
from jupyterthemes import jtplot
jtplot.style(theme='monokai',context='notebook',ticks=True,grid=False)
# Models from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve,plot_confusion_matrix
###Output
_____no_output_____
###Markdown
Load data
###Code
df = pd.read_csv('heart-disease.csv')
df.head()
df.shape
###Output
_____no_output_____
###Markdown
Data Exploration (EDA or exploratory data analysis). The goal here is to find out more about the data and become a subject matter expert on the dataset you're working with.
1. What question(s) are we trying to solve?
2. What kind of data do we have and how do we treat different types?
3. What's missing from the data and how do you deal with it?
4. Where are the outliers and why should you care about them?
5. How can we add, change, or remove features to get more out of the data?
###Code
df.tail()
# Let's find out how many of each class there are
df['target'].value_counts()
df['target'].value_counts(normalize=True)
df['target'].value_counts().plot(kind='bar',color=['salmon','lightblue'])
plt.title("Heart Disease Frequency")
plt.xlabel("1 = Disease, 0 = No Disease")
plt.ylabel('Amount')
plt.xticks(rotation=0);
df.info()
# Are there any missing values?
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df['sex'].value_counts()
# Compare target column with sex column
pd.crosstab(df['target'],df['sex'])
## Create a plot of crosstab
pd.crosstab(df.target,df.sex).plot(kind='bar',
figsize=(10,6),
color=['salmon','lightblue'])
plt.title('Heart Disease Frequency for Sex');
plt.xlabel('0 : No Disease,1 : Disease')
plt.ylabel("Amount")
plt.legend(['Female','Male'])
plt.xticks(rotation=0);
df.head()
df.thalach.value_counts()
###Output
_____no_output_____
###Markdown
Age vs Max Heart Rate for Heart Disease
###Code
plt.figure(figsize=(10,6))
# Scatter with positive examples
plt.scatter(df.age[df['target']==1],
df.thalach[df['target']==1],
color='salmon')
# Scatter with negative examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
color='lightblue')
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel('Age')
plt.ylabel('Max Heart Rate')
plt.legend([f'Disease - {len(df[df.target==1])}',f'No Disease - {len(df[df.target==0])}']);
# Check the distribution of the age by histogram
df.age.plot(kind='hist');
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Type. cp - chest pain type:
* 0: Typical angina: chest pain related decrease blood supply to the heart
* 1: Atypical angina: chest pain not related to heart
* 2: Non-anginal pain: typically esophageal spasms (non heart related)
* 3: Asymptomatic: chest pain not showing signs of disease
###Code
df.cp.value_counts()
pd.crosstab(df['target'],df['cp'])
# Make the crosstab more visual
pd.crosstab(df.cp,df.target).plot.bar(color=['salmon','lightblue'],figsize=(10,6))
plt.xticks(rotation=0);
# Add some communication
plt.title('Heart disease Frequency for chest pain type')
plt.xlabel('Chest pain type')
plt.ylabel('Amount')
plt.legend(['No Disease','Disease']);
df.head()
###Output
_____no_output_____
###Markdown
Correlation between Independent variables
###Code
df.corr()
# Let's make our correlation matrix a little pretty
corr_matrix = df.corr()
plt.figure(figsize=(15,10));
sns.heatmap(corr_matrix,
annot=True,
linewidths=0.5,
fmt='.2f',
cmap='YlGnBu');
df.head()
###Output
_____no_output_____
###Markdown
> exang - exercise induced angina (1 = yes; 0 = no)
###Code
df.exang.value_counts()
pd.crosstab(df.exang,df.target)
pd.crosstab(df.exang,df.target).plot(kind='bar',color=['salmon','lightblue'])
plt.legend(['No Disease','Disease'])
plt.ylabel('Amount')
plt.title('Heart disease Frequency for Exang')
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
5. Modelling
###Code
df.head()
# Split it into X,y
X = df.drop(['target'],axis=1)
y = df['target']
X
y
# Split data into train,test sets
np.random.seed(42)
X_train,X_test,y_train,y_test = train_test_split(X,y,
test_size=0.2)
X_train
y_train,len(y_train)
###Output
_____no_output_____
###Markdown
Now we've got our data split into training and test sets, it's time to build a machine learning model. We'll train it (find the patterns) on the training set, and we'll test it (use the patterns) on the test set. We're going to try 3 different machine learning models:
1. Logistic Regression
2. K-Nearest Neighbours Classifier
3. Random Forest Classifier
###Code
# Put models into dictionary
models = {'Logistic Regression': LogisticRegression(),
'KNN': KNeighborsClassifier(),
'Random Forest': RandomForestClassifier()}
# Create function to fit and score models
def fit_and_score(models,X_train,X_test,y_train,y_test):
"""
Fits and evaluates the given machine learning models.
models: a dict of different Scikit-Learn machine learning models
X_train: training data (no labels)
X_test: testing data (no labels)
y_train: training labels
y_test: test labels
"""
# Set random seed
np.random.seed(42)
# Make a dictionary to keep model score
model_scores = {}
# Loop through models
for name,model in models.items():
# Fit the model to the data
model.fit(X_train,y_train)
# Evaluate the model and append its score to model_scores
model_scores[name]=model.score(X_test,y_test)
return model_scores
model_scores = fit_and_score(models,
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test)
model_scores
###Output
I:\Desktop\heart-disease-project\env\lib\site-packages\sklearn\linear_model\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_scores,index=['Accuracy'])
model_compare
model_compare.T.plot.bar();
plt.title
plt.xlabel('Model')
plt.ylabel('Score')
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Now we've got a baseline model... and we know a model's first predictions aren't always what we should base our next steps on. What should we do? Let's look at the following:
* Hyperparameter tuning
* Feature importance
* Confusion matrix
* Cross-validation
* Precision
* Recall
* F1 score
* Classification report
* ROC curve
* Area under the curve (AUC)

Hyperparameter tuning (by hand)
###Code
# let's tune KNN
train_scores = []
test_scores = []
# Create a list of different values for n_neighbors
neighbors = range(1,21)
# Setup KNN
knn = KNeighborsClassifier()
# Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# fit the algorithm
knn.fit(X_train,y_train)
# Updating training scores list
train_scores.append(knn.score(X_train,y_train))
# Updating test scores list
test_scores.append(knn.score(X_test,y_test))
train_scores
test_scores
plt.plot(neighbors,train_scores,'--',label='Train Scores')
plt.plot(neighbors,test_scores,'--',label='Test Scores')
plt.xticks(np.arange(1,21,1))
plt.xlabel('Number of Neighbors')
plt.ylabel('Model Score')
plt.legend();
print(f"Maximum KNN score on the test data:{max(test_scores) * 100:.2f}%")
###Output
Maximum KNN score on the test data:75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCV. We're going to tune:
* Logistic Regression
* RandomForestClassifier
... using RandomizedSearchCV.
###Code
# Create a hyperparameter grid for LogisticRegression
# Different LogisticRegression hyperparameters
log_reg_grid = {"C":np.logspace(-4,4,20),
"solver": ["liblinear"]}
# Create a hyperparameter grid forr RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10,100,10),
"max_depth": [None,3,5,10],
"min_samples_split": np.arange(2,20,2),
"min_samples_leaf": np.arange(1,20,2)}
###Output
_____no_output_____
###Markdown
Now that we've got hyperparameter grids set up for each of our models, let's tune them using `RandomizedSearchCV`
###Code
# Tune logistic regression
np.random.seed(42)
# Setup random hyperparameter search for logistic regression
rs_log_reg = RandomizedSearchCV(estimator=LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit radom hyperparameter search model for logistic regression
rs_log_reg.fit(X_train,y_train)
# Best parameters for logistic regression from above gird
rs_log_reg.best_params_
rs_log_reg.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
Now we've tuned Logistic Regression(), let's do the same for RandomForestClassifier()
###Code
# Setup Random seed
np.random.seed(42)
# Setup Random Hyperparameter search for RandomForestClassifier())
rs_rf = RandomizedSearchCV(estimator=RandomForestClassifier(),
param_distributions=rf_grid,
n_iter=20,
cv=5,
verbose=True)
# Fit the random hyperparameter search model for RandomForestClassifier
rs_rf.fit(X_train,y_train)
# Best parameters
rs_rf.best_params_
# Evaluate the randomized search RandomForestclassifer model
rs_rf.score(X_test,y_test)
model_scores
###Output
_____no_output_____
###Markdown
Hyperparameter tuning approaches:
* By hand
* RandomizedSearchCV
* GridSearchCV

Hyperparameter tuning using `GridSearchCV`. Since our Logistic Regression model provides the best score so far, we'll try to improve it using GridSearchCV.
###Code
# Different hyperparameters for our logistic regression mdel
# Create hyperparameter gird
log_reg_grid = {"C": np.logspace(-4,4,30),
"solver": ["liblinear"]}
# Setup grid hyperparameter search for logistic regression
gs_log_reg = GridSearchCV(estimator=LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameter search model
gs_log_reg.fit(X_train,y_train)
# Check the best hyperparameters
gs_log_reg.best_params_
# Evaluate our gridsearch Logistic Regression model
gs_log_reg.score(X_test,y_test)
model_scores
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier, beyond accuracy:
* ROC curve and AUC score
* Classification report
* Confusion matrix
* Precision
* Recall
* F1 score
... and it would be great if cross-validation was used where possible.
> To make comparisons and evaluate our trained model, first we need to make some predictions.
###Code
# Make predictioins with tuned model
y_preds = gs_log_reg.predict(X_test)
y_preds
y_test
# Import ROC curve function from sklearn.metrics
from sklearn.metrics import plot_roc_curve
plot_roc_curve(gs_log_reg,
X_test,
y_test);
# Confusion matrix
conf_matrix = confusion_matrix(y_test,y_preds)
conf_matrix
# Import Seaborn
import seaborn as sns
sns.set(font_scale=1.5) # Increase font size
def plot_conf_mat(y_test,y_preds):
"""
Plots a confusion matrix using Seaborn's heatmap
"""
fig,ax = plt.subplots(figsize=(3,3))
ax = sns.heatmap(confusion_matrix(y_test,y_preds),
annot=True, # Annotate the boxes
cbar=False)
plt.title('Confusion matrix')
plt.xlabel('Predicted labels')
plt.ylabel('True labels');
plot_conf_mat(y_test,y_preds)
# Sklearn inbuilt plot_confusion_matrix
from sklearn.metrics import plot_confusion_matrix
# Plot confusion matrix
plot_confusion_matrix(gs_log_reg,X_test,y_test);
###Output
_____no_output_____
###Markdown
Now that we've got a ROC curve, an AUC metric and a confusion matrix, let's get a classification report as well as cross-validated precision, recall and F1 score.
###Code
print(classification_report(y_test,y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate evaluation metrics using cross-validation. We're going to calculate the accuracy, precision, recall and F1 score of our model using cross-validation, and to do so we'll be using `cross_val_score`.
###Code
# Check best hyperparameters
gs_log_reg.best_params_
gs_log_reg.score(X_test,y_test)
# import cross_val_score
from sklearn.model_selection import cross_val_score
# Instantiate best model with best hyperparameters (found in GridSearchCV)
clf = LogisticRegression(C = 0.20433597178569418,
solver= 'liblinear')
# Cross-validated accuracy score
cv_acc = cross_val_score(clf,X,y,
cv=5,
scoring='accuracy')
cv_acc
cv_acc = np.mean(cv_acc)
cv_acc
# Cross-validated precision score
cv_precision = cross_val_score(clf,
X,
y,
cv=5,
scoring='precision')
cv_precision
cv_precision = np.mean(cv_precision)
cv_precision
# Cross-validated recall
cv_recall = cross_val_score(clf,
X,
y,
cv=5,
scoring='recall')
cv_recall = np.mean(cv_recall)
cv_recall
# Cross-validated f1-score
cv_f1 = cross_val_score(clf,
X,
y,
cv=5,
scoring='f1')
cv_f1 = np.mean(cv_f1)
cv_f1
# visualize the cross-validated metrics
cv_metrics = pd.DataFrame({'Accuracy':cv_acc,
'Precison':cv_precision,
'Recall':cv_recall,
'F1':cv_f1},index=['Score'])
cv_metrics
cv_metrics.T.plot.bar(title='Cross-validated Metrics',
legend=False)
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Feature importance
> Which features contribute most to the outcomes of the model, and how do they contribute?
Finding feature importance is different for each machine learning model. One way to find the feature importance is to search for "{model name} feature importance". Let's find the feature importance for our LogisticRegression model.
###Code
# fit the instance of logistic regression
gs_log_reg.best_params_
clf = LogisticRegression(C = 0.20433597178569418,
solver='liblinear')
clf.fit(X_train,y_train);
# Check coef_
clf.coef_
df.head()
# Match coef's of features to columns
feature_dict = dict(zip(df.columns,list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict,index=[0])
feature_df
feature_df.T.plot.bar(title='Feature Importance',legend=False);
pd.crosstab(df.sex,df.target)
###Output
_____no_output_____
###Markdown
slope - the slope of the peak exercise ST segment
0: Upsloping: better heart rate with exercise (uncommon)
1: Flatsloping: minimal change (typical healthy heart)
2: Downsloping: signs of unhealthy heart
###Code
pd.crosstab(df.slope,df.target)
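# (Illustrative sketch) The same crosstab as a bar plot, reusing the styling from earlier cells.
pd.crosstab(df.slope, df.target).plot(kind="bar", color=["salmon", "lightblue"])
plt.title("Heart Disease Frequency per Slope")
plt.xlabel("Slope of the peak exercise ST segment")
plt.ylabel("Amount")
plt.legend(["No Disease", "Disease"])
plt.xticks(rotation=0);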
###Output
_____no_output_____
###Markdown
Predicting heart disease using machine learning. This notebook looks into using various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether someone has heart disease based on their medical attributes. We're going to take the following approach:
1. Problem definition
2. Data
3. Evaluation
4. Features
5. Modelling
6. Experimentation

1. Problem definition. In a statement,
> Given clinical parameters about a patient, can we predict whether they have heart disease?

2. Data. [Original data](https://archive.ics.uci.edu/ml/datasets/heart+disease), [Kaggle](https://www.kaggle.com/mragpavank/heart-diseaseuci)

3. Evaluation.
> If we can reach 95% accuracy at predicting whether a patient has heart disease during the proof of concept, we'll pursue the project.

4. Features. This is where you'll get different information about each feature in our data.
**Create a data dictionary**
1. age - age in years
2. sex - (1 = male; 0 = female)
3. cp - chest pain type
 * 0: Typical angina: chest pain related decrease blood supply to the heart
 * 1: Atypical angina: chest pain not related to heart
 * 2: Non-anginal pain: typically esophageal spasms (non heart related)
 * 3: Asymptomatic: chest pain not showing signs of disease
4. trestbps - resting blood pressure (in mm Hg on admission to the hospital); anything above 130-140 is typically cause for concern
5. chol - serum cholesterol in mg/dl
 * serum = LDL + HDL + .2 * triglycerides
 * above 200 is cause for concern
6. fbs - (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
 * '>126' mg/dL signals diabetes
7. restecg - resting electrocardiographic results
 * 0: Nothing to note
 * 1: ST-T Wave abnormality - can range from mild symptoms to severe problems - signals non-normal heart beat
 * 2: Possible or definite left ventricular hypertrophy - enlarged heart's main pumping chamber
8. thalach - maximum heart rate achieved
9. exang - exercise induced angina (1 = yes; 0 = no)
10. oldpeak - ST depression induced by exercise relative to rest; looks at stress of heart during exercise; unhealthy heart will stress more
11. slope - the slope of the peak exercise ST segment
 * 0: Upsloping: better heart rate with exercise (uncommon)
 * 1: Flatsloping: minimal change (typical healthy heart)
 * 2: Downsloping: signs of unhealthy heart
12. ca - number of major vessels (0-3) colored by fluoroscopy
 * colored vessel means the doctor can see the blood passing through
 * the more blood movement the better (no clots)
13. thal - thallium stress result
 * 1,3: normal
 * 6: fixed defect: used to be a defect but ok now
 * 7: reversible defect: no proper blood movement when exercising
14. target - have disease or not (1 = yes, 0 = no) (= the predicted attribute)

Preparing the tools. We're going to use pandas, Matplotlib and NumPy for data analysis and manipulation.
###Code
# Import all the tools we need
# Regular EDA (exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# For plots to appear inside the notebook
% matplotlib inline
# Models from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
# Customizing Matplotlib plots
plt.style.use('tableau-colorblind10')
###Output
_____no_output_____
###Markdown
Load data
###Code
df = pd.read_csv("data/heart-disease.csv")
df.shape # (rows, columns)
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis or EDA). The goal here is to find out more about the data and become a subject matter expert on the dataset you're working with.
1. What question(s) are you trying to solve?
2. What kind of data do we have and how do we treat different types?
3. What's missing from the data and how do you deal with it?
4. Where are the outliers and why should you care about them?
5. How can you add, change or remove features to get more out of your data?
###Code
df.head(5)
# How many of each class there
df["target"].value_counts()
df["target"].value_counts().plot(kind="bar", color=["lightgreen", "lightblue"])
df.info()
# Missing values?
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df.sex.value_counts()
# Compare target column with sex column
pd.crosstab(df.target, df.sex)
def compare_crosstab_plot(obj1, obj_name=""):
if obj_name == "":
obj_name = obj1.name.capitalize()
# Create a plot of cross-tab
pd.crosstab(obj1, df.target).plot(kind="bar",
figsize=(10, 6))
plt.title(f"Heart Disease Frequency for {obj_name}")
plt.xlabel(obj_name)
plt.ylabel("Amount")
plt.legend(["0 = No Disease", "1 = Disease"])
plt.xticks(rotation=0)
compare_crosstab_plot(df.sex), df.sex.value_counts()
df["thalach"].value_counts()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Type1. cp - chest pain type * 0: Typical angina: chest pain related decrease blood supply to the heart * 1: Atypical angina: chest pain not related to heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of disease
###Code
compare_crosstab_plot(df.cp, "Chest Pain Type")
###Output
_____no_output_____
###Markdown
Age vs. Max Heart Rate for Heart Disease
###Code
# Create another figure
plt.figure(figsize=(10, 6))
# Scatter with positive examples
plt.scatter(df.age[df.target == 1], df.thalach[df.target == 1], c="salmon")
# Scatter with negative examples
plt.scatter(df.age[df.target == 0], df.thalach[df.target == 0], c="lightblue");
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Disease", "No Disease"]);
# Check the distribution of the age column with a histogram
df.age.plot.hist();
df.head()
###Output
_____no_output_____
###Markdown
Make a correlation Matrix
###Code
df.corr()
# Let's make our correlation matrix more visible
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_matrix,
annot=True,
linewidths=0.5,
fmt=".2f",
cmap="YlGnBu");
# bottom, top = ax.get_ylim()
# ax.set_ylim(bottom + 0.5, top - 0.5)
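# Sketch: as a complement to the heatmap, list each feature's correlation with
# the target column, sorted from most positive to most negative.
corr_matrix["target"].sort_values(ascending=False)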
###Output
_____no_output_____
###Markdown
exang - exercise induced angina (1 = yes; 0 = no)
###Code
compare_crosstab_plot(df.exang, "Exercise induced angina")
df.target[df.exang == 1].value_counts()
###Output
_____no_output_____
###Markdown
5. Modeling Fit and Score a baseline model
###Code
# Split data into X and y
X, y = df.drop("target", axis=1), df["target"]
X, y
# Split data into train and test sets
np.random.seed(42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Now we've got our data split into training and test sets, it's time to build a machine learning model.We'll train it (find the patterns) on the training set.And we'll test it (use the patterns) on the test set.We're going to try 3 different machine learning models: 1. Logistic Regression 2. K-Nearest Neighbours Classifier 3. Random Forest Classifier
###Code
# Put models in a dictionary
models = {
"Logistic Regression": LogisticRegression(),
"KNN": KNeighborsClassifier(),
"Random Forest": RandomForestClassifier()
}
# Create function to fit and score models
def fit_and_score(models, X_train, X_test, y_train, y_test):
"""
Fits and evaluates given machine learning models.
    models : a dict of different Scikit-Learn machine learning models
X_train : training data (no labels)
X_test : testing data (no labels)
y_train : training labels
y_test : test labels
"""
# Set random seed
np.random.seed(42)
# Make a dictionary to keep model scores
model_scores = {}
# Loop through models
for name, model in models.items():
# Fit the model to the data
model.fit(X_train, y_train)
# Evaluate the model and append its score to model_scores
model_scores[name] = model.score(X_test, y_test)
return model_scores
model_scores = fit_and_score(models, X_train, X_test, y_train, y_test)
model_scores
###Output
C:\Users\inter\source\Python\MLandDS\supervised-learning-classification\env\lib\site-packages\sklearn\linear_model\_logistic.py:814: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
###Output
_____no_output_____
###Markdown
Now we've got a baseline model... and we know a model's first predictions aren't always what we should base our next steps off. What should we do?Let's look at the following:* Hyperparameter tuning* Feature importance* Confusion matrix* Cross-validation* Precision* Recall* F1 score* Classification report* ROC curve* Area under the curve (AUC) Hyperparameter tuning KNN
###Code
# Let's tune KNN
train_scores = []
test_scores = []
# Create a list of different values for n_neighbors
neighbors = range(1, 21)
# Setup KNN instance
knn = KNeighborsClassifier()
# Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# Fit the model
knn.fit(X_train, y_train)
    # Update the training scores list
    train_scores.append(knn.score(X_train, y_train))
    # Update the test scores list
    test_scores.append(knn.score(X_test, y_test))
train_scores, test_scores
plt.plot(neighbors, train_scores, label="Train score")
plt.plot(neighbors, test_scores, label="Test score")
plt.xticks(np.arange(1, 21, 1))
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
print(f"Maximum KNN score on the test data is {max(test_scores) * 100:.2f}%")
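# Sketch: recover which n_neighbors value produced that best test score
# (test_scores is indexed from 0, neighbors starts at 1).
best_k = neighbors[int(np.argmax(test_scores))]
best_k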
###Output
Maximum KNN score on the test data is 75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCVWe're going to tune:* RandomForestClassifier* LogisticRegression... using RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C": np.logspace(-4, 4, 20),
"solver": ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10, 1000, 50),
"max_depth": [None, 3, 5, 10],
"min_samples_split": np.arange(2, 20, 2),
"min_samples_leaf": np.arange(1, 20, 2)}
###Output
_____no_output_____
###Markdown
Now we've got hyperparameter grids setup for each of our models, let's tune them using RandomizedSearchCV...
###Code
# Tune LogisticRegression model
np.random.seed(42)
# Setup hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for LogisticRegression
rs_log_reg.fit(X_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Now we've tuned LogisticRegression(), let's do the same for RandomForestClassifier()...
###Code
# Tune RandomForestClassifier model
np.random.seed(42)
# Setup hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for RandomForestClassifier
rs_rf.fit(X_train, y_train)
rs_rf.best_params_
rs_rf.score(X_test, y_test)
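# Sketch: n_iter=20 means only 20 parameter combinations were sampled from the
# much larger rf_grid defined above; an exhaustive grid search would need one
# fit per combination (times the number of cross-validation folds).
n_rf_combinations = (len(rf_grid["n_estimators"]) * len(rf_grid["max_depth"])
                     * len(rf_grid["min_samples_split"]) * len(rf_grid["min_samples_leaf"]))
n_rf_combinations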
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning with GridSearchCVSince our LogisticRegression model provides the best scores so far, we'll try and improve them again using GridSearchCV...
###Code
# Tune LogisticRegression model
np.random.seed(42)
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C": np.logspace(-4, 4, 30),
                "solver": ["liblinear", "newton-cg", "lbfgs", "sag", "saga"],
                "penalty": ["l1", "l2", "elasticnet", "none"],  # "none" (lowercase string) disables regularisation; incompatible solver/penalty pairs are typically scored as NaN with a warning
"multi_class": ["auto", "ovr", "multinomial"]
}
# Setup grid hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameter search model for LogisticRegression
gs_log_reg.fit(X_train, y_train)
# Check the best hyperparameters
gs_log_reg.best_params_
# Evaluate the grid search LogisticRegression model
gs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier, beyond accuracy* Confusion matrix* Precision* Recall* F1 score* Classification report* ROC curve* Area under the curve (AUC)Prefer to use cross-validation where possibleTo make comparisons and evaluate our trained model, first we need to make predictions
###Code
# Make predictions with tuned model
y_preds = gs_log_reg.predict(X_test)
y_preds
plot_roc_curve(gs_log_reg, X_test, y_test)
# Confusion matrix
confusion_matrix(y_test, y_preds)
# Increase font size
sns.set(font_scale=1.5)
def plot_conf_mat(y_test, y_preds):
"""
Plots a confusion matrix using Seaborn's heatmap().
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True, # Annotate the boxes
cbar=False)
plt.xlabel("Predicted label") # predictions go on the x-axis
plt.ylabel("True label") # true labels go on the y-axis
plot_conf_mat(y_test, y_preds)
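# Sketch: precision and recall can be read straight off the confusion matrix
# (precision = TP / (TP + FP), recall = TP / (TP + FN)); scikit-learn's helpers
# give the same numbers for the positive class.
precision_score(y_test, y_preds), recall_score(y_test, y_preds)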
###Output
_____no_output_____
###Markdown
Now we've got a ROC curve, an AUC metric and a confusion matrix, let's get a classification report as well as cross-validated precision, recall and f1-score.
###Code
y_test.value_counts()
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate evaluation metrics using cross-validationWe're going to calculate accuracy, precision, recall and f1-score of our model using cross-validation and to do so we'll be using `cross_val_score()`.
###Code
# Check best hyperparameters
gs_log_reg.best_params_
# Create a new classifier with best parameters
clf = LogisticRegression(penalty='l2', solver='lbfgs', multi_class="multinomial", C=0.1082636733874054)
# Cross-validated accuracy, precision, recall, f1-score
cv_acc = np.mean(cross_val_score(clf, X, y, cv=5, scoring="accuracy"))
cv_precision = np.mean(cross_val_score(clf, X, y, cv=5, scoring="precision"))
cv_recall = np.mean(cross_val_score(clf, X, y, cv=5, scoring="recall"))
cv_f1 = np.mean(cross_val_score(clf, X, y, cv=5, scoring="f1"))
cv_acc, cv_precision, cv_recall, cv_f1
# Visualize cross-validated metrics
cv_metrics = pd.DataFrame({
"Accuracy": cv_acc,
"Precision": cv_precision,
"Recall": cv_recall,
"F1-score": cv_f1}, index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics",
legend=False)
###Output
_____no_output_____
###Markdown
Feature ImportanceFeature importance is another way of asking, "which features contributed most to the outcomes of the model and how did they contribute?"Finding feature importance is different for each machine learning model. One way to find feature importance is to search for "(MODEL NAME) feature importance".Let's find the feature importance for our LogisticRegression model...
###Code
# Fit an instance of LogisticRegression
clf = LogisticRegression(penalty='l2', solver='lbfgs', multi_class="multinomial", C=0.1082636733874054)
clf.fit(X_train, y_train)
# Check coef_
clf.coef_
# Match coef's of features to columns
feature_dict = dict(zip(df.columns, list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title="Feature Importance", legend=False)
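# Sketch: rank features by the absolute size of their coefficient as a rough
# indication of which ones the model leans on most (the sign is ignored here).
pd.Series(clf.coef_[0], index=X.columns).abs().sort_values(ascending=False)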
###Output
_____no_output_____
###Markdown
2. sex - (1 = male; 0 = female)
###Code
pd.crosstab(df["sex"], df["target"])
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____
###Markdown
Predicting heart disease using machine learningThis notebook looks into using various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes. 1. Problem DefinitionIn a statement,> Given clinical parameters about a patient, can we predict whether or not they have heart disease? 2. DataThe original data came from the Cleveland database from the UCI Machine Learning Repository.https://archive.ics.uci.edu/ml/datasets/Heart+DiseaseThere is also a version of it available on Kaggle. https://www.kaggle.com/ronitf/heart-disease-uci 3. Evaluation>If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue the project. 4. Features* age* sex* chest pain type (4 values)* resting blood pressure* serum cholestoral in mg/dl* fasting blood sugar > 120 mg/dl* resting electrocardiographic results (values 0,1,2)* maximum heart rate achieved* exercise induced angina* oldpeak = ST depression induced by exercise relative to rest* the slope of the peak exercise ST segment* number of major vessels (0-3) colored by flourosopy* thal: 3 = normal; 6 = fixed defect; 7 = reversable defect Preparing the tools
###Code
## Import all the tools we need
## Regular EDA(Exploratory data analysis) and plotting libraries
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# We want our plots to appear inside the notebook
%matplotlib inline
## Model from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
## Model Evaluations
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV,GridSearchCV
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.metrics import precision_score,f1_score,recall_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load data
###Code
df=pd.read_csv("heart-disease.csv")
df.head()
df.shape #(rows,columns)
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis or EDA)
###Code
df.tail()
## Let's find out how many of each class there are
df.target.value_counts()
###Output
_____no_output_____
###Markdown
**This shows the data is fairly balanced, with an almost equal number of examples in each class**
###Code
##Visualize this
df.target.value_counts().plot(kind="bar",color=["salmon","lightblue"]);
plt.show()
df.info()
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart disease frequency according to Sex
###Code
df.sex.value_counts()
# Compare target column to the sex column
pd.crosstab(df.target,df.sex)
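# Sketch: normalising the crosstab by column gives the proportion of each sex
# with heart disease, which backs up the ratio discussion below.
pd.crosstab(df.target, df.sex, normalize="columns")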
###Output
_____no_output_____
###Markdown
* Of the women in the dataset, about 75% have heart disease, compared with roughly 45% of the men *
###Code
# create a plot of crosstab
pd.crosstab(df.target,df.sex).plot(kind="bar",
figsize=(10,6),
color=["salmon","lightblue"]);
plt.title("Heart disease frequency for sex")
plt.xlabel("0 = NO Disease,1= Disease")
plt.ylabel("Amount")
## Change the legend name
plt.legend(["female","male"]);
##Rotate the number on the x-axis
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Finding patterns 2
###Code
df.head()
df["thalach"].value_counts()
###Output
_____no_output_____
###Markdown
Age vs maximum heart rate(thalach)
###Code
## Create another figure
plt.figure(figsize=(10,6))
##Scatter with positive examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c="salmon")
##Scatter with negative examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
c="lightblue");
## Add some helpful info
plt.title("Heart disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Maximum Heart Rate")
plt.legend(["Disease","No Disease"]);
## Check the distribution of the age column with the histogram
### This tells which is the outlier
df["age"].plot.hist();
df.head()
###Output
_____no_output_____
###Markdown
Heart disease Frequency per chest pain type
###Code
pd.crosstab(df.cp,df.target)
## Make the crosstab more visual
pd.crosstab(df.cp,df.target).plot(kind="bar",
figsize=(10,6),
color=["salmon","lightblue"])
## Add some communication
plt.title("Heart Disease Frequency per Chest Pain Type");
plt.xlabel("Chest pain type")
plt.ylabel("Amount")
plt.legend(["No Disease","Disease"])
plt.xticks(rotation=0);
df.head()
###Output
_____no_output_____
###Markdown
Finding patterns 3 Correlational analysis Make correlational matrix
###Code
# Make a correlational matrix
df.corr()
## Let's make our correlation matrix a little more visual using seaborn
corr_matrix=df.corr()
fig,ax=plt.subplots(figsize=(15,10))
ax=sns.heatmap(corr_matrix,
annot=True,
linewidths=0.2,
fmt=".2f",#format numbers upto 2 decimal point
cmap="YlGnBu");
###Output
_____no_output_____
###Markdown
5.Modelling
###Code
df.tail()
##Split the data into X and y
X=df.drop("target",axis=1)
y=df.target
X.head()
y[:5]
#Split the data into train and test sets
np.random.seed(42)
##Split the data into train and split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2)
X_train
y_train,len(y_train)
###Output
_____no_output_____
###Markdown
* Now we've got our data split into training and test sets, it's time to build a machine learning model* We'll train it (find the patterns) on the training set.* And we'll test it (use the patterns) on the test set.
###Code
from sklearn.svm import LinearSVC
from sklearn.svm import SVC
models={"RandomForestClassifier":RandomForestClassifier(),
"KNeighborsClassifier":KNeighborsClassifier(),
"LogisticRegression":LogisticRegression(),
#"LinearSVC":LinearSVC,
#"SVC":SVC
}
results={}
def evaluate(X_train,y_train,X_test,y_test):
"""
Fits the data and evaluate
"""
for model_name,model in models.items():
#Set random seed
np.random.seed(42)
print(f"Training on {model_name}")
#Fit the model
model.fit(X_train,y_train)
print(f"Scoring on {model_name}")
#Evaluate the model and append its score to results
score=model.score(X_test,y_test)
results[model_name]=score
evaluate(X_train,y_train,X_test,y_test)
results
## Model comparison
model_compare=pd.DataFrame(results,index=["Accuracy"])
model_compare
model_compare.T.plot(kind="bar");
plt.xticks(rotation=0);
fig,ax=plt.subplots()
ax.bar(results.keys(),
results.values(),
color=["salmon","blue","green"],
);
###Output
_____no_output_____
###Markdown
TUNING HYPERPARAMETERS Let's look at the following:* Hyperparameter tuning * Feature importance * Confusion matrix* Cross validation* Precision* Recall* F1 score* Classification report* ROC curve * Area under the curve (AUC) Hyperparameter tuning Let's tune KNN
###Code
#Let's tune KNN
train_scores=[]
test_scores=[]
##Create a list of different values for n_neighbors
neighbors=range(1,21)
#setup KNN instance
knn=KNeighborsClassifier()
#Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
#Fit the algorithm
knn.fit(X_train,y_train)
#Update training scores list
train_scores.append(knn.score(X_train,y_train))
##Update the test score list
test_scores.append(knn.score(X_test,y_test))
train_scores[:2],np.array(test_scores).argmax()
test_scores[10]
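# Note: argmax above returns a 0-based index into test_scores; the matching
# n_neighbors value is neighbors[index], since neighbors starts at 1.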
##Visualize
plt.plot(neighbors,train_scores,label="Train scores")
plt.plot(neighbors,test_scores,label="Test scores")
plt.legend();
plt.xlabel("n_neighbors value")
plt.ylabel("Accuracy")
plt.xticks(np.arange(1,21,1))
plt.title("Accuracy check at various scores");
plt.suptitle("Evaluation",fontweight="bold",fontsize=16);
###Output
_____no_output_____
###Markdown
Use RandomizedSearch and GridSearch for hyperparameter tuning Hyperparameter tuning with RandomizedSearchCVWe're going to tune:* LogisticRegression()* RandomForestClassifier()... using RandomizedSearchCV* Note: use a continuous range of values for hyperparameter tuning with RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid={"C":np.logspace(-4,4,20),
"solver":["liblinear"]}
# Create hyperparameter grid for RandomForestClassifier
rf_grid={"n_estimators":np.arange(10,1000,50),
"max_depth":[None,3,5,10],
"min_samples_split":np.arange(2,20,2),
"min_samples_leaf":np.arange(1,20,2)}
###Output
_____no_output_____
###Markdown
Let's tune using RandomizedSearchCV
###Code
#Tune LogisticRegression
np.random.seed(42)
# Setup random hyperparameter search for LogisiticRegression
rs_log_reg=RandomizedSearchCV(estimator=LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
##Fit randomhyperparameter search model for LogisticRegression
rs_log_reg.fit(X_train,y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
RandomForestClassifier
###Code
np.random.seed(42)
rs_rf=RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
rs_rf.fit(X_train,y_train)
rs_rf.best_params_
rs_rf.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
Go through a process of elimination to drop models that don't improve Hyperparameter Tuning with GridSearchCVSince our LogisticRegression model provides the best scores so far, we'll try and improve them again using GridSearchCV
###Code
## Different hyperparameters for our LogisitcRegression model
log_reg_grid={"C":np.logspace(-4,4,30),
"solver":["liblinear"]}
##Setup grid hyperparameter for GridSearchCV
gs_log_reg=GridSearchCV(estimator=LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
## Fit grid hyperparameters search model
gs_log_reg.fit(X_train,y_train);
###Output
Fitting 5 folds for each of 30 candidates, totalling 150 fits
###Markdown
**NOTE: LogisticRegression is fast, so fitting is also fast.**
###Code
# Check the best hyperparameters
gs_log_reg.best_params_
#Evaluate the logisitc regression
gs_log_reg.score(X_test,y_test)
results
###Output
_____no_output_____
###Markdown
EVALUATING OUR MODEL Evaluate our tuned machine learning classifier, beyond accuracy* ROC curve and AUC score* Confusion matrix* Classification report* Precision* Recall* F1....and it would be great if cross validation was used where possible. To make comparisons and evaluate our trained model, first we need to make predictions**It's always about comparing our predictions to the true values**
###Code
#Make predictions with tuned model
y_preds=gs_log_reg.predict(X_test)
y_preds
np.array(y_test)
## Plot using built in function
plot_roc_curve(gs_log_reg,X_test,y_test)
y_preds_proba=gs_log_reg.predict_proba(X_test)
y_preds_proba=y_preds_proba[:,1]
# Plot ROC_CURVE
from sklearn.metrics import roc_curve
fpr,tpr,thresholds=roc_curve(y_test,y_preds_proba)
def plot_curve(fpr,tpr):
"""
Plot a roc curve
"""
plt.plot(fpr,tpr,color="red",label="ROC")
plt.xlabel("False positive rate")
plt.ylabel("True Positive Rate")
plt.title("FPR VS. TPR")
plt.suptitle("ROC_CURVE OF HEART DISEASE")
plt.plot([1,0],[1,0],linestyle="--",label="Guessing")
plt.legend(title="Target")
plot_curve(fpr,tpr)
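# Sketch: the area under the ROC curve (AUC) summarises it as a single number;
# roc_auc_score expects the positive-class probabilities computed above.
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test, y_preds_proba)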
print(confusion_matrix(y_test,y_preds))
sns.set(font_scale=1.5)
def plot_conf_mat(y_test,y_preds):
"""
    Plots a nice looking confusion matrix using Seaborn's heatmap
"""
fig,ax=plt.subplots(figsize=(3,3))
ax=sns.heatmap(confusion_matrix(y_test,y_preds),
annot=True,#Displays number in front of map
cbar=False#deletes the bar on the side
)
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
plot_conf_mat(y_test,y_preds)
###Output
_____no_output_____
###Markdown
Now we've got a ROC curve, an AUC metric and a confusion matrix. Let's get a classification report as well as cross-validated precision, recall and f1 score.
###Code
## Classification report
print(classification_report(y_test,y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
This is not cross-validated; it is calculated on only one train/test split. Calculate evaluation metrics using Cross ValidationWe're going to calculate precision, recall and f1-score using cross_val_score
###Code
## Check the best hyperparameter
gs_log_reg.best_params_
## Create a new classifier with best parameters we found
clf=LogisticRegression(C= 0.20433597178569418,
solver= 'liblinear')
from sklearn.model_selection import cross_val_score
## Cross validated accuracy
np.random.seed(42)
cv_accuracy=cross_val_score(clf,X,y,cv=5,scoring="accuracy")
cv_accuracy=cv_accuracy.mean()
## cross validated recall
np.random.seed(42)
cv_recall=cross_val_score(clf,X,y,cv=5,scoring="recall")
cv_recall=cv_recall.mean()
## cross validated precision
np.random.seed(42)
cv_precision=cross_val_score(clf,X,y,cv=5,scoring="precision")
cv_precision=cv_precision.mean()
## f1 score
np.random.seed(42)
cv_f1=cross_val_score(clf,X,y,cv=5,scoring="f1")
cv_f1=cv_f1.mean()
## visulize our cross validated metrics
cv_metrics=pd.DataFrame({"Accuracy":cv_accuracy,
                         "Precision":cv_precision,
"Recall":cv_recall,
"f1":cv_f1},
index=[0])
cv_metrics.T.plot(kind="bar",
legend=False,
title="Cross-validated metrics"
);
###Output
_____no_output_____
###Markdown
Feature importance* It is another way of asking, "which features contribute most to the outcomes of the model and how did they contribute?" Find the most important features* Finding feature importance is different for each machine learning model. One way to find the feature importance is to search for "(Model name) feature importance"Let's find the feature importance for our LogisticRegression model...
###Code
## Fit an instance of LogisticRegression
gs_log_reg.best_params_
clf=LogisticRegression(C= 0.20433597178569418,
solver="liblinear")
clf.fit(X_train,y_train);
df.head()
###Output
_____no_output_____
###Markdown
coef_ shows how the features and the target (label) are related to each other
###Code
# Check coef_
a=clf.coef_
a.size
dict(zip(df.columns,(clf.coef_[0])))
# Match coef's of features to columns
feature_dict=dict(zip(df.columns,list(clf.coef_[0])))
feature_dict
## Visualize feature importance
feature_df=pd.DataFrame(feature_dict,index=[0])
feature_df.T.plot.bar(title="Feature importance",legend=False);
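# Sketch: exponentiating logistic regression coefficients gives odds ratios,
# which can be easier to interpret -- values above 1 increase the odds of
# heart disease, values below 1 decrease them (assumes clf fitted above).
pd.Series(np.exp(clf.coef_[0]), index=X.columns).sort_values()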
###Output
_____no_output_____
###Markdown
**It's telling us how each feature contributes or correlates to the target.* Positive coefficient = above 0: a positive relationship (as the feature value, e.g. slope, increases, the predicted likelihood of disease increases).* Negative coefficient = below 0: a negative correlation.**
###Code
pd.crosstab(df.sex,df.target)
###Output
_____no_output_____
###Markdown
**1. Look at how the disease/no-disease ratio changes as sex increases. For sex = 0 (female) there is almost a three-to-one ratio: 72 with disease to 24 without (72/24 ≈ 3). For sex = 1 (male) the ratio drops to roughly one-to-one, about 50/50 (93 with disease to 114 without), which is consistent with the negative coefficient for sex.**
###Code
pd.crosstab(df.slope,df.target)
###Output
_____no_output_____
###Markdown
Predicting Heart disease using Machine learningWe are going to take the following approach:1. Problem definition2. Data3. Evaluation4. Features5. Modelling6. Experimentation Problem DefinitionIn a statement,> Given clinical parameters about a patient, can we predict whether or not they have heart disease? Data The original data came from the Cleveland database from the UCI Machine Learning Repository.There is also a version of it available on Kaggle.https://www.kaggle.com/ronitf/heart-disease-uci/data Evaluation> If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue the project. FeaturesThis is where you will get all the features or information about the data.**Create Data Dictionary*** age age in years* sex (1 = male; 0 = female)* cp chest pain type* trestbps resting blood pressure (in mm Hg on admission to the hospital)* chol serum cholestoral in mg/dl* fbs (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)* restecg resting electrocardiographic results* thalach maximum heart rate achieved* exang exercise induced angina (1 = yes; 0 = no)* oldpeak ST depression induced by exercise relative to rest Preparing the tools We are going to use pandas, Matplotlib and NumPy for data manipulation and data analysis.
###Code
# Import all the tools we need
# Regular EDA (exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# we want our plots to appear inside the notebook
%matplotlib inline
# Models from sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score,f1_score,recall_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv("heart-disease .csv")
df
df.shape #(rows and columns)
# Data exploration(EDA)
df.head()
df.tail()
# How many of each class there are
df["target"].value_counts()
df["target"].value_counts().plot(kind="bar",color=["salmon","lightblue"])
df.info()
# Are there any missing values
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart disease frequency according to sex
###Code
df.sex.value_counts()
# Compare target column with sex column
pd.crosstab(df.target,df.sex)
# Create a plot of crosstab
pd.crosstab(df.target,df.sex).plot(kind="bar",
figsize=(10,6),
color=["salmon","lightblue"])
plt.title("Heart Disease Frequency for Sex")
plt.xlabel("0= No Disease, 1 = Disease")
plt.ylabel("Amount")
plt.legend(["Female","male"]);
plt.xticks(rotation=0)
###Output
_____no_output_____
###Markdown
Age vs maximum heart rate for heart disease
###Code
# Create another figure
plt.figure(figsize=(10,6))
# Scatter with positive examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c="salmon")
# Scatter with negetive examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
c="lightblue")
# Add some useful info
plt.title("Age vs maximum heart rate for heart disease")
plt.xlabel("Age")
plt.ylabel("Maximum heart rate")
plt.legend(["Disease","No Disease"])
# Check the distribution of the age with histogram
df.age.plot.hist()
###Output
_____no_output_____
###Markdown
Heart Disease frequency per chest pain type
###Code
pd.crosstab(df.cp,df.target)
# Make a correlation matrix
df.corr()
# Lets make our correlation matrix prettier
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15,10))
ax = sns.heatmap(corr_matrix,
annot=True,
                 linewidths=0.5,
                 fmt=".2f",
cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
Modeling
###Code
# Split data into X and y
X = df.drop("target",axis = 1)
y = df["target"]
np.random.seed(42)
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2)
###Output
_____no_output_____
###Markdown
We are going to try 3 different machine learning models 1. Logistic Regression2. K-Nearest Neighbours Classifier3. Random Forest Classifier
###Code
# Put models in a dictionary
models={"Logistic Regression": LogisticRegression(),
"KNN":KNeighborsClassifier(),
"Random Forest":RandomForestClassifier()}
# Create a function to fit and score models
def fit_and_score(models,X_train,X_test,y_train,y_test):
"""
Fits and evaluate different machine learning models
"""
np.random.seed(42)
# Make a dictionary to keep model scores
model_scores={}
# Loop through models
for name,model in models.items():
# Fit the model to the data
model.fit(X_train,y_train)
# Evaluate the model and append it to the model_score
model_scores[name]= model.score(X_test,y_test)
return model_scores
model_scores = fit_and_score(models= models,
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test)
model_scores
###Output
C:\Users\Vikas Konaparthi\Desktop\mlcourse\heart-disease-project\env\lib\site-packages\sklearn\linear_model\_logistic.py:940: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG)
###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_scores,index=["accuracy"])
model_compare.T.plot.bar()
###Output
_____no_output_____
###Markdown
Let's look at the following 1. Hyperparameter tuning2. Feature importance 3. Confusion matrix 4. Cross-validation5. Precision6. Recall7. F1 score8. Classification Report9. ROC curve10. Area under the curve Hyperparameter Tuning
###Code
# Let's tune KNN
train_scores=[]
test_scores=[]
# Create a list of different values for n_neighbors
neighbors = range(1,21)
# Setup KNN instance
knn = KNeighborsClassifier()
for i in neighbors:
knn.set_params(n_neighbors=i)
#Fit the algoritm
knn.fit(X_train,y_train)
# update the scoring list
train_scores.append(knn.score(X_train,y_train))
# Update the test list
test_scores.append(knn.score(X_test,y_test))
train_scores
test_scores
plt.plot(neighbors,train_scores,label="Train")
plt.plot(neighbors,test_scores,label="Test")
plt.xticks(np.arange(1,21,1))
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
print(f"maximum KNN score on the test data: {max(test_scores) * 100:.2f}%")
###Output
maximum KNN score on the test data: 75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCVWe are going to tune: * LogisticRegression * RandomForestClassifier
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C":np.logspace(-4,4,20),
"solver": ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10,1000,50),
"max_depth":[None,3,5,10],
"min_samples_split":np.arange(2,20,2),
"min_samples_leaf":np.arange(1,20,2)}
###Output
_____no_output_____
###Markdown
Randomized Search CV
###Code
# Tune Logistic Regression
np.random.seed(42)
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
#Fit Random hyperparameter for Logistic Regression
rs_log_reg.fit(X_train,y_train)
###Output
Fitting 5 folds for each of 20 candidates, totalling 100 fits
###Markdown
###Code
rs_log_reg.best_params_
rs_log_reg.score(X_test,y_test)
# Now with RandomForestClassifier
np.random.seed(42)
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter =20,
verbose =True)
rs_rf.fit(X_train,y_train)
rs_rf.best_params_
rs_rf.score(X_test,y_test)
model_scores
###Output
_____no_output_____
###Markdown
HyperParameter tuning using GridSearchCV
###Code
log_reg_grid = {"C":np.logspace(-4,4,30),
"solver": ["liblinear"]}
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
gs_log_reg.fit(X_train,y_train)
gs_log_reg.best_params_
gs_log_reg.score(X_test,y_test)
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier beyond accuracy* ROC curve and AUC score* Confusion matrix* Classification report* Precision* Recall* F1-Score... and it would be great if cross-validation was used where possible. To make comparisons and evaluate a trained model, first we need to make predictions.
###Code
y_preds = gs_log_reg.predict(X_test)
y_preds
# Plot the ROC curve and calculate the AUC metrics
plot_roc_curve(gs_log_reg,X_test,y_test)
# Confusion matrix
print(confusion_matrix(y_test,y_preds))
sns.set(font_scale=1.5)
def plot_conf_mat(y_test,y_preds):
fig, ax = plt.subplots(figsize=(3,3))
ax = sns.heatmap(confusion_matrix(y_test,y_preds),
annot=True,
cbar=False)
    plt.xlabel("Predicted label")
    plt.ylabel("True label")
plot_conf_mat(y_test,y_preds)
print(classification_report(y_test,y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculating evaluation metrics using cross-validationWe are going to calculate the accuracy, precision, recall and f1-score using cross_val_score
###Code
# Check best hyper parameters
gs_log_reg.best_params_
# Create new classifier wiith best params
clf = LogisticRegression(C=0.20433597178569418,solver="liblinear")
# CROSS validated accuracy
cv_acc = cross_val_score(clf,X,y,cv=5,scoring="accuracy")
cv_acc
cv_acc =np.mean(cv_acc)
cv_acc
# CROSS validated precision
cv_precision = cross_val_score(clf,X,y,cv=5,scoring="precision")
cv_precision
cv_precision=np.mean(cv_precision)
cv_precision
# CROSS validated recall
cv_recall = cross_val_score(clf,X,y,cv=5,scoring="recall")
cv_recall
cv_recall=np.mean(cv_recall)
cv_recall
# CROSS validated f1_score
cv_f1 = cross_val_score(clf,X,y,cv=5,scoring="f1")
cv_f1
cv_f1=np.mean(cv_f1)
cv_f1
# Vizualize our cross validation metrics
cv_metrics = pd.DataFrame({
"Accuracy":cv_acc,
"Precision":cv_precision,
"Recall":cv_recall,
"f1":cv_f1,
},
index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics",
legend =False)
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
# Fit an instance of Logistic Regression
clf = LogisticRegression(C=0.20433597178569418,solver="liblinear")
clf.fit(X_train,y_train)
# Check coef
clf.coef_
# Match coeff of features to columns
feature_dict = dict(zip(df.columns,list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict,index=[0])
feature_df.T.plot.bar(title="Feature Importance",legend=False)
###Output
_____no_output_____
###Markdown
Predicting Heart Disease using Machine LearningThis notebook looks into using various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes. 1. Problem DefinitionIn a statement,>Given clinical parameters about a patient, can we predict whether or not they have heart disease? 2. DataThe original data came from the Cleveland database from UCI Machine Learning Repository. https://archive.ics.uci.edu/ml/datasets/heart+DiseaseThere is also a version of it available on Kaggle. https://www.kaggle.com/ronitf/heart-disease-uci/The original database contains 76 attributes, but here only 14 attributes will be used. __Attributes__ are the variables what we'll use to predict our __target variable__. 3. Evaluation>If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue this project. 4. Features Heart Disease Data DictionaryThe following are the features that are used to predict the target variable (heart disease or no heart disease).1. age - age in years2. sex - (1 = male; 0 = female)3. cp - chest pain type * 0: Typical angina: chest pain related decrease blood supply to the heart * 1: Atypical angina: chest pain not related to heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of disease4. trestbps - resting blood pressure (in mm Hg on admission to the hospital) * anything above 130-140 is typically cause for concern5. chol - serum cholestoral in mg/dl * serum = LDL + HDL + .2 * triglycerides * above 200 is cause for concern6. fbs - (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) * '>126' mg/dL signals diabetes7. restecg - resting electrocardiographic results * 0: Nothing to note * 1: ST-T Wave abnormality * can range from mild symptoms to severe problems * signals non-normal heart beat * 2: Possible or definite left ventricular hypertrophy *Enlarged heart's main pumping chamber8. thalach - maximum heart rate achieved9. exang - exercise induced angina (1 = yes; 0 = no)10. oldpeak - ST depression induced by exercise relative to rest * looks at stress of heart during exercise * unhealthy heart will stress more11. slope - the slope of the peak exercise ST segment * 0: Upsloping: better heart rate with exercise (uncommon) * 1: Flatsloping: minimal change (typical healthy heart) * 2: Downslopins: signs of unhealthy heart12. ca - number of major vessels (0-3) colored by flourosopy * colored vessel means the doctor can see the blood passing through * the more blood movement the better (no clots)13. thal - thalium stress result * 1,3: normal * 6: fixed defect: used to be defect but okay now * 7: reversable defect: no proper blood movement when exercising14. target - have disease or not (1=yes, 0=no) (= the predicted attribute)No personal identifiable information (PPI) can be found in the dataset. Preparing the toolsPandas, Matplotlib and NumPy will be used for data analysis and manipulation.
###Code
#Import all the tools we need
#Regular EDA (exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Models
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
## Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv("heart-disease.csv")
df.shape
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis or EDA)The goal here is to find out more about the data and become a subject matter expert on the dataset you're working with.
###Code
df.head()
df.tail()
# Find out how many of each class there
df["target"].value_counts()
df["target"].value_counts().plot(kind="bar", color=["salmon","lightblue"]);
df.info()
#Are there any missing values?
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df["sex"].value_counts()
# Compare target columns with sex column
pd.crosstab(df.target, df.sex)
# Create a plot of crosstab
pd.crosstab(df.target, df.sex).plot(kind="bar", figsize=(10,6), color=["salmon","lightblue"])
plt.title("Heart Disease Frequency for Sex")
plt.xlabel("0 = No Disease, 1 = Disease")
plt.ylabel("Amount")
plt.legend(["Female", "Male"]);
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Age vs. Max Heart Rate for Heart Disease
###Code
plt.figure(figsize=(10,6))
#Scatter with positive examples
plt.scatter(df.age[df.target==1],df.thalach[df.target==1], c="salmon")
#Scatter with negative examples
plt.scatter(df.age[df.target==0],df.thalach[df.target==0], c="lightblue");
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Disease", "No Disease"]);
#Check the distribution of the age column with a histogram
df.age.plot.hist();
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Typecp - chest pain type* 0: Typical angina: chest pain related decrease blood supply to the heart* 1: Atypical angina: chest pain not related to heart* 2: Non-anginal pain: typically esophageal spasms (non heart related)* 3: Asymptomatic: chest pain not showing signs of disease
###Code
pd.crosstab(df.cp, df.target)
#Make the crosstab more visiual
pd.crosstab(df.cp, df.target).plot(kind="bar", figsize=(10,6), color=["salmon", "lightblue"])
plt.title("Heart Disease Frequency Per Chest Pain Type")
plt.xlabel("Chest Pain Type")
plt.ylabel("Frequency")
plt.legend(["No Disease", "Disease"])
plt.xticks(rotation = 0);
# Make a correlation matrix
df.corr()
#Let's make the correlation matrix a little prettier
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15,10))
ax = sns.heatmap(corr_matrix, annot=True, linewidths=0.5, fmt=".2f", cmap="YlGnBu")
###Output
_____no_output_____
###Markdown
5. Modeling
###Code
df.head()
#Split data into X and y
X = df.drop("target", axis=1)
y=df["target"]
X
y
#Split data into train and test sets
np.random.seed(42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train
y_train, len(y_train)
###Output
_____no_output_____
###Markdown
Now we've got our data split into training and test sets, it's time to build a machine learning model. We'll train it (find the patterns) on the training set. And we'll test it (use the patterns) on the test set. We're going to try 3 different machine learning models:1. Logistic Regression2. K-Nearest Neighbors Classifier3. Random Forest Classifier
###Code
## Put models in a dictionary
models = {"Logistic Regression": LogisticRegression(), "KNN": KNeighborsClassifier(), "Random Forest": RandomForestClassifier()}
#Create a function to fit and score the models
def fit_and_score(models, X_train, X_test, y_train, y_test):
"""
Fits and evaluates given machine learning models.
models : a dict of different Scikit-Learn machine learning models
X_train : training data
X_test : testing data
y_train : labels assosciated with training data
y_test : labels assosciated with test data
"""
# Set random seed
np.random.seed(42)
#Make a dictionary to keep model scores
model_scores={}
#Loop through models
for name, model in models.items():
#Fit the model to the data
model.fit(X_train, y_train)
#Evaluate the model and append its score to model_scores
model_scores[name] = model.score(X_test, y_test)
return model_scores
model_scores = fit_and_score(models=models, X_train=X_train, X_test=X_test, y_train=y_train, y_test=y_test)
model_scores
###Output
C:\Users\ayda\sample_project\env\lib\site-packages\sklearn\linear_model\_logistic.py:763: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Model comparison
###Code
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
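# Sketch: pick out the best-scoring model programmatically rather than reading
# it off the bar chart.
max(model_scores, key=model_scores.get)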
###Output
_____no_output_____
###Markdown
We can't really see it from the graph but looking at the dictionary, the LogisticRegression() model performs best.Let's look at the following:* Hyperparameter tuning* Feature importance* Confusion matrix* Cross-validation* Precision* Recall* F1 Score* Classification Report* ROC Curve* Area under the curve (AUC) Hyperparameter tuning (by hand)
###Code
#Let's tune KNN
train_scores = []
test_scores = []
#Create a list of different values for n_neighbors
neighbors = range(1,21)
knn = KNeighborsClassifier()
for i in neighbors:
knn.set_params(n_neighbors=i)
#Fit the algorithm
knn.fit(X_train, y_train)
#Update the training scores list
train_scores.append(knn.score(X_train, y_train))
#Update the test scores list
test_scores.append(knn.score(X_test, y_test))
train_scores
test_scores
plt.plot(neighbors, train_scores, label="Train score")
plt.plot(neighbors, test_scores, label="Test score")
plt.xticks(np.arange(1,21,1))
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
print(f"Maximum KNN score on the test data:{max(test_scores)*100:.2f}%")
###Output
Maximum KNN score on the test data:75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCVWe're going to tune:* LogisticRegression()* RandomForestClassifier()using RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C": np.logspace(-4,4,20), "solver":["liblinear"]}
#Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10,1000,50),"max_depth": [None, 3,5,10], "min_samples_split": np.arange(2,20,2),"min_samples_leaf": np.arange(1,20,2)}
# Tune Logistic Regression
np.random.seed(42)
#Setup random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),param_distributions = log_reg_grid, cv=5, n_iter=20, verbose=True)
#Fit random hyperparameter search model for LogisticRegression
rs_log_reg.fit(X_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Now we've tuned LogisticRegression(), let's do the same for RandomForestClassifier()...
###Code
#Setup random seed
np.random.seed(42)
#Setup random hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(), param_distributions=rf_grid, cv=5, n_iter=20,verbose=True)
#Fit
rs_rf.fit(X_train, y_train)
# Find the best hyperparameters
rs_rf.best_params_
#Evaluate the randomized search RandomForestClassifier model
rs_rf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Hyperparameter tuning with GridSearchCVSince our LogisticRegression model provides the best scores so far, we'll try and improve them again using GridSearchCV...
###Code
# Different Hyperparameters for LogisticRegression Model
log_reg_grid = {"C":np.logspace(-4,4,30), "solver":["liblinear"]}
#Setup grid hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(), param_grid=log_reg_grid, cv=5, verbose=True)
#Fit grid hyperparameter search model
gs_log_reg.fit(X_train, y_train);
gs_log_reg.best_params_
# Evaluate the grid search LogisticRegression model
gs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier, beyond accuracy* ROC curve and AUC score* Confusion matrix* Classification Report* Precision* Recall* F1 ScoreTo make comparisons and evaluate our trained model, first we need to make predictions.
###Code
# Make predictions with tuned model
y_preds = gs_log_reg.predict(X_test)
y_preds
y_test
# Plot ROC curve and calculate AUC metric
plot_roc_curve(gs_log_reg, X_test, y_test);
# Confusion matrix
print(confusion_matrix(y_test,y_preds))
sns.set(font_scale=1.5)
def plot_conf_mat(y_test, y_preds):
fig, ax = plt.subplots(figsize=(3,3))
ax= sns.heatmap(confusion_matrix(y_test,y_preds), annot=True, cbar=False)
plt.xlabel("Predicted label") # predictions go on the x-axis
plt.ylabel("True label") # true labels go on the y-axis
plot_conf_mat(y_test, y_preds)
###Output
_____no_output_____
###Markdown
Now we've got a ROC curve, an AUC metric and a confusion matrix, let's get a classification report as well as cross-validated precision, recall and f1-score.
###Code
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate evaluation metrics using cross-validationWe're going to calculate accuracy, precision, recall and f1-score of our model using cross-validation and to do so we'll be using cross_val_score().
###Code
#Check best hyperparameters
gs_log_reg.best_params_
#Create a new classifier with best parameters
clf = LogisticRegression(C=0.20433597178569418, solver = "liblinear")
#Cross-validated accuracy
cv_acc = cross_val_score(clf, X, y, cv=5, scoring="accuracy")
cv_acc = np.mean(cv_acc)
cv_acc
#Cross-validated precision
cv_precision = cross_val_score(clf, X, y, cv=5, scoring="precision")
cv_precision = np.mean(cv_precision)
cv_precision
# Cross-validated recall
cv_recall = cross_val_score(clf, X, y, cv=5, scoring="recall")
cv_recall = np.mean(cv_recall)
cv_recall
# Cross-validated F1-score
cv_f1 = cross_val_score(clf, X, y, cv=5, scoring="f1")
cv_f1 = np.mean(cv_f1)
cv_f1
# Visualize cross-validated metrics
cv_metrics = pd.DataFrame({"Accuracy": cv_acc, "Precision": cv_precision, "Recall": cv_recall, "F1": cv_f1}, index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics", legend=False);
###Output
_____no_output_____
###Markdown
Feature ImportanceFeature importance is another way of asking, "which features contribute most to the outcomes of the model?"Or for our problem, trying to predict heart disease using a patient's medical characteristics, which characteristics contribute most to a model predicting whether someone has heart disease or not?
###Code
# Fit an instance of logistic regression
clf = LogisticRegression(C= 0.20433597178569418, solver="liblinear")
clf.fit(X_train, y_train);
#Check coef_
clf.coef_
# Match coef's of features to columns
feature_dict = dict(zip(df.columns, list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title="Feature Importance", legend=False);
pd.crosstab(df["sex"], df["target"])
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____
###Markdown
Predicting heart disease using machine learning This notebook looks into using various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes.We're going to take the following approach:1. Problem definition2. Data3. Evaluation4. Features5. Modelling6. Experimentation Preparing the tools
###Code
# Import all the tools we need
# Regular EDA (exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# we want our plots to appear inside the notebook
%matplotlib inline
# Models from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load data
###Code
df = pd.read_csv("./data/heart-disease.csv")
df.shape # (rows, columns)
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis or EDA)
###Code
df.head()
df.tail()
# Let's find out how many of each class there
df["target"].value_counts()
df["target"].value_counts().plot(kind="bar", color=["salmon", "lightblue"]);
df.info()
# Are there any missing values?
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df.sex.value_counts()
# sex - (1 = male; 0 = female)
# Compare target column with sex column
pd.crosstab(df.target, df.sex)
# Create a plot of crosstab
pd.crosstab(df.target, df.sex).plot(kind="bar",
figsize=(10, 6),
color=["salmon", "lightblue"])
plt.title("Heart Disease Frequency for Sex")
plt.xlabel("0 = No Disease, 1 = Disease")
plt.ylabel("Amount")
plt.legend(["Female", "Male"]);
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Age vs. Max Heart Rate for Heart Disease
###Code
# Create another figure
plt.figure(figsize=(10, 6))
# Scatter with postivie examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c="salmon")
# Scatter with negative examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
c="lightblue")
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Disease", "No Disease"]);
# Check the distribution of the age column with a histogram
df.age.plot.hist();
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Type cp - chest pain type0. Typical angina: chest pain related decrease blood supply to the heart1. Atypical angina: chest pain not related to heart2. Non-anginal pain: typically esophageal spasms (non heart related)3. Asymptomatic: chest pain not showing signs of disease
###Code
pd.crosstab(df.cp, df.target)
# Make the crosstab more visual
pd.crosstab(df.cp, df.target).plot(kind="bar",
figsize=(10, 6),
color=["lightblue", "salmon"])
# Add some communication
plt.title("Heart Disease Frequency Per Chest Pain Type")
plt.xlabel("Chest Pain Type")
plt.ylabel("Amount")
plt.legend(["No Disease", "Disease"])
plt.xticks(rotation=0);
df.head()
# Make a correlation matrix
df.corr()
# Let's make our correlation matrix a little prettier
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_matrix,
annot=True,
linewidths=0.5,
fmt=".2f",
cmap="YlGnBu");
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
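# Note: the two lines above work around a matplotlib 3.1.1 bug that cropped the
# top and bottom rows of seaborn heatmaps; they are unnecessary on newer versions.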
from pandas.plotting import scatter_matrix
attributes = ["target", "cp", "chol", "thalach"]
scatter_matrix(df[attributes], figsize=(12, 8));
###Output
_____no_output_____
###Markdown
5. Modelling
###Code
df.head()
# Split data into X and y
X = df.drop("target", axis=1)
y = df["target"]
# Split data into train and test sets
np.random.seed(42)
# Split into train & test set
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.2)
###Output
_____no_output_____
###Markdown
We're going to try 3 different machine learning models:
1. Logistic Regression
2. K-Nearest Neighbours Classifier
3. Random Forest Classifier
###Code
# Put models in a dictionary
models = {"Logistic Regression": LogisticRegression(),
"KNN": KNeighborsClassifier(),
"Random Forest": RandomForestClassifier()}
# Create a function to fit and score models
def fit_and_score(models, X_train, X_test, y_train, y_test):
"""
Fits and evaluates given machine learning models.
    models : a dict of different Scikit-Learn machine learning models
X_train : training data (no labels)
X_test : testing data (no labels)
y_train : training labels
y_test : test labels
"""
# Set random seed
np.random.seed(42)
# Make a dictionary to keep model scores
model_scores = {}
# Loop through models
for name, model in models.items():
# Fit the model to the data
model.fit(X_train, y_train)
# Evaluate the model and append its score to model_scores
model_scores[name] = model.score(X_test, y_test)
return model_scores
model_scores = fit_and_score(models=models,
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test)
model_scores
###Output
/home/deekshith/.local/lib/python3.8/site-packages/sklearn/linear_model/_logistic.py:762: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Model Comparison
###Code
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
###Output
_____no_output_____
###Markdown
Now we've got a baseline model... and we know a model's first predictions aren't always what we should base our next steps off. What should we do?
Let's look at the following:
* Hyperparameter tuning
* Feature importance
* Confusion matrix
* Cross-validation
* Precision
* Recall
* F1 score
* Classification report
* ROC curve
* Area under the curve (AUC)
Hyperparameter tuning (by hand)
###Code
# Let's tune KNN
train_scores = []
test_scores = []
# Create a list of different values for n_neighbors
neighbors = range(1, 21)
# Setup KNN instance
knn = KNeighborsClassifier()
# Loop through different n_neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# Fit the algorithm
knn.fit(X_train, y_train)
# Update the training scores list
train_scores.append(knn.score(X_train, y_train))
# Update the test scores list
test_scores.append(knn.score(X_test, y_test))
train_scores
test_scores
plt.plot(neighbors, train_scores, label="Train score")
plt.plot(neighbors, test_scores, label="Test score")
plt.xticks(np.arange(1, 21, 1))
plt.xlabel("Number of neighbors")
plt.ylabel("Model score")
plt.legend()
print(f"Maximum KNN score on the test data: {max(test_scores)*100:.2f}%")
###Output
Maximum KNN score on the test data: 75.41%
###Markdown
Hyperparameter tuning with RandomizedSearchCV
We're going to tune:
* LogisticRegression()
* RandomForestClassifier()
... using RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {"C": np.logspace(-4, 4, 20),
"solver": ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rf_grid = {"n_estimators": np.arange(10, 1000, 50),
"max_depth": [None, 3, 5, 10],
"min_samples_split": np.arange(2, 20, 2),
"min_samples_leaf": np.arange(1, 20, 2)}
# Tune LogisticRegression
np.random.seed(42)
# Setup random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for LogisticRegression
rs_log_reg.fit(X_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
# Tune RandomForestClassifier
# Setup random seed
np.random.seed(42)
# Setup random hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model for RandomForestClassifier()
rs_rf.fit(X_train, y_train)
# Find the best hyperparameters
rs_rf.best_params_
# Evaluate the randomized search RandomForestClassifier model
rs_rf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning with GridSearchCV
Since our LogisticRegression model provides the best scores so far, we'll try and improve them again using GridSearchCV.
###Code
# Different hyperparameters for our LogisticRegression model
log_reg_grid = {"C": np.logspace(-4, 4, 30),
"solver": ["liblinear"]}
# Setup grid hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameter search model
gs_log_reg.fit(X_train, y_train);
# Check the best hyperparameters
gs_log_reg.best_params_
# Evaluate the grid search LogisticRegression model
gs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Evaluating our tuned machine learning classifier, beyond accuracy
* ROC curve and AUC score
* Confusion matrix
* Classification report
* Precision
* Recall
* F1-score
... and it would be great if cross-validation was used where possible.
To make comparisons and evaluate our trained model, first we need to make predictions.
###Code
# Make predictions with tuned model
y_preds = gs_log_reg.predict(X_test)
# Plot ROC curve and calculate AUC metric
plot_roc_curve(gs_log_reg, X_test, y_test)
# Confusion matrix
print(confusion_matrix(y_test, y_preds))
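# Hedged extra (not in the original cell): the precision_score, recall_score and
# f1_score functions imported above give the same metrics individually
print("Precision:", precision_score(y_test, y_preds))
print("Recall:", recall_score(y_test, y_preds))
print("F1 score:", f1_score(y_test, y_preds))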
sns.set(font_scale=1.5)
def plot_conf_mat(y_test, y_preds):
"""
Plots a nice looking confusion matrix using Seaborn's heatmap()
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True,
cbar=False)
plt.xlabel("True label")
plt.ylabel("Predicted label")
bottom, top = ax.get_ylim()
ax.set_ylim(bottom + 0.5, top - 0.5)
plot_conf_mat(y_test, y_preds)
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate evaluation metrics using cross-validation
We're going to calculate accuracy, precision, recall and f1-score of our model using cross-validation and to do so we'll be using cross_val_score().
###Code
# Check best hyperparameters
gs_log_reg.best_params_
# Create a new classifier with best parameters
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
# Cross-validated accuracy
cv_acc = cross_val_score(clf,
X,
y,
cv=5,
scoring="accuracy")
cv_acc
cv_acc = np.mean(cv_acc)
cv_acc
# Cross-validated precision
cv_precision = cross_val_score(clf,
X,
y,
cv=5,
scoring="precision")
cv_precision = np.mean(cv_precision)
cv_precision
cv_recall = cross_val_score(clf,
X,
y,
cv=5,
scoring="recall")
cv_recall = np.mean(cv_recall)
cv_recall
# Cross-validated f1-score
cv_f1 = cross_val_score(clf,
X,
y,
cv=5,
scoring="f1")
cv_f1 = np.mean(cv_f1)
cv_f1
# Visualize cross-validated metrics
cv_metrics = pd.DataFrame({"Accuracy": cv_acc,
"Precision": cv_precision,
"Recall": cv_recall,
"F1": cv_f1},
index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics",
legend=False);
###Output
_____no_output_____
###Markdown
Feature Importance
Feature importance is another way of asking, "which features contributed most to the outcomes of the model and how did they contribute?"
Finding feature importance is different for each machine learning model. One way to find feature importance is to search for "(MODEL NAME) feature importance".
Let's find the feature importance for our LogisticRegression model...
###Code
# Fit an instance of LogisticRegression
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
clf.fit(X_train, y_train);
# Check coef_
clf.coef_
df.head()
# Match coef's of features to columns
feature_dict = dict(zip(df.columns, list(clf.coef_[0])))
feature_dict
# Visualize feature importance
feature_df = pd.DataFrame(feature_dict, index=[0])
feature_df.T.plot.bar(title="Feature Importance", legend=False);
pd.crosstab(df["sex"], df["target"])
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____
###Markdown
Predicting Heart Disease using Machine LearningThis notebook will introduce some foundation machine learning and data science concepts by exploring the problem of heart disease **classification**.It is intended to be an end-to-end example of what a data science and machine learning **proof of concept** might look like. What is classification?Classification involves deciding whether a sample is part of one class or another **(single-class classification)**. If there are multiple class options, it's referred to as **multi-class classification**. What we'll end up withSince we already have a dataset, we'll approach the problem with the following machine learning modelling framework. 6 Step Machine Learning Modelling Framework More specifically, we'll look at the following topics.* **Exploratory data analysis (EDA)** - the process of going through a dataset and finding out more about it.* **Model training** - create model(s) to learn to predict a target variable based on other variables.* **Model evaluation** - evaluating a models predictions using problem-specific evaluation metrics.* **Model comparison** - comparing several different models to find the best one.* **Model fine-tuning** - once we've found a good model, how can we improve it?* **Feature importance** - since we're predicting the presence of heart disease, are there some things which are more important for prediction?* **Cross-validation** - if we do build a good model, can we be sure it will work on unseen data?* **Reporting what we've found** - if we had to present our work, what would we show someone?To work through these topics, we'll use pandas, Matplotlib and NumPy for data anaylsis, as well as, Scikit-Learn for machine learning and modelling tasks. Tools which can be used for each step of the machine learning modelling process.We'll work through each step and by the end of the notebook, we'll have a handful of models, all which can predict whether or not a person has heart disease based on a number of different parameters at a considerable accuracy.You'll also be able to describe which parameters are more indicative than others, for example, sex may be more important than age. 1. Problem DefinitionIn our case, the problem we will be exploring is **binary classification** (a sample can only be one of two things).This is because we're going to be using a number of differnet **features** (pieces of information) about a person to predict whether they have heart disease or not.In a statement,> Given clinical parameters about a patient, can we predict whether or not they have heart disease? 2. DataWhat you'll want to do here is dive into the data your problem definition is based on. This may involve, sourcing, defining different parameters, talking to experts about it and finding out what you should expect.The original data came from the Cleveland database from UCI Machine Learning Repository.Howevever, we've downloaded it in a formatted way from Kaggle.The original database contains 76 attributes, but here only 14 attributes will be used. **Attributes** (also called **features**) are the variables what we'll use to predict our **target variable**.Attributes and features are also referred to as **independent variables** and a target variable can be referred to as a dependent variable.> We use the independent variables to predict our dependent variable.Or in our case, the independent variables are a patients different medical attributes and the dependent variable is whether or not they have heart disease. 3. 
EvaluationThe evaluation metric is something you might define at the start of a project.Since machine learning is very experimental, you might say something like,> If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursure this project.The reason this is helpful is it provides a rough goal for a machine learning engineer or data scientist to work towards.However, due to the nature of experimentation, the evaluation metric may change over time. 4. FeaturesFeatures are different parts of the data. During this step, you'll want to start finding out what you can about the data.One of the most common ways to do this, is to create a **data dictionary**. Heart Disease Data DictionaryA data dictionary describes the data you're dealing with. Not all datasets come with them so this is where you may have to do your research or ask a subject matter expert (someone who knows about the data) for more.The following are the features we'll use to predict our target variable (heart disease or no heart disease).1. age - age in years2. sex - (1 = male; 0 = female)3. cp - chest pain type * 0: Typical angina: chest pain related decrease blood supply to the heart * 1: Atypical angina: chest pain not related to heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of disease4. trestbps - resting blood pressure (in mm Hg on admission to the hospital) * anything above 130-140 is typically cause for concern5. chol - serum cholestoral in mg/dl * serum = LDL + HDL + .2 * triglycerides * above 200 is cause for concern6. fbs - (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) * '>126' mg/dL signals diabetes7. restecg - resting electrocardiographic results * 0: Nothing to note * 1: ST-T Wave abnormality * can range from mild symptoms to severe problems * signals non-normal heart beat * 2: Possible or definite left ventricular hypertrophy * Enlarged heart's main pumping chamber8. thalach - maximum heart rate achieved9. exang - exercise induced angina (1 = yes; 0 = no)10. oldpeak - ST depression induced by exercise relative to rest * looks at stress of heart during excercise * unhealthy heart will stress more11. slope - the slope of the peak exercise ST segment * 0: Upsloping: better heart rate with excercise (uncommon) * 1: Flatsloping: minimal change (typical healthy heart) * 2: Downslopins: signs of unhealthy heart12. ca - number of major vessels (0-3) colored by flourosopy * colored vessel means the doctor can see the blood passing through * the more blood movement the better (no clots)13. thal - thalium stress result * 1,3: normal * 6: fixed defect: used to be defect but ok now * 7: reversable defect: no proper blood movement when excercising14. target - have disease or not (1=yes, 0=no) (= the predicted attribute)Note: No personal identifiable information (PPI) can be found in the dataset.It's a good idea to save these to a Python dictionary or in an external file, so we can look at them later without coming back here. Preparing the toolsAt the start of any project, it's custom to see the required libraries imported in a big chunk like you can see below.However, in practice, your projects may import libraries as you go. After you've spent a couple of hours working on your problem, you'll probably want to do some tidying up. 
This is where you may want to consolidate every library you've used at the top of your notebook (like the cell below).The libraries you use will differ from project to project. But there are a few which will you'll likely take advantage of during almost every structured data project.* pandas for data analysis.* NumPy for numerical operations.* Matplotlib/seaborn for plotting or data visualization.* Scikit-Learn for machine learning modelling and evaluation.
###Code
# Create data dictionary
heart_disease_dict = {
"age": "age in years",
"sex": "(1 = male; 0 = female)",
"cp": "chest pain type",
"trestbps": "resting blood pressure (in mm Hg on admission to the hospital)",
"chol": "serum cholestoral in mg/dl",
"fbs": "(fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)",
"restecg": "resting electrocardiographic results",
"thalach": "maximum heart rate achieved",
"exang": "exercise induced angina (1 = yes; 0 = no)",
"oldpeak": "ST depression induced by exercise relative to rest",
"slope": "the slope of the peak exercise ST segment",
"ca": "number of major vessels (0-3) colored by flourosopy",
"thal": "3 = normal; 6 = fixed defect; 7 = reversable defect",
"target": "(1 = heart disease, 0 = no heart disease)"
}
heart_disease_dict
###Output
_____no_output_____
###Markdown
Preparing the toolsWe are going to use pandas, Matplotlib and Numpy for data analysis and manipulation.
###Code
# Import all the tools we need
# Regular EDA (Exploratory Data Analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# We want our plots to appear inside the notebooks
%matplotlib inline
# Models from Scikit-Learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load Data
There are many different kinds of ways to store data. The typical way of storing **tabular data**, data similar to what you'd see in an Excel file, is in `.csv` format. `.csv` stands for comma separated values.
Pandas has a built-in function to read `.csv` files called `read_csv()` which takes the file pathname of your `.csv` file. You'll likely use this a lot.
###Code
df = pd.read_csv("heart-disease.csv") # 'DataFrame' shortened to 'df'
df.shape # (rows, columns)
###Output
_____no_output_____
###Markdown
Data Exploration (Exploratory Data Analysis or EDA)Once you've imported a dataset, the next step is to explore. There's no set way of doing this. But what you should be trying to do is become more and more familiar with the dataset.Compare different columns to each other, compare them to the target variable. Refer back to your **data dictionary** and remind yourself of what different columns mean.Your goal is to become a subject matter expert on the dataset you're working with. So if someone asks you a question about it, you can give them an explanation and when you start building models, you can sound check them to make sure they're not performing too well (**overfitting**) or why they might be performing poorly (**underfitting**).Since EDA has no real set methodolgy, the following is a short check list you might want to walk through:1. What question(s) are you trying to solve (or prove wrong)?2. What kind of data do you have and how do you treat different types?3. What’s missing from the data and how do you deal with it?4. Where are the outliers and why should you care about them?5. How can you add, change or remove features to get more out of your data?Once of the quickest and easiest ways to check your data is with the `head()` function. Calling it on any dataframe will print the top 5 rows, `tail()` calls the bottom 5. You can also pass a number to them like `head(10)` to show the top 10 rows.
###Code
df.head()
df.tail()
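# Hedged example of the head(10) usage mentioned in the markdown above:
# pass a number to show that many rows from the top
df.head(10)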
###Output
_____no_output_____
###Markdown
Since these two values are close to even, our `target` column can be considered **balanced**. An **unbalanced** target column, meaning some classes have far more samples, can be harder to model than a balanced set. Ideally, all of your target classes have the same number of samples.If you'd prefer these values in percentages, `value_counts()` takes a parameter, `normalize` which can be set to true. `value_counts()` allows you to show how many times each of the values of a **categorical** column appear.
###Code
# Let's find out how many of each class there are, 1=has heart disease, 0=does not have heart disease
df["target"].value_counts()
###Output
_____no_output_____
###Markdown
We can plot the target column value counts by calling the `plot()` function and telling it what kind of plot we'd like, in this case, bar is good.
###Code
df["target"].value_counts().plot(kind="bar", color=["#C77858","#5862C7"]);
###Output
_____no_output_____
###Markdown
`df.info()` gives a quick insight into the number of missing values you have and what type of data you're working with.
In our case, there are no missing values and all of our columns are numerical in nature.
###Code
df.info()
# Are there any missing values?
df.isna().sum()
###Output
_____no_output_____
###Markdown
Another way to get some quick insights on your dataframe is to use `df.describe()`. `describe()` shows a range of different metrics about your numerical columns such as mean, max and standard deviation.
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to GenderIf you want to compare two columns to each other, you can use the function `pd.crosstab(column_1, column_2)`.This is helpful if you want to start gaining an intuition about how your independent variables interact with your dependent variables.Let's compare our target column with the sex column.Remember from our data dictionary, for the target column, 1 = heart disease present, 0 = no heart disease. And for sex, 1 = male, 0 = female.
###Code
df.sex.value_counts() # 1 = males, 0 = females
# Compare target column with sex column
pd.crosstab(df.target, df.sex)
###Output
_____no_output_____
###Markdown
What can we infer from this? Let's make a simple heuristic.
Since there are about 100 women and 72 of them have a positive value of heart disease being present, we might infer, based on this one variable, that if the participant is a woman, there's a 75% chance she has heart disease.
As for males, there are about 200 total with around half indicating a presence of heart disease. So we might predict, if the participant is male, 50% of the time he will have heart disease.
Averaging these two values, we can assume, based on no other parameters, that if there's a person, there's a 62.5% chance they have heart disease.
This can be our very simple **baseline**, which we'll try to beat with machine learning.
Making our crosstab visual
You can plot the crosstab by using the `plot()` function and passing it a few parameters such as `kind` (the type of plot you want), `figsize=(length, width)` (how big you want it to be) and `color=[colour_1, colour_2]` (the different colours you'd like to use).
Different metrics are represented best with different kinds of plots. In our case, a bar graph is great. We'll see examples of more later. And with a bit of practice, you'll gain an intuition of which plot to use with different variables.
###Code
pd.crosstab(df.target, df.sex).plot(kind="bar", color=["#C77858", "#5862C7"]);
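# Sketch (not in the original): the same crosstab expressed as per-sex proportions,
# which is where the ~75% (female) and ~50% (male) heuristics above come from
pd.crosstab(df.target, df.sex, normalize="columns")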
###Output
_____no_output_____
###Markdown
Nice! But our plot is looking pretty bare. Let's add some attributes.
We'll create the plot again with `crosstab()` and `plot()`, then add some helpful labels to it with `plt.title()`, `plt.xlabel()` and more.
To add the attributes, you call them on `plt` within the same cell as where you create the graph.
###Code
pd.crosstab(df.target, df.sex).plot(kind="bar", color=["#C77858", "#5862C7"])
plt.title("Heart Disease Frequency by Sex")
plt.xlabel("0 = No Heart Disease, 1 = Has Heart Disease")
plt.ylabel("Count")
plt.legend(["Female", "Male"])
plt.xticks(rotation=0);
###Output
_____no_output_____
###Markdown
Age vs Max Heart rate for Heart DiseaseLet's try combining a couple of independent variables, such as, `age` and `thalach` (maximum heart rate) and then comparing them to our target variable `heart disease`.Because there are so many different values for `age` and `thalach`, we'll use a scatter plot.
###Code
# Create another figure
plt.figure(figsize=(10, 6))
# Scatter with positive examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c="#C77858");
# Scatter with negative examples
plt.scatter(df.age[df.target==0],
            df.thalach[df.target==0],
c="#5862C7")
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Heart Disease", "No Heart Disease"]);
###Output
_____no_output_____
###Markdown
What can we infer from this?It seems the younger someone is, the higher their max heart rate (dots are higher on the left of the graph) and the older someone is, the more green dots there are. But this may be because there are more dots all together on the right side of the graph (older participants).Both of these are observational of course, but this is what we're trying to do, build an understanding of the data.Let's check the age **distribution**.
###Code
# Check the distribution of the age column with a histogram
df.age.plot.hist();
###Output
_____no_output_____
###Markdown
We can see it's a normal distribution but slightly swaying to the right, which reflects in the scatter plot above.Let's keep going. Heart Disease Frequency per Chest Pain TypeLet's try another independent variable. This time, `cp` (chest pain).3. cp - chest pain type * 0: Typical angina: chest pain related decrease blood supply to the heart * 1: Atypical angina: chest pain not related to heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of diseaseWe'll use the same process as we did before with `sex`.
###Code
pd.crosstab(df.cp, df.target)
# Make the crosstab more visual
pd.crosstab(df.cp, df.target).plot(kind="bar",
figsize=(10, 6),
color=["#C77858", "#5862C7"])
# Add some communication
plt.title("Heart Disease Frequency Per Chest Pain Type")
plt.xlabel("Chest Pain Type")
plt.ylabel("Amount")
plt.legend(["No Heart Disease", "Heart Disease"]);
###Output
_____no_output_____
###Markdown
What can we infer from this?Remember from our data dictionary what the different levels of chest pain are.1. cp - chest pain type * 0: Typical angina: chest pain related decrease blood supply to the heart * 1: Atypical angina: chest pain not related to heart * 2: Non-anginal pain: typically esophageal spasms (non heart related) * 3: Asymptomatic: chest pain not showing signs of diseaseIt's interesting the atypical agina (value 1) states it's not related to the heart but seems to have a higher ratio of participants with heart disease than not.Wait...?What does atypical agina even mean?At this point, it's important to remember, if your data dictionary doesn't supply you enough information, you may want to do further research on your values. This research may come in the form of asking a **subject matter expert** (such as a cardiologist or the person who gave you the data) or Googling to find out more.According to PubMed, it seems even some medical professionals are confused by the term.>Today, 23 years later, “atypical chest pain” is still popular in medical circles. Its meaning, however, remains unclear. A few articles have the term in their title, but do not define or discuss it in their text. In other articles, the term refers to noncardiac causes of chest pain.Although not conclusive, this graph above is a hint at the confusion of defintions being represented in data. Correlation between independent variablesFinally, we'll compare all of the independent variables in one hit.Why?Because this may give an idea of which independent variables may or may not have an impact on our target variable.We can do this using `df.corr()` which will create a correlation matrix for us, in other words, a big table of numbers telling us how related each variable is the other.
###Code
# Make a correlation matrix
df.corr()
# Let's make our correlation matrix a little easier to visualize
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15,10))
ax = sns.heatmap(corr_matrix,
annot=True,
linewidths=0.5,
fmt=".2f",
cmap="YlGnBu");
###Output
_____no_output_____
###Markdown
Much better. A higher positive value means a potential positive correlation (increase) and a higher negative value means a potential negative correlation (decrease). Enough EDA, let's modelRemember, we do exploratory data analysis (EDA) to start building an intuitition of the dataset.What have we learned so far? Aside from our basline estimate using `sex`, the rest of the data seems to be pretty distributed.So what we'll do next is **model driven EDA**, meaning, we'll use machine learning models to drive our next questions.A few extra things to remember:* Not every EDA will look the same, what we've seen here is an example of what you could do for structured, tabular dataset.* You don't necessarily have to do the same plots as we've done here, there are many more ways to visualize data, I encourage you to look at more.* We want to quickly find: * Distributions (`df.column.hist()`) * Missing values (`df.info()`) * OutliersLet's build some models.------------------------------------------------------------------------------------------------------------------------------- 5. ModelingWe've explored the data, now we'll try to use machine learning to predict our target variable based on the 13 independent variables.Remember our problem?>Given clinical parameters about a patient, can we predict whether or not they have heart disease?That's what we'll be trying to answer.And remember our evaluation metric?>If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursure this project.That's what we'll be aiming for.But before we build a model, we have to get our dataset ready.Let's look at it again.
###Code
df.head()
###Output
_____no_output_____
###Markdown
We're trying to predict our target variable using all of the other variables.To do this, we'll split the target variable from the rest.
###Code
X = df.drop("target", axis=1)
y = df["target"]
X
y
###Output
_____no_output_____
###Markdown
Training and test splitNow comes one of the most important concepts in machine learning, the **training/test split**.This is where you'll split your data into a **training set** and a **test set**.You use your training set to train your model and your test set to test it.The test set must remain separate from your training set. Why not use all the data to train a model?Let's say you wanted to take your model into the hospital and start using it on patients. How would you know how well your model goes on a new patient not included in the original full dataset you had?This is where the test set comes in. It's used to mimic taking your model to a real environment as much as possible.And it's why it's important to never let your model learn from the test set, it should only be evaluated on it.To split our data into a training and test set, we can use Scikit-Learn's `train_test_split()` and feed it our independent and dependent variables (`X` & `y`).
###Code
np.random.seed(42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
The `test_size` parameter is used to tell the `train_test_split()` function how much of our data we want in the test set.A rule of thumb is to use 80% of your data to train on and the other 20% to test on.For our problem, a train and test set are enough. But for other problems, you could also use a validation (train/validation/test) set or cross-validation (we'll see this in a second).But again, each problem will differ. The post, How (and why) to create a good validation set by Rachel Thomas is a good place to go to learn more.Let's look at our training data.
###Code
X_train.head()
y_train, len(y_train)
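# Hedged check (not in the original): confirm the roughly 80/20 split described above
X_train.shape, X_test.shape, y_train.shape, y_test.shape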
###Output
_____no_output_____
###Markdown
Model choicesNow we've got our data prepared, we can start to fit models. We'll be using the following and comparing their results.1. Logistic Regression - LogisticRegression()2. K-Nearest Neighbors - KNeighboursClassifier()3. RandomForest - RandomForestClassifier()**Why these?**If we look at the Scikit-Learn algorithm cheat sheet, we can see we're working on a classification problem and these are the algorithms it suggests (plus a few more). An example path we can take using the Scikit-Learn Machine Learning Map "Wait, I don't see Logistic Regression and why not use LinearSVC?"Good questions.I was confused too when I didn't see Logistic Regression listed as well because when you read the Scikit-Learn documentation on it, you can see it's a model for classification.And as for LinearSVC, let's pretend we've tried it, and it doesn't work, so we're following other options in the map.For now, knowing each of these algorithms inside and out is not essential.Machine learning and data science is an iterative practice. These algorithms are tools in your toolbox.In the beginning, on your way to becoming a practioner, it's more important to understand your problem (such as, classification versus regression) and then knowing what tools you can use to solve it.Since our dataset is relatively small, we can experiment to find which algorithm performs best.All of the algorithms in the Scikit-Learn library use the same functions, for training a model, `model.fit(X_train, y_train)` and for scoring a model `model.score(X_test, y_test)`. `score()` returns the ratio of correct predictions (1.0 = 100% correct).Since the algorithms we've chosen implement the same methods for fitting them to the data as well as evaluating them, let's put them in a dictionary and create a which fits and scores them.
###Code
# Put models in a dictionary
models = {"Logistic Regression": LogisticRegression(),
"KNN": KNeighborsClassifier(),
"Random Forest": RandomForestClassifier()}
# Create a function to fit and score models
def fit_and_score(models, X_train, X_test, y_train, y_test):
"""
Fits and evaluates given machine learning models.
models : a dict of different Scikit-Learn machine learning models
X_train : training data (no labels)
X_test : testing data (no labels)
y_train : training labels
y_test : testing labels
"""
# Set random seed
np.random.seed(42)
    # Make a dict to keep model scores
model_scores = {}
for name, model in models.items():
# Fit the model to the data
model.fit(X_train, y_train)
        # Evaluate the model and append its score to model_scores
model_scores[name] = model.score(X_test, y_test)
return model_scores
model_scores = fit_and_score(models=models,
X_train=X_train,
X_test=X_test,
y_train=y_train,
y_test=y_test)
model_scores
###Output
C:\Users\daver\Desktop\ZTM\ml\heart-disease-project\env\lib\site-packages\sklearn\linear_model\_logistic.py:814: ConvergenceWarning: lbfgs failed to converge (status=1):
STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.
Increase the number of iterations (max_iter) or scale the data as shown in:
https://scikit-learn.org/stable/modules/preprocessing.html
Please also refer to the documentation for alternative solver options:
https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression
n_iter_i = _check_optimize_result(
###Markdown
Model ComparisonSince we've saved our models scores to a dictionary, we can plot them by first converting them to a DataFrame.
###Code
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
###Output
_____no_output_____
###Markdown
We can't really see it from the graph but looking at the dictionary, the `LogisticRegression()` model performs best.Since you've found the best model. Let's take it to the boss and show her what we've found.>You: I've found it!>Her: Nice one! What did you find?>You: The best algorithm for prediting heart disease is a LogisticRegrssion!>Her: Excellent. I'm surprised the hyperparameter tuning is finished by now.>You: wonders what hyperparameter tuning is>You: Ummm yeah, me too, it went pretty quick.>Her: I'm very proud, how about you put together a classification report to show the team, and be sure to include a confusion matrix, and the cross-validated precision, recall and F1 scores. I'd also be curious to see what features are most important. Oh and don't forget to include a ROC curve.>You: asks self, "what are those???">You: Of course! I'll have to you by tomorrow.Alright, there were a few words in there which could sound made up to someone who's not a budding data scientist like yourself. But being the budding data scientist you are, you know data scientists make up words all the time.Let's briefly go through each before we see them in action.**Hyperparameter tuning** - Each model you use has a series of dials you can turn to dictate how they perform. Changing these values may increase or decrease model performance.**Feature importance** - If there are a large amount of features we're using to make predictions, do some have more importance than others? For example, for predicting heart disease, which is more important, sex or age?Confusion matrix - Compares the predicted values with the true values in a tabular way, if 100% correct, all values in the matrix will be top left to bottom right (diagonal line).Cross-validation - Splits your dataset into multiple parts and train and tests your model on each part and evaluates performance as an average.Precision - Proportion of true positives over total number of samples. Higher precision leads to less false positives.Recall - Proportion of true positives over total number of true positives and false negatives. Higher recall leads to less false negatives.F1 score - Combines precision and recall into one metric. 1 is best, 0 is worst.Classification report - Sklearn has a built-in function called classification_report() which returns some of the main classification metrics such as precision, recall and f1-score.ROC Curve - Receiver Operating Characterisitc is a plot of true positive rate versus false positive rate.Area Under Curve (AUC) - The area underneath the ROC curve. A perfect model achieves a score of 1.0. Hyperparameter tuning and cross-validationTo cook your favourite dish, you know to set the oven to 180 degrees and turn the grill on. But when your roommate cooks their favourite dish, they set use 200 degrees and the fan-forced mode. Same oven, different settings, different outcomes.The same can be done for machine learning algorithms. You can use the same algorithms but change the settings (hyperparameters) and get different results.But just like turning the oven up too high can burn your food, the same can happen for machine learning algorithms. You change the settings and it works so well, it **overfits** (does too well) the data.We're looking for the goldilocks model. One which does well on our dataset but also does well on unseen examples.To test different hyperparameters, you could use a **validation set** but since we don't have much data, we'll use **cross-validation**.The most common type of cross-validation is *k-fold*. 
It involves splitting your data into *k-fold's* and then testing a model on each. For example, let's say we had 5 folds (k = 5). This what it might look like. Normal train and test split versus 5-fold cross-validation We'll be using this setup to tune the hyperparameters of some of our models and then evaluate them. We'll also get a few more metrics like **precision, recall, F1-score** and **ROC** at the same time.Here's the game plan:1. Tune model hyperparameters, see which performs best2. Perform cross-validation3. Plot ROC curves4. Make a confusion matrix5. Get precision, recall and F1-score metrics6. Find the most important model features Tuning KNeighborsClassifier (K-Nearest Neighbors or KNN) by handThere's one main hyperparameter we can tune for the K-Nearest Neighbors (KNN) algorithm, and that is number of neighbours. The default is 5 (`n_neigbors=5`).What are neighbours?Imagine all our different samples on one graph like the scatter graph we have above. KNN works by assuming dots which are closer together belong to the same class. If `n_neighbors=5` then it assume a dot with the 5 closest dots around it are in the same class.We've left out some details here like what defines close or how distance is calculated but I encourage you to research them.For now, let's try a few different values of `n_neighbors`.
###Code
# Create a list of train scores
train_scores = []
# Create a list of test scores
test_scores = []
# Create a list of different values for n_neighbors
neighbors = range(1, 21) # 1 to 20
# Setup algorithm
knn = KNeighborsClassifier()
# Loop through different neighbors values
for i in neighbors:
knn.set_params(n_neighbors = i) # set neighbors value
# Fit the algorithm
knn.fit(X_train, y_train)
# Update the training scores
train_scores.append(knn.score(X_train, y_train))
# Update the test scores
test_scores.append(knn.score(X_test, y_test))
train_scores
test_scores
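# Sketch (not in the original): recover which n_neighbors value scored highest on
# the test set, assuming test_scores lines up element-wise with neighbors
best_n = list(neighbors)[int(np.argmax(test_scores))]
print(f"Best n_neighbors by test score: {best_n}")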
plt.plot(neighbors, train_scores, label="Train Score")
plt.plot(neighbors, test_scores, label="Test Score")
plt.xticks(np.arange(1, 21, 1))
plt.xlabel("Number of Neighbors")
plt.ylabel("Model Score")
plt.legend()
print(f"Maximus KNN score on the test data: {max(test_scores)*100:.2f}%")
###Output
Maximum KNN score on the test data: 75.41%
###Markdown
Looking at the graph, `n_neighbors = 11` seems best.
Even knowing this, the `KNN` model's performance didn't get near what `LogisticRegression` or the `RandomForestClassifier` did.
Because of this, we'll discard `KNN` and focus on the other two.
We've tuned KNN by hand but let's see how we can tune `LogisticRegression` and `RandomForestClassifier` using `RandomizedSearchCV`.
Instead of us having to manually try different hyperparameters by hand, `RandomizedSearchCV` tries a number of different combinations, evaluates them and saves the best.
Tuning models with `RandomizedSearchCV`
Reading the Scikit-Learn documentation for `LogisticRegression`, we find there's a number of different hyperparameters we can tune.
The same goes for `RandomForestClassifier`.
Let's create a hyperparameter grid (a dictionary of different hyperparameters) for each and then test them out.
###Code
# Different LogisticRegression hyperparameters
log_reg_grid = {"C": np.logspace(-4,4, 20),
"solver": ["liblinear"]}
# Different RandomForestClassifier hyperparameters
rf_grid = {"n_estimators": np.arange(10, 1000, 50),
"max_depth": [None, 3, 5, 10],
"min_samples_split": np.arange(2, 20, 2),
"min_samples_leaf": np.arange(1, 20, 2)}
###Output
_____no_output_____
###Markdown
Now let's use `RandomizedSearchCV` to try and tune our `LogisticRegression` model.We'll pass it the different hyperparameters from `log_reg_grid` as well as set `n_iter = 20`. This means, `RandomizedSearchCV` will try 20 different combinations of hyperparameters from `log_reg_grid` and save the best ones.
###Code
# Tune LogisticRegression
np.random.seed(42)
# Setup random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model
rs_log_reg.fit(X_train, y_train);
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Now we've tuned `LogisticRegression` using `RandomizedSearchCV`, we'll do the same for `RandomForestClassifier`.
###Code
# Setup random seed
np.random.seed(42)
# Setup random hyperparameter search for RandomForestClassifier
rs_rf = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rf_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit random hyperparameter search model
rs_rf.fit(X_train, y_train);
# Find the best parameters
rs_rf.best_params_
# Evaluate the randomized search random forest model
rs_rf.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Excellent! Tuning the hyperparameters for each model saw a slight performance boost in both the `RandomForestClassifier` and `LogisticRegression`.This is akin to tuning the settings on your oven and getting it to cook your favourite dish just right.But since `LogisticRegression` is pulling out in front, we'll try tuning it further with `GridSearchCV`. Tuning a model with `GridSearchCV`The difference between `RandomizedSearchCV` and `GridSearchCV` is where `RandomizedSearchCV` searches over a grid of hyperparameters performing `n_iter` combinations, `GridSearchCV` will test every single possible combination.In short:* `RandomizedSearchCV` - tries `n_iter` combinations of hyperparameters and saves the best.* `GridSearchCV` - tries every single combination of hyperparameters and saves the best.Let's see it in action.
###Code
# Different LogisticRegression hyperparameters
log_reg_grid = {"C": np.logspace(-4, 4, 30),
"solver": ["liblinear"]}
# Setup grid hyperparameter search for LogisticRegression
gs_log_reg = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit grid hyperparameters search model
gs_log_reg.fit(X_train, y_train);
# Check the best parameters
gs_log_reg.best_params_
# Evaluate the model
gs_log_reg.score(X_test, y_test)
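# Hedged peek (not in the original): cv_results_ records every combination
# GridSearchCV tried, so its length shows how exhaustive the search was
len(gs_log_reg.cv_results_["params"])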
###Output
_____no_output_____
###Markdown
In this case, we get the same results as before since our grid only has a maximum of 20 different hyperparameter combinations.**Note**: If there are a large amount of hyperparameters combinations in your grid, `GridSearchCV` may take a long time to try them all out. This is why it's a good idea to start with `RandomizedSearchCV`, try a certain amount of combinations and then use `GridSearchCV` to refine them. Evaluating a classification model, beyond accuracyNow we've got a tuned model, let's get some of the metrics we discussed before.We want:* ROC curve and AUC score - `plot_roc_curve()`* Confusion matrix - `confusion_matrix()`* Classification report - `classification_report()`* Precision - `precision_score()`* Recall - `recall_score()`* F1-score - `f1_score()`Luckily, Scikit-Learn has these all built-in.To access them, we'll have to use our model to make predictions on the test set. You can make predictions by calling `predict()` on a trained model and passing it the data you'd like to predict on.We'll make predictions on the test data.
###Code
# Make predictions on the test data
y_preds = gs_log_reg.predict(X_test)
###Output
_____no_output_____
###Markdown
Let's see them.
###Code
y_preds
y_test
###Output
_____no_output_____
###Markdown
Since we've got our prediction values we can find the metrics we want.Let's start with the ROC curve and AUC scores. ROC Curve and AUC ScoresWhat's an ROC curve?It's a way of understanding how your model is performing by comparing the true positive rate to the false positive rate.In our case...>To get an appropriate example in a real-world problem, consider a diagnostic test that seeks to determine whether a person has a certain disease. A false positive in this case occurs when the person tests positive, but does not actually have the disease. A false negative, on the other hand, occurs when the person tests negative, suggesting they are healthy, when they actually do have the disease.Scikit-Learn implements a function `plot_roc_curve` which can help us create a ROC curve as well as calculate the area under the curve (AUC) metric.Reading the documentation on the `plot_roc_curve` function we can see it takes `(estimator, X, y)` as inputs. Where `estimator` is a fitted machine learning model and `X` and `y` are the data you'd like to test it on.In our case, we'll use the GridSearchCV version of our `LogisticRegression` estimator, `gs_log_reg` as well as the test data, `X_test` and `y_test`.
###Code
# Import ROC curve function from metrics module
from sklearn.metrics import plot_roc_curve
# Plot ROC curve and calculate AUC metric
plot_roc_curve(gs_log_reg, X_test, y_test)
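# Hedged alternative (assumes scikit-learn >= 1.0, as suggested by the deprecation
# warning below): RocCurveDisplay.from_estimator replaces plot_roc_curve
from sklearn.metrics import RocCurveDisplay
RocCurveDisplay.from_estimator(gs_log_reg, X_test, y_test);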
###Output
C:\Users\daver\Desktop\ZTM\ml\heart-disease-project\env\lib\site-packages\sklearn\utils\deprecation.py:87: FutureWarning: Function plot_roc_curve is deprecated; Function `plot_roc_curve` is deprecated in 1.0 and will be removed in 1.2. Use one of the class methods: RocCurveDisplay.from_predictions or RocCurveDisplay.from_estimator.
warnings.warn(msg, category=FutureWarning)
###Markdown
This is great, our model does far better than guessing which would be a line going from the bottom left corner to the top right corner, AUC = 0.5. But a perfect model would achieve an AUC score of 1.0, so there's still room for improvement.Let's move onto the next evaluation request, a confusion matrix. Confusion matrixA confusion matrix is a visual way to show where your model made the right predictions and where it made the wrong predictions (or in other words, got confused).Scikit-Learn allows us to create a confusion matrix using `confusion_matrix()` and passing it the true labels and predicted labels.
###Code
# Display confusion matrix
print(confusion_matrix(y_test, y_preds))
###Output
[[25 4]
[ 3 29]]
###Markdown
As you can see, Scikit-Learn's built-in confusion matrix is a bit bland. For a presentation you'd probably want to make it visual.Let's create a function which uses Seaborn's `heatmap()` for doing so.
###Code
# Import Seaborn
import seaborn as sns
sns.set(font_scale=1.5) # Increase font size
def plot_conf_mat(y_test, y_preds):
"""
Plots a confusion matrix using Seaborn's heatmap().
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True, # Annotate the boxes
cbar=False)
plt.xlabel("Predicted label") # predictions go on the x-axis
plt.ylabel("True label") # true labels go on the y-axis
plot_conf_mat(y_test, y_preds)
###Output
_____no_output_____
###Markdown
That looks much better.
You can see the model gets confused (predicts the wrong label) roughly the same amount across both classes. In essence, there are 4 occasions where the model predicted 1 when it should've been 0 (false positive) and 3 occasions where the model predicted 0 instead of 1 (false negative).
Classification report
We can make a classification report using `classification_report()` and passing it the true labels as well as our model's predicted labels.
A classification report will also give us information on the precision and recall of our model for each class.
###Code
# Show classification report
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
What's going on here?Let's get a refresh.* **Precision** - Indicates the proportion of positive identifications (model predicted class 1) which were actually correct. A model which produces no false positives has a precision of 1.0.* **Recall** - Indicates the proportion of actual positives which were correctly classified. A model which produces no false negatives has a recall of 1.0.* **F1 score** - A combination of precision and recall. A perfect model achieves an F1 score of 1.0.* **Support** - The number of samples each metric was calculated on.* **Accuracy** - The accuracy of the model in decimal form. Perfect accuracy is equal to 1.0.* **Macro avg** - Short for macro average, the average precision, recall and F1 score between classes. Macro avg doesn’t class imbalance into effort, so if you do have class imbalances, pay attention to this metric.* **Weighted avg** - Short for weighted average, the weighted average precision, recall and F1 score between classes. Weighted means each metric is calculated with respect to how many samples there are in each class. This metric will favour the majority class (e.g. will give a high value when one class out performs another due to having more samples).Ok, now we've got a few deeper insights on our model. But these were all calculated using a single training and test set. Cross-ValidationWhat we'll do to make them more solid is calculate them using cross-validation.How?We'll take the best model along with the best hyperparameters and use cross_val_score() along with various `scoring` parameter values.`cross_val_score()` works by taking an estimator (machine learning model) along with data and labels. It then evaluates the machine learning model on the data and labels using cross-validation and a defined `scoring` parameter.Let's remind ourselves of the best hyperparameters and then see them in action.
###Code
# Check best hyperparameters
gs_log_reg.best_params_
# Instantiate best model with best hyperparameters (found with GridSearchCV)
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
###Output
_____no_output_____
###Markdown
Now we've got an instantiated classifier, let's find some cross-validated metrics.
###Code
# Cross-validated accuracy score
cv_acc = cross_val_score(clf,
X,
y,
cv=5,
scoring="accuracy")
cv_acc
###Output
_____no_output_____
###Markdown
Since there are 5 metrics here, we'll take the average.
###Code
cv_acc = np.mean(cv_acc)
cv_acc
###Output
_____no_output_____
###Markdown
Now we'll do the same for other classification metrics.
###Code
# Cross-validated precision score
cv_precision = np.mean(cross_val_score(clf,
X,
y,
cv=5,
scoring="precision"))
cv_precision
# Cross-validated recall score
cv_recall = np.mean(cross_val_score(clf,
X,
y,
cv=5,
scoring="recall"))
cv_recall
# Cross-validated F1 score
cv_f1 = np.mean(cross_val_score(clf,
X,
y,
cv=5,
scoring="f1"))
cv_f1
###Output
_____no_output_____
###Markdown
Okay, we've got cross validated metrics, now what?Let's visualize them.
###Code
# Visualizing cross-validated metrics
cv_metrics = pd.DataFrame({"Accuracy": cv_acc,
"Precision": cv_precision,
"Recall": cv_recall,
"F1": cv_f1},
index=[0])
cv_metrics.T.plot.bar(title="Cross-Validated Metrics", legend=False);
###Output
_____no_output_____
###Markdown
Great! This looks like something we could share. An extension might be adding the metrics on top of each bar so someone can quickly tell what they were.What now?The final thing to check off the list of our model evaluation techniques is feature importance. Feature importanceFeature importance is another way of asking, "which features contributing most to the outcomes of the model?"Or for our problem, trying to predict heart disease using a patient's medical characterisitcs, which charateristics contribute most to a model predicting whether someone has heart disease or not?Unlike some of the other functions we've seen, because how each model finds patterns in data is slightly different, how a model judges how important those patterns are is different as well. This means for each model, there's a slightly different way of finding which features were most important.You can usually find an example via the Scikit-Learn documentation or via searching for something like "[MODEL TYPE] feature importance", such as, "random forest feature importance".Since we're using `LogisticRegression`, we'll look at one way we can calculate feature importance for it.To do so, we'll use the `coef_` attribute. Looking at the Scikit-Learn documentation for `LogisticRegression`, the `coef_` attribute is the coefficient of the features in the decision function.We can access the `coef_` attribute after we've fit an instance of `LogisticRegression`.
###Code
# Fit an instance of LogisticRegression (taken from above)
gs_log_reg.best_params_
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
clf.fit(X_train, y_train);
# Check coef_
clf.coef_
###Output
_____no_output_____
###Markdown
Looking at this, it might not make much sense. But these values are how much each feature contributes to how the model decides whether the patterns in a sample of a patient's health data lean more towards having heart disease or not.
Even knowing this, in its current form, this `coef_` array still doesn't mean much. But it will if we combine it with the columns (features) of our dataframe.
###Code
# Match features to columns, this will match the coef_ to the column names (features)
features_dict = dict(zip(df.columns, list(clf.coef_[0])))
features_dict
###Output
_____no_output_____
###Markdown
Now we've matched the feature coefficients to different features, let's visualize them.
###Code
# Visualize feature importance
features_df = pd.DataFrame(features_dict, index=[0])
features_df.T.plot.bar(title="Feature Importance", legend=False);
###Output
_____no_output_____
###Markdown
You'll notice some are negative and some are positive.
The larger the value (bigger bar), the more the feature contributes to the model's decision.
If the value is negative, it means there's a negative correlation. And vice versa for positive values.
For example, the `sex` attribute has a negative value of -0.86, which means as the value for `sex` increases, the `target` value decreases.
We can see this by comparing the `sex` column to the `target` column.
###Code
pd.crosstab(df["sex"], df["target"])
###Output
_____no_output_____
###Markdown
You can see that when `sex` is 0 (female), there are almost 3 times as many people with heart disease (`target` = 1) as without (72 vs. 24). Then, as `sex` increases to 1 (male), the ratio goes down to almost 1 to 1 (114 vs. 93) of people who have heart disease versus those who don't. What does this mean? It means the model has found a pattern which reflects the data. Looking at these figures and this specific dataset, it seems that if the patient is female, they're more likely to have heart disease. How about a positive correlation?
###Code
# Contrast slope (positive coefficient) with target
pd.crosstab(df["slope"], df["target"])
###Output
_____no_output_____
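###Markdown
To make the positive relationship easier to read, we can look at the proportion of patients with heart disease within each `slope` value. This is a small illustrative sketch (an addition, not part of the original notebook) using the same `df` as above.
###Code
# Proportion of patients with target=1 for each slope value (each row sums to 1)
pd.crosstab(df["slope"], df["target"], normalize="index")
###Output
_____no_output_____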
###Markdown
Predicting heart disease using machine learning

This notebook looks into various Python-based machine learning and data science libraries in an attempt to build a machine learning model capable of predicting whether or not someone has heart disease based on their medical attributes.

We are going to take the following approach:
1. Problem definition
2. Data
3. Evaluation
4. Features
5. Modeling
6. Experimentation

1. Problem Definition
In a statement,
> Given clinical parameters about a patient, can we predict whether or not they have heart disease?

2. Data
The original data came from the Cleveland database in the UCI Machine Learning Repository. https://archive.ics.uci.edu/ml/datasets/heart+disease
There is also a version of the data available on Kaggle. https://www.kaggle.com/ronitf/heart-disease-uci

3. Evaluation
> If we can reach 95% accuracy at predicting whether or not a patient has heart disease during the proof of concept, we'll pursue the project.

4. Features
**Data Dictionary**
* age
* sex
* chest pain type (4 values)
* resting blood pressure
* serum cholestoral in mg/dl
* fasting blood sugar > 120 mg/dl
* resting electrocardiographic results (values 0, 1, 2)
* maximum heart rate achieved
* exercise induced angina
* oldpeak = ST depression induced by exercise relative to rest
* the slope of the peak exercise ST segment
* number of major vessels (0-3) colored by flourosopy
* thal: 3 = normal; 6 = fixed defect; 7 = reversable defect

Preparing the tools
Using pandas, matplotlib, and numpy for data analysis and manipulation.
###Code
# Import all the tools
# Regular EDA (exploratory data analysis) and plotting libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Models from scikit-learn
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
# Model Evaluations
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
###Output
_____no_output_____
###Markdown
Load Data
###Code
df = pd.read_csv('https://raw.githubusercontent.com/mrdbourke/zero-to-mastery-ml/master/data/heart-disease.csv')
###Output
_____no_output_____
###Markdown
Data Exploration (exploratory data analysis - EDA)
###Code
df.head(3)
df.shape
df['target'].value_counts()
df['target'].value_counts().plot(kind='bar', color=['salmon', 'lightblue']);
df.info()
# Are there any missing values?
df.isna().sum()
df.describe()
###Output
_____no_output_____
###Markdown
Heart Disease Frequency according to Sex
###Code
df.sex.value_counts()
# Compare target column to sex column
pd.crosstab(df['target'], df['sex'])
pd.crosstab(df.target, df.sex).plot(kind='bar',
figsize=(10, 6),
color=['salmon', 'lightblue']);
plt.title("Heart Disease Frequency for Sex")
plt.xlabel("0 = No Disease, 1 = Disease")
plt.ylabel("Amount")
plt.legend(["Female", "Male"])
plt.xticks(rotation=0)
###Output
_____no_output_____
###Markdown
Age vs. Max Heart Rate for Heart Disease
###Code
# Create another figure
plt.figure(figsize=(10, 6))
# Scatter with positive examples
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1],
c="salmon")
# Scatter with negative examples
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0],
c="lightblue")
# Add some helpful info
plt.title("Heart Disease in function of Age and Max Heart Rate")
plt.xlabel("Age")
plt.ylabel("Max Heart Rate")
plt.legend(["Disease", "No Disease"]);
# Check the distribution of the age column with a histogram
df.age.plot.hist();
###Output
_____no_output_____
###Markdown
Heart Disease Frequency per Chest Pain Type

cp - chest pain type
* 0: Typical angina: chest pain related to decreased blood supply to the heart
* 1: Atypical angina: chest pain not related to the heart
* 2: Non-anginal pain: typically esophageal spasms (not heart related)
* 3: Asymptomatic: chest pain not showing signs of disease
###Code
pd.crosstab(df.cp, df.target)
# Make the crosstab more visual
pd.crosstab(df.cp, df.target).plot(kind="bar",
figsize=(10, 6),
color=["salmon", "lightblue"])
# Add some communication
plt.title("Heart Disease Frequency Per Chest Pain Type")
plt.xlabel("Chest Pain Type")
plt.ylabel("Amount")
plt.legend(["No Disease", "Disease"])
plt.xticks(rotation=0);
# Make a correlation matrix
df.corr()
# Let's make our correlation matrix a little prettier
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_matrix,
annot=True,
linewidth=0.5,
fmt='.2f',
cmap='YlGnBu');
###Output
_____no_output_____
###Markdown
5. Modeling
###Code
df.head(3)
# Split data into X and y
X = df.drop('target', axis=1)
y = df['target']
# Split the data into train and test sets
np.random.seed(42)
X_train, X_test,y_train, y_test = train_test_split(X, y, test_size=0.2)
###Output
_____no_output_____
###Markdown
Try three different models:
1. Logistic Regression
2. K-Nearest Neighbors Classifier
3. Random Forest Classifier
###Code
# Put models in a dictionary
models = {'Logistic Regression': LogisticRegression(),
'KNN': KNeighborsClassifier(),
'Random Forest': RandomForestClassifier()}
# Create a function to fit and score models
def fit_score(X_train, X_test, y_train, y_test):
"""
Fits and evaluates given machine learning models.
models = a dict of different Scikit-Learn machine learning models
"""
# set random seed
np.random.seed(42)
# Make a dictionary to keep model scores
model_scores = {}
# Loop through models:
for name, model in models.items():
# Fit the model to the data
model.fit(X_train, y_train)
# Evaluate the model and append the model
# name and score to model_scores dictionaries
model_scores[name] = model.score(X_test, y_test)
return model_scores
model_scores = fit_score(X_train, X_test, y_train, y_test)
model_compare = pd.DataFrame(model_scores, index=["accuracy"])
model_compare.T.plot.bar();
###Output
_____no_output_____
###Markdown
Let's look at the following:
* Hyperparameter tuning
* Feature importance
* Confusion matrix
* Cross-validation
* Precision
* Recall
* F1 score
* Classification report
* ROC curve
* Area under the curve (AUC)

Hyperparameter Tuning
###Code
# Let's tune KNN
train_scores = []
test_scores = []
# Setup KNN's instance
knn = KNeighborsClassifier()
# Create a list of different values for the number of neighbors
neighbors = range(1, 21)
# Loop through different neighbors
for i in neighbors:
knn.set_params(n_neighbors=i)
# Fit the algorithm
knn.fit(X_train, y_train)
# Update training scores list
train_scores.append(knn.score(X_train, y_train))
# Update test scores list
test_scores.append(knn.score(X_test, y_test))
train_scores
test_scores
fig, ax = plt.subplots(figsize=(8, 5))
ax.plot(neighbors, train_scores, label="Train Scores");
ax.plot(neighbors, test_scores, label="Test Scores");
plt.xticks(np.arange(1, 21, 1))
plt.xlabel("Number of Neighbors");
plt.ylabel("Scores");
plt.legend()
print(f"Max KNN score on the test data is {np.mean(test_scores) * 100:.2f}%")
###Output
Mean KNN score on the test data is 69.51%
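###Markdown
The average is useful, but for tuning we usually care about the best score and which `n_neighbors` produced it. Here is a small sketch (an addition, not part of the original notebook) that pulls both out of the `test_scores` list built above.
###Code
# Best test score and the n_neighbors value that produced it
best_idx = np.argmax(test_scores)
print(f"Best KNN test score: {test_scores[best_idx] * 100:.2f}% with n_neighbors = {neighbors[best_idx]}")
###Output
_____no_output_____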
###Markdown
Hyperparameter tuning with RandomizedSearchCV

We are going to tune:
* LogisticRegression
* RandomForestClassifier

... using RandomizedSearchCV
###Code
# Create a hyperparameter grid for LogisticRegression
log_reg_grid = {'C': np.logspace(-4, 4, 20),
'solver': ["liblinear"]}
# Create a hyperparameter grid for RandomForestClassifier
rfc_grid = {'n_estimators': np.arange(10, 1000, 50),
'max_depth': [None, 3, 5, 10],
'min_samples_split': np.arange(2, 20, 2),
'min_samples_leaf': np.arange(1, 20, 2)}
###Output
_____no_output_____
###Markdown
**Tune LogisticRegression**
###Code
# Set random seed
np.random.seed(42)
# Setup a random hyperparameter search for LogisticRegression
rs_log_reg = RandomizedSearchCV(LogisticRegression(),
param_distributions=log_reg_grid,
cv=5,
n_iter=20,
verbose=True)
# Fit Random Hyperparameter Search model for LogisticRegression
rs_log_reg.fit(X_train, y_train)
rs_log_reg.best_params_
rs_log_reg.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
**Tune RandomForestClassifier**
###Code
# Set random seed
np.random.seed(42)
# Setup a random hyperparameter search for RandomForestClassifier
rs_rfc = RandomizedSearchCV(RandomForestClassifier(),
param_distributions=rfc_grid,
cv=5,
n_iter=5,
verbose=True)
# Fit random hyperparameter search model for RandomForestClassifier
rs_rfc.fit(X_train, y_train)
rs_rfc.best_params_
# Evaluate the randomized search RandomForestClassifier model
rs_rfc.score(X_test, y_test)
model_scores
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning with GridSearchCV

Since LogisticRegression provides the best results so far, we will try to improve them again using GridSearchCV.
###Code
log_reg_grid = {'C': np.logspace(-4, 4, 30),
'solver': ['liblinear']}
# Setup GridSearchCV
gs_lr = GridSearchCV(LogisticRegression(),
param_grid=log_reg_grid,
cv=5,
verbose=True)
# Fit GridSearchCV of LogisticRegression
gs_lr.fit(X_train, y_train)
gs_lr.best_params_
# Evaluate the Grid Search Logistic Regression Model
gs_lr.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
Evaluating our tuned classifier, beyond accuracy
* ROC curve and AUC score
* Confusion matrix
* Classification report
* Precision
* Recall
* F1 score

To make comparisons and evaluate our model, we first need to make predictions.
###Code
# Make prediction with the tuned model
y_preds = gs_lr.predict(X_test)
y_preds
# Import ROC curve function from the sklearn.metrics module
plot_roc_curve(gs_lr, X_test, y_test);
def plot_conf_mat(y_test, y_preds):
"""
Plots a confusion matrix using Seaborn's heatmap().
"""
fig, ax = plt.subplots(figsize=(3, 3))
ax = sns.heatmap(confusion_matrix(y_test, y_preds),
annot=True, # Annotate the boxes
cbar=False)
plt.xlabel("Predicted label") # predictions go on the x-axis
plt.ylabel("True label") # true labels go on the y-axis
plot_conf_mat(y_test, y_preds)
###Output
_____no_output_____
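###Markdown
The ROC curve plotted above can also be summarised by a single number, the area under the curve (AUC), which was listed in our evaluation checklist. A minimal sketch (an addition, not part of the original notebook, assuming the same `gs_lr`, `X_test` and `y_test` as above):
###Code
# AUC score for the tuned LogisticRegression model
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test, gs_lr.predict_proba(X_test)[:, 1])
###Output
_____no_output_____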
###Markdown
Let's get a classification report, as well as a cross-validated precision, recall, f-1 score
###Code
print(classification_report(y_test, y_preds))
###Output
precision recall f1-score support
0 0.89 0.86 0.88 29
1 0.88 0.91 0.89 32
accuracy 0.89 61
macro avg 0.89 0.88 0.88 61
weighted avg 0.89 0.89 0.89 61
###Markdown
Calculate Evaluation metrics using cross validation
###Code
# check best hyperparameters
gs_lr.best_params_
# create a new classifier with best parameters
clf = LogisticRegression(C=0.20433597178569418,
solver="liblinear")
# cross validated accuracy
cv_acc = cross_val_score(clf, X, y, cv=5,
scoring='accuracy')
cv_acc = np.mean(cv_acc)
cv_acc
# cross validated precision
cv_precision = cross_val_score(clf, X, y, cv=5,
scoring='precision')
cv_precision = np.mean(cv_precision)
cv_precision
# cross validated recall
cv_recall = cross_val_score(clf, X, y, cv=5,
scoring='recall')
cv_recall = np.mean(cv_recall)
cv_recall
# cross validated f1 score
cv_f1 = cross_val_score(clf, X, y, cv=5,
scoring='f1')
cv_f1 = np.mean(cv_f1)
cv_f1
# Visualize cross-validated metrics
cv_metrics = pd.DataFrame({"Accuracy": cv_acc,
"Precision": cv_precision,
"Recall": cv_recall,
"F1": cv_f1},
index=[0])
cv_metrics.T.plot.bar(title="Cross-validated classification metrics",
legend=False);
###Output
_____no_output_____
###Markdown
Feature Importance
###Code
# Logisticregression feature importance
gs_lr.best_params_
clf = LogisticRegression(C=0.20433597178569418,
solver='liblinear')
clf.fit(X_train, y_train);
X.head(3)
# check coef_
clf.coef_
# match coef's of features to columns
features_dict = dict(zip(df.columns, clf.coef_[0]))
pd.DataFrame(features_dict, index=[0]).T.plot.bar(title="Feature Importance", legend=False)
###Output
_____no_output_____
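###Markdown
To rank the features rather than just plot them, we can sort the coefficients by their absolute size. A small sketch (an addition, not part of the original notebook) using the `features_dict` created above:
###Code
# Features ordered by the magnitude of their coefficient (most influential first)
sorted(features_dict.items(), key=lambda kv: abs(kv[1]), reverse=True)
###Output
_____no_output_____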
###Markdown
Heart Disease Data Dictionary

A data dictionary describes the data you're dealing with. Not all datasets come with them, so this is where you may have to do your research or ask a subject matter expert (someone who knows about the data) for more. The following are the features we'll use to predict our target variable (heart disease or no heart disease).

* age - age in years
* sex - (1 = male; 0 = female)
* cp - chest pain type
    * 0: Typical angina: chest pain related to decreased blood supply to the heart
    * 1: Atypical angina: chest pain not related to the heart
    * 2: Non-anginal pain: typically esophageal spasms (not heart related)
    * 3: Asymptomatic: chest pain not showing signs of disease
* trestbps - resting blood pressure (in mm Hg on admission to the hospital); anything above 130-140 is typically cause for concern
* chol - serum cholestoral in mg/dl; serum = LDL + HDL + .2 * triglycerides; above 200 is cause for concern
* fbs - fasting blood sugar > 120 mg/dl (1 = true; 0 = false); > 126 mg/dL signals diabetes
* restecg - resting electrocardiographic results
    * 0: Nothing to note
    * 1: ST-T wave abnormality; can range from mild symptoms to severe problems; signals a non-normal heart beat
    * 2: Possible or definite left ventricular hypertrophy; enlarged heart's main pumping chamber
* thalach - maximum heart rate achieved
* exang - exercise induced angina (1 = yes; 0 = no)
* oldpeak - ST depression induced by exercise relative to rest; looks at stress of the heart during exercise; an unhealthy heart will stress more
* slope - the slope of the peak exercise ST segment
    * 0: Upsloping: better heart rate with exercise (uncommon)
    * 1: Flatsloping: minimal change (typical healthy heart)
    * 2: Downsloping: signs of an unhealthy heart
* ca - number of major vessels (0-3) colored by flourosopy; a colored vessel means the doctor can see the blood passing through; the more blood movement the better (no clots)
* thal - thalium stress result
    * 1,3: normal
    * 6: fixed defect: used to be a defect but ok now
    * 7: reversable defect: no proper blood movement when exercising
* target - have disease or not (1 = yes, 0 = no) (the predicted attribute)

Note: No personally identifiable information (PII) can be found in the dataset.

It's a good idea to save these to a Python dictionary or an external file, so we can look at them later without coming back here.
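As suggested above, here is a minimal sketch (an addition, not part of the original notebook; the file name is an arbitrary choice) of saving the dictionary to an external JSON file so it can be reloaded later:
###Code
# Save the data dictionary to disk so it can be looked up later (file name is arbitrary)
import json

data_dictionary = {
    "age": "age in years",
    "sex": "1 = male; 0 = female",
    "cp": "chest pain type (0-3)",
    "trestbps": "resting blood pressure (mm Hg on admission)",
    "chol": "serum cholestoral in mg/dl",
    "fbs": "fasting blood sugar > 120 mg/dl (1 = true; 0 = false)",
    "restecg": "resting electrocardiographic results (0-2)",
    "thalach": "maximum heart rate achieved",
    "exang": "exercise induced angina (1 = yes; 0 = no)",
    "oldpeak": "ST depression induced by exercise relative to rest",
    "slope": "slope of the peak exercise ST segment (0-2)",
    "ca": "number of major vessels (0-3) colored by flourosopy",
    "thal": "1,3 = normal; 6 = fixed defect; 7 = reversable defect",
    "target": "have disease or not (1 = yes, 0 = no)",
}
with open("heart-disease-data-dictionary.json", "w") as f:
    json.dump(data_dictionary, f, indent=2)
###Output
_____no_output_____
###Markdown
With the data dictionary noted, let's import the tools and load the data.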
###Code
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from sklearn.metrics import plot_roc_curve
df = pd.read_csv("7.1 heart-disease.csv")
df.shape
df.head()
df.tail()
df["target"].value_counts()
df["target"].value_counts().plot(kind='bar', color=['salmon', 'lightblue']);
df.info()
df.isna().sum()
df.describe()
df.sex.value_counts()
pd.crosstab(df.target, df.sex)
pd.crosstab(df.target, df.sex).plot(kind='bar', color=['red', 'blue'])
plt.figure(figsize=(10, 6))
plt.scatter(df.age[df.target==1],
df.thalach[df.target==1])
plt.scatter(df.age[df.target==0],
df.thalach[df.target==0])
plt.title('heart disease in function of age and max heart rate')
plt.xlabel('age')
plt.ylabel('max heart rate')
plt.legend(['no disease', 'disease']);
df.age.plot.hist()
pd.crosstab(df.cp, df.target)
pd.crosstab(df.cp, df.target).plot(kind='bar', figsize=(10, 6), color=['salmon', 'lightblue'])
plt.title('heart disease frequency per chest pain type')
plt.xlabel('chest pain type')
plt.ylabel('amount')
plt.legend(['no disease', 'disease'])
plt.xticks(rotation=0)
df.corr()
corr_matrix = df.corr()
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_matrix,
annot=True,
linewidths=0.5,
fmt='.2f',
cmap='YlGnBu',)
models = {'logistic regression': LogisticRegression(),
'KNN': KNeighborsClassifier(),
'random forest': RandomForestClassifier(),}
def fit_and_score(models, X_train, X_test, y_train, y_test):
np.random.seed(42)
model_scores = {}
for name, model in models.items():
model.fit(X_train, y_train)
model_scores[name]= model.score(X_test, y_test)
return model_scores
###Output
_____no_output_____ |
Named-Entity-Recognition-Attention.ipynb | ###Markdown
Named Entity Recognition (NER)

NER is an information extraction technique to identify and classify named entities in text. These entities can be pre-defined and generic, like location names, organizations, time, etc., or they can be very specific, like the example with the resume. The goal of a named entity recognition (NER) system is to identify all textual mentions of the named entities. This can be broken down into two sub-tasks: identifying the boundaries of the NE, and identifying its type. Named entity recognition is a task that is well-suited to a classifier-based approach. In particular, a tagger can be built that labels each word in a sentence using the IOB format, where chunks are labelled by their appropriate type.

The IOB tagging system contains tags of the form:
* B - {CHUNK_TYPE} – for the word beginning a chunk
* I - {CHUNK_TYPE} – for words inside the chunk
* O – outside any chunk

The IOB tags are further classified into the following classes:
* geo = Geographical Entity
* org = Organization
* per = Person
* gpe = Geopolitical Entity
* tim = Time indicator
* art = Artifact
* eve = Event
* nat = Natural Phenomenon

Approaches to NER
* **Classical Approaches:** mostly rule-based.
* **Machine Learning Approaches:** there are two main methods in this category:
    * Treat the problem as a multi-class classification where named entities are our labels, so we can apply different classification algorithms. The problem here is that identifying and labeling named entities requires a thorough understanding of the context of a sentence and the sequence of word labels in it, which this method ignores.
    * Conditional Random Field (CRF) model. It is a probabilistic graphical model that can be used to model sequential data such as the labels of words in a sentence. The CRF model is able to capture the features of the current and previous labels in a sequence, but it cannot understand the context of the forward labels; this shortcoming, plus the extra feature engineering involved in training a CRF model, makes it less appealing for industry adoption.
* **Deep Learning Approaches:** Bidirectional RNNs, Transformers

EDA
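Before loading the corpus, here is a tiny illustrative sketch (an assumed example, not taken from the dataset) of the IOB scheme described above:
###Code
# Hypothetical sentence tagged with the IOB scheme used in this corpus
tokens = ["John", "Smith", "visited", "London", "in", "2015"]
iob_tags = ["B-per", "I-per", "O", "B-geo", "O", "B-tim"]
for token, tag in zip(tokens, iob_tags):
    print(f"{token:10} {tag}")
###Output
_____no_output_____
###Markdown
Now let's load the annotated corpus and explore it.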
###Code
data = pd.read_csv(
"../input/entity-annotated-corpus/ner.csv", encoding = "ISO-8859-1", error_bad_lines=False,
usecols=['sentence_idx', 'word', 'tag']
)
data = data[data['sentence_idx'] != 'prev-lemma'].dropna(subset=['sentence_idx']).reset_index(drop=True)
print(data.shape)
data.head()
###Output
/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3444: FutureWarning: The error_bad_lines argument has been deprecated and will be removed in a future version.
exec(code_obj, self.user_global_ns, self.user_ns)
/opt/conda/lib/python3.7/site-packages/IPython/core/interactiveshell.py:3444: DtypeWarning: Columns (21) have mixed types.Specify dtype option on import or set low_memory=False.
exec(code_obj, self.user_global_ns, self.user_ns)
###Markdown
**Create list of list of tuples to differentiate each sentence from each other**
###Code
class SentenceGetter(object):
def __init__(self, dataset, word_col, tag_col, sent_id_col):
self.n_sent = 1
self.dataset = dataset
self.empty = False
agg_func = lambda s: [
(w, t) for w,t in zip(s[word_col].values.tolist(), s[tag_col].values.tolist())
]
self.grouped = self.dataset.groupby(sent_id_col).apply(agg_func)
self.sentences = [s for s in self.grouped]
def get_next(self):
try:
s = self.grouped["Sentence: {}".format(self.n_sent)]
self.n_sent += 1
return s
except:
return None
getter = SentenceGetter(dataset=data, word_col='word', tag_col='tag', sent_id_col='sentence_idx')
sentences = getter.sentences
print(sentences[0])
fig, ax = plt.subplots(figsize=(20, 6))
ax.hist([len(s) for s in sentences], bins=50)
ax.set_title('Number of words in each Sentence')
maxlen = max([len(s) for s in sentences])
print('Number of Sentences:', len(sentences))
print ('Maximum sequence length:', maxlen)
words = list(set(data["word"].values))
words.append("ENDPAD")
n_words = len(words)
print('Number of unique words:', n_words)
fig, ax = plt.subplots(2, 1, figsize=(20, 12))
data.tag.value_counts().plot.bar(ax=ax[0], title='Distribution of Tags')
data[data.tag != 'O'].tag.value_counts().plot.bar(ax=ax[1], title='Distribution of non-O Tags')
tags = list(set(data["tag"].values))
n_tags = len(tags)
print('Number of unique Tags:', n_tags)
###Output
Number of unique Tags: 17
###Markdown
**Converting words to numbers and numbers to words**
###Code
word2idx = {w: i for i, w in enumerate(words)}
tag2idx = {t: i for i, t in enumerate(tags)}
###Output
_____no_output_____
###Markdown
Modelling
###Code
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as L
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model, to_categorical
X = [[word2idx[w[0]] for w in s] for s in sentences]
X = sequence.pad_sequences(maxlen=maxlen, sequences=X, padding="post",value=n_words - 1)
y = [[tag2idx[w[1]] for w in s] for s in sentences]
y = sequence.pad_sequences(maxlen=maxlen, sequences=y, padding="post", value=tag2idx["O"])
y = np.array([to_categorical(i, num_classes=n_tags) for i in y])
print('X shape', X.shape, 'y shape', y.shape)
class config():
VOCAB = n_words
MAX_LEN = maxlen
N_OUPUT = n_tags
EMBEDDING_VECTOR_LENGTH = 50
DENSE_DIM = 32
NUM_HEADS = 2
OUTPUT_ACTIVATION = 'softmax'
LOSS = 'categorical_crossentropy'
OPTIMIZER = 'adam'
METRICS = ['accuracy']
MAX_EPOCHS = 10
class PositionalEmbedding(L.Layer):
def __init__(self, sequence_length, input_dim, output_dim, **kwargs):
super().__init__(**kwargs)
self.token_embeddings = L.Embedding(input_dim, output_dim)
self.position_embeddings = L.Embedding(sequence_length, output_dim)
self.sequence_length = sequence_length
self.input_dim = input_dim
self.output_dim = output_dim
def call(self, inputs):
length = tf.shape(inputs)[-1]
positions = tf.range(start=0, limit=length, delta=1)
embedded_tokens = self.token_embeddings(inputs)
embedded_positions = self.position_embeddings(positions)
return embedded_tokens + embedded_positions
def get_config(self):
config = super().get_config()
config.update({
"output_dim": self.output_dim,
"sequence_length": self.sequence_length,
"input_dim": self.input_dim,
})
return config
class TransformerEncoder(L.Layer):
def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
super().__init__(**kwargs)
self.embed_dim = embed_dim
self.dense_dim = dense_dim
self.num_heads = num_heads
self.attention = L.MultiHeadAttention(num_heads=num_heads, key_dim=embed_dim)
self.dense_proj = keras.Sequential([L.Dense(dense_dim, activation='relu'), L.Dense(embed_dim)])
self.layernorm1 = L.LayerNormalization()
self.layernorm2 = L.LayerNormalization()
def call(self, inputs, mask=None):
if mask is not None:
mask = mask[:, tf.newaxis, :]
attention_output = self.attention(inputs, inputs, attention_mask=mask)
proj_input = self.layernorm1(inputs + attention_output)
proj_output = self.dense_proj(proj_input)
return self.layernorm2(proj_input + proj_output)
def get_config(self):
config = super().get_config()
config.update({
"embed_dim": self.embed_dim,
"num_heads": self.num_heads,
"dense_dim": self.dense_dim
})
return config
###Output
_____no_output_____
###Markdown
**Let's define our model**

A token classification model is pretty simple and similar to a sequence classification model, i.e. there is only one change: since we need predictions for each input token, we do not use the Global Pooling layer. The architecture therefore looks something like:
* Input Layer
* Embeddings
* Transformer Encoder Block
* Dropout (optional)
* Classification Layer (where n_units = number of classes)
###Code
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=7)
rlp = ReduceLROnPlateau(monitor='loss', patience=3, verbose=1)
inputs = keras.Input(shape=(None, ), dtype="int64")
x = PositionalEmbedding(config.MAX_LEN, config.VOCAB, config.EMBEDDING_VECTOR_LENGTH)(inputs)
x = TransformerEncoder(config.EMBEDDING_VECTOR_LENGTH, config.DENSE_DIM, config.NUM_HEADS)(x)
x = L.Dropout(0.5)(x)
outputs = L.Dense(config.N_OUPUT, activation=config.OUTPUT_ACTIVATION)(x)
model = keras.Model(inputs, outputs)
model.compile(loss=config.LOSS, optimizer=config.OPTIMIZER, metrics=config.METRICS)
model.summary()
plot_model(model, show_shapes=True)
history = model.fit(x=X, y=y, validation_split=0.1,
callbacks=[es, rlp], epochs=config.MAX_EPOCHS
)
fig, ax = plt.subplots(2, 1, figsize=(20, 8))
df = pd.DataFrame(history.history)
df[['accuracy', 'val_accuracy']].plot(ax=ax[0])
df[['loss', 'val_loss']].plot(ax=ax[1])
ax[0].set_title('Model Accuracy', fontsize=12)
ax[1].set_title('Model Loss', fontsize=12)
fig.suptitle('Model Metrics', fontsize=18);
i = np.random.randint(0, X.shape[0])
p = model.predict(np.array([X[i]]))
p = np.argmax(p, axis=-1)
y_true = np.argmax(y, axis=-1)[i]
print(f"{'Word':15}{'True':5}\t{'Pred'}")
print("-"*30)
for (w, t, pred) in zip(X[i], y_true, p[0]):
print(f"{words[w]:15}{tags[t]}\t{tags[pred]}")
if words[w] == 'ENDPAD':
break
###Output
Word True Pred
------------------------------
Sudanese B-gpe B-gpe
government-backedO O
Arab B-gpe B-gpe
militias O O
are O O
accused O O
of O O
committing O O
atrocities O O
in O O
battling O O
Darfur B-tim B-tim
rebels O O
. O O
Sudanese B-gpe B-gpe
government-backedO O
Arab B-gpe B-gpe
militias O O
are O O
accused O O
of O O
committing O O
atrocities O O
in O O
battling O O
Darfur B-tim B-tim
rebels O O
. O O
ENDPAD O O
|
Malaria Detection using Keras.ipynb | ###Markdown
Malaria Detection using Keras
###Code
import numpy as np
import pandas as pd
import tensorflow as tf
import cv2
import tensorflow.keras
from tensorflow.keras import backend as K
from matplotlib import pyplot as plt
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Conv2D, Dense, MaxPooling2D, Flatten, Activation, Input, BatchNormalization, ZeroPadding2D, Dropout
from tensorflow.keras.models import Sequential, Model
###Output
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.compat.v2.summary API due to missing TensorBoard installation.
WARNING:root:Limited tf.summary API due to missing TensorBoard installation.
###Markdown
Dataset Exploration
###Code
data_dir="cell_images/"
batch_size = 32
img_height = 180
img_width = 180
train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print(class_names)
from tensorflow.keras import layers
normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)
###Output
_____no_output_____
###Markdown
Understanding our model- The model we have created is meant to illustrate the concepts one would need for understanding and building deep learning models. The model here is built with the Sequential model structure defined in Keras.
###Code
num_classes = 2
model = tf.keras.Sequential([
layers.experimental.preprocessing.Rescaling(1./255),
layers.Conv2D(32, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
model.compile(
optimizer='adam',
loss=tf.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(
train_ds,
validation_data=val_ds,
epochs=3
)
# Calling `save('my_model.h5')` creates a h5 file `my_model.h5`.
model.save("malariadetection.h5")
# It can be used to reconstruct the model identically.
reconstructed_model = tensorflow.keras.models.load_model("malariadetection.h5")
def image_reader(path):
t=cv2.imread(path)
t=cv2.cvtColor(t, cv2.COLOR_BGR2RGB)
t=cv2.resize(t, (180,180))
return t
evaluation=[]
evaluation.append(image_reader(r"cell_images\C33P1thinF_IMG_20150619_114756a_cell_179.png"))
evaluation=np.asarray(evaluation)
print(evaluation.shape)
res=model.predict(evaluation)
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
training_labels=le.fit_transform(class_names)
target=le.inverse_transform(res.argmax(1))
print(target)
###Output
(1, 180, 180, 3)
['Parasitized']
|
notebooks/002-Parts-of-a-Model.ipynb | ###Markdown
Parts of a Deep Learning model
In this notebook we will look at the different components of a Deep Learning model, and we will investigate how we can use Deep Learning on images in future notebooks.

Deep Learning concepts
Throughout this course, different concepts related to Deep Learning models will be used. As you work more and more with these kinds of models, you tend to hear certain words over and over again. For example,
- neuron
- weights
- backpropagation
- bias
- activation function
- etc.
In the following section, we will see what several of these terms mean... For example, a deep learning model could try to differentiate between dogs and cats. And when the architecture of a model is analyzed more carefully, different pieces can be distinguished, as in the following illustration:

Artificial neuron
An artificial neuron is the most basic and primitive component of any neural network. It is the basic computational unit that performs the following functions:
1. It accepts certain input data and *weights*.
2. It applies the dot product between the inputs and the weights.
3. It then applies a transformation to the weighted sum of the inputs by means of an activation function.
4. Finally, it sends the result to other neurons so they can apply the same process.
For example, this is an animation of a neuron that receives 3 inputs, to which the weights are applied and the activation function is computed:

Feed forward
One of the great advantages of using a Deep Learning model is that several types of architectures can be built, with different numbers of inputs, hidden layers and outputs. For example, the following visualization is of a neural network that has:
- 2 inputs
- 1 hidden layer with 3 neurons
- 1 output
The following sections will describe the different components of this model.

Input layer
- This layer consists of the *input data* that is being given to the neural network.
- This layer is also drawn **as neurons**, but they are not the neurons of the previous section.
- Each neuron represents a *feature* of the data.
    - This means that if we have a dataset with 3 attributes (for example, age, salary, and city), then we will have 3 neurons in the input layer to represent each of them.
    - If we are working with an image of `32 × 32` pixels, then we will have 32 * 32 = 1024 neurons in the input layer to represent each of the pixels.

Hidden layer
- It is in this layer that we find the artificial neurons.
- You can have 1 or several hidden layers, and this will dictate the architecture of the neural network.
- In a deep neural network, the output of the neurons in one hidden layer is the input to the next hidden layer.
- There is no general rule about how many hidden layers and how many neurons each hidden layer should have in the neural network.
    - Even in industry, the number of layers and neurons is an *art* and depends heavily on the data that is available.
- In most cases, all the neurons are connected to each other, which is also known as a fully connected neural network.
- However, in the case of a convolutional neural network (CNN), not all neurons are connected to each other.
Output layer
- This layer is used to represent the output of the neural network.
- The number of output neurons depends on the amount of output we expect for the problem at hand.
- For example, for classification problems, the number of neurons could correspond to the number of *classes* one is interested in.

Weights and bias
- The neurons of the neural network are connected to each other through weights, and it is these weights that are **trained** in each iteration of the model (backpropagation).
- In addition to the weights, each neuron also has its own *bias*. This *bias* is the variable that lets the activation function shift left or right to better fit the data.
Another key point to highlight here is that the information flows in only one direction, forward. That is why it is known as a feed-forward neural network.

Backpropagation
According to the previous section, the data flows forward (input $\to$ hidden layer $\to$ output). Despite this, a neural network uses backpropagation to recompute the weights of each neuron.
**Note**: This technique is only used during the training phase of the model, which takes the following form:
1. During the training phase, the neural network is initialized with random weight values. These values are the **initial weights** of the model.
2. The training data is sent through the network and the network then computes the output. This is known as the *forward pass*.
3. Then, the computed output is compared with the actual output with the help of the loss / cost function and the error is determined (truth vs. prediction).
4. After having compared the "truth" against the "prediction" of the model, backpropagation is used to recompute the weight values and minimize the loss function.
5. This weight adjustment starts from the back of the network. The error is propagated backwards to the front layers, and the neurons of the network begin to adjust their weights. Hence the name backpropagation.
The following animation tries to visualize what backpropagation looks like in a deep neural network with multiple hidden layers. The use of this method has been crucial for the adoption of neural networks, since it can be applied to networks with different architectures and numbers of neurons.

How to interact with PyTorch and images
Before training a model in PyTorch, we want to use PyTorch to read and interact with images. The following is an example of how PyTorch can be used with images. We start by loading the modules:
###Code
from __future__ import print_function, division
import os
import torch
import pandas as pd
from skimage import io, transform
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from pathlib import Path
import zipfile
import wget
from glob import glob
###Output
_____no_output_____
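###Markdown
As a quick numeric illustration of the single-neuron computation described earlier (inputs, weights, bias, activation), here is a minimal sketch; the numbers are made up for the example and are not part of the original notebook.
###Code
# One artificial neuron: weighted sum of inputs plus bias, passed through an activation
inputs = torch.tensor([0.5, -1.2, 3.0])
weights = torch.tensor([0.8, 0.1, -0.4])
bias = torch.tensor(0.2)
z = torch.dot(inputs, weights) + bias   # weighted sum
activation = torch.sigmoid(z)           # activation function
print(z.item(), activation.item())
###Output
_____no_output_____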
###Markdown
And for this model we will need the following data: [link](https://download.pytorch.org/tutorial/faces.zip)
###Code
# Creating directory
output_dir = Path(".").joinpath("./data/faces").resolve()
output_dir.mkdir(parents=True, exist_ok=True)
# Downloading file
url = "https://download.pytorch.org/tutorial/faces.zip"
wget.download(url, out=str(output_dir))
# Unzip file
zipfile.ZipFile(output_dir.joinpath(Path(url).name)).extractall()
# Directory with faces
faces_directory = Path(".").joinpath("faces").resolve()
print(f">>> The directory with the datasets are stored in here: `{faces_directory}")
###Output
>>> The directory with the datasets are stored in here: `/Users/vfca5x5/Documents/Personal/Repositories/2021_06_Deep_Learning_tutorial_2/notebooks/faces
###Markdown
Now we can extract the metadata of the files:
###Code
landmarks_frame = pd.read_csv(f'{faces_directory}/face_landmarks.csv')
# Total number of files
files_arr = glob(f"{faces_directory}/*.jpg")
n = 65
img_name = landmarks_frame.iloc[n, 0]
landmarks = landmarks_frame.iloc[n, 1:]
landmarks = np.asarray(landmarks)
landmarks = landmarks.astype('float').reshape(-1, 2)
print('Image name: {}'.format(img_name))
print('Landmarks shape: {}'.format(landmarks.shape))
print('First 4 Landmarks: {}'.format(landmarks[:4]))
print(f">>> There are a total of `{len(files_arr)}` pictures!")
###Output
Image name: person-7.jpg
Landmarks shape: (68, 2)
First 4 Landmarks: [[32. 65.]
[33. 76.]
[34. 86.]
[34. 97.]]
>>> There are a total of `69` pictures!
###Markdown
And now let's look at what one of the images looks like:
###Code
def show_landmarks(image, landmarks):
plt.imshow(image)
plt.scatter(landmarks[:, 0], landmarks[:, 1], s=10, marker='.', c='r')
plt.pause(0.001) # pause a bit so that plots are updated
plt.figure()
show_landmarks(io.imread(os.path.join('./faces/', img_name)),
landmarks)
plt.show()
###Output
_____no_output_____
###Markdown
Now we can use `Dataset` to define a PyTorch class, which will help us interact with the images.
###Code
class FaceLandmarksDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, csv_file, root_dir, transform=None):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.landmarks_frame = pd.read_csv(csv_file)
self.root_dir = root_dir
self.transform = transform
def __len__(self):
return len(self.landmarks_frame)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
img_name = os.path.join(self.root_dir,
self.landmarks_frame.iloc[idx, 0])
image = io.imread(img_name)
landmarks = self.landmarks_frame.iloc[idx, 1:]
landmarks = np.array([landmarks])
landmarks = landmarks.astype('float').reshape(-1, 2)
sample = {'image': image, 'landmarks': landmarks}
if self.transform:
sample = self.transform(sample)
return sample
###Output
_____no_output_____
###Markdown
Now we create an instance of the dataset class so that we can interact with our data:
###Code
face_dataset = FaceLandmarksDataset(csv_file='./faces/face_landmarks.csv',
root_dir='./faces/')
fig = plt.figure()
for i in range(len(face_dataset)):
sample = face_dataset[i]
print(i, sample['image'].shape, sample['landmarks'].shape)
ax = plt.subplot(1, 4, i + 1)
plt.tight_layout()
ax.set_title('Sample #{}'.format(i))
ax.axis('off')
show_landmarks(**sample)
if i == 3:
plt.show()
break
###Output
0 (324, 215, 3) (68, 2)
|
FRCNN-SVM-Eval.ipynb | ###Markdown
Load pretrained detection model and Prediect directly Load models of DNN and SVM
###Code
cfg = get_cfg()
cfg.merge_from_file('configs/test_unfreeze_lastfews.yaml')
model = build_model(cfg) # returns a torch.nn.Module
model.eval()
metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
# ckpt_file = 'checkpoints/coco/base_model/model_final.pth'
# ckpt_file = 'checkpoints/coco/faster_rcnn/30shot_person_freeze_last_cos/model_final.pth'
# ckpt_file = 'checkpoints/coco/faster_rcnn/30shot_person_freeze_last_fc/model_final.pth'
# ckpt_file = 'checkpoints/coco/faster_rcnn/30shot_person_unfreeze_lastfews/model_final.pth'
ckpt_file = 'checkpoints/coco/faster_rcnn/30shot_person_unfreeze_whole/model_0015999.pth'
# ckpt_file = 'checkpoints/coco/faster_rcnn/30shot_airplane_unfreeze_whole/model_0029999.pth'
DetectionCheckpointer(model).load(ckpt_file)
# clf = load('svm_results/svm_model_finetuned_prop_base.joblib')
# clf = load('svm_results/svm_model_finetuned_prop_lastfew.joblib')
# clf = load('svm_results/svm_model_finetuned_prop_whole.joblib')
run_rcnn = False
ft_extractor_type = ckpt_file.split('/')[-2]
shots_num = 100
class_name = 'person'
file_name = 'svm_{}_{}_{}.joblib'.format(ft_extractor_type, shots_num, class_name)
clf = load('svm_results_0216/'+ file_name)
# define some functions for preprocessing
assert len(cfg.MODEL.PIXEL_MEAN) == len(cfg.MODEL.PIXEL_STD)
num_channels = len(cfg.MODEL.PIXEL_MEAN)
device = 'cuda'
pixel_mean = (
torch.Tensor(cfg.MODEL.PIXEL_MEAN)
.to(device)
.view(num_channels, 1, 1)
)
pixel_std = (
torch.Tensor(cfg.MODEL.PIXEL_STD)
.to(device)
.view(num_channels, 1, 1)
)
normalizer = lambda x: (x - pixel_mean) / pixel_std
###Output
_____no_output_____
###Markdown
Register dataset
###Code
from detectron2.data.datasets import register_coco_instances
json_dir = 'datasets/coco_experiments/seed1/full_box_30shot_person_trainval.json'
image_dir = 'datasets/coco/trainval2014'
register_coco_instances("30shot_person_train", {}, json_dir, image_dir)
json_dir = 'datasets/coco_experiments/seed1/full_box_{}shot_{}_test.json'.format(1000, class_name)
image_dir = 'datasets/coco/trainval2014'
testset_name = "{}_test".format(class_name)
register_coco_instances(testset_name, {}, json_dir, image_dir)
# json_dir = 'datasets/cocosplit/datasplit/5k.json'
# image_dir = 'datasets/coco/trainval2014'
# register_coco_instances("5k_test", {}, json_dir, image_dir)
# testset_name = "5k_test"
###Output
_____no_output_____
###Markdown
Difference between some dataloader
###Code
# data_loader_train = build_detection_train_loader(cfg)
# data_loader_train_it = iter(data_loader_train)
# data = next(data_loader_train_it)
# print(len(data))
# print(data[0]['image'].shape)
# print(data)
# data_loader_test = build_detection_test_loader(cfg, "1000shot_person_test")
# data_loader_test_it = iter(data_loader_test)
data_loader_test = build_detection_test_loader(cfg, testset_name)
data_loader_test_it = iter(data_loader_test)
data = next(data_loader_test_it)
print(len(data))
print(data[0]['image'].shape)
print(len(data_loader_test))
# transform_gen = T.ResizeShortestEdge(
# [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
# cfg.INPUT.MAX_SIZE_TEST,
# )
# print([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
# cfg.INPUT.MAX_SIZE_TEST)
# data_loader = build_detection_train_loader(DatasetCatalog.get("1000shot_person_test"),
# mapper=DatasetMapper(cfg, is_train=False, augmentations=[transform_gen]),
# total_batch_size = 10)
# data_loader_it = iter(data_loader)
# data = next(data_loader_it)
# print(len(data))
# print(data[0]['image'].shape)
# print(data)
## the type of dataloader is different 'AspectRatio Grouped Dataset' (don't know the reason)
# run one iter
# evaluator = COCOEvaluator("1000shot_person_test", cfg, True, output_dir = cfg.OUTPUT_DIR)
# with torch.no_grad():
# inputs = data
# outputs = model(inputs)
# evaluator.reset()
# evaluator.process(inputs, outputs)
# results = evaluator.evaluate()
# default test
# run_rcnn = True
if run_rcnn:
evaluator = COCOEvaluator(testset_name, cfg, True, output_dir = cfg.OUTPUT_DIR)
evaluator.reset()
inference_on_dataset(model, data_loader_test, evaluator)
###Output
_____no_output_____
###Markdown
Run detector step by step (should have same results)
###Code
if run_rcnn:
evaluator = COCOEvaluator(testset_name, cfg, True, output_dir = cfg.OUTPUT_DIR)
evaluator.reset()
training = False
save_results = False
box2box_transform = Box2BoxTransform(
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
)
smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
# test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
test_score_thresh = 0.5
test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
with torch.no_grad():
for idx, inputs in enumerate(data_loader_test):
# batched_inputs = data
batched_inputs = inputs
###################################
# outputs = model(inputs) #
#---------------------------------#
# Normalize, pad and batch the input images. (Preprocess_image)
images = [x["image"].to('cuda') for x in batched_inputs]
images = [normalizer(x) for x in images]
images = ImageList.from_tensors(
images, model.backbone.size_divisibility
)
# forward
features = model.backbone(images.tensor)
# print('features shape:', features['p3'].shape)
proposals, _ = model.proposal_generator(images, features)
# print('proposal num per img:', proposals_list[0].objectness_logits.shape)
# results, _ = model.roi_heads(images, features, proposals)
# print('\ninstance for image 0:', results[0], '\n')
# run roi_heads step by step
if training:
# proposals = [proposal for proposal in proposals]
targets = [d['instances'].to('cuda') for d in data]
proposals = model.roi_heads.label_and_sample_proposals(proposals, targets)
box_features = model.roi_heads.box_pooler(
[features[f] for f in ["p2", "p3", "p4", "p5"]], [x.proposal_boxes for x in proposals]
)
# print(box_features.shape)
box_features = model.roi_heads.box_head(box_features)
# print(box_features.shape)
pred_class_logits, pred_proposal_deltas = model.roi_heads.box_predictor(
box_features
)
# print('pred_class_logits', pred_class_logits[:3])
# print('pred_proposal_deltas', pred_proposal_deltas.shape)
outputs = FastRCNNOutputs(
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
)
results, _ = outputs.inference(
test_score_thresh,
test_nms_thresh,
test_detections_per_img,
)
# postprocess: resize images
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
# print('postprocessed instance for image 0:\n', processed_results[0], '\n')
###################################
# evaluate
evaluator.process(inputs, processed_results)
save_results = True
if save_results:
# visualizer
# inputs should be only one image
raw_image = cv2.imread(batched_inputs[0]['file_name'])
result_show = processed_results[0]["instances"]
v = Visualizer(raw_image,
metadata=MetadataCatalog.get(testset_name),
scale=1.0,
instance_mode=ColorMode.IMAGE # remove the colors of unsegmented pixels
)
v = v.draw_instance_predictions(result_show.to("cpu"))
folder_name = './test_0216/det_last_fc_0.5/'
os.makedirs(folder_name, exist_ok=True)
det_img_dir = folder_name + str(idx) + '.jpg'
cv2.imwrite(det_img_dir, v.get_image())
eval_results = evaluator.evaluate()
###Output
_____no_output_____
###Markdown
Add SVM classifier as the final layer
###Code
evaluator = COCOEvaluator(testset_name, cfg, True, output_dir = cfg.OUTPUT_DIR)
evaluator.reset()
training = False
box2box_transform = Box2BoxTransform(
weights=cfg.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS
)
smooth_l1_beta = cfg.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA
# test_score_thresh = cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST
test_score_thresh = 0.5
test_nms_thresh = cfg.MODEL.ROI_HEADS.NMS_THRESH_TEST
test_detections_per_img = cfg.TEST.DETECTIONS_PER_IMAGE
print(test_score_thresh, test_nms_thresh, test_detections_per_img)
with torch.no_grad():
for idx, inputs in enumerate(data_loader_test):
# batched_inputs = data
batched_inputs = inputs
###################################
# outputs = model(inputs) #
#---------------------------------#
# Normalize, pad and batch the input images. (Preprocess_image)
images = [x["image"].to('cuda') for x in batched_inputs]
images = [normalizer(x) for x in images]
images = ImageList.from_tensors(
images, model.backbone.size_divisibility
)
# forward
features = model.backbone(images.tensor)
# print('features shape:', features['p3'].shape)
proposals, _ = model.proposal_generator(images, features)
# print('proposal num per img:', proposals[0].objectness_logits.shape) # 1000 proposals
# run roi_heads step by step
if training:
# proposals = [proposal for proposal in proposals]
targets = [x['instances'].to('cuda') for x in batched_inputs]
proposals = model.roi_heads.label_and_sample_proposals(proposals, targets)
box_features = model.roi_heads.box_pooler(
[features[f] for f in ["p2", "p3", "p4", "p5"]], [x.proposal_boxes for x in proposals]
)
# print(box_features.shape)
box_features = model.roi_heads.box_head(box_features)
# print(box_features.shape)
pred_class_logits, pred_proposal_deltas = model.roi_heads.box_predictor(
box_features
)
# print('pred_class_logits', pred_class_logits[:3])
# print('pred_proposal_deltas', pred_proposal_deltas.shape)
outputs = FastRCNNOutputs(
box2box_transform,
pred_class_logits,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
)
results, _ = outputs.inference(
test_score_thresh,
test_nms_thresh,
test_detections_per_img,
)
# postprocess: resize images
processed_results = []
for results_per_image, input_per_image, image_size in zip(
results, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results.append({"instances": r})
# print('postprocessed instance for image 0:\n', processed_results[0], '\n')
# SVM
X = box_features.to('cpu').detach().numpy()
# y_hat = clf.predict(X)
pred_class_logits_svm = clf.predict_log_proba(X)
pred_class_logits_svm = torch.from_numpy(pred_class_logits_svm).to('cuda')
# print(y_hat.shape)
# print(pred_class_logits_svm[:3])
outputs_svm = FastRCNNOutputs(
box2box_transform,
pred_class_logits_svm,
pred_proposal_deltas,
proposals,
smooth_l1_beta,
)
pred_instances_svm, _ = outputs_svm.inference(
test_score_thresh,
test_nms_thresh,
test_detections_per_img,
)
processed_results_svm = []
for results_per_image, input_per_image, image_size in zip(
pred_instances_svm, batched_inputs, images.image_sizes
):
height = input_per_image.get("height", image_size[0])
width = input_per_image.get("width", image_size[1])
r = detector_postprocess(results_per_image, height, width)
processed_results_svm.append({"instances": r})
# print('\n\nSVM postprocessed instance for image 0:\n', processed_results_svm[0], '\n')
###################################
# evaluate
evaluator.process(inputs, processed_results_svm)
save_results = False
if save_results:
# visualizer
# inputs should be only one image
raw_image = cv2.imread(batched_inputs[0]['file_name'])
result_show = processed_results_svm[0]["instances"]
v = Visualizer(raw_image,
metadata=MetadataCatalog.get("1000shot_person_test"),
scale=1.0,
instance_mode=ColorMode.IMAGE # remove the colors of unsegmented pixels
)
v = v.draw_instance_predictions(result_show.to("cpu"))
# det_img_folder = time.strftime("%d_%H_%M/")
folder_name = './test_0216/svm_{}_{}_{}_{}/'.format(ft_extractor_type, shots_num, class_name, test_score_thresh)
os.makedirs(folder_name, exist_ok=True)
det_img_dir = folder_name + str(idx) + '.jpg'
cv2.imwrite(det_img_dir, v.get_image())
results = evaluator.evaluate()
###Output
0.5 0.5 100
|
examples/notebooks/Uncertainty.ipynb | ###Markdown
Uncertainty in Deep LearningA common criticism of deep learning models is that they tend to act as black boxes. A model produces outputs, but doesn't given enough context to interpret them properly. How reliable are the model's predictions? Are some predictions more reliable than others? If a model predicts a value of 5.372 for some quantity, should you assume the true value is between 5.371 and 5.373? Or that it's between 2 and 8? In some fields this situation might be good enough, but not in science. For every value predicted by a model, we also want an estimate of the uncertainty in that value so we can know what conclusions to draw based on it.DeepChem makes it very easy to estimate the uncertainty of predicted outputs (at least for the models that support it—not all of them do). Let's start by seeing an example of how to generate uncertainty estimates. We load a dataset, create a model, train it on the training set, and predict the output on the test set.
###Code
import deepchem as dc
import numpy as np
import matplotlib.pyplot as plot
tasks, datasets, transformers = dc.molnet.load_sampl()
train_dataset, valid_dataset, test_dataset = datasets
model = dc.models.MultitaskRegressor(len(tasks), 1024, uncertainty=True)
model.fit(train_dataset, nb_epoch=200)
y_pred, y_std = model.predict_uncertainty(test_dataset)
###Output
Loading dataset from disk.
Loading dataset from disk.
Loading dataset from disk.
WARNING:tensorflow:Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING: Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING:tensorflow:Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING: Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a3390cc50>>: AttributeError: module 'gast' has no attribute 'Num'
###Markdown
All of this looks exactly like any other example, with just two differences. First, we add the option `uncertainty=True` when creating the model. This instructs it to add features to the model that are needed for estimating uncertainty. Second, we call `predict_uncertainty()` instead of `predict()` to produce the output. `y_pred` is the predicted outputs. `y_std` is another array of the same shape, where each element is an estimate of the uncertainty (standard deviation) of the corresponding element in `y_pred`. And that's all there is to it! Simple, right?Of course, it isn't really that simple at all. DeepChem is doing a lot of work to come up with those uncertainties. So now let's pull back the curtain and see what is really happening. (For the full mathematical details of calculating uncertainty, see https://arxiv.org/abs/1703.04977)To begin with, what does "uncertainty" mean? Intuitively, it is a measure of how much we can trust the predictions. More formally, we expect that the true value of whatever we are trying to predict should usually be within a few standard deviations of the predicted value. But uncertainty comes from many sources, ranging from noisy training data to bad modelling choices, and different sources behave in different ways. It turns out there are two fundamental types of uncertainty we need to take into account. Aleatoric UncertaintyConsider the following graph. It shows the best fit linear regression to a set of ten data points.
###Code
# Generate some fake data and plot a regression line.
x = np.linspace(0, 5, 10)
y = 0.15*x + np.random.random(10)
plot.scatter(x, y)
fit = np.polyfit(x, y, 1)
line_x = np.linspace(-1, 6, 2)
plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
###Output
_____no_output_____
###Markdown
The line clearly does not do a great job of fitting the data. There are many possible reasons for this. Perhaps the measuring device used to capture the data was not very accurate. Perhaps `y` depends on some other factor in addition to `x`, and if we knew the value of that factor for each data point we could predict `y` more accurately. Maybe the relationship between `x` and `y` simply isn't linear, and we need a more complicated model to capture it. Regardless of the cause, the model clearly does a poor job of predicting the training data, and we need to keep that in mind. We cannot expect it to be any more accurate on test data than on training data. This is known as *aleatoric uncertainty*.How can we estimate the size of this uncertainty? By training a model to do it, of course! At the same time it is learning to predict the outputs, it is also learning to predict how accurately each output matches the training data. For every output of the model, we add a second output that produces the corresponding uncertainty. Then we modify the loss function to make it learn both outputs at the same time. Epistemic UncertaintyNow consider these three curves. They are fit to the same data points as before, but this time we are using 10th degree polynomials.
###Code
plot.figure(figsize=(12, 3))
line_x = np.linspace(0, 5, 50)
for i in range(3):
plot.subplot(1, 3, i+1)
plot.scatter(x, y)
fit = np.polyfit(np.concatenate([x, [3]]), np.concatenate([y, [i]]), 10)
plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
###Output
_____no_output_____
###Markdown
Each of them perfectly interpolates the data points, yet they clearly are different models. (In fact, there are infinitely many 10th degree polynomials that exactly interpolate any ten data points.) They make identical predictions for the data we fit them to, but for any other value of `x` they produce different predictions. This is called *epistemic uncertainty*. It means the data does not fully constrain the model. Given the training data, there are many different models we could have found, and those models make different predictions.The ideal way to measure epistemic uncertainty is to train many different models, each time using a different random seed and possibly varying hyperparameters. Then use all of them for each input and see how much the predictions vary. This is very expensive to do, since it involves repeating the whole training process many times. Fortunately, we can approximate the same effect in a less expensive way: by using dropout.Recall that when you train a model with dropout, you are effectively training a huge ensemble of different models all at once. Each training sample is evaluated with a different dropout mask, corresponding to a different random subset of the connections in the full model. Usually we only perform dropout during training and use a single averaged mask for prediction. But instead, let's use dropout for prediction too. We can compute the output for lots of different dropout masks, then see how much the predictions vary. This turns out to give a reasonable estimate of the epistemic uncertainty in the outputs. Uncertain Uncertainty?Now we can combine the two types of uncertainty to compute an overall estimate of the error in each output:$$\sigma_\text{total} = \sqrt{\sigma_\text{aleatoric}^2 + \sigma_\text{epistemic}^2}$$This is the value DeepChem reports. But how much can you trust it? Remember how I started this tutorial: deep learning models should not be used as black boxes. We want to know how reliable the outputs are. Adding uncertainty estimates does not completely eliminate the problem; it just adds a layer of indirection. Now we have estimates of how reliable the outputs are, but no guarantees that those estimates are themselves reliable.Let's go back to the example we started with. We trained a model on the SAMPL training set, then generated predictions and uncertainties for the test set. Since we know the correct outputs for all the test samples, we can evaluate how well we did. Here is a plot of the absolute error in the predicted output versus the predicted uncertainty.
###Code
abs_error = np.abs(y_pred.flatten()-test_dataset.y.flatten())
plot.scatter(y_std.flatten(), abs_error)
plot.xlabel('Standard Deviation')
plot.ylabel('Absolute Error')
plot.show()
###Output
_____no_output_____
###Markdown
The first thing we notice is that the axes have similar ranges. The model clearly has learned the overall magnitude of errors in the predictions. There also is clearly a correlation between the axes. Values with larger uncertainties tend on average to have larger errors.Now let's see how well the values satisfy the expected distribution. If the standard deviations are correct, and if the errors are normally distributed (which is certainly not guaranteed to be true!), we expect 95% of the values to be within two standard deviations, and 99% to be within three standard deviations. Here is a histogram of errors as measured in standard deviations.
###Code
plot.hist(abs_error/y_std.flatten(), 20)
plot.show()
###Output
_____no_output_____
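###Markdown
A quick numerical check to complement the histogram above (this cell is an added sketch, not part of the original tutorial): what fraction of the test-set errors actually fall within two and three predicted standard deviations, using the `abs_error` and `y_std` arrays defined earlier?
###Code
# Added sketch: fraction of errors within 2 and 3 predicted standard deviations
ratio = abs_error / y_std.flatten()
print('Fraction within 2 std:', np.mean(ratio <= 2.0))
print('Fraction within 3 std:', np.mean(ratio <= 3.0))
###Output
_____no_output_____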
###Markdown
Uncertainty in Deep LearningA common criticism of deep learning models is that they tend to act as black boxes. A model produces outputs, but doesn't give enough context to interpret them properly. How reliable are the model's predictions? Are some predictions more reliable than others? If a model predicts a value of 5.372 for some quantity, should you assume the true value is between 5.371 and 5.373? Or that it's between 2 and 8? In some fields this situation might be good enough, but not in science. For every value predicted by a model, we also want an estimate of the uncertainty in that value so we can know what conclusions to draw based on it.DeepChem makes it very easy to estimate the uncertainty of predicted outputs (at least for the models that support it—not all of them do). Let's start by seeing an example of how to generate uncertainty estimates. We load a dataset, create a model, train it on the training set, and predict the output on the test set.
###Code
import deepchem as dc
import numpy as np
import matplotlib.pyplot as plot
tasks, datasets, transformers = dc.molnet.load_sampl()
train_dataset, valid_dataset, test_dataset = datasets
model = dc.models.MultitaskRegressor(len(tasks), 1024, uncertainty=True)
model.fit(train_dataset, nb_epoch=200)
y_pred, y_std = model.predict_uncertainty(test_dataset)
###Output
Loading dataset from disk.
Loading dataset from disk.
Loading dataset from disk.
WARNING:tensorflow:Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING: Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING:tensorflow:Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>>: AttributeError: module 'gast' has no attribute 'Num'
WARNING: Entity <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>> could not be transformed and will be executed as-is. Please report this to the AutgoGraph team. When filing the bug, set the verbosity to 10 (on Linux, `export AUTOGRAPH_VERBOSITY=10`) and attach the full output. Cause: converting <bound method SwitchedDropout.call of <deepchem.models.layers.SwitchedDropout object at 0x1a32863828>>: AttributeError: module 'gast' has no attribute 'Num'
###Markdown
All of this looks exactly like any other example, with just two differences. First, we add the option `uncertainty=True` when creating the model. This instructs it to add features to the model that are needed for estimating uncertainty. Second, we call `predict_uncertainty()` instead of `predict()` to produce the output. `y_pred` is the predicted outputs. `y_std` is another array of the same shape, where each element is an estimate of the uncertainty (standard deviation) of the corresponding element in `y_pred`. And that's all there is to it! Simple, right?Of course, it isn't really that simple at all. DeepChem is doing a lot of work to come up with those uncertainties. So now let's pull back the curtain and see what is really happening. (For the full mathematical details of calculating uncertainty, see https://arxiv.org/abs/1703.04977)To begin with, what does "uncertainty" mean? Intuitively, it is a measure of how much we can trust the predictions. More formally, we expect that the true value of whatever we are trying to predict should usually be within a few standard deviations of the predicted value. But uncertainty comes from many sources, ranging from noisy training data to bad modelling choices, and different sources behave in different ways. It turns out there are two fundamental types of uncertainty we need to take into account. Aleatoric UncertaintyConsider the following graph. It shows the best fit linear regression to a set of ten data points.
###Code
# Generate some fake data and plot a regression line.
x = np.linspace(0, 5, 10)
y = 0.15*x + np.random.random(10)
plot.scatter(x, y)
fit = np.polyfit(x, y, 1)
line_x = np.linspace(-1, 6, 2)
plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
###Output
_____no_output_____
###Markdown
The line clearly does not do a great job of fitting the data. There are many possible reasons for this. Perhaps the measuring device used to capture the data was not very accurate. Perhaps `y` depends on some other factor in addition to `x`, and if we knew the value of that factor for each data point we could predict `y` more accurately. Maybe the relationship between `x` and `y` simply isn't linear, and we need a more complicated model to capture it. Regardless of the cause, the model clearly does a poor job of predicting the training data, and we need to keep that in mind. We cannot expect it to be any more accurate on test data than on training data. This is known as *aleatoric uncertainty*.How can we estimate the size of this uncertainty? By training a model to do it, of course! At the same time it is learning to predict the outputs, it is also learning to predict how accurately each output matches the training data. For every output of the model, we add a second output that produces the corresponding uncertainty. Then we modify the loss function to make it learn both outputs at the same time. Epistemic UncertaintyNow consider these three curves. They are fit to the same data points as before, but this time we are using 10th degree polynomials.
###Code
plot.figure(figsize=(12, 3))
line_x = np.linspace(0, 5, 50)
for i in range(3):
plot.subplot(1, 3, i+1)
plot.scatter(x, y)
fit = np.polyfit(np.concatenate([x, [3]]), np.concatenate([y, [i]]), 10)
plot.plot(line_x, np.poly1d(fit)(line_x))
plot.show()
###Output
_____no_output_____
###Markdown
Each of them perfectly interpolates the data points, yet they clearly are different models. (In fact, there are infinitely many 10th degree polynomials that exactly interpolate any ten data points.) They make identical predictions for the data we fit them to, but for any other value of `x` they produce different predictions. This is called *epistemic uncertainty*. It means the data does not fully constrain the model. Given the training data, there are many different models we could have found, and those models make different predictions.The ideal way to measure epistemic uncertainty is to train many different models, each time using a different random seed and possibly varying hyperparameters. Then use all of them for each input and see how much the predictions vary. This is very expensive to do, since it involves repeating the whole training process many times. Fortunately, we can approximate the same effect in a less expensive way: by using dropout.Recall that when you train a model with dropout, you are effectively training a huge ensemble of different models all at once. Each training sample is evaluated with a different dropout mask, corresponding to a different random subset of the connections in the full model. Usually we only perform dropout during training and use a single averaged mask for prediction. But instead, let's use dropout for prediction too. We can compute the output for lots of different dropout masks, then see how much the predictions vary. This turns out to give a reasonable estimate of the epistemic uncertainty in the outputs. Uncertain Uncertainty?Now we can combine the two types of uncertainty to compute an overall estimate of the error in each output:$$\sigma_\text{total} = \sqrt{\sigma_\text{aleatoric}^2 + \sigma_\text{epistemic}^2}$$This is the value DeepChem reports. But how much can you trust it? Remember how I started this tutorial: deep learning models should not be used as black boxes. We want to know how reliable the outputs are. Adding uncertainty estimates does not completely eliminate the problem; it just adds a layer of indirection. Now we have estimates of how reliable the outputs are, but no guarantees that those estimates are themselves reliable.Let's go back to the example we started with. We trained a model on the SAMPL training set, then generated predictions and uncertainties for the test set. Since we know the correct outputs for all the test samples, we can evaluate how well we did. Here is a plot of the absolute error in the predicted output versus the predicted uncertainty.
###Code
abs_error = np.abs(y_pred.flatten()-test_dataset.y.flatten())
plot.scatter(y_std.flatten(), abs_error)
plot.xlabel('Standard Deviation')
plot.ylabel('Absolute Error')
plot.show()
###Output
_____no_output_____
###Markdown
The first thing we notice is that the axes have similar ranges. The model clearly has learned the overall magnitude of errors in the predictions. There also is clearly a correlation between the axes. Values with larger uncertainties tend on average to have larger errors.Now let's see how well the values satisfy the expected distribution. If the standard deviations are correct, and if the errors are normally distributed (which is certainly not guaranteed to be true!), we expect 95% of the values to be within two standard deviations, and 99% to be within three standard deviations. Here is a histogram of errors as measured in standard deviations.
###Code
plot.hist(abs_error/y_std.flatten(), 20)
plot.show()
###Output
_____no_output_____ |
DL0101EN-25-GoogLeNet.ipynb | ###Markdown
|S no|Dataset|files|
|------|------|--|
|1.|Daisy|633|
|2.|Dandelion|898|
|3.|Roses|641|
|4.|Sunflowers|699|
|5.|Tulips|799|
| |Total|3,670|
###Code
# NOTE: the notebook's import cell was not captured; the imports below are assumed (tensorflow.keras)
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import (Input, Conv2D, MaxPooling2D, AveragePooling2D,
                                     BatchNormalization, Concatenate, Flatten, Dense, Dropout)
from tensorflow.keras.models import Model
# define parameters
CLASS_NUM = 5
BATCH_SIZE = 16
EPOCH_STEPS = int(4323/BATCH_SIZE)
IMAGE_SHAPE = (224, 224, 3)
IMAGE_TRAIN = "Data/Flowers/"
MODEL_NAME = "Model/googlenet_flower.h5"
# prepare data
train_datagen = ImageDataGenerator(
rescale=1./255,
#rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip =True
)
generator_main = train_datagen.flow_from_directory(
IMAGE_TRAIN,
target_size=(IMAGE_SHAPE[0], IMAGE_SHAPE[1]),
batch_size=BATCH_SIZE,
class_mode='categorical'
)
def my_generator(generator):
while True: # keras requires all generators to be infinite
data = next(generator)
x = data[0]
y = data[1], data[1], data[1]
yield x, y
train_generator = my_generator(generator_main)
###Output
_____no_output_____
###Markdown
create model**Inception Layer**
###Code
def inception(x, filters):
# 1 x1
path1 = Conv2D(filters=filters[0], kernel_size=(1,1), strides=1, padding='same', activation='relu')(x)
# 1x1->3x3
path2 = Conv2D(filters=filters[1][0], kernel_size=(1,1), strides=1, padding='same', activation='relu')(x)
path2 = Conv2D(filters=filters[1][1], kernel_size=(3,3), strides=1, padding='same', activation='relu')(path2)
# 1x1->5x5
path3 = Conv2D(filters=filters[2][0], kernel_size=(1,1), strides=1, padding='same', activation='relu')(x)
path3 = Conv2D(filters=filters[2][1], kernel_size=(5,5), strides=1, padding='same', activation='relu')(path3)
# 3x3->1x1
path4 = MaxPooling2D(pool_size=(3,3), strides=1, padding='same')(x)
path4 = Conv2D(filters=filters[3], kernel_size=(1,1), strides=1, padding='same', activation='relu')(path4)
return Concatenate(axis=-1)([path1,path2,path3,path4])
def auxiliary(x,name=None):
layer = AveragePooling2D(pool_size=(5,5), strides=3, padding='valid')(x)
layer = Conv2D(filters=128, kernel_size=(1,1), strides=1, padding='same', activation='relu')(layer)
layer = Flatten()(layer)
layer = Dense(units=256, activation='relu')(layer)
layer = Dropout(0.4)(layer)
layer = Dense(units=CLASS_NUM, activation='softmax', name=name)(layer)
return layer
###Output
_____no_output_____
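###Markdown
A quick sanity check on the inception block (added sketch, not part of the original notebook): the 3a filter specification `[64, (96,128), (16,32), 32]` used below should concatenate to 64+128+32+32 = 256 output channels.
###Code
# Added sketch: check the concatenated channel count of one inception block
_check_in = Input(shape=(28, 28, 192))
_check_out = inception(_check_in, [64, (96, 128), (16, 32), 32])
print(_check_out.shape)  # expected (None, 28, 28, 256)
###Output
_____no_output_____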
###Markdown

###Code
def googlenet():
layer_in = Input(shape=IMAGE_SHAPE)
## Stage-1
layer = Conv2D(filters=64, kernel_size=(7,7), strides=2, padding='same', activation='relu')(layer_in)
layer = MaxPooling2D(pool_size=(3,3), strides=2, padding='same')(layer)
layer = BatchNormalization()(layer)
# stage-2
layer = Conv2D(filters=64, kernel_size=(1,1), strides=1, padding='same', activation='relu')(layer)
layer = Conv2D(filters=192, kernel_size=(3,3), strides=1, padding='same', activation='relu')(layer)
layer = BatchNormalization()(layer)
layer = MaxPooling2D(pool_size=(3,3), strides=2, padding='same')(layer)
# stage-3
layer = inception(layer, [ 64, (96,128), (16,32), 32]) #3a
layer = inception(layer, [128, (128,192), (32,96), 64]) #3b
layer = MaxPooling2D(pool_size=(3,3), strides=2, padding='same')(layer)
# stage-4
layer = inception(layer, [192, (96,208), (16,48), 64]) #4a
aux1 = auxiliary(layer, name='aux1')
layer = inception(layer, [160, (112,224), (24,64), 64]) #4b
layer = inception(layer, [128, (128,256), (24,64), 64]) #4c
layer = inception(layer, [112, (144,288), (32,64), 64]) #4d
aux2 = auxiliary(layer, name='aux2')
layer = inception(layer, [256, (160,320), (32,128), 128]) #4e
layer = MaxPooling2D(pool_size=(3,3), strides=2, padding='same')(layer)
# stage-5
layer = inception(layer, [256, (160,320), (32,128), 128]) #5a
layer = inception(layer, [384, (192,384), (48,128), 128]) #5b
layer = AveragePooling2D(pool_size=(7,7), strides=1, padding='valid')(layer)
# stage-6
layer = Flatten()(layer)
layer = Dropout(0.4)(layer)
layer = Dense(units=256, activation='linear')(layer)
main = Dense(units=CLASS_NUM, activation='softmax', name='main')(layer)
model = Model(inputs=layer_in, outputs=[main, aux1, aux2])
return model
model = googlenet()
model.summary()
optimizer = ['Adam','SGD','Adam','SGD']
epochs = [20,30,20,30]
history_all = {}
for i in range(len(optimizer)):
print("Using Optimizer:"+ optimizer[i]+',Epoch:'+str(epochs[i]))
model.compile(loss='categorical_crossentropy',
loss_weights={'main': 1.0, 'aux1': 0.3, 'aux2': 0.3},
optimizer=optimizer[i], metrics=['accuracy'])
train_history = model.fit(
train_generator,
steps_per_epoch=EPOCH_STEPS,
epochs=epochs[i],
#callbacks=[checkpoint]
shuffle=True
)
# save history
if len(history_all) == 0:
history_all = {key: [] for key in train_history.history}
for key in history_all:
history_all[key].extend(train_history.history[key])
model.save(MODEL_NAME)
# show train history
def show_train_history(history, xlabel, ylabel, train):
for item in train:
plt.plot(history[item])
plt.title('Train History')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend(train, loc='upper left')
plt.show()
show_train_history(history_all, 'Epoch', 'Accuracy', ('main_acc', 'aux1_acc', 'aux2_acc'))
show_train_history(history_all, 'Epoch', 'Loss', ('main_loss', 'aux1_loss', 'aux2_loss'))
###Output
_____no_output_____ |
week3_course_python_III/day1_python_VII/exercise_dictlist_comprehension.ipynb | ###Markdown
Python | list/dict comprehension List comprehension in Python is a compact way of creating a list from a sequence. It is a short way to create a new list. List comprehension is considerably faster than processing a list using the for loop. ```python syntax[i for i in iterable if expression]``` Dictionary comprehension is the same as list comprehension but with dictionaries. I know, you didn't see that coming. ```python syntax{key: value for i in iterable}``` - https://www.programiz.com/python-programming/list-comprehension- https://www.programiz.com/python-programming/dictionary-comprehension Exercise 1.Use list comprehension and print the result to solve these problems: 1. Make a list with the square number of numbers from 0 to 20.
###Code
p= [i**2 for i in range(21)]
print(p)
###Output
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400]
###Markdown
2. Make a list with the first 50 power of two.
###Code
p= [i**2 for i in range(50)]
print(p)
###Output
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100, 121, 144, 169, 196, 225, 256, 289, 324, 361, 400, 441, 484, 529, 576, 625, 676, 729, 784, 841, 900, 961, 1024, 1089, 1156, 1225, 1296, 1369, 1444, 1521, 1600, 1681, 1764, 1849, 1936, 2025, 2116, 2209, 2304, 2401]
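###Markdown
Note that the cell above reuses the squares from the previous exercise; a version that matches the exercise statement (the first 50 powers of two), added here as a sketch:
###Code
# Added sketch: the first 50 powers of two (2**0 through 2**49)
powers_of_two = [2**i for i in range(50)]
print(powers_of_two)
###Output
_____no_output_____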
###Markdown
3. Calculate the square root of the first 100 numbers. **You will probably need to install math library with pip and import it in this file.**
###Code
p= [i**0.5 for i in range(100)]
print(p)
###Output
[0.0, 1.0, 1.4142135623730951, 1.7320508075688772, 2.0, 2.23606797749979, 2.449489742783178, 2.6457513110645907, 2.8284271247461903, 3.0, 3.1622776601683795, 3.3166247903554, 3.4641016151377544, 3.605551275463989, 3.7416573867739413, 3.872983346207417, 4.0, 4.123105625617661, 4.242640687119285, 4.358898943540674, 4.47213595499958, 4.58257569495584, 4.69041575982343, 4.795831523312719, 4.898979485566356, 5.0, 5.0990195135927845, 5.196152422706632, 5.291502622129181, 5.385164807134504, 5.477225575051661, 5.5677643628300215, 5.656854249492381, 5.744562646538029, 5.830951894845301, 5.916079783099616, 6.0, 6.082762530298219, 6.164414002968976, 6.244997998398398, 6.324555320336759, 6.4031242374328485, 6.48074069840786, 6.557438524302, 6.6332495807108, 6.708203932499369, 6.782329983125268, 6.855654600401044, 6.928203230275509, 7.0, 7.0710678118654755, 7.14142842854285, 7.211102550927978, 7.280109889280518, 7.3484692283495345, 7.416198487095663, 7.483314773547883, 7.54983443527075, 7.615773105863909, 7.681145747868608, 7.745966692414834, 7.810249675906654, 7.874007874011811, 7.937253933193772, 8.0, 8.06225774829855, 8.12403840463596, 8.18535277187245, 8.246211251235321, 8.306623862918075, 8.366600265340756, 8.426149773176359, 8.48528137423857, 8.54400374531753, 8.602325267042627, 8.660254037844387, 8.717797887081348, 8.774964387392123, 8.831760866327848, 8.888194417315589, 8.94427190999916, 9.0, 9.055385138137417, 9.1104335791443, 9.16515138991168, 9.219544457292887, 9.273618495495704, 9.327379053088816, 9.38083151964686, 9.433981132056603, 9.486832980505138, 9.539392014169456, 9.591663046625438, 9.643650760992955, 9.695359714832659, 9.746794344808963, 9.797958971132712, 9.848857801796104, 9.899494936611665, 9.9498743710662]
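###Markdown
The exercise mentions the math library; an equivalent version using `math.sqrt`, added here as a sketch:
###Code
# Added sketch: same result using math.sqrt, as hinted in the exercise
import math
roots = [math.sqrt(i) for i in range(100)]
print(roots)
###Output
_____no_output_____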
###Markdown
4. Create this list `[-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0]`.
###Code
p= [i for i in range(-10,1,1)]
print(p)
###Output
[-10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0]
###Markdown
5. Filter only negative and zero in the list `numbers`.
###Code
numbers = [-4, -3, -2, -1, 0, 2, 4, 6]
numbers = [-4, -3, -2, -1, 0, 2, 4, 6]
numers2 = [element for element in numbers if element <= 0]  # I typed "o" instead of "0" lol
print(numers2)
###Output
[-4, -3, -2, -1, 0]
###Markdown
6. Find the odd numbers from 1-100 and put them on a list.
###Code
odd_numbers = [element for element in range(101) if element%2 != 0]
print(odd_numbers)
###Output
[1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99]
###Markdown
7. Find all of the numbers from 1-1000 that are divisible by 7.
###Code
numbers7 = [element for element in range(1001) if element%7 == 0]
print(numbers7)
###Output
[0, 7, 14, 21, 28, 35, 42, 49, 56, 63, 70, 77, 84, 91, 98, 105, 112, 119, 126, 133, 140, 147, 154, 161, 168, 175, 182, 189, 196, 203, 210, 217, 224, 231, 238, 245, 252, 259, 266, 273, 280, 287, 294, 301, 308, 315, 322, 329, 336, 343, 350, 357, 364, 371, 378, 385, 392, 399, 406, 413, 420, 427, 434, 441, 448, 455, 462, 469, 476, 483, 490, 497, 504, 511, 518, 525, 532, 539, 546, 553, 560, 567, 574, 581, 588, 595, 602, 609, 616, 623, 630, 637, 644, 651, 658, 665, 672, 679, 686, 693, 700, 707, 714, 721, 728, 735, 742, 749, 756, 763, 770, 777, 784, 791, 798, 805, 812, 819, 826, 833, 840, 847, 854, 861, 868, 875, 882, 889, 896, 903, 910, 917, 924, 931, 938, 945, 952, 959, 966, 973, 980, 987, 994]
###Markdown
8. Remove all of the vowels in a string. Hint: make a list of the non-vowels.
###Code
teststring = "When you reach the end of your rope, tie a knot in it and hang on."
vowels = ('a', 'e', 'i', 'o', 'u')
teststring = "When you reach the end of your rope, tie a knot in it and hang on."
novowel = [element for element in teststring if element not in vowels]  # keep only the non-vowels, per the hint
print(novowel)
teststring = "When you reach the end of your rope, tie a knot in it and hang on."
vowels = ('a', 'e', 'i', 'o', 'u')
novowel = [teststring.replace(element, "") for element in teststring if element in vowels]
print(novowel)
###Output
['Whn you rach th nd of your rop, ti a knot in it and hang on.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.', 'When yo reach the end of yor rope, tie a knot in it and hang on.', 'Whn you rach th nd of your rop, ti a knot in it and hang on.', 'When you rech the end of your rope, tie knot in it nd hng on.', 'Whn you rach th nd of your rop, ti a knot in it and hang on.', 'Whn you rach th nd of your rop, ti a knot in it and hang on.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.', 'When yo reach the end of yor rope, tie a knot in it and hang on.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.', 'Whn you rach th nd of your rop, ti a knot in it and hang on.', 'When you reach the end of your rope, te a knot n t and hang on.', 'Whn you rach th nd of your rop, ti a knot in it and hang on.', 'When you rech the end of your rope, tie knot in it nd hng on.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.', 'When you reach the end of your rope, te a knot n t and hang on.', 'When you reach the end of your rope, te a knot n t and hang on.', 'When you rech the end of your rope, tie knot in it nd hng on.', 'When you rech the end of your rope, tie knot in it nd hng on.', 'When yu reach the end f yur rpe, tie a knt in it and hang n.']
###Markdown
9. Find the capital letters (and not white space) in the sentence `"The Way To Get Started Is To Quit Talking And Begin Doing."`.
###Code
sentence = "The Way To Get Started Is To Quit Talking And Begin Doing."
capitals = [element for element in sentence if element.isupper()]
print(capitals)
###Output
['T', 'W', 'T', 'G', 'S', 'I', 'T', 'Q', 'T', 'A', 'B', 'D']
###Markdown
10. Find all the consonants in the sentence `"Tell me and I forget. Teach me and I remember. Involve me and I learn."`.
###Code
# this would be the opposite of exercise 8, but I couldn't get 8 to work
###Output
_____no_output_____
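###Markdown
One possible solution for exercise 10, added here as a sketch -- it is simply the complement of the vowel filter from exercise 8:
###Code
# Added sketch: keep alphabetic characters that are not vowels
sentence10 = "Tell me and I forget. Teach me and I remember. Involve me and I learn."
consonants = [ch for ch in sentence10 if ch.isalpha() and ch.lower() not in 'aeiou']
print(consonants)
###Output
_____no_output_____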
###Markdown
11. Create 4 lists of 10 random numbers between 0 and 100 each. **You will probably need to import random module.**
###Code
import random
randomlist = [random.sample(range(0, 101), 5) for i in range(0,5)]
print(randomlist)
###Output
[[50, 53, 42, 84, 49], [75, 69, 17, 61, 10], [13, 11, 50, 36, 15], [69, 85, 88, 44, 86], [5, 0, 83, 37, 13]]
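###Markdown
The cell above builds 5 lists of 5 numbers; a version matching the exercise statement (4 lists of 10 random numbers between 0 and 100), added here as a sketch:
###Code
# Added sketch: 4 lists of 10 random numbers between 0 and 100
randomlists = [random.sample(range(0, 101), 10) for _ in range(4)]
print(randomlists)
###Output
_____no_output_____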
###Markdown
Exercise 2. 1. Flatten the following list of lists of lists to a one dimensional list **using list-comprehension**:```pythonexpected output:[1, 2, 3, 4, 5, 6, 7, 8, 9]```
###Code
list_of_lists =[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
list_of_lists =[[1, 2, 3], [4, 5, 6], [7, 8, 9]]
unique_list = []
unique_list = [unique_list.append(element2) for element in list_of_lists for element2 in element]
print(unique_list)  # I don't see what's happening here: list.append returns None, so this builds a list of Nones (see the working version below)
unique_list = [numero for elemento in list_of_lists for numero in elemento]
print(unique_list)
###Output
[1, 2, 3, 4, 5, 6, 7, 8, 9]
###Markdown
2. Flatten the following list to a new list, and capitalize the elements of the new list **using list-comprehension**:```pythonexpected output:['SPAIN', 'MADRID', 'FRANCE', 'PARIS', 'PORTUGAL', 'LISBON']```
###Code
countries = [[('Spain', 'Madrid')], [('France', 'Paris')], [('Portugal', 'Lisbon')]]
countries = [[('Spain', 'Madrid')], [('France', 'Paris')], [('Portugal', 'Lisbon')]]
newlist3 = [pais.upper() for elemento in countries for tupla in elemento for pais in tupla]
print(newlist3)
###Output
['SPAIN', 'MADRID', 'FRANCE', 'PARIS', 'PORTUGAL', 'LISBON']
###Markdown
3. Change the `countries` list to a list of dictionaries:```pythonexpected output:[{'country': 'SPAIN', 'city': 'MADRID'},{'country': 'FRANCE', 'city': 'PARIS'},{'country': 'PORTUGAL', 'city': 'LISBON'}]```
###Code
dict_country = [{'country': newlist3[i], 'city': newlist3[i+1]} for i in range(0, len(newlist3), 2)]  # pair consecutive COUNTRY, CITY entries from exercise 2
print(dict_country)
###Output
_____no_output_____
###Markdown
4. Change the following list of lists to a list of concatenated strings **using list-comprehension**:```pythonexpected output:['Gabriel Vazquez', 'Clara Piniella', 'Diomedes Barbero']```
###Code
names = [[('Gabriel', 'Vazquez')], [('Clara', 'Piniella')], [('Diomedes', 'Barnames')]]
newlist3 = [' '.join(tupla) for lista in names for tupla in lista]
print(newlist3)
###Output
['Gabriel Vazquez', 'Clara Piniella', 'Diomedes Barnames']
###Markdown
5. Convert the numbers of the following nested list to floats. Use **floats_list** as the name of the list. **using list-comprehension**
###Code
big_list_of_lists = [['40', '20', '10', '30'], ['20', '20', '20', '20', '20', '30', '20'], \
['30', '20', '30', '50', '10', '30', '20', '20', '20'], ['100', '100'], ['100', '100', '100', '100', '100'], \
['100', '100', '100', '100']]
floats_list = [float(element) for list in big_list_of_lists for element in list]
print(floats_list)
###Output
[40.0, 20.0, 10.0, 30.0, 20.0, 20.0, 20.0, 20.0, 20.0, 30.0, 20.0, 30.0, 20.0, 30.0, 50.0, 10.0, 30.0, 20.0, 20.0, 20.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0]
###Markdown
6. Using list comprehension create the following list of tuples:```pythonexpected output: [(0, 1, 0, 0, 0, 0, 0),(1, 1, 1, 1, 1, 1, 1),(2, 1, 2, 4, 8, 16, 32),(3, 1, 3, 9, 27, 81, 243),(4, 1, 4, 16, 64, 256, 1024),(5, 1, 5, 25, 125, 625, 3125),(6, 1, 6, 36, 216, 1296, 7776),(7, 1, 7, 49, 343, 2401, 16807),(8, 1, 8, 64, 512, 4096, 32768),(9, 1, 9, 81, 729, 6561, 59049),(10, 1, 10, 100, 1000, 10000, 100000)]```
###Code
# I didn't even know where to start here; one way that matches the expected output:
lista = [(i, 1, i, i**2, i**3, i**4, i**5) for i in range(11)]
print(lista)
###Output
_____no_output_____
###Markdown
Exercise 3. 1. First, create a range from 100 to 160 with steps of 10. Second, using **dict comprehension**, create a dictionary where each number in the range is the key and each item divided by 100 is the value.
###Code
p= [i for i in range(100,161,10)]
print(p)
dict = {i:i/100 for i in range(100,161,10)}
print(dict)
###Output
{100: 1.0, 110: 1.1, 120: 1.2, 130: 1.3, 140: 1.4, 150: 1.5, 160: 1.6}
###Markdown
2. Using **dict comprehension** and a conditional argument create a dictionary from `curr_dict` where only the key:value pairs with value above 2000 are taken to the new dictionary.
###Code
curr_dict = {"Netflix":4950,"HBO":2400,"Amazon":1800, "Movistar":1700}
dict = {key : value for key, value in curr_dict.items() if value>2000}
print(dict)
###Output
{'Netflix': 4950, 'HBO': 2400}
###Markdown
3. Create a function that receives two lists `list1` and `list2` by parameter and returns a dictionary with each element of `list1` as keys and the elements of `list2` as values. This time use **dict comprehension** to do so.
###Code
List1 = [1, 2, 3, 4]
list2 = ['a', 'b', 'c', 'd']
def createdic(list1, list2):
    # build the dictionary with a dict comprehension, as the exercise asks
    diccionario = {list1[i]: list2[i] for i in range(len(list1))}
    print(diccionario)
    return diccionario
createdic(list1=List1, list2=list2)
# I didn't understand why it said list1 was not defined -- the variable defined above is "List1" (capital L)
diccionario = {List1[i]: list2[i] for i in range(len(List1))}
print(diccionario)
###Output
{1: 'a', 2: 'b', 3: 'c', 4: 'd'}
|
Model_Selection/XG_boost_on_cancer_data.ipynb | ###Markdown
XGBoost Importing the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
###Output
_____no_output_____
###Markdown
Importing the dataset
###Code
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
dataset.head()
###Output
_____no_output_____
###Markdown
Splitting the dataset into the Training set and Test set
###Code
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
###Output
_____no_output_____
###Markdown
Training XGBoost on the Training set
###Code
from xgboost import XGBClassifier
classifier = XGBClassifier()
classifier.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
Making the Confusion Matrix
###Code
from sklearn.metrics import confusion_matrix, accuracy_score
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)
accuracy_score(y_test, y_pred)
###Output
[[84 3]
[ 0 50]]
###Markdown
This is a much higher accuracy than that achieved by any of the other models (DT, LR, RF ...). Applying k-Fold Cross Validation To avoid getting lucky with the test set.
###Code
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X = X_train, y = y_train, cv = 10)
print("Accuracy: {:.2f} %".format(accuracies.mean()*100))
print("Standard Deviation: {:.2f} %".format(accuracies.std()*100))
###Output
Accuracy: 96.53 %
Standard Deviation: 2.07 %
###Markdown
The mean is still pretty close to the test-set accuracy, and the standard deviation is also comparatively low (although it can be made lower by applying GridSearchCV to XGBoost's hyperparameters).
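###Markdown
For illustration only (added sketch; the parameter grid below is arbitrary and not from the original notebook), a grid search over a few XGBoost hyperparameters could look like this:
###Code
# Added sketch: tune a few XGBoost hyperparameters with GridSearchCV (grid chosen arbitrarily)
from sklearn.model_selection import GridSearchCV
param_grid = {'max_depth': [3, 4, 5],
              'learning_rate': [0.05, 0.1, 0.3],
              'n_estimators': [100, 200]}
grid_search = GridSearchCV(estimator=XGBClassifier(), param_grid=param_grid, scoring='accuracy', cv=10)
grid_search.fit(X_train, y_train)
print("Best Accuracy: {:.2f} %".format(grid_search.best_score_ * 100))
print("Best Parameters:", grid_search.best_params_)
###Output
_____no_output_____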
###Code
###Output
_____no_output_____ |
docs/abscal_notebooks/example_wfc3_ir_grism.ipynb | ###Markdown
WFC3 IR Grism ABSCAL Example NotebookThis notebook will take you through the steps of downloading sample MAST data and running the WFC3 IR grism abscal scripts on that data. Set up Python EnvironmentThis step imports the python modules used by this script.
###Code
import glob
import os
import numpy as np
import shutil
from astroquery.mast import Observations
from pathlib import Path
from tempfile import TemporaryDirectory
from abscal.wfc3.preprocess_table_create import populate_table
from abscal.wfc3.reduce_grism_coadd import coadd
from abscal.wfc3.reduce_grism_wavelength import wlmeas, wlmake
%matplotlib inline
work_dir = os.getcwd()
###Output
_____no_output_____
###Markdown
Optional: Set up temporary directory for dataBy default, notebooks store downloaded files (etc.) in the directory in which they are running. If you don't wish to do this (or don't have access to that directory), you can set up a temporary directory and store data in it.
###Code
# Set this flag to True if you wish to use a temporary directory
use_temporary_dir = True
# Set this flag if you want to define a custom directory to work in
use_custom_dir = False
if use_temporary_dir:
data_dir = TemporaryDirectory()
os.chdir(data_dir.name)
work_dir = data_dir.name
if use_custom_dir:
work_dir = "/Users/york/Projects/abscal/examples/notebook_dev"
print("Storing data in {}".format(work_dir))
###Output
_____no_output_____
###Markdown
Optional: Download input data from MASTThis notebook is designed to be run with any WFC3 IR grism data (although planetary nebula observations of a known target will be required for the wavelength steps). In order to simply see how these scripts work with example data, or to test their operation, you can use a set of example data.The next cell defines a function that will download all non-HLA data from a specific HST observing program (i.e. data whose observation ID does not begin with "hst_"), and copy the downloaded files into a single directory (here, the same directory where the notebook is running). This function may be more generally useful for retrieving observations from MAST, and can be copied and used separately (or modified to suit) as long as the following import statements are included: `from astroquery.mast import Observations`, `import os`, `import shutil`. The following cell will download two sets of example data (a flux calibration target and a planetary nebula target) from MAST. In particular, it will download program 15587 for flux calibration, and 13582 for planetary nebula data.If you already have downloaded data with which you want to work, these cells can be skipped entirely.
###Code
def download_mast_program(proposal_id, download_dir, skip_existing_files=True):
flux_table = Observations.query_criteria(proposal_id=proposal_id)
obs_mask = [x[:4] != 'hst_' for x in flux_table['obs_id']]
obs_filter = [id for id in flux_table['obs_id'] if id[:4] != 'hst_']
flux_table = flux_table[obs_mask]
obs_ids = flux_table['obsid']
if skip_existing_files:
i = 0
while i < len(obs_filter):
# This is an idiom for going through a list and potentially removing items from it. If you're going
# through a list in a for loop, the result of removing items from the list during the loop is not
# well-defined, so this method is used instead.
if len(glob.glob(os.path.join(download_dir, obs_filter[i]+"*"))) > 0:
obs_filter.remove(obs_filter[i])
else:
i += 1
data_products = Observations.get_product_list(obs_ids)
if len(data_products) > 0 and len(obs_filter) > 0:
manifest = Observations.download_products(data_products, download_dir=download_dir, extension=["fits"],
obs_id=obs_filter)
for file_name in manifest['Local Path']:
base_file = os.path.basename(file_name)
print("Copying {}".format(base_file))
shutil.copy(os.path.join(download_dir, file_name), os.path.join(download_dir, base_file))
# Download the planetary nebula program
download_mast_program(13582, work_dir)
# Download the flux calibration program
download_mast_program(15587, work_dir)
###Output
_____no_output_____
###Markdown
Set up the initial data tableThis cell will set up a data table of all WFC3 data in the current directory. Note that, in succeeding steps, only the IR grism data will actually be reduced (except that filter data taken at the same position and POSTARG during the same visit will be used, if available, to derive an initial location of the grism zeroth-order), so the presence of data other than IR grism data will not confuse the remaining scripts. The populate_table function called below can take a variety of arguments. In particular, if you have an existing table of observations (in the form of an AbscalDataTable), you can pass in that table and add any additional observations to that table. Also, the function can take arbitrary keyword arguments, which are currently used to set whether to use verbose output, and whether to output an IDL-compatible data table; in the future, there may be additional settable parameters that will affect the way that data is ingested into a table.
###Code
verbose_output = True
data_table = populate_table(verbose=True, search_dirs=work_dir)
###Output
_____no_output_____
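###Markdown
Before going further, a quick look at the table produced above (added sketch; `colnames` and `len()` are standard Astropy `Table` operations, which `AbscalDataTable` inherits):
###Code
# Added sketch: inspect the freshly-built table with standard Astropy Table operations
print(len(data_table), "observations ingested")
print(data_table.colnames)
###Output
_____no_output_____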
###Markdown
Now that there's a data table, let's take a look at what's in it (and what its features are). The data table holds a list of observations along with metadata describing the date, program, visit, grism or filter used, and other exposure parameters. There are also a number of columns related to the current abscal run, of which only two (the path at which the observation can be found, and the file name used to obtain the other metadata) are currently filled in. The AbscalDataTable class subclasses the Astropy table class, so more or less anything from the Astropy table documentation can be used here. Reduce the WFC3 Grism DataThis cell will take the data table from `populate_table()` and reduce all of the grism exposures in it. The `coadd()` function allows for many default parameters to be reset at runtime. NOTE: Jupyter notebooks do not allow blocking calls in the middle of cells. When run as a script, ABSCAL uses blocking figures with text-input boxes to allow user interaction. As such, when running ABSCAL from a notebook, there is currently no way to use interactive elements.
###Code
reduced_table = coadd(data_table, out_dir=work_dir, verbose=True, plots=True)
###Output
_____no_output_____
###Markdown
The main difference between `data_table` and `reduced_table` is that the latter has many more parameters filled in (zeroth-order image location, extracted spectrum file, co-added file, etc.). Measure Wavelength FitThis cell takes the planetary nebula exposures in reduced_table, and finds the location of a set of emission lines. When run interactively, this allows the user to override fit locations and choose to reject fits. NOTE: The "notebook" flag is required in order to display non-interactive plots without being trapped in an endless loop.
###Code
wavelength_table = wlmeas(reduced_table, verbose=True, plots=True, out_dir=work_dir, notebook=True)
###Output
_____no_output_____
###Markdown
The wavelength_table is a standard Astropy table rather than an AbscalDataTable, and stores only information on the derived line positions. Generate Wavelength SolutionThis cell takes the results of the wavelength fit above, and uses it to derive a full-detector wavelength solution based on position relative to the zeroth order.
###Code
fit_table = wlmake(reduced_table, wavelength_table, verbose=True, plots=True, out_dir=work_dir)
###Output
_____no_output_____ |
figures/.ipynb_checkpoints/Figure 3-checkpoint.ipynb | ###Markdown
Figure 3This notebook recreates the figure panels included in Figure 3 of Lee et al. 2021. Description of the DataThe data used in this notebook comes from the experiments described in Lee et al. 2021. Specifically, we have the behavior and activity of a trained deep RL agent performing an evidence accumulation task from Engelhard et al. 2019. The dataset includes 5000 trials of the trained agent with frozen weights. Preparing the Data Importing required code packages and modules
###Code
import pickle
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
import sys
from scipy.io import loadmat, savemat
import utils.cnnlstm_analysis_utils as utils
import seaborn as sns
from scipy import stats
from matplotlib import gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib.lines import Line2D
import os
###Output
_____no_output_____
###Markdown
downloading data
###Code
load_prefix = '../../data/logs/VA_maze/'
with open(load_prefix + '5000t_mosttrain_db.p', 'rb') as f:
[actions_, rewards_, feats_, terms_, vs_, tow_counts_, episode_lengths] = pickle.load(f)
f.close()
vs = np.hstack(vs_)
terms = np.hstack(terms_)
rewards = np.hstack(rewards_)
ep_rew = np.array([np.sum(r_trial) for r_trial in rewards_])
ep_tow = np.array([np.max(trial, 0) for trial in tow_counts_])
tow_counts = np.vstack(tow_counts_)
weights = utils.get_params_from_zip(load_prefix + 'rl_model_20800000_steps')
# weights.keys()
w_pol = weights['model/pi/w:0']
b_pol = weights['model/pi/b:0']
w_val = np.squeeze(weights['model/vf/w:0'])
b_val = weights['model/vf/b:0']
trial_info = loadmat(load_prefix + 'trialinfo_db.mat')
trial_info = trial_info['trials']
trial_info.dtype.names
choices = utils.extract_field(trial_info, 'choice')
trial_type = utils.extract_field(trial_info, 'trialType')
raw_ypos = utils.extract_field(trial_info, 'position')[:,1]
cueCombos_ = utils.extract_field(trial_info, 'cueCombo')
cuePos_ = utils.extract_field(trial_info, 'cuePos')
cueOnset_ = utils.extract_field(trial_info, 'cueOnset')
raw_ypos_ = [x[:,1] for x in trial_info['position'][0]]
raw_xpos_ = [x[:,0] for x in trial_info['position'][0]]
raw_vpos_ = [x[:,2] for x in trial_info['position'][0]]
ypos_ = [np.hstack([np.array(x[:-1]), x[-2] * np.ones((7,))]) for x in raw_ypos_]
ypos = np.hstack(ypos_)
###Output
_____no_output_____
###Markdown
Plotting Parameters
###Code
# PLOTTING PARAMS
matplotlib.rcParams.update({'font.size': 15})
matplotlib.rcParams.update({'font.family': 'Arial'})
FONT_BG = 25
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42 # allow text of pdf to be edited in illustrator
mpl.rcParams["axes.spines.right"] = False
mpl.rcParams["axes.spines.top"] = False
marker_plots = {'marker':'.', 'markersize':2, 'markeredgecolor':'k', 'markerfacecolor':'k'}
heatmap_sz = (4, 3.5)
example_sz = (4, 1)
left_col = 'red'
right_col = 'deepskyblue'
###Output
_____no_output_____
###Markdown
Organizing DataPulling out the specific data that we will use for figure panels
###Code
CUEP_LIM = 140
REWP_LEN_S = -16
REWP_LEN_STP = -5
ypos_cuep = np.squeeze(np.dstack([ypos_t[:CUEP_LIM] for ypos_t in ypos_])[:,:,0])
(ep_towdelt_idx, ep_towdiff_idx) = utils.get_ep_tow_idx(ep_tow)
###Output
_____no_output_____
###Markdown
Calculate Vector RPEs
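###Markdown
As a sketch of what the next cell computes (added note): with value weights $w_i$ and features $\phi_i(s_t)$, the per-feature (vector) RPE is $$\delta_{t,i} = \frac{r_t}{N} + \gamma\, w_i \phi_i(s_{t+1}) - w_i \phi_i(s_t),$$ where $N$ is the number of features and the $\gamma\, w_i \phi_i(s_{t+1})$ term is dropped on terminal steps; summing over $i$ recovers the TD error of the linear value readout (up to the value bias term).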
###Code
if os.path.exists(load_prefix + 'pes.p'):
with open(load_prefix + 'pes.p', 'rb') as f:
pes = pickle.load(f)
f.close()
else:
feats = np.vstack(feats_)
rewards = np.hstack(rewards_)
terms = np.hstack(terms_)
start = np.roll(terms,1)
nsteps = len(terms)
nfeatures = feats_[0][0].shape[0]
gamma = 0.99
# compute per-feature PEs
pes = np.zeros((nsteps, nfeatures))
for i in range(0,nsteps-1):
if (terms[i]): # there is a one-off error-- the SECOND index of the start of the trial accurately measures the start of the trial
pes[i,:] = rewards[i] / nfeatures - w_val * feats[i,:]
else:
pes[i,:] = rewards[i] / nfeatures + w_val * (-feats[i,:] + gamma * feats[i+1,:])
pickle.dump(pes, open(load_prefix + "pes.p", "wb") )
# pes split by 5000 trials
pes_ = utils.split_by_ep_len(pes, np.hstack((episode_lengths)))
pes_cuep = np.dstack([pes_i[:CUEP_LIM,:] for pes_i in pes_])
ypos_cuep = np.squeeze(np.dstack([ypos_t[:CUEP_LIM] for ypos_t in ypos_])[:,:,0])
###Output
_____no_output_____
###Markdown
Figure 3A: View Angle Plot
###Code
# get PEs by view angle
pes_cuep_flat = np.vstack([pes_i[:CUEP_LIM,:] for pes_i in pes_])
vpos_cuep_flat = np.round(np.hstack([trial[:CUEP_LIM] for trial in raw_vpos_]),2)
pes_cuep_vabinned = utils.bin_data_by_vpos(pes_cuep_flat, vpos_cuep_flat)
EX_UNIT_VA_IDX = 43
fig, ex_ax = plt.subplots(figsize=example_sz)
ex_ax.set_xlim([-0.5, 0.5])
ex_ax.plot(np.linspace(-0.5, 0.5, 21), pes_cuep_vabinned[utils.sort_by_max_loc(pes_cuep_vabinned),:][EX_UNIT_VA_IDX,:].T, color ='k')
ex_ax.set_xlabel('Right <- Angle (rad) -> Left');
ex_ax.set_ylabel('Example Unit');
fig, ax_va = plt.subplots(figsize = heatmap_sz)
im = ax_va.imshow(utils.norm_within_feat(pes_cuep_vabinned)[utils.sort_by_max_loc(pes_cuep_vabinned),:],
aspect = 'auto', extent = [-0.5, 0.5, 64, 1], cmap = utils.parula_map, interpolation = 'none')
ax_va.set_yticks([20, 40, 60]) # 32,
ax_va.set_yticklabels(['20', '40', '60'])
ax_va.spines['right'].set_visible(True)
ax_va.spines['top'].set_visible(True)
ax_va.set_xlabel('Right <- Angle (rad) -> Left');
ax_va.set_ylabel('Vector RPE');
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
###Output
_____no_output_____
###Markdown
Figure 3B: Position Plot
###Code
# *SLOPE* sorted position from matlab withonly position sensitive units
# from matlab script: timelock_to_pos.m
norm_pes_pos = loadmat(load_prefix + 'sorted_norm_pos_pes.mat')['norm_pes']
ypos_pes_pos = np.squeeze(loadmat(load_prefix + 'sorted_norm_pos_pes.mat')['num_steps_xticks'])
order = np.squeeze(loadmat(load_prefix + 'sorted_norm_pos_pes.mat')['order']) - 1
slopevec = np.squeeze(loadmat(load_prefix + 'sorted_norm_pos_pes.mat')['slopvec'])
POS_SEN_UNIT_START = 25
EX_UNIT_POS_IDX = 34
peak_order = utils.sort_by_max_loc(utils.norm_within_feat(np.nanmean(pes_cuep,-1).T))
norm_pes = utils.norm_within_feat(np.nanmean(pes_cuep,-1).T)
psorted_norm_pes_pos = norm_pes[peak_order,:]
order_possenonly = [value for value in order if value in peak_order[POS_SEN_UNIT_START:]]
norm_pes_pos_possenonly = norm_pes_pos[order_possenonly,:]
pes_pos = np.nanmean(pes_cuep[117:,:,:],-1).T;
pes_pos_possenonly = pes_pos[order_possenonly,:]
fig, ex_ax = plt.subplots(figsize=example_sz)
ex_ax.plot(ypos_pes_pos, pes_pos_possenonly[EX_UNIT_POS_IDX,:].T, color = 'k')
ex_ax.set_xlim([ypos_pes_pos[0], ypos_pes_pos[-1]]);
ex_ax.set_xlabel('Position(cm)');
ex_ax.set_ylabel('Example Unit');
fig, ax_pos = plt.subplots(figsize=heatmap_sz)
im = ax_pos.imshow(norm_pes_pos[order,:], cmap = utils.parula_map,
aspect = 'auto',interpolation = 'none')
ax_pos.spines['right'].set_visible(True)
ax_pos.spines['top'].set_visible(True)
ax_pos.set_xlabel('Position(cm)');
ax_pos.set_ylabel('Vector RPE');
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
###Output
_____no_output_____
###Markdown
Figure 3C: Cue Response Plot
###Code
leftCue_ = [trialcue[0][0] -2 for trialcue in cueOnset_]
rightCue_ = [trialcue[1][0] -2 for trialcue in cueOnset_]
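# The helper below builds, for each of the 64 units, PE traces time-locked to the given cue-onset
# indices and stacks the units along the third axis (presumably trials x timesteps x units).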
get_timelocked_cues = lambda pes_, cueLocs: np.dstack([utils.timelock_to_cue(pes_, cueLocs, pes_i) for pes_i in np.arange(64)])
pes_lcue = get_timelocked_cues(pes_,leftCue_)
pes_rcue = get_timelocked_cues(pes_,rightCue_)
vmin = loadmat(load_prefix + 'sorted_pes_lcuercue2.mat')['imedg1']
vmax = loadmat(load_prefix + 'sorted_pes_lcuercue2.mat')['imedg2']
norm_pes_lcue = loadmat(load_prefix + 'sorted_pes_lcuercue2.mat')['mrContra']
norm_pes_rcue = loadmat(load_prefix + 'sorted_pes_lcuercue2.mat')['mrIpsi']
sort_order = np.squeeze(loadmat(load_prefix + 'sorted_pes_lcuercue2.mat')['order']) - 1
# UNIT 40 is the delimiter between LEFT- and RIGHT-sensitive cues
LR_DELIM = 40
EX_UNIT_LEFT_IDX = 9
EX_UNIT_RIGHT_IDX = 43
fig, ex_ax = plt.subplots(figsize = example_sz)
ex_ax.plot(np.arange(-1, 15), np.nanmean(pes_lcue,0)[4:-10, sort_order[EX_UNIT_LEFT_IDX]], **marker_plots, label = 'Left Cue', color = left_col)
ex_ax.plot(np.arange(-1, 15), np.nanmean(pes_rcue,0)[4:-10, sort_order[EX_UNIT_LEFT_IDX]], **marker_plots, label = 'Right Cue', color = right_col)
ex_ax.set_xlim(-1, 15)
ex_ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ex_ax.set_xlabel('Timesteps from Left Cue Onset');
ex_ax.set_ylabel('Example Unit');
fig, left_ax = plt.subplots(figsize = heatmap_sz)
im = left_ax.imshow(norm_pes_lcue[sort_order,:-10], aspect = 'auto', extent = [-5,15,64,1],
cmap = utils.parula_map , interpolation = 'none') # ,vmin = vmin, vmax = vmax)
left_ax.set_title('Left Cue', color = left_col, fontsize = 15)
left_ax.set_yticks([20, 40, 60]) # EX_UNIT_LEFT_IDX,
left_ax.set_yticklabels([ '20', '40', '60'])
left_ax.set_xticks([0,10])
left_ax.spines['right'].set_visible(True)
left_ax.spines['top'].set_visible(True)
left_ax.set_xlabel('Time steps from Left Cue Onset')
left_ax.set_ylabel('Vector RPEs')
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
fig, ex_ax = plt.subplots(figsize = example_sz)
ex_ax.plot(np.arange(-1, 15), np.nanmean(pes_lcue,0)[4:-10, sort_order[EX_UNIT_RIGHT_IDX]], **marker_plots, label = 'Left Cue', color = left_col)
ex_ax.plot(np.arange(-1, 15), np.nanmean(pes_rcue,0)[4:-10, sort_order[EX_UNIT_RIGHT_IDX]], **marker_plots, label = 'Right Cue', color = right_col)
ex_ax.set_xlim(-1, 15)
ex_ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ex_ax.set_xlabel('Timesteps from Right Cue Onset');
ex_ax.set_ylabel('Example Unit');
fig, right_ax = plt.subplots(figsize = heatmap_sz)
im = right_ax.imshow(norm_pes_rcue[sort_order,:-10], aspect = 'auto', extent = [-5,15,64,1],
cmap = utils.parula_map, interpolation = 'none') # , vmin = vmin, vmax = vmax)
right_ax.set_title('Right Cue', color = right_col, fontsize = 15)
right_ax.spines['right'].set_visible(True)
right_ax.spines['top'].set_visible(True)
right_ax.set_yticks([20, 40, 60])
right_ax.set_yticklabels(['20', '40', '60'])
right_ax.set_xticks([0, 10])
right_ax.set_xlabel('Time steps from Right Cue Onset');
right_ax.set_ylabel('Vector RPEs');
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
###Output
_____no_output_____
###Markdown
Figure 3D: VA neural plot
###Code
# all neural data uses matlab code: neural_behavior.m
va_hm = loadmat('./data/neural_behaviors.mat')['va_heatmap']
va_ex = loadmat('./data/neural_behaviors.mat')['va_ex'][0]
va_ex_se = loadmat('./data/neural_behaviors.mat')['va_ex_se'][0]
fig, ex_ax = plt.subplots(figsize=example_sz)
ex_ax.plot(np.linspace(-1,1, 23), va_ex, color ='k')
ex_ax.fill_between(np.linspace(-1,1, 23), va_ex - va_ex_se, va_ex + va_ex_se, color = 'k', alpha = 0.5)
ex_ax.set_xlabel('Right <- Angle (rad) -> Left');
ex_ax.set_ylabel('Example Unit (ΔF/F)');
fig, ax_va = plt.subplots(figsize = heatmap_sz)
va_hm[np.isnan(va_hm)] = 0
im = ax_va.imshow(va_hm, aspect = 'auto', extent = [-1, 1, 64, 1], cmap = utils.parula_map, interpolation = 'none')
ax_va.spines['right'].set_visible(True)
ax_va.spines['top'].set_visible(True)
ax_va.set_xlabel('Right <- Angle (rad) -> Left');
ax_va.set_ylabel('Neurons');
ax_va.set_title('View Angle \n(n = 137/303)')
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
###Output
_____no_output_____
###Markdown
Figure 3E: Position neural plot
###Code
# all neural data uses matlab code: neural_behavior.m
pos_hm = loadmat('./data/neural_behaviors.mat')['pos_heatmap']
pos_ex = loadmat('./data/neural_behaviors.mat')['pos_ex'][0]
pos_ex_se = loadmat('./data/neural_behaviors.mat')['pos_ex_se'][0]
fig, ex_ax = plt.subplots(figsize=example_sz)
ex_ax.plot(np.linspace(0,220, 45), pos_ex, color ='k')
ex_ax.fill_between(np.linspace(0,220, 45), pos_ex - pos_ex_se, pos_ex + pos_ex_se, color = 'k', alpha = 0.5)
ex_ax.set_xlabel('Position (cm)');
ex_ax.set_ylabel('Example Unit (ΔF/F)');
fig, ax_va = plt.subplots(figsize = heatmap_sz)
im = ax_va.imshow(pos_hm, aspect = 'auto', extent = [0, 220, 64, 1], cmap = utils.parula_map, interpolation = 'none')
ax_va.spines['right'].set_visible(True)
ax_va.spines['top'].set_visible(True)
ax_va.set_xlabel('Position (cm)');
ax_va.set_ylabel('Neurons');
ax_va.set_title('Position \n(n = 91/303)')
cbar = plt.colorbar(im)
cbar.set_label('Peak Norm. Activity')
###Output
_____no_output_____ |
Quantitative Analysis/1. Group Identifier.ipynb | ###Markdown
Testing Sentiment Analysis for the Detection of Ingroup and Outgroup
---
Over three tests, IBM's Watson and TextBlob were used to determine whether sentiment analysis could detect the ingroups and outgroups of a text, score terms related to the seed words of the group schema, and detect elevation and otherising statements. For detecting the ingroup and outgroup of a text, the dataset was processed to produce a list of named entities for each orator. These lists were then scored by TextBlob and Watson. The same process was used to score the seed words, this time using a list of named concepts. Finally, for detecting elevation and otherising statements, the statements containing both a named entity and a named concept were extracted from the dataset and scored: sentences classified as positive were treated as elevation and sentences classified as negative as otherising.
Benchmark Data
A successful test will identify each of the following entities as either ingroup or outgroup.
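As a minimal illustration of the scoring step, the intended logic can be reduced to a few lines. This is only a sketch: the names `doc` and `named_entities` refer to the spaCy document and entity-label set built later in this notebook.
```python
from textblob import TextBlob

# Sketch: score every sentence that mentions a named entity and bucket the
# entity by the sign of the sentence polarity.
groups = {"ingroup": set(), "outgroup": set()}
for sentence in doc.sents:
    for ent in sentence.ents:
        if ent.label_ not in named_entities:
            continue
        polarity = TextBlob(sentence.text).sentiment.polarity
        if polarity > 0:
            groups["ingroup"].add(ent.lower_)
        elif polarity < 0:
            groups["outgroup"].add(ent.lower_)
```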
###Code
import os
import json
import pandas as pd
filepath = "C:/Users/Steve/OneDrive - University of Southampton/CulturalViolence/KnowledgeBases/Data/"
with open(os.path.join(filepath, "groups_benchmark.json"), "r") as f:
groups = json.load(f)
keys = list(groups.keys())
print(keys)
frames = []
for value in groups.values():
frames.append(pd.DataFrame(dict([ (k, pd.Series(v)) for k, v in value.items() ]), index = None).fillna(""))
display(pd.concat(frames , keys = keys))
###Output
['bush', 'binladen']
###Markdown
Set up the pipeline
###Code
import os
import sys
import platform
import json
import pandas as pd
import datetime
import tqdm
import spacy
from spacy.tokens import Span
from spacy.pipeline import merge_entities
from spacy.matcher import Matcher
from spacy import displacy
from VPipeLibrary.custpipe import EntityMatcher
pd.set_option('display.max_colwidth', -1)
pd.set_option("display.max_columns", 2000)
pd.set_option("display.max_rows", 2000)
spacy.info()
print('============================== Info about python ==============================')
print('python version: ', platform.sys.version)
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
print('=========================== Loading Language Models ===========================')
model = 'en_core_web_md'
print('loading', model)
nlp = spacy.load(model)
print('loaded', model)
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
class NamedConcept(object):
def __init__(self, nlp):
self.matcher = Matcher(nlp.vocab)
with open(r'C:\Users\Steve\Documents\CulturalViolence\KnowledgeBases\group_typology.json', 'r') as fp:
self.named_concept_rules = json.load(fp)
for entry in self.named_concept_rules.values():
for pattern in entry.values():
for subcat, terms in pattern.items():
self.matcher.add(subcat, None, [{"LEMMA" : {"IN" : terms}}])
def __call__(self, doc):
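        # Overview: for every lemma match from the group typology, widen the span to include
        # neighbouring determiners, compounds, modifiers or adjacent nouns, register the widened
        # span as an entity, and finally merge each entity span into a single token.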
matches = self.matcher(doc)
spans = [] # keep the spans for later so we can merge them afterwards
for match_id, start, end in matches:
## gather up noun phrases
concept = Span(doc, start, end, label=doc.vocab.strings[match_id])
if "the" in [word.lower_ for word in list(doc[start].lefts)]:
                concept = Span(doc, start - doc[start].n_lefts, end + doc[start].n_rights, label = doc.vocab.strings[match_id])
#print(concept, '=>', concept.label_)
# elif doc[start].dep_ in ["poss", "compound"] and end != len(doc):
# try:
# concept = Span(doc, start, list(doc[start + 1].rights)[-1].i + 1, label=doc.vocab.strings[match_id])
# print(concept, '=>', concept.label_)
# except:
# continue
elif doc[start - 1].dep_ in ["amod", "compound"] and start != 0:
if doc[start -1].ent_type_:
concept = Span(doc, start - 1, end, label=doc[start -1].ent_type_)
else:
concept = Span(doc, start - 1, end, label=doc.vocab.strings[match_id])
#print(concept, '=>', concept.label_)
elif doc[start - 1].pos_ in ["NOUN", "PROPN"] and start != 0:
concept = Span(doc, start - 1, end, label=doc.vocab.strings[match_id])
#print(concept, '=>', concept.label_)
elif doc[start + 1].pos_ in ["NOUN", "PROPN"] and end != len(doc):
concept = Span(doc, start, end + 1, label=doc.vocab.strings[match_id])
#print(concept, '=>', concept.label_)
# elif doc[start - 2].dep_ in ["nsubj", "amod"] and doc[start].dep_ in ["pobj"] and start != 0:
# concept = Span(doc, start - 2, end, label=doc.vocab.strings[match_id])
# #print(concept, '=>', concept.label_)
elif doc[start].dep_ in ["nsubj", "csubj", "pobj"] and end != len(doc):
if doc[start + 1].dep_ in ["prep"]:
try:
concept = Span(doc, start, list(doc[start + 1].rights)[-1].i + 1, label=doc.vocab.strings[match_id])
#print(concept, '=>', concept.label_)
except:
continue
doc.ents = spacy.util.filter_spans(list(doc.ents) + [concept])
spans.append(concept)
with doc.retokenize() as retokenizer:
# Iterate over all spans and merge them into one token. This is done
# after setting the entities – otherwise, it would cause mismatched
# indices!
for span in spacy.util.filter_spans(doc.ents):
retokenizer.merge(span)
return doc # don't forget to return the Doc!
from spacy.pipeline import EntityRuler
# remove all pipeline components
for pipe in nlp.pipe_names:
if pipe not in ['tagger', "parser", "ner"]:
nlp.remove_pipe(pipe)
#add entity ruler to the pipe
with open(r'C:\Users\Steve\Documents\CulturalViolence\KnowledgeBases\group_typology.json', 'r') as fp:
group_typology = json.load(fp)
ruler = EntityRuler(nlp)
patterns = []
for entry in group_typology.values():
for pattern in entry.values():
for subcat, terms in pattern.items():
patterns.append({"label" : subcat, "pattern" : [{"LEMMA" : {"IN" : terms}}]})
# add new pipe components
ruler.add_patterns(patterns)
#nlp.add_pipe(ruler)
nlp.add_pipe(EntityMatcher(nlp), before = "ner")
nlp.add_pipe(NamedConcept(nlp), after = "ner")
nlp.add_pipe(merge_entities)
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
##Set up filepaths
filepath = 'C:/Users/Steve/OneDrive - University of Southampton/CulturalViolence/KnowledgeBases/Speeches/'
binladenpath = os.path.join(filepath, 'Osama bin Laden/')
bushpath = os.path.join(filepath, 'George Bush/')
filepath = "C:/Users/Steve/OneDrive - University of Southampton/CulturalViolence/KnowledgeBases/data/"
resultspath = "C:/Users/Steve/OneDrive - University of Southampton/CulturalViolence/KnowledgeBases/Experiment 4 - Testing Sentiment Analysis to Detect Ingroup and Outgroup/"
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
###Output
completed at: Feb 13 2020 15:36:42
###Markdown
IBM Watson(insert information about IBM Watson)
###Code
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions, KeywordsOptions, SentimentOptions
apikey = ''
url = ''
authenticator = IAMAuthenticator(apikey)
service = NaturalLanguageUnderstandingV1(version='2019-07-12', authenticator=authenticator)
service.set_service_url(url)
# response = service.analyze(
# text="The evidence we have gathered all points to a collection of loosely affiliated terrorist organizations known as al-Qa\'eda.",
# features=Features(sentiment=SentimentOptions()
# )).get_result()
# #entities=EntitiesOptions(emotion=True, sentiment=True, limit=2)
# #keywords=KeywordsOptions(emotion=True, sentiment=True, limit=2)
# print(json.dumps(response, indent=2))
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
###Output
completed at: Dec 19 2019 00:01:44
###Markdown
Test 1. Sentiment related to Named Entities
The following test uses Watson to detect sentiment related to target terms. The target terms are user defined, and here they are based on the spaCy NER model with the supplementary pipeline component. The technique works by sending the text and the list of target terms to the API, which returns a sentiment score for each target. These results show that this technique negatively scores a number of entities which would reasonably be expected to be positive.
###Code
with open(os.path.join(filepath, "bush_filelist.json"), 'r') as fp:
bush_filelist = json.load(fp)
text = ''
for file in bush_filelist[3:]:
with open(bushpath + file[1], 'r') as fp:
text = text + fp.read()
#doc = nlp(text)
## create a list of named entities for analysis and store results in Bush_Analysis_v2.json or BinLaden_Analysis_v2.json
targets = list({ent.lower_ for ent in doc.ents if ent.label_ in named_entities})
# response = service.analyze(text=text, features=Features( \
# sentiment=SentimentOptions(targets=targets), \
# entities=EntitiesOptions(sentiment=True), \
# keywords=KeywordsOptions(sentiment=True,emotion=True)
# )).get_result()
with open(os.path.join(resultspath, "Bush_Analysis_v2.json"), "wb") as f:
f.write(json.dumps(response).encode("utf-8"))
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
###Output
completed at: Dec 19 2019 01:06:18
###Markdown
TextBlob
TextBlob is a lightweight Python library for common NLP tasks. It provides a simple, pattern-based sentiment analyser that returns a polarity score between -1 (negative) and 1 (positive), which is used here for comparison with Watson.
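The polarity score can be obtained in a single call; a minimal illustrative usage (not part of the pipeline below) looks like this:
```python
from textblob import TextBlob

sentence = "The evidence we have gathered all points to a collection of loosely affiliated terrorist organizations."
polarity = TextBlob(sentence).sentiment.polarity  # float in [-1.0, 1.0]
print(polarity)
```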
###Code
from textblob import TextBlob
from textblob.np_extractors import ConllExtractor
from textblob.sentiments import NaiveBayesAnalyzer
extractor = ConllExtractor()
with open(r'C:\Users\Steve\Documents\CulturalViolence\George Bush\20010120-Address to Joint Session of Congress Following 911 Attacks.txt', 'r') as fp:
doc = nlp(fp.read())
for sentence in doc.sents:
displacy.render(sentence, style = "ent")
with open(os.path.join(filepath, "Bush_Analysis_v2.json", 'r') as fp:
response = json.load(fp)
table = response["sentiment"]["targets"]
keys = "sentiment"
frames = []
print(f'document sentiment score:{response["sentiment"]["document"]["score"]}')
print(f'document sentiment: {response["sentiment"]["document"]["label"]}')
sentiment = "negative"
frames = []
for entry in table:
objs = {"sentiment" : entry["score"], "label" : entry["label"]}
if objs["label"] == sentiment:
df = pd.DataFrame(objs, index = [entry["text"]], columns = list(objs.keys())).fillna("")
frames.append(df)
display(pd.concat(frames, sort = False).sort_values("sentiment", ascending = True).style.background_gradient(cmap=cmp))
###Output
document sentiment score:-0.295518
document sentiment: negative
###Markdown
Sentiment Scores for Watson-Defined Entities
This final test looks at the scores assigned to the named entities identified by Watson itself, as opposed to those which are user defined. In the first instance, while Al Qaeda and the Taliban are identified, the "Egyptian Islamic Jihad" and "Islamic Movement of Uzbekistan" - both identified by Bush as adversaries - have not been identified by Watson. Much like the other Watson components, entities which would reasonably be expected to be scored positively are scored negatively.
###Code
table = response["entities"]
frames = []
print(f'document sentiment score:{response["sentiment"]["document"]["score"]}')
print(f'document sentiment: {response["sentiment"]["document"]["label"]}')
#text => sentiment => relevance => emotion => count
#print([e["text"] for e in table])
sentiment = "negative"
print(len(table))
for entry in table:
objs = {'count' : entry["count"], 'type' : entry["type"], 'sentiment': entry["sentiment"]["score"], 'label' : entry["sentiment"]["label"]}
if objs["label"] == sentiment:
df = pd.DataFrame(objs, index = [entry["text"]], columns = list(objs.keys())).fillna("")
frames.append(df)
cmp = "Reds"
### don't work on this one
display(pd.concat(frames, sort = False).sort_values(["sentiment"], ascending = False).style.background_gradient(cmap=cmp))
###Output
document sentiment score:-0.295518
document sentiment: negative
50
###Markdown
Test 2. Sentiment Scores for Seed Words of the Group Schema
This next test shows how Watson scores feature terms of the text. The feature terms are assigned by the Watson API. This test goes beyond a simple score and provides some degree of explanation as to why a feature is scored negatively. For example:
- Al Qaeda is scored negatively, along with the emotion of fear.
- The Taliban regime is scored negatively, with the emotion of disgust.
- Terrorists is scored negatively, with the emotion of anger.
- American people is also scored negatively, but with the emotion of sadness.
While these results seem plausible, the API does not seem to provide much explanatory value for the other features. Moreover, there is no explanation as to why these decisions have been made.
###Code
table = response["keywords"]
frames = []
print(f'document sentiment score:{response["sentiment"]["document"]["score"]}')
print(f'document sentiment: {response["sentiment"]["document"]["label"]}')
#text => sentiment => relevance => emotion => count
sentiment = "negative"
for entry in table:
objs = {"count" : entry["count"], "sentiment" : entry["sentiment"]["score"], "label" : entry["sentiment"]["label"], \
"sadness" : entry["emotion"]["sadness"], "joy" : entry["emotion"]["joy"], "fear" : entry["emotion"]["fear"], \
"disgust": entry["emotion"]["disgust"], "anger" : entry["emotion"]["anger"]}
if objs["label"] == sentiment:
df = pd.DataFrame(objs, index = [entry["text"]], columns = list(objs.keys())).fillna("")
frames.append(df)
print(len(table))
cmp = "Reds"
### don't work on this one
display(pd.concat(frames, sort = False).sort_values("sentiment", ascending = True).style.background_gradient(cmap=cmp))
###Output
document sentiment score:-0.295518
document sentiment: negative
50
###Markdown
Test 3: Using IBM Watson and TextBlob for Detecting Elevation and Debasement Statements
This test is based on the following steps:
1. Take the sentences containing both a named entity and a mode of influence.
2. Run sentiment analysis on each sentence with TextBlob and Watson.
3. If the sentence is positive, the entity is tagged as elevation.
4. If the sentence is negative, the entity is tagged as debasement.
###Code
#list of named entities
named_entities = {"NORP", "GPE", "PERSON", "ORG"}
#list of labels for each mode of influence
labels = set()
for value in group_typology.values():
for subcat in value.values():
for term in list(subcat.keys()):
labels.add(term)
# a dictionary object of ingroup and outgroup sentences determined by sentiment score
groups = {"ingroup" : [], "outgroup" : []}
for sentence in tqdm.tqdm(doc.sents, total = len(list(doc.sents))):
#get the tokens.ent_types_ for each token in the sentence
ent_types = {token.ent_type_ for token in sentence if token.ent_type_}
# if the sentence ent_types contain both a named entities and mode of influence
if not ent_types.isdisjoint(named_entities) and not ent_types.isdisjoint(labels):
# get the sentiment score for TextBlob
textblob_score = TextBlob(sentence.text).sentiment.polarity
# get the score for IBM Watson
#response = service.analyze(text=sentence.text,features=Features(sentiment=SentimentOptions())).get_result()
#watson_score = response['sentiment']['document']['score']
watson_score = 0
# result for the sentence
result = (sentence.start, sentence.end, textblob_score, watson_score)
## append to ingroup category if either have a positive score and vice versa
if watson_score > 0 or textblob_score > 0:
groups["ingroup"].append(result)
elif textblob_score < 0 or watson_score < 0:
groups["outgroup"].append(result)
with open(r"C:\Users\Steve\Documents\CulturalViolence\KnowledgeBases\group_sentiment_ruler.json", "wb") as f:
f.write(json.dumps(groups).encode("utf-8"))
print(f'completed at: {datetime.datetime.now().strftime("%b %d %Y %H:%M:%S")}')
###Output
_____no_output_____
###Markdown
Test Results
We can see that neither system is able to detect the sentences defining the ingroup and outgroup of a text. Despite Watson's sophistication, it provides no more value than TextBlob, which is a much simpler technology. Through observation, we know the following are outgroup named entities from Bush's speech:
1. al Qaeda
2. Egyptian Islamic Jihad
3. Islamic Movement of Uzbekistan
4. the Taliban
A successful test will return these named entities as an outgroup.
###Code
with open(os.path.join(resultspath, "group_sentiment_namedconcept.json"), 'r') as fp:
groups = json.load(fp)
gradient = "elevation"
for index in groups[gradient]:
print('-----')
print(f'these are the result for {gradient}')
sentence = doc[index[0]:index[1]]
token = []
pos = []
ent_type = []
sentiment = []
dep = []
sent_table = {
"token" : [token.text for token in sentence],
"dep" : [token.dep_ for token in sentence],
"ent_type" : [token.ent_type_ for token in sentence],
"pos" : [token.pos_ for token in sentence],
"sentiment" : [TextBlob(token.text).sentiment.polarity for token in sentence]
}
print(f'{gradient} sentence sentiment score for TextBlob: {index[2]}')
print(f'{gradient} sentence sentiment score for Watson: {index[3]}')
display(pd.DataFrame.from_dict(sent_table, orient = "index"))
###Output
-----
these are the result for ingroup
ingroup sentence sentiment score for TextBlob: 0.6666666666666666
ingroup sentence sentiment score for Watson: -0.670821
|
Mar22/Statistics/visualizations2.ipynb | ###Markdown
Bar Graphs
###Code
import matplotlib.pyplot as plt
labels = ['Fruits', 'Vegetables', 'Others']
counts = [3, 5, 1]
plt.bar(labels, counts)
plt.show()
import numpy as np
food_items_1 = [1,1] # 1 of fruits and 1 of vegetables
food_items_2 = [3,2] # 3 of fruits and 2 of vegetables
food_items_3 = [2,3]
counts = [food_items_1, food_items_2, food_items_3]
locations = np.array([0,1,2])
width = 0.3
bars_fruits = plt.bar(locations , [food_item[0] for food_item in counts])
bars_vegetables = plt.bar(locations , [food_item[1] for food_item in counts], bottom=[food_item[0] for food_item in counts])
plt.xticks(locations, ['Fruits', 'Vegetables', 'others'])
plt.legend([bars_fruits, bars_vegetables],['Fruits', 'Vegetables'])
plt.show()
###Output
_____no_output_____
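As a variation on the stacked layout above, the same counts can be drawn side by side by offsetting the bar positions with the `width` value already defined (an illustrative sketch, not part of the original exercise):
```python
# Grouped (side-by-side) bars for the same data
bars_fruits = plt.bar(locations - width/2, [food_item[0] for food_item in counts], width=width)
bars_vegetables = plt.bar(locations + width/2, [food_item[1] for food_item in counts], width=width)
plt.xticks(locations, ['Fruits', 'Vegetables', 'others'])
plt.legend([bars_fruits, bars_vegetables], ['Fruits', 'Vegetables'])
plt.show()
```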
###Markdown
Histogram---------
###Code
x = np.random.randn(100)
plt.hist(x)
plt.show()
plt.hist(x, bins=100)
plt.show()
y = np.random.randn(100) * 4 + 5
plt.hist(x, color='b', bins=20, alpha=0.25)
plt.hist(y, color='r', bins=20, alpha=0.25)
plt.show()
###Output
_____no_output_____
###Markdown
Heat Maps
###Code
my_map = np.random.randn(10, 10)
plt.imshow(my_map)
plt.colorbar()
plt.show()
###Output
_____no_output_____
###Markdown
Visualizations of Probability Distributions
* Ensure `conda install scipy` has been executed
###Code
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
samples = np.random.normal(0,1, size=1000)
x = np.linspace(samples.min(), samples.max(), 1000)
y = stats.norm.pdf(x)
plt.hist(samples, alpha=0.25, bins=20, density=True)
plt.plot(x, y)
plt.show()
###Output
_____no_output_____
###Markdown
Visualizations shorthand from Seaborn and Pandas
###Code
import pandas as pd
x = np.random.normal(0, 1, 1000)
y = np.random.normal(5, 2, 1000)
df = pd.DataFrame({'Column 1': x, 'Column 2': y})
df.tail()
import seaborn as sns
sns.jointplot(x='Column 1', y='Column 2', data=df)
plt.show()
###Output
_____no_output_____ |
_notebooks/2020-09-10-Multivariate-Linear-Regression.ipynb | ###Markdown
Multivariate Linear Regression from scratch
> Multivariate linear regression and PCA from scratch using PyTorch for car price prediction.
- author: "Axel Mendoza"
- categories: [linear-regression, car-price, pca, pytorch, from-scratch]
- toc: false
- comments: true
- badges: true
- image: images/cadillac.jpg

The goal of this experiment is to train a linear model to predict the selling price of a car. We will use the PyTorch framework for the tensor calculus and the CarDekho dataset, which contains information about used cars listed on the website of the same name. This dataset has 301 unique entries with the following features:
- car name
- year of release
- selling price
- present price
- kilometers driven
- fuel: such as petrol or diesel
- transmission: such as manual or automatic
- owner: how many times the car changed owner

Read data from csv using Pandas
###Code
import os
import sys
import torch
import pandas as pd
import numpy as np
import seaborn as sns
import statsmodels.api as sm
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
df = pd.read_csv(os.path.join('data', 'car_data.csv'))
df.head()
###Output
_____no_output_____
###Markdown
Convert categorical variable into indicator variables
###Code
f_continuous = df[['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven', 'Owner']]
f_categorical = pd.get_dummies(df[['Fuel_Type', 'Seller_Type', 'Transmission']])
df = pd.concat([f_continuous, f_categorical], axis=1)
# Drop refundant features
df.drop(['Transmission_Automatic', 'Seller_Type_Dealer', 'Fuel_Type_CNG'], axis=1, inplace=True)
df.head()
###Output
_____no_output_____
###Markdown
Visualize histogram of all features
###Code
df.hist(bins=14, color='steelblue', edgecolor='black', linewidth=1.0, xlabelsize=8, ylabelsize=8, grid=False)
plt.tight_layout(rect=(0, 0, 1.2, 1.2))
###Output
_____no_output_____
###Markdown
Most cars on sales are consuming petrol instead of diesel, have had only one owner, are from 2012-present, are manual and most have a selling price between 1000 and 10000 dollars. Heatmap correlation
###Code
plt.figure(figsize=(16, 8))
sns.heatmap(df.corr(), square= True, annot=True, fmt='.2f')
###Output
_____no_output_____
###Markdown
Most of the variables are highly correlated. As expected, the present price variable is the most correlated with the target selling price.
Pairwise Plots
###Code
cols_viz = ['Kms_Driven', 'Year', 'Selling_Price', 'Present_Price']
pp = sns.pairplot(df[cols_viz], height=1.8, aspect=1.8,
plot_kws=dict(edgecolor="k", linewidth=0.5),
diag_kind="kde", diag_kws=dict(shade=True))
fig = pp.fig
fig.subplots_adjust(top=0.93, wspace=0.3)
t = fig.suptitle('Car Attributes Pairwise Plots', fontsize=14)
###Output
_____no_output_____
###Markdown
Most of the features are highly correlated with each other. Some outliers are present, but since there are very few, we will keep them in the training set. The year feature has a polynomial-shaped relationship with the selling price, so a polynomial regression would most likely outperform a standard linear regression.
Make train test split
###Code
# Separate the target from the dataFrame
Y = df['Selling_Price']
X = df.drop('Selling_Price', axis=1)
# Convert data to Pytorch tensor
X_t = torch.from_numpy(X.to_numpy()).float()
Y_t = torch.from_numpy(Y.to_numpy()).float().unsqueeze(1)
X_train, X_test, Y_train, Y_test = train_test_split(X_t, Y_t, test_size=0.33, random_state=42)
###Output
_____no_output_____
###Markdown
Train a multivariate linear regression
Training a linear model with least squares regression is equivalent to minimizing the mean squared error:
$$\begin{align} \text{Mse}(\boldsymbol{\hat{y}}, \boldsymbol{y}) &= \frac{1}{n}\sum_{i=1}^{n}{||\hat{y}_i - y_i ||_{2}^{2}} \\ &= \frac{1}{n}||\boldsymbol{X}\boldsymbol{w} - \boldsymbol{y} ||_2^2\end{align}$$
where $n$ is the number of samples, $\hat{y}$ is the predicted value of the model and $y$ is the true target. The prediction $\hat{y}$ is obtained by matrix multiplication between the input $\boldsymbol{X}$ and the weights of the model $\boldsymbol{w}$. Minimizing the $\text{Mse}$ can be achieved by setting the gradient of this expression with respect to the weights $\boldsymbol{w}$ to zero and solving:
$$\begin{align} \nabla_{\boldsymbol{w}}\text{Mse} &= 0 \\ \boldsymbol{w} &= (\boldsymbol{X}^\top \boldsymbol{X})^{-1}\boldsymbol{X}^\top \boldsymbol{y}\end{align}$$
For more information on how to find $\boldsymbol{w}$, please visit the following [link](https://en.wikipedia.org/wiki/Least_squares#Linear_least_squares).
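As a side note, explicitly forming $(\boldsymbol{X}^\top \boldsymbol{X})^{-1}$ can be numerically fragile when features are strongly correlated. A dedicated least-squares solver avoids the explicit inverse; the sketch below is illustrative only and assumes a PyTorch version (1.9+) where `torch.linalg.lstsq` is available:
```python
import torch

# Illustrative alternative to the closed-form inverse used below:
# solve min_w ||X w - y||^2 with a least-squares routine instead of inverting X^T X.
def multi_linear_reg_lstsq(X, y):
    ones = torch.ones((X.shape[0], 1), dtype=torch.float32)
    X = torch.cat([ones, X], dim=1)           # prepend the bias column
    return torch.linalg.lstsq(X, y).solution  # same shape as the closed-form weights
```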
###Code
def add_ones_col(X):
"""Add a column a one to the input torch tensor"""
x_0 = torch.ones((X.shape[0],), dtype=torch.float32).unsqueeze(1)
X = torch.cat([x_0, X], dim=1)
return X
def multi_linear_reg(X, y):
"""Multivariate linear regression function
Args:
X: A torch tensor for the data.
y: A torch tensor for the labels.
"""
X = add_ones_col(X) # Add a column of ones to X to agregate the bias to the input matrices
Xt_X = X.T.mm(X)
Xt_y = X.T.mm(y)
Xt_X_inv = Xt_X.inverse()
w = Xt_X_inv.mm(Xt_y)
return w
def prediction(X, w):
"""Predicts a selling price for each input
Args:
X: A torch tensor for the data.
w: A torch tensor for the weights of the linear regression mode.
"""
X = add_ones_col(X)
return X.mm(w)
# Fit the training set into the model to get the weights
w = multi_linear_reg(X_train, Y_train)
# Predict using matrix multiplication with the weights
Y_pred_train = prediction(X_train, w)
Y_pred_test = prediction(X_test, w)
###Output
_____no_output_____
###Markdown
Compute prediction error
###Code
def mse(Y_true, Y_pred):
error = Y_pred - Y_true
return (error.T.mm(error) / Y_pred.shape[0]).item()
def mae(Y_true, Y_pred):
error = Y_pred - Y_true
return error.abs().mean().item()
mse_train = mse(Y_train, Y_pred_train)
mae_train = mae(Y_train, Y_pred_train)
print('MSE Train:\t', mse_train)
print('MAE Train:\t', mae_train, end='\n\n')
mse_test = mse(Y_test, Y_pred_test)
mae_test = mae(Y_test, Y_pred_test)
print('MSE Test:\t', mse_test)
print('MAE Test:\t', mae_test, end='\n\n')
###Output
MSE Train: 2.808985471725464
MAE Train: 1.1321566104888916
MSE Test: 3.7205495834350586
MAE Test: 1.2941011190414429
###Markdown
The model is off by about 1.29 on average on the test set. Not bad for a linear model, taking into consideration that the mean of the present price is 7.62.
Principal component analysis visualization
In this section, we will use PCA to reduce the number of features to two, in order to visualize the plane of the linear regressor. Suppose a collection of $m$ points $\{\boldsymbol{x}^{(1)}, \dots, \boldsymbol{x}^{(m)}\}$ in $\mathbb{R}^n$. Principal component analysis aims to reduce the dimensionality of the points while losing as little precision as possible. For each point $\boldsymbol{x}^{(i)} \in \mathbb{R}^n$ we will find a corresponding code vector $\boldsymbol{c}^{(i)} \in \mathbb{R}^l$, where $l$ is smaller than $n$. Let $f$ be the encoding function, $g$ the decoding function, and $\boldsymbol{D} \in \mathbb{R}^{n,l}$ the decoding matrix whose columns are orthonormal:
$$\begin{align} f(\boldsymbol{x}) &= \boldsymbol{D}^\top \boldsymbol{x} \\ g(f(\boldsymbol{x})) &= \boldsymbol{D}\boldsymbol{D}^\top \boldsymbol{x}\end{align}$$
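To make the encode/decode view concrete, a small illustrative check of the reconstruction $g(f(\boldsymbol{x})) = \boldsymbol{D}\boldsymbol{D}^\top \boldsymbol{x}$ can be run with the `pca` helper defined below (the `cov` helper centers its input in place, hence the `clone`):
```python
# Sketch: encode onto the first two components, decode back, and measure the loss.
X_centered = X_test - X_test.mean(dim=0, keepdim=True)
X_code, D = pca(X_test.clone(), target_dim=2)   # f(x) = D^T x
X_decoded = X_code.mm(D.T)                      # g(f(x)) = D D^T x
reconstruction_mse = ((X_centered - X_decoded) ** 2).mean().item()
print(reconstruction_mse)
```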
###Code
def cov(X):
"""Computes the covariance of the input
The covariance matrix gives some sense of how much two values are
linearly related to each other, as well as the scale of these variables.
It is computed by (1 / (N - 1)) * (X - E[X]).T (X - E[X]).
Args:
X: A torch tensor as input.
"""
X -= X.mean(dim=0, keepdim=True)
fact = 1.0 / (X.shape[0] - 1)
cov = fact * X.T.mm(X)
return cov
def pca(X, target_dim=2):
"""Computes the n^th first principal components of the input
PCA can be implemented using the n^th principal components of the covariance matrix.
We could have been using an eigen decomposition because the covariance matrix is always squared
but singular value decomposition does also the trick if we take the right singular vectors
and perform a matrix multiplication to the right.
Args:
X: A torch tensor as the input.
target_dim: An integer for selecting the n^th first components.
"""
cov_x = cov(X)
U, S, V = torch.svd(cov_x)
transform_mat = V[:, :target_dim]
X_reduced = X.mm(transform_mat)
return X_reduced, transform_mat
X_test_pca, _ = pca(X_test, target_dim=2)
X_train_pca, _ = pca(X_train, target_dim=2)
points = torch.cat([X_test_pca[:3], Y_pred_test[:3]], axis=1)
v1 = points[2, :] - points[0, :]
v2 = points[1, :] - points[0, :]
cp = torch.cross(v1, v2)
a, b, c = cp
d = cp.dot(points[2, :])
min_mesh_x = min(X_test_pca[:, 0].min(), X_train_pca[:, 0].min())
max_mesh_x = max(X_test_pca[:, 0].max(), X_train_pca[:, 0].max())
min_mesh_y = min(X_test_pca[:, 1].min(), X_train_pca[:, 1].min())
max_mesh_y = max(X_test_pca[:, 1].max(), X_train_pca[:, 1].max())
mesh_x = np.linspace(min_mesh_x, max_mesh_x, 25)
mesh_y = np.linspace(min_mesh_y, max_mesh_y, 25)
mesh_xx, mesh_yy = np.meshgrid(mesh_x, mesh_y)
mesh_zz = (d - a * mesh_xx - b * mesh_yy) / c
###Output
_____no_output_____
###Markdown
Here we recreate the prediction plane using three points of the prediction. More information at this [link](http://kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points).
###Code
fig = plt.figure(figsize=(25,7))
ax1 = fig.add_subplot(131, projection='3d')
ax2 = fig.add_subplot(132, projection='3d')
ax3 = fig.add_subplot(133, projection='3d')
axes = [ax1, ax2, ax3]
for ax in axes:
ax.scatter(X_test_pca[:, 0], X_test_pca[:, 1], Y_test, color='red', edgecolor='black')
ax.scatter(X_train_pca[:, 0], X_train_pca[:, 1], Y_train, color='green', edgecolor='black')
ax.scatter(mesh_xx.flatten(), mesh_yy.flatten(), mesh_zz.flatten(), facecolor=(0, 0, 0, 0), s=20, edgecolor='#70b3f0')
ax.set_xlabel('1st component', fontsize=12)
ax.set_ylabel('2nd component', fontsize=12)
ax.set_zlabel('Selling Price', fontsize=12)
ax.ticklabel_format(axis="x", style="sci", scilimits=(0,0))
ax.ticklabel_format(axis="y", style="sci", scilimits=(0,0))
ax.ticklabel_format(axis="z", style="sci", scilimits=(0,0))
ax1.view_init(elev=60, azim=50)
ax2.view_init(elev=10, azim=0)
ax3.view_init(elev=-15, azim=140)
###Output
_____no_output_____
###Markdown
The plane is fitting pretty well the data ! Exploratory Data AnalysisI made an attempt to discard some features based on p-value but it didn't improve the results.More on p-value [here](https://www.statsdirect.com/help/basics/p_values.htm:~:text=The%20P%20value%2C%20or%20calculated,the%20hypothesis%20is%20being%20tested).
###Code
columns = X.columns
while len(columns) > 0:
pvalues = []
X_1 = X[columns]
    X_1 = sm.add_constant(X_1) # add a column of ones for the bias
model = sm.OLS(Y, X_1).fit() # fit a linear regression
pvalues = pd.Series(model.pvalues[1:], index=columns)
max_idx = np.argmax(pvalues)
max_pval = pvalues[max_idx]
if max_pval > 0.05:
# if the p_values is greater than 0.05, the feature has not enough
# informational value for the training
        print('Dropping column ' + columns[max_idx] + ', pvalue is: ' + str(max_pval))
        columns = columns.drop(columns[max_idx])
else:
break
# Keeping only the columns with very low p-value
X = df[columns]
X_t = torch.from_numpy(X.to_numpy()).float()
X_train, X_test, Y_train, Y_test = train_test_split(X_t, Y_t, test_size=0.33, random_state=42)
w = multi_linear_reg(X_train, Y_train)
Y_pred_train = prediction(X_train, w)
Y_pred_test = prediction(X_test, w)
mse_train = mse(Y_train, Y_pred_train)
mse_test = mse(Y_test, Y_pred_test)
print('MSE Train:\t', mse_train)
print('MSE Test:\t', mse_test)
###Output
MSE Train: 3.4574925899505615
MSE Test: 3.8332533836364746
|
commons-helpers/gencat_upload.ipynb | ###Markdown
Commons helper: uploads from gencat.cat
This notebook helps users to upload images from the [press room](http://premsa.gencat.cat/) at `gencat.cat`. It is coded using some Python 3.6 features such as f-strings and therefore won't run in prior versions. You can run this notebook from [PAWS](http://paws.wmflabs.org/) or from your own environment.
Prerequisites (for execution from your own environment)
- Clone the repository where this notebook is available (it imports some functions from `utils.py`, located in a folder within this notebook's parent folder).
- Create a Python 3 virtual environment and activate it.
- Install `pywikibot`:
```bash
pip install pywikibot
```
- Install `mako`:
```bash
pip install mako
```
or:
```bash
conda install mako
```
- Install `beautifulsoup4`:
```bash
pip install beautifulsoup4
```
or:
```bash
conda install beautifulsoup4
```
- Create a properly formatted `user-config.py` file.
- Launch `jupyter notebook` using the kernel associated with the virtual environment.
Configuration
This notebook takes all the photographs at a given URL (provided that the URL hosts pictures as attachments or inline) and uploads them to Commons, inserting the proper license templates. The following features are automatically extracted:
- **Image name**: The name of each image is taken from the title of the attachment. For inline photographs, the image name is taken from the page title.
- **Image description**: The description is usually the first paragraph in the page.
- **Image date**: The date is extracted from the page date.
**However**, you can override or update most of them by editing the `config` dictionary in the notebook, add additional categories, or determine which images to upload:
- `url`: This is where the press note is available. This configuration element is **mandatory**.
- `categories`: Include here as many categories as you want to assign to all images (to add a category to a particular image you must do it afterwards, once uploaded to Commons). If empty, only the automatically detected categories will be added.
- `uploader_category`: If you wish to assign a category for yourself as uploader, do it here. If empty, no category will be added.
- `article_title`: Specify a different text for the link that will be provided as source.
- `image_name`: Include your own image name if you don't like the one being extracted. If you assign an image name, it will be used as the base name for all the images, with an auto-incremented number appended to it to distinguish between the photographs.
- `pub_date`: Use the following format: YYYY-MM-DD (e.g. 2018-12-24)
- `excluded`: A list with the indices of the pictures you don't wish to upload. Inline images are appended at the end.
To-do list
1. Support file formats other than JPG.
2. Create a generic function for image uploading
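For reference, a minimal configuration only needs the mandatory `url`; the remaining keys can be left empty or `None` and the notebook falls back to the automatically extracted values (the URL below is a placeholder, not a real press note):
```python
config = {
    'url': 'http://premsa.gencat.cat/pres_fsvp/AppJava/notapremsavw/000000/ca/example.do',  # placeholder
    'categories': [],
    'uploader_category': '',
    'head_picture': False,
    'article_title': None,
    'image_name': None,
    'pub_date': None,
    'article_content': None,
    'excluded': []
}
```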
###Code
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pywikibot as pb
from pywikibot.specialbots import UploadRobot
import requests
from requests.compat import quote
from bs4 import BeautifulSoup
from mako.template import Template
import os, re
import shutil
import imghdr
commons_site = pb.Site("commons", "commons")
# Path handling for importing utils.py
import sys, inspect
current_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
folder_parts = current_folder.split(os.sep)
parent_folder = os.sep.join(folder_parts[:-1])
if current_folder not in sys.path:
sys.path.insert(0, current_folder)
if parent_folder not in sys.path:
sys.path.insert(0, parent_folder)
from wikimedia.utils import is_commons_file, get_hash
# Creation of images folder
cwd = os.getcwd()
images_directory = os.path.join(cwd, 'images')
if not os.path.exists(images_directory):
os.makedirs(images_directory)
# Configuration
config = {
'url': 'http://premsa.gencat.cat/pres_fsvp/AppJava/notapremsavw/306797/ca/vicepresident-govern-pere-aragones-reuneix-amb-delegacio-deurodiputats-lalianca-lliure-europea.do',
'categories': ["Pere Aragonès",
'June 2018 in Barcelona'],
'uploader_category': 'Files uploaded by User:Discasto',
'head_picture': False,
'article_title': "El vicepresident del Govern, Pere Aragonès, es reuneix amb una delegació d'eurodiputats de l'Aliança Lliure Europea",
'image_name': "El vicepresident del Govern, Pere Aragonès, es reuneix amb una delegació d'eurodiputats de l'Aliança Lliure Europea",
'pub_date': None,
'article_content': "El vicepresident del Govern i conseller d’Economia i Hisenda, Pere Aragonès, en qualitat de president de la Generalitat en funcions i acompanyat del conseller d’Acció Exterior, Relacions Institucionals i Transparència, Ernest Maragall, s’ha reunit avui amb una delegació d’Eurodiputats de l’Aliança Lliure Europea (ALE), encapçalada pel seu president, Josep Maria Terricabras. Durant la trobada, que ha tingut lloc aquest migdia al Palau de la Generalitat, els eurodiputats han manifestat el seu interès per la situació d’excepcionalitat política que viu Catalunya i els reptes de futur de la UE. A part del vicepresident, el conseller d’Acció Exterior i el president de l’ALE, també han participat a la reunió els eurodiputats Jill Evans, Jordi Solé, Miroslavs Mitrofanovs i François Alfonsi.",
'excluded': []
}
categories = [category for category in (config['categories'] + [config['uploader_category']]) if category]
# Retrieval of base page for extracting gallery information
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0"}
r = requests.get(config['url'], headers=headers)
soup = BeautifulSoup(r.text, 'html.parser')
# Image date
if not config['pub_date']:
pub_date=soup.find_all("span", attrs={"itemprop": "datePublished"})[0].get_text().strip().split(' ')[0].split('-')
pub_date.reverse()
pub_date='-'.join(pub_date)
else:
pub_date = config['pub_date']
pub_date
# Gallery title
if not config['article_title']:
title = soup.find_all("h1", class_="FW_headline")[0].get_text().strip().replace(' ', ' ')
else:
title = config['article_title']
title = title.replace(':', ' -').replace(' ', ' ')
title
# Image description
if not config['article_content']:
article_content = soup.find_all("div", class_="FW_article-content")[0].get_text().strip().split('\n')[0]
else :
article_content = config['article_content']
article_content
template = u"""=={{int:filedesc}}==
{{Information
|description={{ca|1=${description}}}
|date=${date}
|source=[${url} Nota de Premsa - ${title}]
|author=Generalitat de Catalunya
|permission=
|other versions=
}}
=={{int:license-header}}==
{{LicenseReview}}
{{attribution-gencat}}
${cat_string}"""
vars = {
"url": config['url'],
"description": article_content,
"date": pub_date,
"title": title,
"cat_string": '\n'.join(['[[Category:'+i+']]' for i in categories])
}
t = Template(template)
_text = t.render(**vars)
_text
image_list = [{"url": image["href"].strip(), "name": image["title"].replace(':', ' -').replace(' ', ' ').strip()} for image in soup.find_all("a", class_="external") if ('.jpg' in image['href'].lower() or '.jpeg' in image['href'].lower())]
image_list
if config['head_picture']:
image_list.extend([{"url": item["src"], "name": title} for item in soup.find_all("img", class_="FW_object-attached") if item not in soup.find_all("img", class_="FW_object-attached_banner")])
if config['image_name']:
image_list = [{'url': image['url'], 'name': config['image_name']} for image in image_list]
image_list
# Image retrieval and upload to Commons
excluded = config['excluded']
used_names = []
for i, image in enumerate(image_list):
# If the image is excluded, let's skip it
if i in excluded:
print ("Image excluded. Skipping")
continue
# First, the image is downloaded and locally stored
image_url = quote(image["url"].encode('utf-8'), ':/')
image_name = image["name"].replace(':', ' -').replace(' ', ' ') + '.jpg'
image_path = os.path.join(images_directory, image_name)
try:
r = requests.get(image_url, headers=headers, stream=True)
with open(image_path, 'wb') as out_file:
shutil.copyfileobj(r.raw, out_file)
# hack for PNG files wrongly given the JPG extension
if imghdr.what(image_path) == "png":
os.rename(image_path, image_path.replace("jpg", "png"))
image_path = image_path.replace("jpg", "png")
image_name = image_name.replace("jpg", "png")
except Exception as e:
print (e)
print ('Failed download. Skipping')
continue
# If the image is already in Commons, let's skip it
if is_commons_file(get_hash(image_path)) :
print ("Image already in commons. Skipping")
os.remove(image_path)
continue
# If the image name is already in commons, find a new name
if pb.Page(commons_site, image_name, ns=6).exists():
print (f"Image name ({image_name}) already used in Commons")
used_names.append(image_name)
while True:
if image_name in used_names :
# Finding a new name
image_subject = '.'.join(image_name.split('.')[:-1])
image_extension = 'jpg'
p = re.compile('(.*) ([0-9]{2}\.jpg)')
m = p.match(image_name)
if m is None:
image_name = f"{image_subject} 02.{image_extension}"
else :
counter = int(m.group(2)[:2]) + 1
                image_name = f'{m.group(1)} {counter:02d}.{image_extension}'
if pb.Page(commons_site, image_name, ns=6).exists():
print (f"Image name ({image_name}) already used in Commons. Finding a new name")
used_names.append(image_name)
else :
print (f"Preparing to upload image with name {image_name}")
used_names.append(image_name)
break
# image upload
bot = UploadRobot([image_path],
description = _text,
useFilename = image_name,
keepFilename = True,
verifyDescription = False,
ignoreWarning = True,
targetSite = commons_site)
bot.run()
os.remove(image_path)
###Output
_____no_output_____ |
lecture3/lecture3_1.ipynb | ###Markdown
NICO2AI Lecture 3: Linear Regression (18/01/20)
3.1 Joining NumPy arrays
In Lectures 1 and 2 we covered creating and manipulating matrices. When coding, there are many cases where you want to concatenate a vector a and a vector b of the same length into a new matrix c. NumPy provides several functions for this. When joining arrays, the important question is "along which axis do we join?". Let's work through it, keeping in mind that axes are numbered starting from 0.
###Code
# Treat this cell as boilerplate for now and just run it.
# A. Import the packages used today
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager as fm
from matplotlib.colors import LogNorm
from sklearn import datasets
# B. Hide warnings that do not affect execution
import warnings
warnings.filterwarnings('ignore')
# C1. Enable plot rendering in the notebook
%matplotlib inline
# C2. Change the default plot style sheet
plt.style.use('ggplot')
plt.rcParams['ytick.color'] = '111111'
plt.rcParams['xtick.color'] = '111111'
plt.rcParams['axes.labelcolor'] = '111111'
plt.rcParams['font.size'] = 15
###Output
_____no_output_____
###Markdown
3.1.1 Joining vertically and horizontally
###Code
# Create the vectors a and b that we will join
a = np.array([[1,2]])
b = np.array([[3,4]])
print('a={}'.format(a))
print(a.shape) # 1 row x 2 columns
print('\n')
print('b={}'.format(b))
print(b.shape) # 1 row x 2 columns
# vstack joins arrays vertically (along rows)
c = np.vstack((a, b))
print('c={}'.format(c))
print(c.shape)
print('\n')
# hstack joins arrays horizontally (along columns)
d = np.hstack((a, b))
print('d={}'.format(d))
print(d.shape)
###Output
c=[[1 2]
[3 4]]
(2, 2)
d=[[1 2 3 4]]
(1, 4)
###Markdown
Note: there are other joining methods as well, such as dstack and concatenate, so look them up as needed (a short example follows below).
3.1.2 Data generation and joining with stack
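For instance, `np.concatenate` joins arrays along an existing axis instead of creating a new one. A quick illustrative aside before the stack examples:
```python
import numpy as np

a = np.array([[1, 2]])
b = np.array([[3, 4]])
print(np.concatenate((a, b), axis=0))  # same result as np.vstack: shape (2, 2)
print(np.concatenate((a, b), axis=1))  # same result as np.hstack: shape (1, 4)
```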
###Code
# Example: generate 5 evenly spaced points from -3 to 3, plus an all-ones vector for the bias term
x = np.linspace(-3, 3, 5) # split -3 to 3 into 5 points
b = np.ones(5) # five ones in a row
print('x={}'.format(x))
print(x.shape)
print('\n')
print('b={}'.format(b))
print(b.shape)
###Output
x=[-3. -1.5 0. 1.5 3. ]
(5,)
b=[1. 1. 1. 1. 1.]
(5,)
###Markdown
Note: (5,) does not mean 1 row x 5 columns; it simply means a vector with 5 elements. Be careful: it has no notion of rows or columns.
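To turn such a shape-(5,) vector into an explicit column or row, you can use reshape (a small illustrative aside):
```python
import numpy as np

x = np.linspace(-3, 3, 5)      # shape (5,): just 5 elements, no row/column structure
print(x.reshape(5, 1).shape)   # (5, 1): a column vector
print(x.reshape(1, 5).shape)   # (1, 5): a row vector
```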
###Code
# stack joins arrays by adding a new axis
X_T = np.stack((x, b), axis=0) # axis=0 adds a new row axis and stacks the two arrays, giving 2 x 5
X = np.stack((x, b), axis=1) # axis=1 adds a new column axis and stacks the two arrays, giving 5 x 2
print('X_T={}'.format(X_T))
print(X_T.shape)
print("\n")
print('X={}'.format(X))
print(X.shape)
###Output
X_T=[[-3. -1.5 0. 1.5 3. ]
[ 1. 1. 1. 1. 1. ]]
(2, 5)
X=[[-3. 1. ]
[-1.5 1. ]
[ 0. 1. ]
[ 1.5 1. ]
[ 3. 1. ]]
(5, 2)
###Markdown
Quiz 3.1: Create a vector x consisting of 30 evenly spaced elements from -3 to 3 and a 30-dimensional bias vector b whose elements are all 1, then join x and b to create an array X with 30 rows and 2 columns. Time: 5 minutes.
###Code
# WRITE ME!
# Answer
x = np.linspace(-3, 3, 30)
b = np.ones(30)
X = np.stack((x,b), axis = 1)
print(X.shape)
print('X={}'.format(X))
# Check your answer.
print(X.shape)
print('X={}'.format(X))
###Output
_____no_output_____
###Markdown
Sample answer 3.1
###Code
# Answer
###Output
_____no_output_____
###Markdown
3.1.3 Joining multi-dimensional data with stack
###Code
# Example 1
x1 = np.linspace(-3, 3, 5) # split -3 to 3 into 5 points
x2 = np.linspace(0, 3, 5) # split 0 to 3 into 5 points
b = np.ones(5) # five ones
X = np.stack((x1, x2, b), axis=1) #
print('X={}'.format(X))
print(X.shape)
# Example 2: create the 0th, 1st and 2nd powers of x1
x1 = np.linspace(-3, 3, 5) # split -3 to 3 into 5 points
X = np.stack([x1**p for p in range(0, 3)], axis=1) # stacks the three columns along a new axis, giving 5 x 3
print('X={}'.format(X))
print(X.shape,"\n")
###Output
X=[[ 1. -3. 9. ]
[ 1. -1.5 2.25]
[ 1. 0. 0. ]
[ 1. 1.5 2.25]
[ 1. 3. 9. ]]
(5, 3)
###Markdown
3.2 Solving equations
This section explains how to solve linear equations analytically.
Practice using Slack
###Code
# Practice using Slack.
x = np.linspace(-3, 3, 30)
b = np.ones(29)
X = np.stack((x,b), axis = 1)
print(X.shape)
print('X={}'.format(X))
###Output
_____no_output_____
###Markdown
3.2.1 Creating an identity matrix
###Code
# Create an identity matrix
I = np.eye(3) # specify the size of the identity matrix
print(I)
print('\n')
print(I*10) # scalar multiplication
###Output
[[1. 0. 0.]
[0. 1. 0.]
[0. 0. 1.]]
[[10. 0. 0.]
[ 0. 10. 0.]
[ 0. 0. 10.]]
###Markdown
3.2.2 Solving equations
To solve a linear system analytically, use np.linalg.solve:
$$ \left( \begin{array}{cc} 3 & 1 \\ 4 & 1 \\ \end{array} \right) \left( \begin{array}{c} \theta_1 \\ \theta_2 \\ \end{array} \right) = \left( \begin{array}{c} 9 \\ 11 \\ \end{array} \right) $$
###Code
# Solve the system of equations
A = np.array([[3,1], [4,1]])
B = np.array([[9],[11]]) # B = np.array([9,11]) also works
theta = np.linalg.solve(A, B) # find theta = A^{-1} B such that A theta = B
print('theta={}'.format(theta))
###Output
theta=[[2.]
[3.]]
###Markdown
Practice Quiz 3.2
Given $A = \left( \begin{array}{cc} 1 & 2 \\ 3 & 4 \\ \end{array} \right)$, $B = \left( \begin{array}{cc} 1 & 2 \\ 2 & 1 \\ \end{array} \right)$, $I = \left( \begin{array}{cc} 1 & 0 \\ 0 & 1 \\ \end{array} \right)$ and $C = \left( \begin{array}{c} 9.5 \\ 12.5 \\ \end{array} \right)$, find ${\bf \theta}$ satisfying $$(A^{T}B-3I)\,{\bf \theta} = C.$$ Time: 5 minutes.
Note: in NumPy, matrix multiplication is np.dot(A, B), not A\*B. How do you transpose? Look it up yourselves — for example, search for "numpy transpose".
###Code
# WRITE ME!
# Check your answer.
print(theta) # the answer is [[0.5],[1.5]]
###Output
_____no_output_____
###Markdown
Sample answer 3.2
###Code
# Answer
A = np.array([[1,2], [3,4]])
B = np.array([[1,2], [2,1]])
I = np.eye(2)
tmp = np.dot(A.T, B)-3*I
C = np.array([[9.5],[12.5]])
theta = np.linalg.solve(tmp, C)
print(theta)
###Output
[[0.5]
[1.5]]
###Markdown
Supplement: the np.linalg module
Besides the solve function, the np.linalg module supports features such as:
 cholesky: Cholesky decomposition
 svd: SVD (singular value decomposition)
 eig: computation of eigenvalues and eigenvectors
 det: computation of the determinant
 lstsq: solving a linear matrix equation by least squares
3.3 Introduction to Matplotlib
This section covers the basics of Matplotlib as a way to inspect data and training results visually. Matplotlib is the standard library for drawing (mainly 2D) graphs. This section quickly explains the basic concepts of Matplotlib and the minimum you need to use it. Note: in this lecture the graph design has already been tweaked to be easy to read; the settings are written at the top of the file, but we will not explain them.
3.3.1 How to read a graph
The easiest way to understand the meaning of Matplotlib commands is to learn the names of the corresponding concepts. For now, just remember the following:
* Figure: the entire figure (**it can contain multiple plots**)
* Axes: a plot with a grid and data points (≠ axis)
* Line: a line plot (it may look curved, but **up close it is a collection of straight line segments**)
* Scatter: a scatter plot
* X/Y axis label: the label of the X/Y axis
* Title: the graph title
* Legend: the legend (a description of each line/point)
(from https://matplotlib.org/faq/usage_faq.html)
3.3.2 The two ways to use Matplotlib
The first thing that confuses people about Matplotlib is that **two styles of writing code exist**. This is because Matplotlib provides a MATLAB-like syntax as a wrapper on top of a more rigid system. Both are correct ways of writing, so don't be thrown off.
1. The simple style (used in this lecture)
In the simple style, essentially every command is called from plt. It is easy, but the code can become hard to follow when one figure has multiple axes or when you handle multiple figures.
###Code
%matplotlib inline
# This command enables plotting inside Jupyter Notebook and Colaboratory
# Import
import matplotlib.pyplot as plt
# Example of the simple style
X = np.linspace(-2, 2, 100) # 100 evenly spaced points from -2 to 2
Y = X ** 2 # y=x^2
plt.figure() # initialize the Figure
plt.plot(X, Y) # plot
plt.show() # show the figure
###Output
_____no_output_____
###Markdown
2. The formal style
In the formal style, you handle figures and axes explicitly. In the simple style we only called plt.figure(), but formally you receive the figure created by plt.figure() as a variable and draw by operating on it. Note that the way functions are called also differs slightly.
###Code
fig = plt.figure() # create a Figure
ax = fig.add_subplot(111) # create an Axes
ax.plot(X, Y) # draw on the specified Axes
fig.show() # show the figure
###Output
_____no_output_____
###Markdown
3.3.3 Drawing line plots
This section explains how to draw a figure made of lines and points.
###Code
# Line data
X = np.linspace(-3, 3, 30) # 30 evenly spaced points from -3 to 3
Y = 3*X + 5
# Point data
rnd = np.random.RandomState(0) # fix the random seed so everyone gets the same result
X_p = np.linspace(-3, 3, 10)
Y_p =3*X_p + 5 + 0.5*rnd.randn(len(X_p)) # add Gaussian noise with standard deviation 0.5
# Plotting
plt.figure()
plt.plot(X, Y, color="g", linewidth=2, label='line')
plt.scatter(X_p, Y_p, marker="x", color="b", s=20, label='point')
plt.xlim(-5, 5) # xlim(min, max)
plt.ylim(-10, 20) # ylim(min, max)
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend(loc="lower right") # the loc argument controls where the legend appears
plt.tight_layout() # automatically adjusts the layout if labels or text stick out of the figure
plt.show()
###Output
_____no_output_____
###Markdown
Use the color argument to specify the line color and the linewidth argument to specify its width. (from https://matplotlib.org/2.0.0/examples/color/named_colors.html)
To draw a scatter plot, use plt.scatter. You can also choose the marker (the shape of the points) with the marker argument. (from https://matplotlib.org/2.0.0/examples/lines_bars_and_markers/marker_reference.html)
Practice quiz
Plot the line satisfying $y = 3x + 4$ together with 15 points (x evenly spaced from -4 to 4) obtained by adding noise with mean 0 and standard deviation 2.1 to that line. The drawing range should be -5 < x < 5. (Time: 5 minutes)
###Code
# WRITE ME!
# Please check your answer visually
###Output
_____no_output_____
###Markdown
Sample answer 3.3
###Code
# Answer
X = np.linspace(-5, 5, 100) # 100 evenly spaced points from -5 to 5
Y = 3*X + 4
# Point data
rnd = np.random.RandomState(0) # fix the random seed so everyone gets the same result
X_p = np.linspace(-4, 4, 15)
Y_p = 3*X_p + 4 + 2.1*rnd.randn(len(X_p)) # add Gaussian noise with standard deviation 2.1
# Plotting
plt.figure()
plt.plot(X, Y, color="g", linewidth=2, label='line')
plt.scatter(X_p, Y_p, marker="x", color="b", s=20, label='point')
plt.xlim(-5, 5) # xlim(min, max)
plt.ylim(-30, 30) # ylim(min, max)
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend(loc="lower right") # the loc argument controls where the legend appears
plt.tight_layout() # automatically adjusts the layout if labels or text stick out of the figure
plt.show()
###Output
_____no_output_____
###Markdown
Hands-on exercise 3.1
How the hands-on exercises work
1. The instructor explains the topic and the code
2. Try writing the code in the "WRITE ME!" parts
3. Before you start writing, sketch the required processing in your head or in your notes, then turn it into code
Today's exercises
- Task 1. Linear regression on 1-dimensional data
- Auxiliary task: polynomial regression (for those who finish Task 1 early)
Task 1. Linear regression on 1-dimensional data
This time, from 30 data points $t_{i}$ generated according to the very simple data-generating distribution $$ t = 0.5x + 0.2 + \epsilon $$ let us estimate the true parameters, slope 0.5 and bias 0.2, by linear regression. The noise $\epsilon$ follows a Gaussian distribution with mean 0 and standard deviation 0.2. As the basis function we use a model with a slope and a bias, $$ y = ax + b $$ just like the data-generating distribution. The program proceeds as follows:
1. Generate the training data points $({\bf x}, {\bf t})$
2. Build the matrix ${\bf X}$ by adding the bias term (whose coefficient is always 1) as a column
3. Compute the analytical solution ${\bf \theta}_{LS} = ({\bf X}^T {\bf X})^{-1} {\bf X}^T {\bf t}$
4. Compute the prediction ${\bf y_{pred}} = {\bf X}{\bf \theta}_{LS}$
5. Compute the true data points $y$
6. Plot the data-generating function (the line y), the predicted function (the line y_pred), and the data points t
Task: fill in the "WRITE ME!" parts to complete the linear regression program. (Time: 10 minutes)
###Code
rnd = np.random.RandomState(0) # fix the random seed so everyone gets the same result
# 1. Generate 30 training data points (x, t), with x evenly spaced between -3 and 3
# WRITE ME!
# Check x and t in a figure
plt.figure()
plt.scatter(x, t, marker="x", color="b")
plt.xlim(-3, 3) # xlim(min, max)
plt.ylim(-2, 2) # ylim(min, max)
plt.show()
#Answer
# 1. Generate 30 training data points (x, t), with x evenly spaced between -3 and 3
# 2. Build the matrix X=[x,b] by adding the bias vector b (whose coefficient is always 1) to x as a column
# WRITE ME!
# Check
print(X)
# Answer
# 2. Build the matrix X=[x,b] by adding the bias vector b (whose coefficient is always 1) to x as a column
# 3. Compute the analytical solution θ_LS = (X^T X)^(-1) X^T t
# WRITE ME!
# Check
print(theta_LS)
# Answer
# 3. Compute the analytical solution θ_LS = (X^T X)^(-1) X^T t
# 4. Compute the prediction y_pred = X・θ_LS (the original θ_LS・X was a mistake)
# WRITE ME!
# Check
print(y_pred)
# Answer
# 4. Compute the prediction y_pred = X・θ_LS
# 5. Compute the true data points y
# WRITE ME!
# Check
print(y)
# Answer
# 5. Compute the true data points y
# 6. Plot the data-generating function (the line y), the predicted function (the line y_pred), and the data points t
plt.figure(figsize=(8, 6))
plt.title("the results of linear regression") # title
plt.plot(x, y_pred, color="r", label="Predicted function", linewidth=2) # add a label
plt.plot(x, y, color="g", label="Data generation function", linewidth=2) # add a label
plt.scatter(x, t, marker="x", color="b", label="Training points") # add a label
plt.xlim(-3, 3) # xlim(min, max)
plt.ylim(-2, 2) # ylim(min, max)
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend(loc="lower right") # the loc argument controls where the legend appears
plt.tight_layout() # automatically adjusts the layout if labels or text stick out of the figure
plt.show()
###Output
_____no_output_____
###Markdown
Auxiliary task: polynomial regression
Let us fit a polynomial regression to 30 data points $t_{i}$ generated according to the data-generating distribution $$ y = \sin(\pi x) / (\pi x) + 0.1x + \epsilon $$ The noise $\epsilon$ follows a Gaussian distribution with mean 0 and standard deviation 0.2. As in the first task we use linear regression, but this time the model is slightly different: $$y = \theta_0 + \theta_1 x + \theta_2 x^2 + \cdots + \theta_k x^k$$ Here we use polynomials as basis functions in order to approximate the sin function.
Tasks:
1. Following the same steps as Task 1, implement and run polynomial regression on the data generated from the sin function. Adjust the polynomial degree n_dim and observe how the behaviour changes.
2. When n_dim is increased, overfitting occurs and the values of each θ become very large. Implement L2 regularization. Adjust the regularization coefficient λ and observe how the behaviour changes.
Hint 1:
$$X = \left( \begin{array}{ccccc} 1 & x_1 & x_1^2 & \cdots & x_1^k \\ 1 & x_2 & x_2^2 & \cdots & x_2^k \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ 1 & x_m & x_m^2 & \cdots & x_m^k \\ \end{array} \right)$$
Hint 2: with regularization coefficient λ and identity matrix I, the solution that minimizes the regularized squared error is $$ {\bf \theta}_{LS} = ({\bf X}^T {\bf X} + \lambda I)^{-1} {\bf X}^T {\bf t} $$
###Code
# Following the same steps as Task 1, implement and run polynomial regression on data generated from the sin function. Adjust the polynomial degree n_dim and observe how the behaviour changes.
rnd = np.random.RandomState(0) # fix the random seed so everyone gets the same result
# 1. Generate 30 training data points (x, t), with x evenly spaced between -3 and 3
# WRITE ME!
# 2. Build the matrix X with the bias term and the powers of x added as columns
# WRITE ME!
# 3. Compute the analytical solution θ_LS = (X^T X)^(-1) X^T t
# WRITE ME!
# 4. Compute the prediction y_pred = X・θ_LS
# WRITE ME!
# 5. Compute the true data points y
y = np.sin(np.pi*plt_x)/(np.pi*plt_x) + 0.1*plt_x
# 6. Plot the data-generating function (the line y), the predicted function (the line y_pred), and the data points t
plt.figure(figsize=(8, 6))
plt.title(str(n_dim)+"dim") # Title
plt.plot(plt_x, y_pred, color="r", label="Predicted function", linewidth=2) # Add a label
plt.plot(plt_x, y, color="g", label="Data generation function", linewidth=2) # Add a label
plt.scatter(x, t, marker="x", color="b", label="Training points") # Add a label
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend(loc="lower right") # The loc argument controls where the legend appears
plt.tight_layout() # Automatically adjusts the layout when labels would overflow the figure
plt.show()
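# --- Reference sketch (illustrative only, not the official answer) ---
# One possible way to build the polynomial design matrix and fit it by
# ordinary least squares. `n_dim_ref` and the other `_ref` names are
# assumptions made for this sketch only.
import numpy as np
rnd_ref = np.random.RandomState(0)
n_dim_ref = 10                                                 # polynomial degree to try
x_ref = np.linspace(-3, 3, 30)
t_ref = np.sin(np.pi*x_ref)/(np.pi*x_ref) + 0.1*x_ref + 0.2*rnd_ref.randn(len(x_ref))
X_ref = np.stack([x_ref**k for k in range(n_dim_ref + 1)], axis=1)   # columns 1, x, ..., x^k
theta_ref = np.linalg.solve(X_ref.T @ X_ref, X_ref.T @ t_ref)        # least-squares fit
y_pred_ref = X_ref @ theta_ref                                       # predictions on the inputs
print(np.round(theta_ref, 3))  # large coefficients hint at overfitting for big n_dim_ref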
# Answer
# Following the same steps as in Exercise 1, implement and run polynomial regression on the data generated from the sin function. Vary the polynomial degree n_dim and observe how the behavior changes.
# Supplementary exercise, part 2
# When n_dim is increased, overfitting occurs and the values of θ become very large. Implement L2 regularization. Vary the regularization coefficient λ and observe how the behavior changes.
rnd = np.random.RandomState(0) # Fix the random seed so everyone gets the same results
# 1. Generate 30 training data points (x, t) (x is evenly spaced between -3 and 3)
# WRITE ME!
# 2. Build the matrix X whose columns are the bias term and the powers of x up to k
# WRITE ME!
# 3. Compute the analytical solution θ_LS = (X^T X + λI)^(-1) X^T t
# WRITE ME!
# 4. Compute the prediction y_pred = X θ_LS
# WRITE ME!
# 5. Compute the true data points y
y = np.sin(np.pi*plt_x)/(np.pi*plt_x) + 0.1*plt_x
# 6. Plot the data-generating function (line y), the prediction function (line y_pred), and the data points t
plt.figure(figsize=(8, 6))
plt.title(str(n_dim)+"dim+Regularization"+str(lmbda)) # Title
plt.plot(plt_x, y_pred, color="r", label="Predicted function", linewidth=2) # Add a label
plt.plot(plt_x, y, color="g", label="Data generation function", linewidth=2) # Add a label
plt.scatter(x, t, marker="x", color="b", label="Training points") # Add a label
plt.xlabel("Input")
plt.ylabel("Output")
plt.legend(loc="lower right") # The loc argument controls where the legend appears
plt.tight_layout() # Automatically adjusts the layout when labels would overflow the figure
plt.show()
# Answer
# Supplementary exercise, part 2
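# --- Reference sketch (illustrative only, not the official answer) ---
# L2-regularized (ridge) least squares following Hint 2 above:
# theta = (X^T X + lambda*I)^(-1) X^T t. `lmbda_ref` and the `_ref` names
# are assumed values for this sketch only.
import numpy as np
rnd_ref = np.random.RandomState(0)
n_dim_ref = 10
lmbda_ref = 1.0                                                # regularization strength to try
x_ref = np.linspace(-3, 3, 30)
t_ref = np.sin(np.pi*x_ref)/(np.pi*x_ref) + 0.1*x_ref + 0.2*rnd_ref.randn(len(x_ref))
X_ref = np.stack([x_ref**k for k in range(n_dim_ref + 1)], axis=1)
I_ref = np.eye(X_ref.shape[1])
theta_ridge = np.linalg.solve(X_ref.T @ X_ref + lmbda_ref*I_ref, X_ref.T @ t_ref)
y_ridge = X_ref @ theta_ridge                                  # predictions on the training inputs
print(np.round(theta_ridge, 3))  # coefficients shrink as lmbda_ref grows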
###Output
_____no_output_____ |
site/en-snapshot/federated/tutorials/tff_for_federated_learning_research_compression.ipynb | ###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`. In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow-federated-nightly
!pip install --quiet --upgrade tensorflow-model-optimization
!pip install --quiet --upgrade nest-asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap the Keras model in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF. Note that we'll need a **function** which produces a model instead of simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset. First we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. The execution of a Federated Learning algorithm from the perspective of TFF looks like this:1. Initialize the algorithm and get the initial server state. The server state contains necessary information to perform the algorithm. Recall, since TFF is functional, that this state includes both any optimizer state the algorithm uses (e.g. momentum terms) as well as the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state will be returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all the participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state which contains a new model. For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial. Training metrics are written to the TensorBoard directory for display after the training.
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['bit','Kibit','Mibit','Gibit']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create a environment to get communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.Dataset` instances from the data of sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data,
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train'].items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client while aggregated data refers to tensors each client returns to the server.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Build a custom broadcast and aggregate functionNow let's implement functions that apply lossy compression algorithms to broadcasted data and aggregated data using the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. First, we define two functions:* `broadcast_encoder_fn` which creates an instance of [te.core.SimpleEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py#L30) to encode tensors or variables in server-to-client communication (broadcast data).* `mean_encoder_fn` which creates an instance of [te.core.GatherEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L30) to encode tensors or variables in client-to-server communication (aggregation data).It is important to note that we do not apply a compression method to the entire model at once. Instead, we decide how (and whether) to compress each variable of the model independently. The reason is that generally, small variables such as biases are more sensitive to inaccuracy, and being relatively small, the potential communication savings are also relatively small. Hence we do not compress small variables by default. In this example, we apply uniform quantization with 8 bits (256 buckets) to every variable with more than 10000 elements, and only apply identity to other variables.
###Code
def broadcast_encoder_fn(value):
"""Function for building encoded broadcast."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_simple_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_simple_encoder(te.encoders.identity(), spec)
def mean_encoder_fn(value):
"""Function for building encoded mean."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_gather_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_gather_encoder(te.encoders.identity(), spec)
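# Rough check (an observation, not part of the original tutorial): for the CNN
# defined earlier, only the second conv kernel (5*5*32*64 = 51,200 weights) and
# the first dense kernel (7*7*64*512 = 1,605,632 weights) exceed the 10,000-element
# threshold, so only those two variables are quantized; the other kernels and all
# biases go through the identity encoder.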
###Output
_____no_output_____
###Markdown
TFF provides APIs to convert the encoder function into a format that the `tff.learning.build_federated_averaging_process` API can consume. By using `tff.learning.framework.build_encoded_broadcast_process_from_model` and `tff.learning.framework.build_encoded_mean_process_from_model`, we can create two functions that can be passed into the `broadcast_process` and `aggregation_process` arguments of `tff.learning.build_federated_averaging_process` to create a Federated Averaging algorithm with a lossy compression algorithm.
###Code
encoded_broadcast_process = (
tff.learning.framework.build_encoded_broadcast_process_from_model(
tff_model_fn, broadcast_encoder_fn))
encoded_mean_process = (
tff.learning.framework.build_encoded_mean_process_from_model(
tff_model_fn, mean_encoder_fn))
federated_averaging_with_compression = tff.learning.build_federated_averaging_process(
tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
broadcast_process=encoded_broadcast_process,
aggregation_process=encoded_mean_process)
###Output
_____no_output_____
###Markdown
Training the model againNow let's run the new Federated Averaging algorithm.
###Code
logdir_for_compression = "/tmp/logs/scalars/compression/"
summary_writer_for_compression = tf.summary.create_file_writer(
logdir_for_compression)
train(federated_averaging_process=federated_averaging_with_compression,
num_rounds=10,
num_clients_per_round=10,
summary_writer=summary_writer_for_compression)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08722109347581863,loss=2.3216357231140137>>, broadcasted_bits=146.46MiB, aggregated_bits=146.46MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08379272371530533,loss=2.3108291625976562>>, broadcasted_bits=292.92MiB, aggregated_bits=292.92MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08834951370954514,loss=2.3074147701263428>>, broadcasted_bits=439.38MiB, aggregated_bits=439.39MiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10467479377985,loss=2.305814027786255>>, broadcasted_bits=585.84MiB, aggregated_bits=585.85MiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09853658825159073,loss=2.3012874126434326>>, broadcasted_bits=732.30MiB, aggregated_bits=732.31MiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14904330670833588,loss=2.3005223274230957>>, broadcasted_bits=878.77MiB, aggregated_bits=878.77MiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13152804970741272,loss=2.2985599040985107>>, broadcasted_bits=1.00GiB, aggregated_bits=1.00GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12392637878656387,loss=2.297102451324463>>, broadcasted_bits=1.14GiB, aggregated_bits=1.14GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13289350271224976,loss=2.2944107055664062>>, broadcasted_bits=1.29GiB, aggregated_bits=1.29GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12661737203598022,loss=2.2971296310424805>>, broadcasted_bits=1.43GiB, aggregated_bits=1.43GiB
###Markdown
Start TensorBoard again to compare the training metrics between the two runs. As you can see in TensorBoard, there is a significant reduction between the `original` and `compression` curves in the `broadcasted_bits` and `aggregated_bits` plots, while in the `loss` and `sparse_categorical_accuracy` plots the two curves are pretty similar. In conclusion, we implemented a compression algorithm that can achieve performance similar to the original Federated Averaging algorithm while the communication cost is significantly reduced.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`. In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated
!pip install --quiet --upgrade tensorflow-model-optimization
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap the Keras model in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF. Note that we'll need a **function** which produces a model instead of simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset. First we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. The execution of a Federated Learning algorithm from the perspective of TFF looks like this:1. Initialize the algorithm and get the initial server state. The server state contains necessary information to perform the algorithm. Recall, since TFF is functional, that this state includes both any optimizer state the algorithm uses (e.g. momentum terms) as well as the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state will be returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all the participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state which contains a new model. For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial. Training metrics are written to the TensorBoard directory for display after the training.
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['B','KiB','MiB','GiB']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create a environment to get communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.Dataset` instances from the data of sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data,
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train']._asdict().items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client while aggregated data refers to tensors each client returns to the server.
###Code
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Build a custom broadcast and aggregate functionNow let's implement functions that apply lossy compression algorithms to broadcasted data and aggregated data using the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. First, we define two functions:* `broadcast_encoder_fn` which creates an instance of [te.core.SimpleEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py#L30) to encode tensors or variables in server-to-client communication (broadcast data).* `mean_encoder_fn` which creates an instance of [te.core.GatherEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L30) to encode tensors or variables in client-to-server communication (aggregation data).It is important to note that we do not apply a compression method to the entire model at once. Instead, we decide how (and whether) to compress each variable of the model independently. The reason is that generally, small variables such as biases are more sensitive to inaccuracy, and being relatively small, the potential communication savings are also relatively small. Hence we do not compress small variables by default. In this example, we apply uniform quantization with 8 bits (256 buckets) to every variable with more than 10000 elements, and only apply identity to other variables.
###Code
def broadcast_encoder_fn(value):
"""Function for building encoded broadcast."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_simple_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_simple_encoder(te.encoders.identity(), spec)
def mean_encoder_fn(value):
"""Function for building encoded mean."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_gather_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_gather_encoder(te.encoders.identity(), spec)
###Output
_____no_output_____
###Markdown
TFF provides APIs to convert the encoder function into a format that the `tff.learning.build_federated_averaging_process` API can consume. By using `tff.learning.framework.build_encoded_broadcast_process_from_model` and `tff.learning.framework.build_encoded_mean_process_from_model`, we can create two functions that can be passed into the `broadcast_process` and `aggregation_process` arguments of `tff.learning.build_federated_averaging_process` to create a Federated Averaging algorithm with a lossy compression algorithm.
###Code
encoded_broadcast_process = (
tff.learning.framework.build_encoded_broadcast_process_from_model(
tff_model_fn, broadcast_encoder_fn))
encoded_mean_process = (
tff.learning.framework.build_encoded_mean_process_from_model(
tff_model_fn, mean_encoder_fn))
federated_averaging_with_compression = tff.learning.build_federated_averaging_process(
tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
broadcast_process=encoded_broadcast_process,
aggregation_process=encoded_mean_process)
###Output
_____no_output_____
###Markdown
Training the model againNow let's run the new Federated Averaging algorithm.
###Code
logdir_for_compression = "/tmp/logs/scalars/compression/"
summary_writer_for_compression = tf.summary.create_file_writer(
logdir_for_compression)
train(federated_averaging_process=federated_averaging_with_compression,
num_rounds=10,
num_clients_per_round=10,
summary_writer=summary_writer_for_compression)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08722109347581863,loss=2.3216357231140137>>, broadcasted_bits=146.46MiB, aggregated_bits=146.46MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08379272371530533,loss=2.3108291625976562>>, broadcasted_bits=292.92MiB, aggregated_bits=292.92MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08834951370954514,loss=2.3074147701263428>>, broadcasted_bits=439.38MiB, aggregated_bits=439.39MiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10467479377985,loss=2.305814027786255>>, broadcasted_bits=585.84MiB, aggregated_bits=585.85MiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09853658825159073,loss=2.3012874126434326>>, broadcasted_bits=732.30MiB, aggregated_bits=732.31MiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14904330670833588,loss=2.3005223274230957>>, broadcasted_bits=878.77MiB, aggregated_bits=878.77MiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13152804970741272,loss=2.2985599040985107>>, broadcasted_bits=1.00GiB, aggregated_bits=1.00GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12392637878656387,loss=2.297102451324463>>, broadcasted_bits=1.14GiB, aggregated_bits=1.14GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13289350271224976,loss=2.2944107055664062>>, broadcasted_bits=1.29GiB, aggregated_bits=1.29GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12661737203598022,loss=2.2971296310424805>>, broadcasted_bits=1.43GiB, aggregated_bits=1.43GiB
###Markdown
Start TensorBoard again to compare the training metrics between the two runs. As you can see in TensorBoard, there is a significant reduction between the `original` and `compression` curves in the `broadcasted_bits` and `aggregated_bits` plots, while in the `loss` and `sparse_categorical_accuracy` plots the two curves are pretty similar. In conclusion, we implemented a compression algorithm that can achieve performance similar to the original Federated Averaging algorithm while the communication cost is significantly reduced.
###Code
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`. In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated_nightly
!pip install --quiet --upgrade tensorflow-model-optimization
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap the Keras model in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF. Note that we'll need a **function** which produces a model instead of simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset. First we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. The execution of a Federated Learning algorithm from the perspective of TFF looks like this:1. Initialize the algorithm and get the initial server state. The server state contains necessary information to perform the algorithm. Recall, since TFF is functional, that this state includes both any optimizer state the algorithm uses (e.g. momentum terms) as well as the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state will be returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all the participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state which contains a new model. For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial. Training metrics are written to the TensorBoard directory for display after the training.
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['bit','Kibit','Mibit','Gibit']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create a environment to get communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.Dataset` instances from the data of sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data,
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train'].items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client, while aggregated data refers to tensors each client returns to the server.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Build a custom broadcast and aggregate functionNow let's implement functions that apply lossy compression algorithms to broadcasted and aggregated data using the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API.First, we define two functions:* `broadcast_encoder_fn`, which creates an instance of [te.core.SimpleEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py#L30) to encode tensors or variables in server-to-client communication (broadcast data).* `mean_encoder_fn`, which creates an instance of [te.core.GatherEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L30) to encode tensors or variables in client-to-server communication (aggregation data).It is important to note that we do not apply a compression method to the entire model at once. Instead, we decide how (and whether) to compress each variable of the model independently. The reason is that small variables such as biases are generally more sensitive to inaccuracy, and, being relatively small, the potential communication savings are also relatively small. Hence we do not compress small variables by default. In this example, we apply uniform quantization to 8 bits (256 buckets) for every variable with more than 10000 elements, and apply the identity to all other variables.
###Code
def broadcast_encoder_fn(value):
"""Function for building encoded broadcast."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_simple_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_simple_encoder(te.encoders.identity(), spec)
def mean_encoder_fn(value):
"""Function for building encoded mean."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_gather_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_gather_encoder(te.encoders.identity(), spec)
###Output
_____no_output_____
###Markdown
TFF provides APIs to convert the encoder functions into a format that the `tff.learning.build_federated_averaging_process` API can consume. By using `tff.learning.framework.build_encoded_broadcast_process_from_model` and `tff.learning.framework.build_encoded_mean_process_from_model`, we can create two processes that can be passed into the `broadcast_process` and `aggregation_process` arguments of `tff.learning.build_federated_averaging_process` to create a Federated Averaging algorithm with a lossy compression algorithm.
###Code
encoded_broadcast_process = (
tff.learning.framework.build_encoded_broadcast_process_from_model(
tff_model_fn, broadcast_encoder_fn))
encoded_mean_process = (
tff.learning.framework.build_encoded_mean_process_from_model(
tff_model_fn, mean_encoder_fn))
federated_averaging_with_compression = tff.learning.build_federated_averaging_process(
tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
broadcast_process=encoded_broadcast_process,
aggregation_process=encoded_mean_process)
###Output
_____no_output_____
###Markdown
Training the model againNow let's run the new Federated Averaging algorithm.
###Code
logdir_for_compression = "/tmp/logs/scalars/compression/"
summary_writer_for_compression = tf.summary.create_file_writer(
logdir_for_compression)
train(federated_averaging_process=federated_averaging_with_compression,
num_rounds=10,
num_clients_per_round=10,
summary_writer=summary_writer_for_compression)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08722109347581863,loss=2.3216357231140137>>, broadcasted_bits=146.46MiB, aggregated_bits=146.46MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08379272371530533,loss=2.3108291625976562>>, broadcasted_bits=292.92MiB, aggregated_bits=292.92MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08834951370954514,loss=2.3074147701263428>>, broadcasted_bits=439.38MiB, aggregated_bits=439.39MiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10467479377985,loss=2.305814027786255>>, broadcasted_bits=585.84MiB, aggregated_bits=585.85MiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09853658825159073,loss=2.3012874126434326>>, broadcasted_bits=732.30MiB, aggregated_bits=732.31MiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14904330670833588,loss=2.3005223274230957>>, broadcasted_bits=878.77MiB, aggregated_bits=878.77MiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13152804970741272,loss=2.2985599040985107>>, broadcasted_bits=1.00GiB, aggregated_bits=1.00GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12392637878656387,loss=2.297102451324463>>, broadcasted_bits=1.14GiB, aggregated_bits=1.14GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13289350271224976,loss=2.2944107055664062>>, broadcasted_bits=1.29GiB, aggregated_bits=1.29GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12661737203598022,loss=2.2971296310424805>>, broadcasted_bits=1.43GiB, aggregated_bits=1.43GiB
###Markdown
Start TensorBoard again to compare the training metrics between the two runs.As you can see in TensorBoard, there is a significant reduction between the `original` and `compression` curves in the `broadcasted_bits` and `aggregated_bits` plots, while the two curves are quite similar in the `loss` and `sparse_categorical_accuracy` plots.In conclusion, we implemented a compression algorithm that achieves performance similar to the original Federated Averaging algorithm while significantly reducing the communication cost.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`.In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we startBefore we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated_nightly
!pip install --quiet --upgrade tensorflow-model-optimization
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap it in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF.Note that we'll need a **function** which produces a model, rather than simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset.First, we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. From the perspective of TFF, the execution of a Federated Learning algorithm looks like this:1. Initialize the algorithm and get the initial server state. The server state contains the information necessary to perform the algorithm. Recall that, since TFF is functional, this state includes both any optimizer state the algorithm uses (e.g. momentum terms) and the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state is returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state that contains a new model.For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial.Training metrics are written to the TensorBoard directory for display after training.
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['B','KiB','MiB','GiB']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create an environment to track communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.data.Dataset` instances from the data of the sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train']._asdict().items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client, while aggregated data refers to tensors each client returns to the server.
###Code
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Build a custom broadcast and aggregate functionNow let's implement functions that apply lossy compression algorithms to broadcasted and aggregated data using the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API.First, we define two functions:* `broadcast_encoder_fn`, which creates an instance of [te.core.SimpleEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py#L30) to encode tensors or variables in server-to-client communication (broadcast data).* `mean_encoder_fn`, which creates an instance of [te.core.GatherEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L30) to encode tensors or variables in client-to-server communication (aggregation data).It is important to note that we do not apply a compression method to the entire model at once. Instead, we decide how (and whether) to compress each variable of the model independently. The reason is that small variables such as biases are generally more sensitive to inaccuracy, and, being relatively small, the potential communication savings are also relatively small. Hence we do not compress small variables by default. In this example, we apply uniform quantization to 8 bits (256 buckets) for every variable with more than 10000 elements, and apply the identity to all other variables.
###Code
def broadcast_encoder_fn(value):
"""Function for building encoded broadcast."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_simple_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_simple_encoder(te.encoders.identity(), spec)
def mean_encoder_fn(value):
"""Function for building encoded mean."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_gather_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_gather_encoder(te.encoders.identity(), spec)
###Output
_____no_output_____
###Markdown
TFF provides APIs to convert the encoder functions into a format that the `tff.learning.build_federated_averaging_process` API can consume. By using `tff.learning.framework.build_encoded_broadcast_process_from_model` and `tff.learning.framework.build_encoded_mean_process_from_model`, we can create two processes that can be passed into the `broadcast_process` and `aggregation_process` arguments of `tff.learning.build_federated_averaging_process` to create a Federated Averaging algorithm with a lossy compression algorithm.
###Code
encoded_broadcast_process = (
tff.learning.framework.build_encoded_broadcast_process_from_model(
tff_model_fn, broadcast_encoder_fn))
encoded_mean_process = (
tff.learning.framework.build_encoded_mean_process_from_model(
tff_model_fn, mean_encoder_fn))
federated_averaging_with_compression = tff.learning.build_federated_averaging_process(
tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
broadcast_process=encoded_broadcast_process,
aggregation_process=encoded_mean_process)
###Output
_____no_output_____
###Markdown
Training the model againNow let's run the new Federated Averaging algorithm.
###Code
logdir_for_compression = "/tmp/logs/scalars/compression/"
summary_writer_for_compression = tf.summary.create_file_writer(
logdir_for_compression)
train(federated_averaging_process=federated_averaging_with_compression,
num_rounds=10,
num_clients_per_round=10,
summary_writer=summary_writer_for_compression)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08722109347581863,loss=2.3216357231140137>>, broadcasted_bits=146.46MiB, aggregated_bits=146.46MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08379272371530533,loss=2.3108291625976562>>, broadcasted_bits=292.92MiB, aggregated_bits=292.92MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08834951370954514,loss=2.3074147701263428>>, broadcasted_bits=439.38MiB, aggregated_bits=439.39MiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10467479377985,loss=2.305814027786255>>, broadcasted_bits=585.84MiB, aggregated_bits=585.85MiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09853658825159073,loss=2.3012874126434326>>, broadcasted_bits=732.30MiB, aggregated_bits=732.31MiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14904330670833588,loss=2.3005223274230957>>, broadcasted_bits=878.77MiB, aggregated_bits=878.77MiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13152804970741272,loss=2.2985599040985107>>, broadcasted_bits=1.00GiB, aggregated_bits=1.00GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12392637878656387,loss=2.297102451324463>>, broadcasted_bits=1.14GiB, aggregated_bits=1.14GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13289350271224976,loss=2.2944107055664062>>, broadcasted_bits=1.29GiB, aggregated_bits=1.29GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12661737203598022,loss=2.2971296310424805>>, broadcasted_bits=1.43GiB, aggregated_bits=1.43GiB
###Markdown
Start TensorBoard again to compare the training metrics between the two runs.As you can see in TensorBoard, there is a significant reduction between the `original` and `compression` curves in the `broadcasted_bits` and `aggregated_bits` plots, while the two curves are quite similar in the `loss` and `sparse_categorical_accuracy` plots.In conclusion, we implemented a compression algorithm that achieves performance similar to the original Federated Averaging algorithm while significantly reducing the communication cost.
###Code
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`.In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we startBefore we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated_nightly
!pip install --quiet --upgrade tensorflow-model-optimization
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap it in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF.Note that we'll need a **function** which produces a model, rather than simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset.First, we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. From the perspective of TFF, the execution of a Federated Learning algorithm looks like this:1. Initialize the algorithm and get the initial server state. The server state contains the information necessary to perform the algorithm. Recall that, since TFF is functional, this state includes both any optimizer state the algorithm uses (e.g. momentum terms) and the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state is returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state that contains a new model.For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial.Training metrics are written to the TensorBoard directory for display after training.
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['B','KiB','MiB','GiB']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create an environment to track communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.data.Dataset` instances from the data of the sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train'].items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client, while aggregated data refers to tensors each client returns to the server.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Copyright 2020 The TensorFlow Authors.
###Code
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###Output
_____no_output_____
###Markdown
TFF for Federated Learning Research: Model and Update Compression**NOTE**: This colab has been verified to work with the [latest released version](https://github.com/tensorflow/federated#compatibility) of the `tensorflow_federated` pip package, but the TensorFlow Federated project is still in pre-release development and may not work on `master`.In this tutorial, we use the [EMNIST](https://www.tensorflow.org/federated/api_docs/python/tff/simulation/datasets/emnist) dataset to demonstrate how to enable lossy compression algorithms to reduce communication cost in the Federated Averaging algorithm using the `tff.learning.build_federated_averaging_process` API and the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API. For more details on the Federated Averaging algorithm, see the paper [Communication-Efficient Learning of Deep Networks from Decentralized Data](https://arxiv.org/abs/1602.05629). Before we startBefore we start, please run the following to make sure that your environment is correctly set up. If you don't see a greeting, please refer to the [Installation](../install.md) guide for instructions.
###Code
#@test {"skip": true}
!pip install --quiet --upgrade tensorflow_federated_nightly
!pip install --quiet --upgrade tensorflow-model-optimization
!pip install --quiet --upgrade nest_asyncio
import nest_asyncio
nest_asyncio.apply()
%load_ext tensorboard
import functools
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
from tensorflow_model_optimization.python.core.internal import tensor_encoding as te
###Output
_____no_output_____
###Markdown
Verify if TFF is working.
###Code
@tff.federated_computation
def hello_world():
return 'Hello, World!'
hello_world()
###Output
_____no_output_____
###Markdown
Preparing the input dataIn this section we load and preprocess the EMNIST dataset included in TFF. Please check out the [Federated Learning for Image Classification](https://www.tensorflow.org/federated/tutorials/federated_learning_for_image_classification#preparing_the_input_data) tutorial for more details about the EMNIST dataset.
###Code
# This value only applies to EMNIST dataset, consider choosing appropriate
# values if switching to other datasets.
MAX_CLIENT_DATASET_SIZE = 418
CLIENT_EPOCHS_PER_ROUND = 1
CLIENT_BATCH_SIZE = 20
TEST_BATCH_SIZE = 500
emnist_train, emnist_test = tff.simulation.datasets.emnist.load_data(
only_digits=True)
def reshape_emnist_element(element):
return (tf.expand_dims(element['pixels'], axis=-1), element['label'])
def preprocess_train_dataset(dataset):
"""Preprocessing function for the EMNIST training dataset."""
return (dataset
# Shuffle according to the largest client dataset
.shuffle(buffer_size=MAX_CLIENT_DATASET_SIZE)
# Repeat to do multiple local epochs
.repeat(CLIENT_EPOCHS_PER_ROUND)
# Batch to a fixed client batch size
.batch(CLIENT_BATCH_SIZE, drop_remainder=False)
# Preprocessing step
.map(reshape_emnist_element))
emnist_train = emnist_train.preprocess(preprocess_train_dataset)
###Output
_____no_output_____
###Markdown
Defining a modelHere we define a Keras model based on the original FedAvg CNN, and then wrap it in an instance of [tff.learning.Model](https://www.tensorflow.org/federated/api_docs/python/tff/learning/Model) so that it can be consumed by TFF.Note that we'll need a **function** which produces a model, rather than simply a model directly. In addition, the function **cannot** just capture a pre-constructed model; it must create the model in the context in which it is called. The reason is that TFF is designed to go to devices, and needs control over when resources are constructed so that they can be captured and packaged up.
###Code
def create_original_fedavg_cnn_model(only_digits=True):
"""The CNN model used in https://arxiv.org/abs/1602.05629."""
data_format = 'channels_last'
max_pool = functools.partial(
tf.keras.layers.MaxPooling2D,
pool_size=(2, 2),
padding='same',
data_format=data_format)
conv2d = functools.partial(
tf.keras.layers.Conv2D,
kernel_size=5,
padding='same',
data_format=data_format,
activation=tf.nn.relu)
model = tf.keras.models.Sequential([
tf.keras.layers.InputLayer(input_shape=(28, 28, 1)),
conv2d(filters=32),
max_pool(),
conv2d(filters=64),
max_pool(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10 if only_digits else 62),
tf.keras.layers.Softmax(),
])
return model
# Gets the type information of the input data. TFF is a strongly typed
# functional programming framework, and needs type information about inputs to
# the model.
input_spec = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0]).element_spec
def tff_model_fn():
keras_model = create_original_fedavg_cnn_model()
return tff.learning.from_keras_model(
keras_model=keras_model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
###Output
_____no_output_____
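###Markdown
Since the whole point of this tutorial is communication cost, it can be useful to see how large the defined model actually is. The optional snippet below is an illustrative addition (not part of the original notebook) that simply prints the Keras summary with per-layer parameter counts.
###Code
# Optional: inspect per-layer parameter counts of the model defined above.
create_original_fedavg_cnn_model().summary()
###Output
_____no_output_____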
###Markdown
Training the model and outputting training metricsNow we are ready to construct a Federated Averaging algorithm and train the defined model on the EMNIST dataset.First, we need to build a Federated Averaging algorithm using the [tff.learning.build_federated_averaging_process](https://www.tensorflow.org/federated/api_docs/python/tff/learning/build_federated_averaging_process) API.
###Code
federated_averaging = tff.learning.build_federated_averaging_process(
model_fn=tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0))
###Output
_____no_output_____
###Markdown
Now let's run the Federated Averaging algorithm. From the perspective of TFF, the execution of a Federated Learning algorithm looks like this:1. Initialize the algorithm and get the initial server state. The server state contains the information necessary to perform the algorithm. Recall that, since TFF is functional, this state includes both any optimizer state the algorithm uses (e.g. momentum terms) and the model parameters themselves--these will be passed as arguments and returned as results from TFF computations.2. Execute the algorithm round by round. In each round, a new server state is returned as the result of each client training the model on its data. Typically in one round: 1. The server broadcasts the model to all participating clients. 2. Each client performs work based on the model and its own data. 3. The server aggregates all the models to produce a server state that contains a new model.For more details, please see the [Custom Federated Algorithms, Part 2: Implementing Federated Averaging](https://www.tensorflow.org/federated/tutorials/custom_federated_algorithms_2) tutorial.Training metrics are written to the TensorBoard directory for display after training.
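Before looking at the full utility functions, it may help to see the bare-bones shape of this loop. The sketch below is an illustrative simplification, not part of the original notebook; `client_datasets_for_round` is a hypothetical placeholder standing in for the client-sampling logic implemented next.
###Code
# Illustrative sketch only: the core TFF training loop for the process built above.
# `client_datasets_for_round` is a hypothetical placeholder for client sampling.
state = federated_averaging.initialize()
NUM_ROUNDS = 10
for round_num in range(NUM_ROUNDS):
  # `next` consumes the previous server state plus the sampled client datasets and
  # returns the updated state together with the training metrics for the round.
  state, metrics = federated_averaging.next(
      state, client_datasets_for_round(round_num))
###Output
_____no_output_____
###Markdown
The utility functions below implement exactly this loop, adding client sampling, TensorBoard logging, and communication-cost reporting.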
###Code
#@title Load utility functions
def format_size(size):
"""A helper function for creating a human-readable size."""
size = float(size)
for unit in ['B','KiB','MiB','GiB']:
if size < 1024.0:
return "{size:3.2f}{unit}".format(size=size, unit=unit)
size /= 1024.0
return "{size:.2f}{unit}".format(size=size, unit='TiB')
def set_sizing_environment():
"""Creates an environment that contains sizing information."""
# Creates a sizing executor factory to output communication cost
# after the training finishes. Note that sizing executor only provides an
# estimate (not exact) of communication cost, and doesn't capture cases like
# compression of over-the-wire representations. However, it's perfect for
# demonstrating the effect of compression in this tutorial.
sizing_factory = tff.framework.sizing_executor_factory()
# TFF has a modular runtime you can configure yourself for various
# environments and purposes, and this example just shows how to configure one
# part of it to report the size of things.
context = tff.framework.ExecutionContext(executor_fn=sizing_factory)
tff.framework.set_default_context(context)
return sizing_factory
def train(federated_averaging_process, num_rounds, num_clients_per_round, summary_writer):
"""Trains the federated averaging process and output metrics."""
# Create an environment to track communication cost.
environment = set_sizing_environment()
# Initialize the Federated Averaging algorithm to get the initial server state.
state = federated_averaging_process.initialize()
with summary_writer.as_default():
for round_num in range(num_rounds):
# Sample the clients participating in this round.
sampled_clients = np.random.choice(
emnist_train.client_ids,
size=num_clients_per_round,
replace=False)
# Create a list of `tf.data.Dataset` instances from the data of the sampled clients.
sampled_train_data = [
emnist_train.create_tf_dataset_for_client(client)
for client in sampled_clients
]
# Run one round of the algorithm based on the server state and client data
# and output the new state and metrics.
state, metrics = federated_averaging_process.next(state, sampled_train_data)
# For more about size_info, please see https://www.tensorflow.org/federated/api_docs/python/tff/framework/SizeInfo
size_info = environment.get_size_info()
broadcasted_bits = size_info.broadcast_bits[-1]
aggregated_bits = size_info.aggregate_bits[-1]
print('round {:2d}, metrics={}, broadcasted_bits={}, aggregated_bits={}'.format(round_num, metrics, format_size(broadcasted_bits), format_size(aggregated_bits)))
# Add metrics to Tensorboard.
for name, value in metrics['train'].items():
tf.summary.scalar(name, value, step=round_num)
# Add broadcasted and aggregated data size to Tensorboard.
tf.summary.scalar('cumulative_broadcasted_bits', broadcasted_bits, step=round_num)
tf.summary.scalar('cumulative_aggregated_bits', aggregated_bits, step=round_num)
summary_writer.flush()
# Clean the log directory to avoid conflicts.
!rm -R /tmp/logs/scalars/*
# Set up the log directory and writer for Tensorboard.
logdir = "/tmp/logs/scalars/original/"
summary_writer = tf.summary.create_file_writer(logdir)
train(federated_averaging_process=federated_averaging, num_rounds=10,
num_clients_per_round=10, summary_writer=summary_writer)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09433962404727936,loss=2.3181073665618896>>, broadcasted_bits=507.62MiB, aggregated_bits=507.62MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.0765027329325676,loss=2.3148586750030518>>, broadcasted_bits=1015.24MiB, aggregated_bits=1015.24MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08872458338737488,loss=2.3089394569396973>>, broadcasted_bits=1.49GiB, aggregated_bits=1.49GiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10852713137865067,loss=2.304060220718384>>, broadcasted_bits=1.98GiB, aggregated_bits=1.98GiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10818713158369064,loss=2.3026843070983887>>, broadcasted_bits=2.48GiB, aggregated_bits=2.48GiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10454985499382019,loss=2.300365447998047>>, broadcasted_bits=2.97GiB, aggregated_bits=2.97GiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12841254472732544,loss=2.29765248298645>>, broadcasted_bits=3.47GiB, aggregated_bits=3.47GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14023210108280182,loss=2.2977216243743896>>, broadcasted_bits=3.97GiB, aggregated_bits=3.97GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.15060241520404816,loss=2.29490327835083>>, broadcasted_bits=4.46GiB, aggregated_bits=4.46GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13088512420654297,loss=2.2942349910736084>>, broadcasted_bits=4.96GiB, aggregated_bits=4.96GiB
###Markdown
Start TensorBoard with the root log directory specified above to display the training metrics. It can take a few seconds for the data to load. In addition to loss and accuracy, we also output the amount of broadcasted and aggregated data. Broadcasted data refers to tensors the server pushes to each client, while aggregated data refers to tensors each client returns to the server.
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____
###Markdown
Build a custom broadcast and aggregate functionNow let's implement functions that apply lossy compression algorithms to broadcasted and aggregated data using the [tensor_encoding](http://jakubkonecny.com/files/tensor_encoding.pdf) API.First, we define two functions:* `broadcast_encoder_fn`, which creates an instance of [te.core.SimpleEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/simple_encoder.py#L30) to encode tensors or variables in server-to-client communication (broadcast data).* `mean_encoder_fn`, which creates an instance of [te.core.GatherEncoder](https://github.com/tensorflow/model-optimization/blob/ee53c9a9ae2e18ac1e443842b0b96229f0afb6d6/tensorflow_model_optimization/python/core/internal/tensor_encoding/core/gather_encoder.py#L30) to encode tensors or variables in client-to-server communication (aggregation data).It is important to note that we do not apply a compression method to the entire model at once. Instead, we decide how (and whether) to compress each variable of the model independently. The reason is that small variables such as biases are generally more sensitive to inaccuracy, and, being relatively small, the potential communication savings are also relatively small. Hence we do not compress small variables by default. In this example, we apply uniform quantization to 8 bits (256 buckets) for every variable with more than 10000 elements, and apply the identity to all other variables.
###Code
def broadcast_encoder_fn(value):
"""Function for building encoded broadcast."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_simple_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_simple_encoder(te.encoders.identity(), spec)
def mean_encoder_fn(value):
"""Function for building encoded mean."""
spec = tf.TensorSpec(value.shape, value.dtype)
if value.shape.num_elements() > 10000:
return te.encoders.as_gather_encoder(
te.encoders.uniform_quantization(bits=8), spec)
else:
return te.encoders.as_gather_encoder(te.encoders.identity(), spec)
###Output
_____no_output_____
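###Markdown
As a rough sanity check on what this policy buys us, the optional snippet below (an illustrative estimate, not part of the original notebook, assuming float32 weights) counts how many parameters of the model defined earlier fall above the 10000-element threshold and compares the bits required before and after 8-bit quantization.
###Code
# Illustrative estimate: expected compression from quantizing only variables with
# more than 10000 elements down to 8 bits, assuming float32 (32-bit) weights otherwise.
model = create_original_fedavg_cnn_model()
large = sum(v.shape.num_elements() for v in model.trainable_variables
            if v.shape.num_elements() > 10000)
small = sum(v.shape.num_elements() for v in model.trainable_variables
            if v.shape.num_elements() <= 10000)
uncompressed_bits = 32 * (large + small)
compressed_bits = 8 * large + 32 * small
print('expected reduction: {:.2f}x'.format(uncompressed_bits / compressed_bits))
###Output
_____no_output_____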
###Markdown
TFF provides APIs to convert the encoder functions into a format that the `tff.learning.build_federated_averaging_process` API can consume. By using `tff.learning.framework.build_encoded_broadcast_process_from_model` and `tff.learning.framework.build_encoded_mean_process_from_model`, we can create two processes that can be passed into the `broadcast_process` and `aggregation_process` arguments of `tff.learning.build_federated_averaging_process` to create a Federated Averaging algorithm with a lossy compression algorithm.
###Code
encoded_broadcast_process = (
tff.learning.framework.build_encoded_broadcast_process_from_model(
tff_model_fn, broadcast_encoder_fn))
encoded_mean_process = (
tff.learning.framework.build_encoded_mean_process_from_model(
tff_model_fn, mean_encoder_fn))
federated_averaging_with_compression = tff.learning.build_federated_averaging_process(
tff_model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02),
server_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=1.0),
broadcast_process=encoded_broadcast_process,
aggregation_process=encoded_mean_process)
###Output
_____no_output_____
###Markdown
Training the model again
Now let's run the new Federated Averaging algorithm.
###Code
logdir_for_compression = "/tmp/logs/scalars/compression/"
summary_writer_for_compression = tf.summary.create_file_writer(
logdir_for_compression)
train(federated_averaging_process=federated_averaging_with_compression,
num_rounds=10,
num_clients_per_round=10,
summary_writer=summary_writer_for_compression)
###Output
round 0, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08722109347581863,loss=2.3216357231140137>>, broadcasted_bits=146.46MiB, aggregated_bits=146.46MiB
round 1, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08379272371530533,loss=2.3108291625976562>>, broadcasted_bits=292.92MiB, aggregated_bits=292.92MiB
round 2, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.08834951370954514,loss=2.3074147701263428>>, broadcasted_bits=439.38MiB, aggregated_bits=439.39MiB
round 3, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.10467479377985,loss=2.305814027786255>>, broadcasted_bits=585.84MiB, aggregated_bits=585.85MiB
round 4, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.09853658825159073,loss=2.3012874126434326>>, broadcasted_bits=732.30MiB, aggregated_bits=732.31MiB
round 5, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.14904330670833588,loss=2.3005223274230957>>, broadcasted_bits=878.77MiB, aggregated_bits=878.77MiB
round 6, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13152804970741272,loss=2.2985599040985107>>, broadcasted_bits=1.00GiB, aggregated_bits=1.00GiB
round 7, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12392637878656387,loss=2.297102451324463>>, broadcasted_bits=1.14GiB, aggregated_bits=1.14GiB
round 8, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.13289350271224976,loss=2.2944107055664062>>, broadcasted_bits=1.29GiB, aggregated_bits=1.29GiB
round 9, metrics=<broadcast=<>,aggregation=<>,train=<sparse_categorical_accuracy=0.12661737203598022,loss=2.2971296310424805>>, broadcasted_bits=1.43GiB, aggregated_bits=1.43GiB
###Markdown
Start TensorBoard again to compare the training metrics between the two runs. As you can see in TensorBoard, there is a significant reduction between the `original` and `compression` curves in the `broadcasted_bits` and `aggregated_bits` plots, while the two curves are quite similar in the `loss` and `sparse_categorical_accuracy` plots. In conclusion, we implemented a compression algorithm that achieves performance similar to the original Federated Averaging algorithm while significantly reducing the communication cost.
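As a rough sanity check (a sketch using the cumulative byte counts printed in the training logs above, not an output of the notebook itself), the traffic after 10 rounds drops by roughly a factor of 3.5:

```python
# Cumulative traffic after round 9, read off the logs above.
uncompressed_gib = 4.96   # original Federated Averaging run
compressed_gib = 1.43     # run with 8-bit uniform quantization
print(f"~{uncompressed_gib / compressed_gib:.1f}x less communication")  # ~3.5x
```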
###Code
#@test {"skip": true}
%tensorboard --logdir /tmp/logs/scalars/ --port=0
###Output
_____no_output_____ |
NLPday3.ipynb | ###Markdown
Looping with conditions.
###Code
sent1=['Call','me','Mukut','.']
for xyzzy in sent1:
if xyzzy.endswith('t'):
print(xyzzy)
for token in sent1:
if token.islower():
print(token,'is lowercase.')
elif token.istitle():
print(token, 'is a titlecase word')
else:
print(token, 'is punctuation')
from nltk.book import text2  # text2 = "Sense and Sensibility", needed if not already loaded above
text2
tricky= sorted([w for w in set(text2) if "cie" in w or "cei" in w])
for word in tricky:
print(word,end=",")
###Output
_____no_output_____
###Markdown
Generating Language Output: the babelize_shell() / Babelfish translator service is gone. **Gutenberg Corpus**
###Code
import nltk
from nltk.corpus import gutenberg  # imports used by the cells below, if not already loaded earlier
nltk.corpus.gutenberg.fileids()
emma = nltk.corpus.gutenberg.words('austen-emma.txt')
len(emma)
for fileid in gutenberg.fileids():
num_chars = len(gutenberg.raw(fileid))
num_words = len(gutenberg.words(fileid))
num_sents = len(gutenberg.sents(fileid))
num_vocab = len(set([w.lower() for w in gutenberg.words(fileid)]))
print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab),fileid)
len(gutenberg.raw('blake-poems.txt')) #raw() gives us the contents of the file without any linguistic processing.
macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
macbeth_sentences
longest_len = max([len(s) for s in macbeth_sentences])
longest_len
[s for s in macbeth_sentences if len(s) == longest_len]
###Output
_____no_output_____
###Markdown
Web and Chat Text
###Code
from nltk.corpus import webtext
for fileid in webtext.fileids():
print(fileid, webtext.raw(fileid)[:65])
from nltk.corpus import nps_chat
chatroom = nps_chat.posts('10-19-20s_706posts.xml')
chatroom[123]
###Output
_____no_output_____
###Markdown
**Brown Corpus** : The Brown Corpus is a convenient resource for studying systematic differences between genres, a kind of linguistic inquiry known as stylistics.
###Code
from nltk.corpus import brown
brown.categories()
brown.words(categories='news')
brown.words(fileids='cg22')
brown.sents(categories=['news', 'editorial', 'reviews'])
news_text=brown.words(categories='news')
fdist= nltk.FreqDist([w.lower() for w in news_text])
modals = ['can', 'could', 'may', 'might', 'must', 'will']
for m in modals:
print(m + ':', fdist[m],end=(","))
editorial_text=brown.words(categories='editorial')
fdist1=nltk.FreqDist([w.lower() for w in editorial_text])  # note: w.lower() must be called, not w.lower
modals1=['what', 'when','where', 'who','why']  # wh-question words, counted the same way as the modals above
for m in modals1:
print(m+":",fdist1[m],end=",")
cfd = nltk.ConditionalFreqDist((genre, word)
for genre in brown.categories()
for word in brown.words(categories=genre))
genres = ['news', 'religion', 'hobbies', 'science_fiction', 'romance', 'humor']
modals = ['can', 'could', 'may', 'might', 'must', 'will']
cfd.tabulate(conditions=genres, samples=modals)
###Output
_____no_output_____ |
Lab 11/.ipynb_checkpoints/060_Lab11_Task1-checkpoint.ipynb | ###Markdown
Linear Model
###Code
from sklearn.svm import SVC  # import needed if not already loaded earlier in the notebook

#roll_number=60
model = SVC(kernel='linear',random_state=60)
model.fit(x_train,y_train)
pred = model.predict(x_test)
print(pred)
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, pred))
print("Precision Score : ",metrics.precision_score(y_test, pred, pos_label='positive' ,average='micro'))
print("Recall Score : ",metrics.recall_score(y_test, pred, pos_label='positive',average='micro'))
###Output
Accuracy: 0.9404
Precision Score : 0.9404
Recall Score : 0.9404
###Markdown
Polynomial Model
###Code
####polynomial model
#roll_number = 60
model1 = SVC(kernel='poly',degree=3,gamma='scale',random_state=60)
model1.fit(x_train,y_train)
pred1 = model1.predict(x_test)
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, pred1))
print("Precision Score : ",metrics.precision_score(y_test, pred1, pos_label='positive' ,average='micro'))
print("Recall Score : ",metrics.recall_score(y_test, pred1, pos_label='positive',average='micro'))
###Output
Accuracy: 0.9771
Precision Score : 0.9771
Recall Score : 0.9771
###Markdown
RBF Model
###Code
from sklearn.svm import SVC
#roll_number=60
model2 = SVC(kernel='rbf',gamma='scale',random_state=60)
model2.fit(x_train,y_train)
pred2 = model2.predict(x_test)
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, pred2))
print("Precision Score : ",metrics.precision_score(y_test, pred2, pos_label='positive' ,average='micro'))
print("Recall Score : ",metrics.recall_score(y_test, pred2, pos_label='positive',average='micro'))
###Output
Accuracy: 0.9792
Precision Score : 0.9792
Recall Score : 0.9792
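A quick side-by-side summary of the three kernels could be appended here (a sketch reusing the predictions `pred`, `pred1`, and `pred2` computed above):

```python
from sklearn import metrics

# Compare the three fitted SVM kernels on the same test split.
for name, p in [("linear", pred), ("poly (deg 3)", pred1), ("rbf", pred2)]:
    print(f"{name:>12}: accuracy = {metrics.accuracy_score(y_test, p):.4f}")
```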
|
notebooks/.ipynb_checkpoints/1D RCWA Test of the First Order Formulation (TE)-checkpoint.ipynb | ###Markdown
Important details of the first order formulation
This notebook tests the first-order ODE solver. The main thing to worry about is that we need to do some mode sorting in order to properly evaluate the eigenmodes of the $E$ and $H$ fields, so that they match the second-order formulation (which effectively does the 'mode sorting' automatically).
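The key fact behind the sorting (a standalone sketch with a small random symmetric matrix, independent of the RCWA matrices built below): the block matrix [[0, I], [A, 0]] has eigenvalues $\pm\sqrt{\lambda_i}$ for each eigenvalue $\lambda_i$ of $A$, so the first-order solver must pick one member of each $\pm$ pair to reproduce the principal square roots that the second-order solver gets directly from `np.sqrt`.

```python
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4)); A = A + A.T       # symmetric, so eigenvalues are real
I4, Z4 = np.eye(4), np.zeros((4, 4))
AO = np.block([[Z4, I4], [A, Z4]])                 # first-order block matrix

lam_A = np.linalg.eigvals(A).astype(complex)
lam_AO = np.linalg.eigvals(AO)

# Keep one eigenvalue from each +/- pair: positive real part, or (if the real
# part is round-off-sized) non-negative imaginary part: the same branch as np.sqrt.
tol = 1e-10
keep = (lam_AO.real > tol) | ((np.abs(lam_AO.real) <= tol) & (lam_AO.imag >= 0))
print(np.sort_complex(np.round(np.sqrt(lam_A), 12)))   # principal square roots of eig(A)
print(np.sort_complex(np.round(lam_AO[keep], 12)))     # same values, up to round-off
```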
###Code
## same as the analytic case but with the fft
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import cond
import cmath;
from scipy.fftpack import fft, fftfreq, fftshift, rfft
from scipy.fftpack import dst, idst
from scipy.linalg import expm
from scipy import linalg as LA
import random
# Moharam et. al Formulation for stable and efficient implementation for RCWA
plt.close("all")
'''
1D TE implementation of PLANAR DIFFRACTION...the easy case.
Sign convention is exp(-ikr) for the forward-propagating wave, so loss enters with + not -.
Source for the Fourier decompositions: "Formulation for stable and efficient implementation of
the rigorous coupled-wave analysis of binary gratings" by Moharam et al.
'''
np.set_printoptions(precision = 4)
def grating_fourier_harmonics(order, fill_factor, n_ridge, n_groove):
""" function comes from analytic solution of a step function in a finite unit cell"""
#n_ridge = index of refraction of ridge (should be dielectric)
#n_ridge = index of refraction of groove (air)
#n_ridge has fill_factor
#n_groove has (1-fill_factor)
# there is no lattice constant here, so it implicitly assumes that the lattice constant is 1...which is not good
if(order == 0):
return n_ridge**2*fill_factor + n_groove**2*(1-fill_factor);
else:
#should it be 1-fill_factor or fill_factor?, should be fill_factor
return(n_ridge**2 - n_groove**2)*np.sin(np.pi*order*(fill_factor))/(np.pi*order);
def grating_fourier_array(num_ord, fill_factor, n_ridge, n_groove):
""" what is a convolution in 1D """
fourier_comps = list();
for i in range(-num_ord, num_ord+1):
fourier_comps.append(grating_fourier_harmonics(i, fill_factor, n_ridge, n_groove));
return fourier_comps;
def fourier_reconstruction(x, period, num_ord, n_ridge, n_groove, fill_factor = 0.5):
index = np.arange(-num_ord, num_ord+1);
f = 0;
for n in index:
coef = grating_fourier_harmonics(n, fill_factor, n_ridge, n_groove);
f+= coef*np.exp(cmath.sqrt(-1)*np.pi*n*x/period);
#f+=coef*np.cos(np.pi*n*x/period)
return f;
def fourier_reconstruction_general(x, period, num_ord, coefs):
'''
    overloading doesn't work in Python (fun fact: it is dynamically typed, not statically typed)
:param x:
:param period:
:param num_ord:
:param coefs:
:return:
'''
index = np.arange(-num_ord, num_ord+1);
f = 0; center = int(len(coefs)/2); #no offset
for n in index:
coef = coefs[center+n];
f+= coef*np.exp(cmath.sqrt(-1)*2*np.pi*n*x/period);
return f;
def grating_fft(eps_r):
assert len(eps_r.shape) == 2
assert eps_r.shape[1] == 1;
#eps_r: discrete 1D grid of the epsilon profile of the structure
fourier_comp = np.fft.fftshift(np.fft.fft(eps_r, axis = 0)/eps_r.shape[0]);
#ortho norm in fft will do a 1/sqrt(n) scaling
return np.squeeze(fourier_comp);
# plt.plot(x, np.real(fourier_reconstruction(x, period, 1000, 1,np.sqrt(12), fill_factor = 0.1)));
# plt.title('check that the analytic fourier series works')
# #'note that the lattice constant tells you the length of the ridge'
# plt.show()
L0 = 1e-6;
e0 = 8.854e-12;
mu0 = 4*np.pi*1e-7;  # vacuum permeability in H/m (was 1e-8, a typo); not actually used below
fill_factor = 0.3; # 50% of the unit cell is the ridge material
num_ord = 10; # INCREASING THE NUMBER OF ORDERS SEEMS TO CAUSE THIS TO FAIL; too many orders induce evanescence, particularly
# when there is a small fill factor
PQ = 2*num_ord+1;
indices = np.arange(-num_ord, num_ord+1)
n_ridge = 3.48; #3.48; # ridge
n_groove = 1; # groove (unit-less)
lattice_constant = 0.7; # SI units
# we need to be careful about what lattice constant means
# in the gaylord paper, lattice constant exactly means (0, L) is one unit cell
d = 0.46; # thickness, SI units
Nx = 2*256;
eps_r = n_groove**2*np.ones((2*Nx, 1)); #put in a lot of points in eps_r
border = int(2*Nx*fill_factor);
eps_r[0:border] = n_ridge**2;
fft_fourier_array = grating_fft(eps_r);
x = np.linspace(-lattice_constant,lattice_constant,1000);
period = lattice_constant;
fft_reconstruct = fourier_reconstruction_general(x, period, num_ord, fft_fourier_array);
fourier_array_analytic = grating_fourier_array(Nx, fill_factor, n_ridge, n_groove);
analytic_reconstruct = fourier_reconstruction(x, period, num_ord, n_ridge, n_groove, fill_factor)
plt.figure();
plt.plot(np.real(fft_fourier_array[Nx-20:Nx+20]), linewidth=2)
plt.plot(np.real(fourier_array_analytic[Nx-20:Nx+20]));
plt.legend(('fft', 'analytic'))
plt.show()
plt.figure();
plt.plot(x,fft_reconstruct)
plt.plot(x,analytic_reconstruct);
plt.legend(['fft', 'analytic'])
plt.show()
## simulation parameters
theta = (0)*np.pi/180;
## construct permittivity harmonic components E
#fill factor = 0 is complete dielectric, 1 is air
##construct convolution matrix
E = np.zeros((2 * num_ord + 1, 2 * num_ord + 1)); E = E.astype('complex')
p0 = Nx; #int(Nx/2);
p_index = np.arange(-num_ord, num_ord + 1);
q_index = np.arange(-num_ord, num_ord + 1);
fourier_array = fft_fourier_array;#fourier_array_analytic;
detected_pffts = np.zeros_like(E);
for prow in range(2 * num_ord + 1):
# first term locates z plane, 2nd locates y coumn, prow locates x
row_index = p_index[prow];
for pcol in range(2 * num_ord + 1):
pfft = p_index[prow] - p_index[pcol];
detected_pffts[prow, pcol] = pfft;
E[prow, pcol] = fourier_array[p0 + pfft]; # fill conv matrix from top left to top right
## IMPORTANT TO NOTE: the indices for everything beyond this points are indexed from -num_ord to num_ord+1
###Output
_____no_output_____
###Markdown
Calculating the Poynting Vector of a Sum of Plane Waves
Each plane wave has a contributing amplitude. Do we sum all the plane waves and then calculate the Poynting vector, or can we sum the Poynting vectors of the individual waves? The issue is that the product of a sum is not the same as the sum of products. Presently, I've tried both. It only appears to work in the simple case of 0 orders (i.e. just plane waves). Technically, each plane wave has a different propagation constant $k_z$, indexed by its Fourier order, which means we can't deal with the components by themselves; we'd have to add in their phase. We could also just set $z=0$ to avoid this nasty problem.

Secret sauce: with the Fourier-order decomposition in $x$, some of the Fourier $k_{zi}$ components may be evanescent. These components should have no contribution to the Poynting vector (especially in the far field).

Some debugging notes: the long-wavelength case should be error-free because there is little scattering into higher diffraction orders, but it appears to be the opposite in our test case below. Mathematically, however, we can see it in the fact that we have $m_i \lambda$ when we determine our kx_array, which means larger wavelengths have larger contributions at higher orders, which is a weird-sounding statement.

In the second-order, stable formulation we simply have REAL eigenvalues (if the system is Hermitian). When we take the square root of a purely negative number, the result should still only ever carry a single sign in the imaginary part. It is clear that the first-order and second-order eigenvalue problems lead to the same set of eigenvalues, but the branch selected by the second-order problem seems weird. Specifically, while the real part is always consistently one sign (which means sorting would work), the imaginary-part selection is not all the same sign, particularly for the eigenvalues with effectively zero real part. This has been determined to be a numerical artifact (see below). One proposed solution is to zero out the real part if it is below numerical precision (but this feels very unsatisfactory). Empirically, it seems that sorting the larger block eigenvalues FAILS, period. In our current code implementation, we have the X matrix as np.exp(-k_0*Q*d), so we negate everything. However, the fact that some of the imaginary parts flip sign doesn't mean this part is wrong.
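To make the evanescent-order point concrete, here is a small standalone sketch (hypothetical order amplitudes and geometry, not taken from the simulation below): orders whose $k_z$ is purely imaginary have $\mathrm{Re}(k_z)=0$ and therefore carry no time-averaged power.

```python
import numpy as np

lam0, Lc, n1 = 1.5, 0.7, 1.0           # wavelength, lattice constant, ambient index (illustrative)
k0 = 2*np.pi/lam0
m = np.arange(-2, 3)                   # Fourier orders -2..2
kx = m*2*np.pi/Lc                      # normal incidence
kz = np.sqrt((k0*n1)**2 - kx**2 + 0j)  # purely imaginary for |kx| > k0*n1 (evanescent)
amps = np.array([0.05, 0.1, 0.9, 0.1, 0.05])   # hypothetical reflected amplitudes
Sz = np.abs(amps)**2 * kz.real / (k0*n1)       # per-order power flux at z = 0
print(Sz)   # only the propagating order(s) are nonzero
```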
###Code
## plot snippets
## plot verifies that all eigenvalues extracted by beigenvals matches with one in Q
# plt.subplot(121)
# for b in beigenvals:
# plt.axhline(np.imag(b))
# plt.plot(np.imag(np.diag(Q)),'.g-', markersize = 10)
# plt.title(str((len(beigenvals), len(np.diag(Q)))))
# plt.subplot(122)
# for b in beigenvals:
# plt.axhline(np.real(b))
# plt.plot(np.real(np.diag(Q)),'.g-', markersize = 10)
# plt.title('real')
# plt.show()
## snippet to check maching square of eigenvals
# for b in np.abs(np.square(beigenvals)):
# # plt.axhline(b);
# # plt.plot(sorted(np.abs(eigenvals)));
# # plt.show();
## alternate construction of 1D convolution matrix
spectra = list();
spectra_T = list();
I = np.identity(2 * num_ord + 1)
PQ = 2*num_ord+1;
zeros = np.zeros((PQ, PQ))
# E is now the convolution of fourier amplitudes
wavelength_scan = np.linspace(0.5,2.3,20)
for wvlen in wavelength_scan:
j = cmath.sqrt(-1);
lam0 = wvlen; k0 = 2 * np.pi / lam0; #free space wavelength in SI units
print('wavelength: ' + str(wvlen));
## =====================STRUCTURE======================##
## Region I: reflected region (half space)
n1 = 1;#cmath.sqrt(-1)*1e-12; #apparently small complex perturbations are bad in Region 1, these shouldn't be necessary
## Region 2; transmitted region
n2 = 1;
#from the kx_components given the indices and wvln
kx_array = k0*(n1*np.sin(theta) + indices*(lam0 / lattice_constant)); #0 is one of them, k0*lam0 = 2*pi
k_xi = kx_array;
## IMPLEMENT SCALING: these are the fourier orders of the x-direction decomposition.
KX = np.diag(kx_array/k0);
KX2 = np.diag(np.power((k_xi/k0),2)); #singular since we have a n=0, m= 0 order and incidence is normal
#print(KX2)
KZ2 = ((n1)**2 - np.diag(KX2)).astype('complex');
KZ = np.sqrt(KZ2)
# print('KZ2: '+str(KZ2)+': imag Kz: '+str(KZ))
# KZ_mask = np.imag(KZ)<1e-2;
# KZ_mask = np.expand_dims(KZ_mask, axis = 1)
# print(KZ_mask.shape)
## construct matrix of Gamma^2 ('constant' term in ODE):
A = KX2 - E; #conditioning of this matrix is not bad, A SHOULD BE SYMMETRIC
#sum of a symmetric matrix and a diagonal matrix should be symmetric;
AO = np.block([[zeros, I],[A, zeros]])
beigenvals, bigW = LA.eig(AO);
print('conditioning of big block: '+str(np.linalg.cond(AO)))
## SORTING IS REQUIRED
#sorting procedure is to order by smallest imaginary part to largest imaginary part
# We actually have to calculate poynting vector (the modes are Ey, Hx) and Sz = -Ey*Hx
# since we are dealing with a normalized H, we need to divide by 1/j;
Ey_modes = bigW[0:PQ, :];
Hx_modes = bigW[PQ:, :]
EyAmp = np.sum(Ey_modes, axis = 0); #amplitude
HxAmp = np.sum(Hx_modes, axis = 0);
#EyAmp = np.sum(Ey_modes*KZ_mask, axis = 0); #amplitude
#HxAmp = np.sum(Hx_modes*KZ_mask, axis = 0);
## right now it works for a uniform slab, any orders...which means it
Sz = (1/j)*EyAmp*(HxAmp);
# print(Sz.shape)
sorted_indices_poynting = np.argsort(Sz)
sorted_indices = np.argsort(np.real(beigenvals))
#print(Sz[sorted_indices])
sorted_eigenmodes = bigW[:,sorted_indices ];
#print(sorted_eigenmodes)
#adding real and imaginary parts seems to work...
sorted_eigenvals = beigenvals[sorted_indices]
Wp = sorted_eigenmodes[0:PQ:,0:PQ]
eigenvals_wp = (sorted_eigenvals[0:PQ]);
print('sorted beigenvals: '+str(sorted_eigenvals))
print('extracted eigenvals: '+str(eigenvals_wp))
#print(eigenvals_wp.shape)
#plt.plot(np.imag(sorted_eigenvals));
#plt.show()
##
# when we calculate eigenvals, how do we know the eigenvals correspond to each particular fourier order?
eigenvals, W = LA.eig(A); #A should be symmetric or hermitian
print('sorted eigenvals: '+str(sorted(np.sqrt(eigenvals))))
# plt.subplot(121);
# plt.imshow(np.abs(W));
# plt.subplot(122);
# plt.imshow(np.abs(bigW));
# plt.show()
#W = Wp;
#V = sorted_eigenmodes[0:PQ, :]
#we should be guaranteed that all eigenvals are REAL
eigenvals = eigenvals.astype('complex');
Q = np.diag(np.sqrt(eigenvals)); #Q should only be positive square root of eigenvals
#real parts match, but the imaginaries don't
#Q = np.diag(eigenvals_wp);
#plt.plot(sorted(np.abs(beigenvals)),'.-');
# colors = [(random.random(),random.random(),random.random()) for i in range(len(beigenvals))]
# plt.subplot(131)
# plt.plot(np.real(beigenvals), np.imag(beigenvals), '.'); plt.title('1st')
# plt.ylim([-4,4])
# plt.subplot(132)
# plt.plot(np.real(np.diag(Qn)), np.imag(np.diag(Qn)), '.'); plt.title('2nd')
# plt.ylim([-4,4])
# plt.subplot(133);
# plt.plot(np.real(np.sqrt(eigenvals)), np.imag(np.sqrt(eigenvals)), '.g'); plt.title('squared 2nd')
# plt.ylim([-4,4])
plt.show();
# plt.subplot(121)
# for b in beigenvals:
# plt.axhline(np.imag(b), color='r')
# for b in np.sqrt(eigenvals):
# plt.axhline(np.imag(b))
# plt.plot((np.imag(np.diag(Q))),'.g-', markersize = 10)
# plt.plot((np.imag(np.diag(Qn))), '.c-', markersize = 10)
# plt.title(str((len(beigenvals), len(np.diag(Q)))))
# plt.subplot(122)
# for b in beigenvals:
# plt.axhline(np.real(b), color = 'r')
# for b in np.sqrt(eigenvals):
# plt.axhline(np.real(b))
# plt.plot((np.real(np.diag(Q))),'.g-', markersize = 10)
# plt.title('real')
# plt.plot((np.real(np.diag(Qn))), '.c-')
# plt.show()
V = W@Q; #H modes
## THIS ENFORCES a PARTICULAR SIGN...
X = np.diag(np.exp(-k0*np.diag(Q)*d)); #this is poorly conditioned because exponentiation
## pointwise exponentiation vs exponentiating a matrix
## observation: almost everything beyond this point is worse conditioned
k_I = k0**2*(n1**2 - (k_xi/k0)**2); #k_z in reflected region k_I,zi
k_II = k0**2*(n2**2 - (k_xi/k0)**2); #k_z in transmitted region
k_I = k_I.astype('complex'); k_I = np.sqrt(k_I);
k_II = k_II.astype('complex'); k_II = np.sqrt(k_II);
Y_I = np.diag(k_I/k0);
Y_II = np.diag(k_II/k0);
delta_i0 = np.zeros((len(kx_array),1));
delta_i0[num_ord] = 1;
n_delta_i0 = delta_i0*j*n1*np.cos(theta); #this is a VECTOR
## design auxiliary variables: SEE derivation in notebooks: RCWA_note.ipynb
# we want to design the computation to avoid operating with X, particularly with inverses
# since X is the worst conditioned thing
Wi = np.linalg.inv(W);
Vi = np.linalg.inv(V);
Oi = 0.5*np.block([[Wi, Vi],[Wi, -Vi]])
f = I;
g = j*Y_II; #all matrices
fg = np.concatenate((f,g),axis = 0)
#ab = np.matmul(np.linalg.inv(O),fg);
# ab = np.matmul(Oi, fg);
# a = ab[0:PQ,:];
# b = ab[PQ:,:];
a = 0.5*(Wi+j*Vi@Y_II);
b = 0.5*(Wi-j*Vi@Y_II);
fbiX = np.matmul(np.linalg.inv(b),X)
#altTerm = (a@X@X@b); #not well conditioned and I-altTermis is also poorly conditioned.
#print(np.linalg.cond(I-np.linalg.inv(altTerm)))
#print(np.linalg.cond(X@b)); #not well conditioned.
term = X@a@fbiX; # THIS IS POORLY CONDITIONED
# print((np.linalg.cond(X), np.linalg.cond(term)))
# print(np.linalg.cond(I+term)); #but this is EXTREMELY WELL CONDITIONED.
f = np.matmul(W, I+term);
g = np.matmul(V,-I+term);
T = np.linalg.inv(j*np.matmul(Y_I,f)+g);
T = np.matmul(T,(np.matmul(j*Y_I,delta_i0)+n_delta_i0));
R = np.matmul(f,T)-delta_i0;
T = np.matmul(fbiX, T)
## calculate diffraction efficiencies
#I would expect this number to be real...
DE_ri = R*np.conj(R)*np.real(np.expand_dims(k_I,1))/(k0*n1*np.cos(theta));
DE_ti = T*np.conj(T)*np.real(np.expand_dims(k_II,1))/(k0*n1*np.cos(theta));
#print(np.sum(DE_ri))
#print(np.sum(DE_ri))
spectra.append(np.sum(DE_ri)); #spectra_T.append(T);
spectra_T.append(np.sum(DE_ti))
plt.figure();
plt.plot(wavelength_scan, np.abs(spectra));
plt.plot(wavelength_scan, np.abs(spectra_T))
plt.xlabel('wavelength (microns)')
plt.ylabel('R/T')
plt.title('sample RCWA spectra for a 1D grating')
plt.legend(['reflection', 'transmission'])
# plt.axhline(((3.48-1)/(3.48+1))**2,xmin=0, xmax = max(wavelength_scan))
# plt.axhline(((3.48-1)/(3.48+1)),xmin=0, xmax = max(wavelength_scan), color='r')
plt.ylim([0,1])
plt.show()
## comparison to the 2nd order, stable formulation
# also the first real test of the function
###Output
_____no_output_____
###Markdown
A Curious Numerical Observation
Something that plagues the first-order formulation: take a number, say $-5$. Its principal square root is $i\sqrt{5}$ (positive imaginary part). However, if we add in a tiny COMPLEX perturbation with a negative imaginary part, the branch cut of the square root completely flips the sign of the imaginary part. This must be an artifact of numerical imprecision, the same imprecision that appears in the stable formulation when we take the square root, but then WHY is the 2nd order formulation still stable?
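A minimal sketch of the zeroing idea mentioned above (a hypothetical helper, not used by the rest of the notebook): snap round-off-sized real or imaginary parts to zero before taking the principal square root.

```python
import numpy as np

def clean_sqrt(z, tol=1e-12):
    """Snap round-off-sized real/imag parts to zero, then take the principal sqrt."""
    z = np.asarray(z, dtype=complex)
    z = np.where(np.abs(z.real) < tol, 1j*z.imag, z)
    z = np.where(np.abs(z.imag) < tol, z.real + 0j, z)
    return np.sqrt(z)

print(clean_sqrt(-5 - 1e-16j))   # 2.2360679...j, the same branch as the unperturbed sqrt(-5)
```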
###Code
import math
a = -5 - j*1e-16;
print(cmath.sqrt(a))
###Output
(2.2360679774997896e-17-2.23606797749979j)
###Markdown
ONLY FIRST ORDER RAW CODE
###Code
## alternate construction of 1D convolution matrix
spectra = list();
spectra_T = list();
I = np.identity(2 * num_ord + 1)
PQ = 2*num_ord+1;
zeros = np.zeros((PQ, PQ))
# E is now the convolution of fourier amplitudes
wavelength_scan = np.linspace(0.5,2.3,100)
for wvlen in wavelength_scan:
j = cmath.sqrt(-1);
lam0 = wvlen; k0 = 2 * np.pi / lam0; #free space wavelength in SI units
print('wavelength: ' + str(wvlen));
## =====================STRUCTURE======================##
## Region I: reflected region (half space)
n1 = 1;#cmath.sqrt(-1)*1e-12; #apparently small complex perturbations are bad in Region 1, these shouldn't be necessary
## Region 2; transmitted region
n2 = 1;
#from the kx_components given the indices and wvln
kx_array = k0*(n1*np.sin(theta) + indices*(lam0 / lattice_constant)); #0 is one of them, k0*lam0 = 2*pi
k_xi = kx_array;
## IMPLEMENT SCALING: these are the fourier orders of the x-direction decomposition.
KX = np.diag(kx_array/k0);
KX2 = np.diag(np.power((k_xi/k0),2)); #singular since we have a n=0, m= 0 order and incidence is normal
#print(KX2)
KZ2 = ((n1)**2 - np.diag(KX2)).astype('complex');
KZ = np.sqrt(KZ2)
# print('KZ2: '+str(KZ2)+': imag Kz: '+str(KZ))
# KZ_mask = np.imag(KZ)<1e-2;
# KZ_mask = np.expand_dims(KZ_mask, axis = 1)
# print(KZ_mask.shape)
## construct matrix of Gamma^2 ('constant' term in ODE):
A = KX2 - E; #conditioning of this matrix is not bad, A SHOULD BE SYMMETRIC
#sum of a symmetric matrix and a diagonal matrix should be symmetric;
AO = np.block([[zeros, I],[A, zeros]])
beigenvals, bigW = LA.eig(AO);
print('conditioning of big block: '+str(np.linalg.cond(AO)))
## SORTING IS REQUIRED
sq_beigenvals = np.square(beigenvals);
#try rounding...
rounded_beigenvals = np.array([round(i,12) for i in beigenvals])
print(rounded_beigenvals)
quadrant_sort = [1 if np.real(i)>=0 and np.imag(i)>=0 else 0 for i in rounded_beigenvals];
print(quadrant_sort)
sorted_indices = np.nonzero(quadrant_sort)[0]
print(sorted_indices)
#sorted_indices = np.argsort(np.real(rounded_beigenvals))
sorted_eigenmodes = bigW[:,sorted_indices];
#print(sorted_eigenmodes)
#adding real and imaginary parts seems to work...
sorted_eigenvals = beigenvals[sorted_indices]
Wp = sorted_eigenmodes[PQ:,0:PQ]
eigenvals_wp = (sorted_eigenvals[0:PQ]);
# when we calculate eigenvals, how do we know the eigenvals correspond to each particular fourier order?
eigenvals, W = LA.eig(A); #A should be symmetric or hermitian
print('sorted eigenvals: '+str(sorted_eigenvals))
# plt.plot(sq_beigenvals);
# plt.plot(eigenvals)
# plt.show()
W = Wp;
#V = sorted_eigenmodes[0:PQ, :]
#we should be guaranteed that all eigenvals are REAL
eigenvals = eigenvals.astype('complex');
#real parts match, but the imaginaries don't
Q = np.diag(eigenvals_wp);
#plt.plot(sorted(np.abs(beigenvals)),'.-');
colors = [(random.random(),random.random(),random.random()) for i in range(len(beigenvals))]
plt.subplot(141)
plt.plot(np.real(beigenvals), np.imag(beigenvals), '.'); plt.title('1st'); plt.ylim([-4,4])
plt.subplot(142)
plt.plot(np.real(eigenvals_wp), (np.imag(eigenvals_wp)), '.r', markersize = 10)
plt.plot(np.real(np.sqrt(eigenvals)), abs(np.imag(np.sqrt(eigenvals))), '.g', markersize = 5)
plt.ylim([-4,4])
plt.subplot(143);
plt.plot(np.real(eigenvals), np.imag(eigenvals), '.'); plt.title('2nd')
plt.ylim([-4,4])
plt.subplot(144)
plt.plot(np.real(np.sqrt(eigenvals)), np.imag(np.sqrt(eigenvals)), '.g'); plt.title('sqrt 2nd')
plt.ylim([-4,4])
plt.show();
# plt.subplot(121)
# for b in beigenvals:
# plt.axhline(np.imag(b), color='r')
# for b in np.sqrt(eigenvals):
# plt.axhline(np.imag(b))
# plt.plot((np.imag(np.diag(Q))),'.g-', markersize = 10)
# plt.plot((np.imag(np.diag(Qn))), '.c-', markersize = 10)
# plt.title(str((len(beigenvals), len(np.diag(Q)))))
# plt.subplot(122)
# for b in beigenvals:
# plt.axhline(np.real(b), color = 'r')
# for b in np.sqrt(eigenvals):
# plt.axhline(np.real(b))
# plt.plot((np.real(np.diag(Q))),'.g-', markersize = 10)
# plt.title('real')
# plt.plot((np.real(np.diag(Qn))), '.c-')
# plt.show()
V = W@Q; #H modes
## THIS ENFORCES a PARTICULAR SIGN...
X = np.diag(np.exp(-k0*np.diag(Q)*d)); #this is poorly conditioned because exponentiation
## pointwise exponentiation vs exponentiating a matrix
## observation: almost everything beyond this point is worse conditioned
k_I = k0**2*(n1**2 - (k_xi/k0)**2); #k_z in reflected region k_I,zi
k_II = k0**2*(n2**2 - (k_xi/k0)**2); #k_z in transmitted region
k_I = k_I.astype('complex'); k_I = np.sqrt(k_I);
k_II = k_II.astype('complex'); k_II = np.sqrt(k_II);
Y_I = np.diag(k_I/k0);
Y_II = np.diag(k_II/k0);
delta_i0 = np.zeros((len(kx_array),1));
delta_i0[num_ord] = 1;
n_delta_i0 = delta_i0*j*n1*np.cos(theta); #this is a VECTOR
## design auxiliary variables: SEE derivation in notebooks: RCWA_note.ipynb
# we want to design the computation to avoid operating with X, particularly with inverses
# since X is the worst conditioned thing
Wi = np.linalg.inv(W);
Vi = np.linalg.inv(V);
Oi = 0.5*np.block([[Wi, Vi],[Wi, -Vi]])
f = I;
g = j*Y_II; #all matrices
fg = np.concatenate((f,g),axis = 0)
#ab = np.matmul(np.linalg.inv(O),fg);
# ab = np.matmul(Oi, fg);
# a = ab[0:PQ,:];
# b = ab[PQ:,:];
a = 0.5*(Wi+j*Vi@Y_II);
b = 0.5*(Wi-j*Vi@Y_II);
fbiX = np.matmul(np.linalg.inv(b),X)
#altTerm = (a@X@X@b); #not well conditioned and I-altTermis is also poorly conditioned.
#print(np.linalg.cond(I-np.linalg.inv(altTerm)))
#print(np.linalg.cond(X@b)); #not well conditioned.
term = X@a@fbiX; # THIS IS POORLY CONDITIONED
# print((np.linalg.cond(X), np.linalg.cond(term)))
# print(np.linalg.cond(I+term)); #but this is EXTREMELY WELL CONDITIONED.
f = np.matmul(W, I+term);
g = np.matmul(V,-I+term);
T = np.linalg.inv(j*np.matmul(Y_I,f)+g);
T = np.matmul(T,(np.matmul(j*Y_I,delta_i0)+n_delta_i0));
R = np.matmul(f,T)-delta_i0;
T = np.matmul(fbiX, T)
## calculate diffraction efficiencies
#I would expect this number to be real...
DE_ri = R*np.conj(R)*np.real(np.expand_dims(k_I,1))/(k0*n1*np.cos(theta));
DE_ti = T*np.conj(T)*np.real(np.expand_dims(k_II,1))/(k0*n1*np.cos(theta));
#print(np.sum(DE_ri))
#print(np.sum(DE_ri))
spectra.append(np.sum(DE_ri)); #spectra_T.append(T);
spectra_T.append(np.sum(DE_ti))
plt.figure();
plt.plot(wavelength_scan, np.abs(spectra), '.-', markersize = 10);
plt.plot(wavelength_scan, np.abs(spectra_T), '.-', markersize = 10)
plt.xlabel('wavelength (microns)')
plt.ylabel('R/T')
plt.title('sample RCWA spectra for a 1D grating')
plt.legend(['reflection', 'transmission'])
# plt.axhline(((3.48-1)/(3.48+1))**2,xmin=0, xmax = max(wavelength_scan))
# plt.axhline(((3.48-1)/(3.48+1)),xmin=0, xmax = max(wavelength_scan), color='r')
plt.ylim([0,1])
plt.show()
print(sq_beigenvals)
#we could try to numerically pair them (IF WE have some guarantee that they come in complex conjugate pairs), which I think is only valid
# for the TE case.
###Output
[39.7605-3.6852e-15j 38.5394+2.4318e-16j 39.7605-1.3813e-14j
38.5394-2.2645e-14j -6.1187-5.1246e-15j -6.1187+7.5567e-15j
7.9428+6.4636e-16j 6.1804+8.1315e-17j 7.9428-1.8974e-15j
6.1804-1.1804e-15j]
|
Basic programs/ 4. Find the Square Root of a number..ipynb | ###Markdown
For natural numbers
###Code
# function for squreroot
def sol(num):
squareroot= int(num) ** 0.5
return squareroot
#provide the input here
num = input("Enter a number:")
#getting output
print(" Square Root of {0} is : {1}".format(num,sol(num)))
###Output
Enter a number:4
Square Root of 4 is : 2.0
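For non-negative integer inputs, Python's standard library also provides an exact integer square root; here is a small optional sketch alongside the approach above:

```python
import math

print(math.isqrt(4))    # 2, computed exactly with no floating point
print(math.isqrt(17))   # 4, the floor of the square root for non-perfect squares
```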
###Markdown
For real or complex numbers
###Code
# Import complex math module
import cmath
'''function for squareroot of real & complex numbers using
sqrt function from cmath'''
def sol(num):
    squareroot = cmath.sqrt(num)
    return squareroot
#provide complex number here
num = complex(input("Enter a real or complex number in the format a+bj (e.g: 1+2j):"))
#getting output
print("Square Root of {0} is : {1:.3f}".format(num,sol(num)))
###Output
_____no_output_____ |
Copy_of_Welcome_To_Colaboratory.ipynb | ###Markdown
###Code
pip install tensorflow keras numpy mnist matplotlib
# import the libraries
import numpy as np
import mnist
from keras.models import Sequential
from keras. layers import Dense
from keras.utils import to_categorical
import matplotlib.pyplot as plt
# load the dataset
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist. test_images()
test_labels = mnist.test_labels()
# normalize the images; pixel values are scaled down so the network trains more easily
train_images = (train_images / 255) - 0.5
test_images = (test_images/ 255) - 0.5
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1,784))
# print the shapes
print(train_images.shape)
print(test_images.shape)
# build the model
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=784))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(
optimizer= 'adam',
loss = 'categorical_crossentropy', #loss function for classes > 2
metrics = ['accuracy']
)
model.fit(
train_images,
to_categorical(train_labels),
epochs=10,
batch_size = 10
)
model.evaluate(
test_images,
to_categorical(test_labels)
)
model.save_weights('model.h5')
predictions = model.predict(test_images[:9])
print (np.argmax(predictions, axis =1))
print(test_labels[:9])
# show the test images in order
import matplotlib.pyplot as plt
for i in range(0,9):
first_image = test_images[i]
first_image = np.array(first_image, dtype='float')
pixels = first_image.reshape((28, 28))
plt.imshow(pixels, cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
###Code
pip install tensorflow keras numpy mnist matplotlib
import numpy as np
import mnist
from keras.models import Sequential
from keras. layers import Dense
from keras.utils import to_categorical
import matplotlib.pyplot as plt
train_images = mnist.train_images()
train_labels = mnist.train_labels()
test_images = mnist. test_images()
test_labels = mnist.test_labels()
train_images = (train_images / 255) - 0.5
test_images = (test_images/ 255) - 0.5
train_images = train_images.reshape((-1, 784))
test_images = test_images.reshape((-1,784))
print(train_images.shape)
print(test_images.shape)
model = Sequential()
model.add(Dense(64, activation='relu', input_dim=784))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))
model.compile(
optimizer= 'adam',
loss = 'categorical_crossentropy',
metrics = ['accuracy']
)
model.fit(
train_images,
to_categorical(train_labels),
epochs=10,
batch_size = 10
)
model.evaluate(
test_images,
to_categorical(test_labels)
)
model.save_weights('model.h5')
predictions = model.predict(test_images[:9])
print (np.argmax(predictions, axis =1))
print(test_labels[:9])
import matplotlib.pyplot as plt
for i in range(0,9):
first_image = test_images[i]
first_image = np.array(first_image, dtype='float')
pixels = first_image.reshape((28, 28))
plt.imshow(pixels, cmap='gray')
plt.show()
###Output
_____no_output_____
###Markdown
What is Colaboratory?Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required- Free access to GPUs- Easy sharingWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below! **Getting started**The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
###Code
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
###Output
_____no_output_____
###Markdown
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.Variables that you define in one cell can later be used in other cells:
###Code
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
###Output
_____no_output_____
###Markdown
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.comcreate=true).Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org). Data scienceWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
###Code
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
###Output
_____no_output_____
###Markdown
You can import your own data into Colab notebooks from your Google Drive account, including from spreadsheets, as well as from Github and many other sources. To learn more about importing data, and how Colab can be used for data science, see the links below under [Working with Data](working-with-data). Machine learningWith Colab you can import an image dataset, train an image classifier on it, and evaluate the model, all in just [a few lines of code](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb). Colab notebooks execute code on Google's cloud servers, meaning you can leverage the power of Google hardware, including [GPUs and TPUs](using-accelerated-hardware), regardless of the power of your machine. All you need is a browser. Colab is used extensively in the machine learning community with applications including:- Getting started with TensorFlow- Developing and training neural networks- Experimenting with TPUs- Disseminating AI research- Creating tutorialsTo see sample Colab notebooks that demonstrate machine learning applications, see the [machine learning examples](machine-learning-examples) below. More Resources Working with Notebooks in Colab- [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)- [Guide to Markdown](/notebooks/markdown_guide.ipynb)- [Importing libraries and installing dependencies](/notebooks/snippets/importing_libraries.ipynb)- [Saving and loading notebooks in GitHub](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)- [Interactive forms](/notebooks/forms.ipynb)- [Interactive widgets](/notebooks/widgets.ipynb)- [TensorFlow 2 in Colab](/notebooks/tensorflow_version.ipynb) Working with Data- [Loading data: Drive, Sheets, and Google Cloud Storage](/notebooks/io.ipynb) - [Charts: visualizing data](/notebooks/charts.ipynb)- [Getting started with BigQuery](/notebooks/bigquery.ipynb) Machine Learning Crash CourseThese are a few of the notebooks from Google's online Machine Learning course. See the [full course website](https://developers.google.com/machine-learning/crash-course/) for more.- [Intro to Pandas](/notebooks/mlcc/intro_to_pandas.ipynb)- [Tensorflow concepts](/notebooks/mlcc/tensorflow_programming_concepts.ipynb) Using Accelerated Hardware- [TensorFlow with GPUs](/notebooks/gpu.ipynb)- [TensorFlow with TPUs](/notebooks/tpu.ipynb) Machine Learning ExamplesTo see end-to-end examples of the interactive machine learning analyses that Colaboratory makes possible, check out these tutorials using models from [TensorFlow Hub](https://tfhub.dev).A few featured examples:- [Retraining an Image Classifier](https://tensorflow.org/hub/tutorials/tf2_image_retraining): Build a Keras model on top of a pre-trained image classifier to distinguish flowers.- [Text Classification](https://tensorflow.org/hub/tutorials/tf2_text_classification): Classify IMDB movie reviews as either *positive* or *negative*.- [Style Transfer](https://tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization): Use deep learning to transfer style between images.- [Multilingual Universal Sentence Encoder Q&A](https://tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa): Use a machine learning model to answer questions from the SQuAD dataset.- [Video Interpolation](https://tensorflow.org/hub/tutorials/tweening_conv3d): Predict what happened in a video between the first and the last frame.
###Code
!pip install pulp
# import the library pulp as p
import pulp as p
# Create a LP Minimization problem
Lp_prob = p.LpProblem('Problem', p.LpMaximize)
# Create problem Variables
x = p.LpVariable("x", lowBound = 0) # Create a variable x >= 0
y = p.LpVariable("y", lowBound = 0) # Create a variable y >= 0
# Objective Function
Lp_prob += 17.1667 * x + 25.8667 * y
# Constraints:
Lp_prob += 13 * x + 19 * y <= 2400
Lp_prob += 20 * x + 29 * y <= 2100
Lp_prob += x >= 10
Lp_prob += x >= 0
Lp_prob += y >= 0
# Display the problem
print(Lp_prob)
status = Lp_prob.solve() # Solver
print(p.LpStatus[status]) # The solution status
# Printing the final solution
print(p.value(x), p.value(y), p.value(Lp_prob.objective))
l=[1,'a',2,'abc']
print(l)
l=[-2]
l[-2]
l=[1,'a',2,'abc']
print(l[2])
print(l[-2])
print(l[0:])
print(l[0:1])
print(l[2:])
print(l[:3])
print(l[-4:-1])
def sum(a,b):
z=a+b
return z
d= sum("a","b")
print(d)
import numpy as np
import matplotlib.pyplot as plt
x=[1,2,3,4,5]
y=[4,5,6,7,8]
plt.plot(x,y)
plt.xlim(0,10)
plt.ylim(0,10)
x=[1,2,3,4,5]
y=[]
for i in x:
z=2*i
y.append(z)
x=np.linspace(1,5,5)
y=2*x
print(y)
x=np.linspace(1,20,50)
y=18-2*x
y1=(42-2*x)/3
plt.plot(x,y)
plt.plot(x,y1)
x=np.linspace(1,20,50)
y=18-2*x
y1=(42-2*x)/3
plt.plot(x,y,label='2x+y=18')
plt.plot(x,y1,label='2x+2y=42')
plt.xlim(0,30)
plt.ylim(0,20)
plt.legend()
x=np.array([1,2,3])
x1=np.array(([1],[2],[3]))
print(x)
print(x1)
table=np.array([[1,2,3],[4,5,6],[7,8,9]])
print(table)
print(table[2,1])
print(table[2])
print(table[-2])
print(table[1])
print(table)
###Output
_____no_output_____
###Markdown
Welcome to Colaboratory!Colaboratory is a free Jupyter notebook environment that requires no setup and runs entirely in the cloud.With Colaboratory you can write and execute code, save and share your analyses, and access powerful computing resources, all for free from your browser.
###Code
#@title Introducing Colaboratory
#@markdown This 3-minute video gives an overview of the key features of Colaboratory:
from IPython.display import YouTubeVideo
YouTubeVideo('inN8seMm7UI', width=600, height=400)
###Output
_____no_output_____
###Markdown
Getting StartedThe document you are reading is a [Jupyter notebook](https://jupyter.org/), hosted in Colaboratory. It is not a static page, but an interactive environment that lets you write and execute code in Python and other languages.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
###Code
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
###Output
_____no_output_____
###Markdown
To execute the code in the above cell, select it with a click and then either press the ▷ button to the left of the code, or use the keyboard shortcut "⌘/Ctrl+Enter".All cells modify the same global state, so variables that you define by executing a cell can be used in other cells:
###Code
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
###Output
_____no_output_____
###Markdown
What is Colaboratory?Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required- Free access to GPUs- Easy sharingWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below! **Getting started**The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
###Code
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
###Output
_____no_output_____
###Markdown
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.Variables that you define in one cell can later be used in other cells:
###Code
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
###Output
_____no_output_____
###Markdown
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.comcreate=true).Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org). Data scienceWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
###Code
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
###Output
_____no_output_____
###Markdown
You can import your own data into Colab notebooks from your Google Drive account, including from spreadsheets, as well as from Github and many other sources. To learn more about importing data, and how Colab can be used for data science, see the links below under [Working with Data](working-with-data). Machine learningWith Colab you can import an image dataset, train an image classifier on it, and evaluate the model, all in just [a few lines of code](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/quickstart/beginner.ipynb). Colab notebooks execute code on Google's cloud servers, meaning you can leverage the power of Google hardware, including [GPUs and TPUs](using-accelerated-hardware), regardless of the power of your machine. All you need is a browser. Colab is used extensively in the machine learning community with applications including:- Getting started with TensorFlow- Developing and training neural networks- Experimenting with TPUs- Disseminating AI research- Creating tutorialsTo see sample Colab notebooks that demonstrate machine learning applications, see the [machine learning examples](machine-learning-examples) below. More Resources Working with Notebooks in Colab- [Overview of Colaboratory](/notebooks/basic_features_overview.ipynb)- [Guide to Markdown](/notebooks/markdown_guide.ipynb)- [Importing libraries and installing dependencies](/notebooks/snippets/importing_libraries.ipynb)- [Saving and loading notebooks in GitHub](https://colab.research.google.com/github/googlecolab/colabtools/blob/master/notebooks/colab-github-demo.ipynb)- [Interactive forms](/notebooks/forms.ipynb)- [Interactive widgets](/notebooks/widgets.ipynb)- [TensorFlow 2 in Colab](/notebooks/tensorflow_version.ipynb) Working with Data- [Loading data: Drive, Sheets, and Google Cloud Storage](/notebooks/io.ipynb) - [Charts: visualizing data](/notebooks/charts.ipynb)- [Getting started with BigQuery](/notebooks/bigquery.ipynb) Machine Learning Crash CourseThese are a few of the notebooks from Google's online Machine Learning course. 
See the [full course website](https://developers.google.com/machine-learning/crash-course/) for more.- [Intro to Pandas](/notebooks/mlcc/intro_to_pandas.ipynb)- [Tensorflow concepts](/notebooks/mlcc/tensorflow_programming_concepts.ipynb)- [First steps with TensorFlow](/notebooks/mlcc/first_steps_with_tensor_flow.ipynb)- [Intro to neural nets](/notebooks/mlcc/intro_to_neural_nets.ipynb)- [Intro to sparse data and embeddings](/notebooks/mlcc/intro_to_sparse_data_and_embeddings.ipynb) Using Accelerated Hardware- [TensorFlow with GPUs](/notebooks/gpu.ipynb)- [TensorFlow with TPUs](/notebooks/tpu.ipynb) Machine Learning ExamplesTo see end-to-end examples of the interactive machine learning analyses that Colaboratory makes possible, check out these tutorials using models from [TensorFlow Hub](https://tfhub.dev).A few featured examples:- [Retraining an Image Classifier](https://tensorflow.org/hub/tutorials/tf2_image_retraining): Build a Keras model on top of a pre-trained image classifier to distinguish flowers.- [Text Classification](https://tensorflow.org/hub/tutorials/tf2_text_classification): Classify IMDB movie reviews as either *positive* or *negative*.- [Style Transfer](https://tensorflow.org/hub/tutorials/tf2_arbitrary_image_stylization): Use deep learning to transfer style between images.- [Multilingual Universal Sentence Encoder Q&A](https://tensorflow.org/hub/tutorials/retrieval_with_tf_hub_universal_encoder_qa): Use a machine learning model to answer questions from the SQuAD dataset.- [Video Interpolation](https://tensorflow.org/hub/tutorials/tweening_conv3d): Predict what happened in a video between the first and the last frame.
**PRIME NUMBER**
1) Take input from the user.
2) Prime numbers are greater than 1.
3) Check for factors.
4) Check the divisibility of the input number by every integer from 2 up to the input number.
5) Then output the result.
###Code
num = int(input("Enter a number: "))
if num > 1:
for i in range(2,num):
if (num % i) == 0:
print(num,"is not a prime number")
break
else:
print(num,"is a prime number")
else:
print(num,"is not a prime number")
###Output
Enter a number: 21
21 is not a prime number
###Markdown
**ARMSTRONG NUMBER**
abcd... = pow(a,n) + pow(b,n) + pow(c,n) + pow(d,n) + ....
1) Take input from the user.
2) Initialise the sum.
3) Find the sum of the cube of each digit (the code below handles the 3-digit case).
4) Display the result.
###Code
num = int(input("Enter a number: "))
sum = 0
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** 3
temp //= 10
# display the result
if num == sum:
print(num,"is an Armstrong number")
else:
print(num,"is not an Armstrong number")
###Output
Enter a number: 407
407 is an Armstrong number
###Markdown
**NEON NUMBER**
Steps to check a neon number in Python:
1) Take a number as input.
2) Find the square of the number.
3) Calculate the sum of the digits of the square.
4) If that digit sum is equal to the number, it is a neon number; otherwise it is not.
###Code
num = int(input("Enter a number \n"))
sqr = num*num #square of num
sumOfDigit = 0
while sqr>0:
sumOfDigit =sumOfDigit + sqr%10
sqr = sqr//10
if (num == sumOfDigit):
print("Neon Number \n")
else:
print("Not a Neon Number \n")
###Output
Enter a number
9
Neon Number
###Markdown
What is Colaboratory?Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required- Free access to GPUs- Easy sharingWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below! **Getting started**The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
###Code
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
# hello  (stray text commented out: not valid Python)
# test screeeeem
###Output
_____no_output_____
###Markdown
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.Variables that you define in one cell can later be used in other cells:
###Code
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
# 🤣🤣🤣😂😎😎🙄😫😫  (stray text commented out: not valid Python)
# ug
# katie
# max
###Output
_____no_output_____
###Markdown
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.comcreate=true).Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org). Data scienceWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
###Code
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
###Output
_____no_output_____
###Markdown
What is Colaboratory?Colaboratory, or "Colab" for short, allows you to write and execute Python in your browser, with - Zero configuration required- Free access to GPUs- Easy sharingWhether you're a **student**, a **data scientist** or an **AI researcher**, Colab can make your work easier. Watch [Introduction to Colab](https://www.youtube.com/watch?v=inN8seMm7UI) to learn more, or just get started below!
###Code
# https://github.com/Mandeepkaur21/Ham-vs-Spam-Detection-Web-App-1.git  (repository link, commented out: not valid Python)
###Output
_____no_output_____
###Markdown
**Getting started**The document you are reading is not a static web page, but an interactive environment called a **Colab notebook** that lets you write and execute code.For example, here is a **code cell** with a short Python script that computes a value, stores it in a variable, and prints the result:
###Code
seconds_in_a_day = 24 * 60 * 60
seconds_in_a_day
###Output
_____no_output_____
###Markdown
To execute the code in the above cell, select it with a click and then either press the play button to the left of the code, or use the keyboard shortcut "Command/Ctrl+Enter". To edit the code, just click the cell and start editing.Variables that you define in one cell can later be used in other cells:
###Code
seconds_in_a_week = 7 * seconds_in_a_day
seconds_in_a_week
###Output
_____no_output_____
###Markdown
Colab notebooks allow you to combine **executable code** and **rich text** in a single document, along with **images**, **HTML**, **LaTeX** and more. When you create your own Colab notebooks, they are stored in your Google Drive account. You can easily share your Colab notebooks with co-workers or friends, allowing them to comment on your notebooks or even edit them. To learn more, see [Overview of Colab](/notebooks/basic_features_overview.ipynb). To create a new Colab notebook you can use the File menu above, or use the following link: [create a new Colab notebook](http://colab.research.google.comcreate=true).Colab notebooks are Jupyter notebooks that are hosted by Colab. To learn more about the Jupyter project, see [jupyter.org](https://www.jupyter.org). Data scienceWith Colab you can harness the full power of popular Python libraries to analyze and visualize data. The code cell below uses **numpy** to generate some random data, and uses **matplotlib** to visualize it. To edit the code, just click the cell and start editing.
###Code
import numpy as np
from matplotlib import pyplot as plt
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
###Output
_____no_output_____
###Markdown
###Code
print("Miles Duca")
print("COP4020-Fall-Class #0001\n\n\n")
students = {
    "Sam": "Developer",
    "John": "Hacker",
    "Andrew": "Programmer",
    2: 2021,
    "Hannah": "nurse"
}
print("Initialized students dictionary:")
print(students)
print("\nAdding nested dictionary:")
students.update({3: {"David": "graduated 2021 Spring"}})
print(students)
# Accessing an element using its key
print("\nAccessing an element using key: Sam")
print(students["Sam"])
# Deleting the second element
print("\nDeleting second element:")
del students["John"]
print(students)
# Clearing the dictionary
print("\nClearing students dictionary")
students.clear()
print(students)
###Output
Miles Duca
COP4020-Fall-Class #0001
Initialized students dictionary:
{'Sam': 'Developer', 'John': 'Hacker', 'Andrew': 'Programmer', 2: 2021, 'Hannah': 'nurse'}
Adding nested dictionary:
{'Sam': 'Developer', 'John': 'Hacker', 'Andrew': 'Programmer', 2: 2021, 'Hannah': 'nurse', 3: {'David': 'graduated 2021 Spring'}}
Accessing an element using key: Sam
Developer
Deleting second element:
{'Sam': 'Developer', 'Andrew': 'Programmer', 2: 2021, 'Hannah': 'nurse', 3: {'David': 'graduated 2021 Spring'}}
Clearing students dictionary
{}
|
steepest-descent.ipynb | ###Markdown
Optimization with the steepest descent method. In this notebook we optimize a function using the steepest descent method. For a function $f:\mathbb{R}^n\to\mathbb{R}$, the **steepest descent method** starts from a suitable initial point $x^{(1)}\in\mathbb{R}^n$ and generates, via the iteration\begin{align*}x^{(k+1)}:=x^{(k)}+\alpha^{(k)}d^{(k)}\quad(k=1,2,\ldots)\end{align*}a sequence of points $\{x^{(k)}\}$ that converges to an optimal solution under suitable conditions. Here $d^{(k)}:=-\nabla f(x^{(k)})$ is the **steepest descent direction**, defined as the direction opposite to the gradient, and $\alpha^{(k)}\in(0,\infty)\ (k=1,2,\ldots)$ is a real number called the **step size**, chosen so that the objective function decreases sufficiently along the direction $-\nabla f(x^{(k)})$. In this notebook we implement the steepest descent method using step sizes that satisfy the **strong Wolfe conditions**, a widely used selection criterion, and examine its behavior. The objective function and its gradient. As the objective function we consider the following function of two variables:\begin{align*}f(x_0, x_1):=\sin\left(\frac{1}{2}x_0^2-\frac{1}{4}x_1^2+3\right)\cos(2x_0+1-e^{x_1})\end{align*}Running the steepest descent method requires that, at each given point $x^{(k)}\ (k=1,2,\ldots)$, we can evaluate both the objective value $f(x^{(k)})$ and its gradient $\nabla f(x^{(k)})$. We therefore begin by implementing the objective function as `fun` and its gradient as `jac`, each as a Python function.
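For reference (this is just the chain rule applied to the formula above), writing $u:=\frac{1}{2}x_0^2-\frac{1}{4}x_1^2+3$ and $v:=2x_0+1-e^{x_1}$ so that $f=\sin(u)\cos(v)$, the gradient implemented in `jac` below is\begin{align*}\frac{\partial f}{\partial x_0}&=x_0\cos(u)\cos(v)-2\sin(u)\sin(v),\\\frac{\partial f}{\partial x_1}&=-\frac{x_1}{2}\cos(u)\cos(v)+e^{x_1}\sin(u)\sin(v).\end{align*}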
###Code
import numpy as np
def fun(x):
    return np.sin((x[0] ** 2) / 2 - (x[1] ** 2) / 4 + 3) * np.cos(2 * x[0] + 1 - np.exp(x[1]))

def jac(x):
    u, v = (x[0] ** 2) / 2 - (x[1] ** 2) / 4 + 3, 2 * x[0] + 1 - np.exp(x[1])
    return np.array([
        x[0] * np.cos(u) * np.cos(v) - 2 * np.sin(u) * np.sin(v),
        np.exp(x[1]) * np.sin(u) * np.sin(v) - (x[1] / 2) * np.cos(u) * np.cos(v)
    ])
###Output
_____no_output_____
###Markdown
Note that the objective function used here has the rather complicated shape shown below.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize=(8, 6), dpi=80)
ax = plt.axes(projection='3d')
X, Y = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100))
Z = np.array([[fun(np.array([x, y])) for x, y in zip(vx, vy)] for vx, vy in zip(X, Y)])
ax.plot_surface(X, Y, Z, cmap='plasma')
###Output
_____no_output_____
###Markdown
Implementing the steepest descent method. Let us now write the steepest descent program. Here we take the initial point $x^{(1)}:=(-0.3, 0.2)^\top$ and run 15 iterations. At each iteration $k$, a step size `alpha` that satisfies the strong Wolfe conditions for the objective `fun` and its gradient `jac`, at the current point `xk` along the descent direction `dk`, can be obtained with `alpha = line_search(fun, jac, xk, dk)[0]` from `scipy.optimize`. Below is an implementation of the steepest descent method that stores the generated iterates in the list `sequence`.
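For reference, a step size $\alpha$ satisfies the strong Wolfe conditions when, for constants $0<c_1<c_2<1$,\begin{align*}f(x^{(k)}+\alpha d^{(k)})&\le f(x^{(k)})+c_1\alpha\nabla f(x^{(k)})^\top d^{(k)},\\\bigl|\nabla f(x^{(k)}+\alpha d^{(k)})^\top d^{(k)}\bigr|&\le c_2\bigl|\nabla f(x^{(k)})^\top d^{(k)}\bigr|.\end{align*}SciPy's `line_search` uses the defaults $c_1=10^{-4}$ and $c_2=0.9$, and it returns `None` for the step size if the search fails, so a more defensive implementation would check `alpha` before updating `xk`.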
###Code
import numpy as np
from scipy.optimize import line_search
xk = np.array([-0.3, 0.2])
sequence = [xk]
for k in range(15):
    dk = -jac(xk)
    alpha = line_search(fun, jac, xk, dk)[0]
    xk = xk + alpha * dk
    sequence.append(xk)
###Output
_____no_output_____
###Markdown
Checking the behavior of the generated sequence. Let us now plot the sequence of points `sequence` generated by the steepest descent method on top of a contour plot of the function `fun` and examine its behavior.
###Code
%matplotlib inline
import matplotlib.pyplot as plt
X, Y = np.meshgrid(np.linspace(-1, 1, 100), np.linspace(-1, 1, 100))
Z = np.array([[fun(np.array([x, y])) for x, y in zip(vx, vy)] for vx, vy in zip(X, Y)])
plt.contour(X, Y, Z, cmap='plasma', levels=np.linspace(np.min(Z), np.max(Z), 15))
sequence = np.array(sequence)
plt.plot(sequence[:, 0], sequence[:, 1], marker='o')
###Output
_____no_output_____ |