Dataset columns: markdown (string, 0–1.02M chars), code (string, 0–832k chars), output (string, 0–1.02M chars), license (string, 3–36 chars), path (string, 6–265 chars), repo_name (string, 6–127 chars)
We will use idomain to define zones for pilot points, restricting them to the active areas of each layer.
idm = m.dis.idomain.array
plt.imshow(idm[2])
plt.colorbar()
# make pass-through cells (idomain == -1) the same as inactive (idomain == 0)
idm[idm == -1] = 0
for i in range(4):
    plt.figure()
    plt.imshow(idm[i])
    plt.colorbar()
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Before setting up K, we need to edit the zone files so that they have nonzero values only in active cells
kzonefile = '../processed_data/padded_L{}_K_Zone_50mGrid.dat' zonearrs = {} for i in range(m.dis.nlay.data): kz = np.loadtxt(kzonefile.format(i)).astype(int) kz[idm[i] != 1] = 0 zonearrs[i] = kz for i in range(4): plt.figure() plt.imshow(zonearrs[i]) plt.colorbar() # quick take a look at unique zones present in each layer [np.unique(kz) for _,kz in zonearrs.items()] ## set up for K for tag,bnd in pp_tags.items(): lb, ub, ultub = bnd if tag == 'k': arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag) & ('k33' not in f)]) else: arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag)]) for arr_file in arrfiles: currlay = int(re.findall('\d+',arr_file.replace('k33',''))[-1]) # pilot points # set pilot point spacing: NB every 5 cells in the smaller-zone layers, and every 20 cells in others if currlay in [1,2]: pp_space = 5 else: pp_space = 20 v = pyemu.utils.geostats.ExpVario(a=sr_model.delr[0]*3*pp_space,contribution=1.0) gs = pyemu.utils.geostats.GeoStruct(variograms=v,nugget=0.0, transform='log') print('pps for layer {} --- filename: {}: idomain_sum: {}'.format(currlay, arr_file, idm[currlay].sum())) pf.add_parameters(filenames=arr_file, par_type='pilotpoints', pp_space=pp_space, upper_bound=ub, lower_bound=lb, geostruct=gs, par_name_base='{}_pp'.format(tag),alt_inst_str='', zone_array=idm[currlay], pargp='pp_{}'.format(tag), ult_ubound=ultub) # zones print('zones for layer {} --- filename: {}: idomain_sum: {}'.format(currlay, arr_file, idm[currlay].sum())) pf.add_parameters(filenames=arr_file, par_type='zone',alt_inst_str='', zone_array = zonearrs[currlay],lower_bound=lb,upper_bound=ub, pargp='zn_{}'.format(tag), par_name_base='{}_{}'.format(tag,currlay), ult_ubound=ultub) # recharge as special case because no idomain for R rtags= {'rch':[0.8,1.2, np.max(m.rch.recharge.array)*1.2]} for tag,bnd in rtags.items(): lb, ub, ultub = bnd if tag == 'k': arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag) & ('k33' not in f)]) else: arrfiles = sorted([f for f in os.listdir(template_ws) if f.startswith(tag)]) for arr_file in arrfiles: # pilot points pf.add_parameters(filenames=arr_file, par_type='pilotpoints', pp_space=pp_space, upper_bound=ub, lower_bound=lb, geostruct=gs, par_name_base='{}_pp'.format(tag), zone_array=idm[3],alt_inst_str='', pargp='pp_{}'.format(tag), ult_ubound=ultub) # constant pf.add_parameters(filenames=arr_file, par_type='constant', upper_bound=ub-0.1, lower_bound=lb+0.1, par_name_base='{}_const'.format(tag), zone_array=idm[3],alt_inst_str='', pargp='pp_{}'.format(tag), ult_ubound=ultub)
2021-03-26 16:43:15.342865 starting: adding pilotpoints type multiplier style parameters for file(s) ['rch_000.dat'] 2021-03-26 16:43:15.343727 starting: loading array ..\run_data\rch_000.dat 2021-03-26 16:43:15.704764 finished: loading array ..\run_data\rch_000.dat took: 0:00:00.361037 2021-03-26 16:43:15.705800 loaded array '..\neversink_mf6\rch_000.dat' of shape (680, 619) 2021-03-26 16:43:16.246920 starting: writing array-based template file '..\run_data\rch_pp_0_pilotpoints.csv.tpl' 2021-03-26 16:43:16.246920 starting: setting up pilot point parameters 2021-03-26 16:43:16.246920 No spatial reference (containing cell spacing) passed. 2021-03-26 16:43:16.246920 OK - using spatial reference in parent object. 2021-03-26 16:43:17.495149 470 pilot point parameters created 2021-03-26 16:43:17.496147 pilot point 'pargp':rch_pp_:0 2021-03-26 16:43:17.496147 finished: setting up pilot point parameters took: 0:00:01.249227 2021-03-26 16:43:17.692599 starting: writing array-based template file '..\run_data\rch_pp_0pp.dat.tpl' 2021-03-26 16:43:17.692599 saving zone array ..\run_data\rch_pp_0pp.dat.zone for tpl file ..\run_data\rch_pp_0pp.dat.tpl 2021-03-26 16:43:17.854377 finished: adding pilotpoints type multiplier style parameters for file(s) ['rch_000.dat'] took: 0:00:02.511512 2021-03-26 16:43:17.855375 starting: adding constant type multiplier style parameters for file(s) ['rch_000.dat'] 2021-03-26 16:43:17.856372 starting: loading array ..\run_data\rch_000.dat 2021-03-26 16:43:18.242922 finished: loading array ..\run_data\rch_000.dat took: 0:00:00.386550 2021-03-26 16:43:18.242922 loaded array '..\neversink_mf6\rch_000.dat' of shape (680, 619) 2021-03-26 16:43:18.963246 starting: writing array-based template file '..\run_data\rch_const_0_constant.csv.tpl' 2021-03-26 16:43:18.963246 starting: writing template file ..\run_data\rch_const_0_constant.csv.tpl for ['rch_const_:0'] 2021-03-26 16:43:18.990173 WARNING: get_xy() warning: position of i and j in index_cols not specified, assume (i,j) are final two entries in index_cols. 2021-03-26 16:43:21.603479 finished: writing template file ..\run_data\rch_const_0_constant.csv.tpl for ['rch_const_:0'] took: 0:00:02.640233 2021-03-26 16:43:21.603479 finished: writing array-based template file '..\run_data\rch_const_0_constant.csv.tpl' took: 0:00:02.640233 2021-03-26 16:43:21.603479 saving zone array ..\run_data\rch_const_0_constant.csv.zone for tpl file ..\run_data\rch_const_0_constant.csv.tpl 2021-03-26 16:43:22.372912 finished: adding constant type multiplier style parameters for file(s) ['rch_000.dat'] took: 0:00:04.517537
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
the `build_pst` method compiles all the parameters we've added and makes a `Pst` object
pst = pf.build_pst('tmp.pst')
noptmax:0, npar_adj:4172, nnz_obs:0
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Make a TPL file for SFR and add it to the `pst` object
sfrfilename = 'neversink_packagedata.dat'
print('working on {}'.format(sfrfilename))
# read in and strip and split the input lines
insfr = [line.strip().split() for line in open(os.path.join(template_ws,sfrfilename), 'r').readlines() if '#' not in line]
headerlines = [line.strip() for line in open(os.path.join(template_ws,sfrfilename), 'r').readlines() if '#' in line]
# set up the template line strings by segment
tpl_char = ['~ sfrk_{} ~'.format(line[-1]) for line in insfr]
# stick the tpl text in the K column. NB -> gotta count from the end because of
# the possibility of NONE or i,j,k as indexing
for line,tpl in zip(insfr,tpl_char):
    line[-6] = tpl
# revert back to a space delimited file
insfr = [' '.join(line) for line in insfr]
# write out the TPL file
with open(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)), 'w') as ofp:
    ofp.write('ptf ~\n')
    [ofp.write('{}\n'.format(line)) for line in headerlines]
    [ofp.write('{}\n'.format(line)) for line in insfr]
pst.add_parameters(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)), pst_path='.')
parval1 = pyemu.pst_utils.try_read_input_file_with_tpl(os.path.join(template_ws,'{}.tpl'.format(sfrfilename)),
                                                       os.path.join(template_ws,sfrfilename))
pst.parameter_data.loc[pst.parameter_data.parnme.str.startswith('sfr'),'pargp'] = 'sfrk'
pst.parameter_data.loc[pst.parameter_data.parnme == 'sfrk_700039914']
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
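For context, a PEST template (TPL) file is simply a copy of the model input file in which the values to be estimated are replaced by delimited parameter names. Below is a minimal, hedged sketch of that format; the file name `example_sfr.dat.tpl`, the parameter name `sfrk_1`, and the column layout are illustrative only and are not taken from the actual packagedata file.

# Minimal illustration of the PEST template-file format used in the cell above (hypothetical values).
# The first line declares the delimiter character ("ptf ~"); each parameter appears between delimiters.
example_tpl = (
    "ptf ~\n"                                        # template-file header with the ~ delimiter
    "# rno  cellid      rlen   rwid  rgrd  rhk  ...\n"
    "1  1 10 5  250.0  5.0  0.001  ~ sfrk_1 ~  1.0  0\n"  # made-up SFR reach with the K column templated
)
with open("example_sfr.dat.tpl", "w") as f:
    f.write(example_tpl)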
Add in the observations. Assign meaningful observation values and prepare to run a `noptmax=0` test run prior to reweighting.
update_forward_run=True run_local=True update_all_obs = True
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
if `update_all_obs` is True, run the get_observations.py script to get a new INS file and reset all observations in the PEST object
if update_all_obs is True: shutil.copy2('../scripts/get_observations.py',os.path.join(template_ws,'get_observations.py')) shutil.copy2('../scripts/get_observations.py',os.path.join(sim_ws,'get_observations.py')) os.system('python {} {} True'.format(os.path.join(sim_ws,'get_observations.py'), sim_ws)) [shutil.copy2(cf, os.path.join(template_ws, os.path.basename(cf))) for cf in glob.glob(os.path.join(sim_ws, '*.ins'))] [shutil.copy2(cf, os.path.join(template_ws, os.path.basename(cf))) for cf in glob.glob(os.path.join(sim_ws, 'land_*.csv'))] pst.observation_data.loc[:,:] = np.nan pst.observation_data.dropna(inplace=True) pst.add_observations(os.path.join(template_ws,'obs_mf6.dat.ins'), pst_path='.')
526 obs added from instruction file ../run_data\.\obs_mf6.dat.ins
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
set the observation groups
obs = pst.observation_data obs.obgnme = 'head' obs.loc[obs.index.str.startswith('q_'), 'obgnme'] = 'flux' obs.loc[obs.index.str.startswith('perc'), 'obgnme'] = 'budget' obs.loc[obs.index.str.startswith('land'), 'obgnme'] = 'land_surface'
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Set observation values
set_obs = True if set_obs: # read in sfr; make sfr obsnme/obsval dict to map to pst observation_data sfr_df = pd.read_csv('../processed_data/NWIS_DV_STREAMSTATS_SITES.csv') sfr_df['obsnme'] = 'q_' + sfr_df['site_id'].astype(str) sfr_df['obsval'] = (sfr_df['Mean_Annual_Flow_cfs'] * sfr_df['Average_BFI_value']) * 2446.5755455 # convert from cfs to m^3/day sfr_df[['obsnme', 'obsval']] sfr_dict = pd.Series(sfr_df['obsval'].values,index=sfr_df['obsnme']).to_dict() # read in nwis heads; make nwis head obsnme/obsval dict nwis_gw_df = pd.read_csv('../processed_data/NWIS_GW_DV_data.csv') nwis_gw_df['obsnme'] = 'h_' + nwis_gw_df['site_no'].astype(str) nwis_gw_df['obsval'] = nwis_gw_df['gw_elev_m'] nwis_gw_dict = pd.Series(nwis_gw_df['obsval'].values,index=nwis_gw_df['obsnme']).to_dict() # read in DEC heads; make DEC heads obsnme/obsval dict DEC_gw_df = pd.read_csv('../processed_data/NY_DEC_GW_sites.csv') DEC_gw_df['obsnme'] = ('h_' + DEC_gw_df['WellNO'].astype(str)).str.lower() DEC_gw_df['obsval'] = DEC_gw_df['gw_elev_m'] DEC_gw_dict = pd.Series(DEC_gw_df['obsval'].values,index=DEC_gw_df['obsnme']).to_dict() # map SFR values to observation_data obs.loc[obs.obsnme.isin(sfr_dict.keys()), 'obsval'] = obs.obsnme.map(sfr_dict) # map nwis heads to observation_data obs.loc[obs.obsnme.isin(nwis_gw_dict.keys()), 'obsval'] = obs.obsnme.map(nwis_gw_dict) # map DEC heads to SRF observation_data obs.loc[obs.obsnme.isin(DEC_gw_dict.keys()), 'obsval'] = obs.obsnme.map(DEC_gw_dict) # set up percent discrepancy as dummy value obs.loc[obs.obgnme=='budget', 'obsval'] = -99999 # get the land surface obs lsobs_df = pd.read_csv('../neversink_mf6/land_surf_obs-observations.csv', index_col=0) obs.loc[obs.obgnme=='land_surface', 'obsval'] = lsobs_df.obsval
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
first cut at weights
# weights based on coefficients of variation of 3.33 and 10, respectively
obs.loc[obs.obsnme=='q_1436500', 'weight'] = 3.33/obs.loc[obs.obsnme=='q_1436500'].obsval
obs.loc[obs.obsnme=='q_1366650', 'weight'] = 10/obs.loc[obs.obsnme=='q_1366650'].obsval
# these initial weights assume that heads within 5m for measured heads or 10m for land-surface obs are acceptable
obs.loc[obs.obgnme=='head', 'weight'] = 1/5
obs.loc[obs.obgnme=='land_surface', 'weight'] = 1/10
obs.loc[obs.obgnme=='budget', 'weight'] = 0.0
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
update some parameter bounds
pars = pst.parameter_data
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Set the K-zone bounds so values do not get unreasonably high
# read in k value lookup table to df
# original table
k_df_original = pd.read_excel(
    '../processed_data/Rondout_Neversink_GeologyLookupTable.xlsx',
    sheet_name='Sheet2'
)
k_df_original.index = k_df_original.Lookup_Code

k_df = pd.read_excel(
    '../processed_data/Rondout_Neversink_GeologyLookupTable_jhw.xlsx',
    sheet_name='Sheet2'
)
k_df.index = k_df.Lookup_Code

print('Using mean K value')
k_df['Kh_ft_d_mean'] = (k_df['Kh_ft_d_lower'] + k_df['Kh_ft_d_upper']) / 2
k_df['Kh_m_d'] = k_df['Kh_ft_d_mean'] * 0.3048
k_df['Kh_m_d_lower'] = k_df['Kh_ft_d_lower'] * .3048
k_df['Kh_m_d_upper'] = k_df['Kh_ft_d_upper'] * .3048
k_df['K_upper_mult'] = k_df['Kh_m_d_upper'] / k_df['Kh_m_d']
k_df['K_lower_mult'] = k_df['Kh_m_d_lower'] / k_df['Kh_m_d']
k_df

k_mult_zones = [int(i.split(':')[-1]) for i in pars.loc[pars.parnme.str.startswith('multiplier_k')].index]
np.unique(k_mult_zones)
upper_mults = [k_df.loc[i].K_upper_mult for i in k_mult_zones]
lower_mults = [k_df.loc[i].K_lower_mult for i in k_mult_zones]
pars.loc[pars.parnme.str.startswith('multiplier_k'), 'parlbnd'] = lower_mults
pars.loc[pars.parnme.str.startswith('multiplier_k'), 'parubnd'] = upper_mults
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Pilot point bounds are set to the mean upper and lower bound multipliers
mean_lower = k_df.K_lower_mult.mean() mean_upper = k_df.K_upper_mult.mean() mean_lower,mean_upper pars.loc[pars.pargp.str.startswith('k'), 'parlbnd'] = mean_lower + 0.01 pars.loc[pars.pargp.str.startswith('k'), 'parubnd'] = mean_upper - 0.01 pars.loc[pars.pargp.str.startswith('sfrk'), 'parlbnd'] = 0.1 pars.loc[pars.pargp.str.startswith('sfrk'), 'parubnd'] = 10.0
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
Set CHD parameters to 'fixed'. They will not be estimated, but they are present so they can be evaluated in the global sensitivity analysis, which means we will free them only for that purpose.
pars.loc[pars.pargp=='chd', 'partrans'] = 'fixed'
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
`pyemu` can write out a summary table of the parameterization - note that all parameters are multipliers
parsum = pst.write_par_summary_table('../figures/initial_parsum.xlsx', report_in_linear_space=True) parsum
Warning: because log-transformed values being reported in linear space, stdev NOT reported
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
update the forward run script to run the model and postprocessing
# note there are ways to do this within PstFrom but we were unaware of that when we set this up
# note also we are putting the model run and postprocessing lines just above the if __name__ == "__main__" line
frunlines = open(os.path.join(template_ws, 'forward_run.py'), 'r').readlines()
if update_forward_run is True and './mf6' not in ' '.join([i.strip() for i in frunlines]):
    print('updating forward_run.py')
    with open(os.path.join(template_ws, 'forward_run.py'), 'w') as ofp:
        for line in frunlines:
            if '__main__' in line:
                ofp.write(" os.system('./mf6')\n")
                ofp.write(" os.system('python get_observations.py . false')\n")
                ofp.write('{}\n'.format(line))
            elif 'import os' in line:
                ofp.write('import os, sys\n')
                ofp.write("sys.path.append('../python_packages_static/')\n")
            else:
                ofp.write(line)
updating forward_run.py
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
set `noptmax = 0`, set a couple of ++ options, and write out the PST file
pst.pestpp_options["ies_num_reals"] = 500 pst.pestpp_options["ies_bad_phi_sigma"] = 2 pst.pestpp_options["overdue_giveup_fac"] = 4 pst.pestpp_options["ies_save_rescov"] = True pst.pestpp_options["ies_no_noise"] = True pst.pestpp_options["ies_drop_conflicts"] = True pst.pestpp_options["ies_pdc_sigma_distance"] = 2.0 pst.control_data.noptmax = 0
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
write out the PST file
pst.write(os.path.join(template_ws,'prior_mc.pst'))
noptmax:0, npar_adj:4911, nnz_obs:525
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
copy over the entire PEST directory to a separate folder identified by the `noptmax0_dir` variable. This is to keep `template_ws` clean and allow for various testing to take place in the `noptmax0_dir` location
if os.path.exists(noptmax0_dir): shutil.rmtree(noptmax0_dir) shutil.copytree(template_ws, noptmax0_dir) pst.write_obs_summary_table('../figures/obs_initial.xlsx')
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
If running on Windows, replace backslashes in `mult2model_info.csv` with forward slashes so it can run on a Linux cluster
if sys.platform == 'win32': f = open(os.path.join(template_ws, 'mult2model_info.csv'), "r") lines = f.readlines() f.close() output_lines = [] for line in lines: output_lines.append(line.replace('\\', "/")) f = open(os.path.join(template_ws, 'mult2model_info.csv'), "w") f.write(''.join(output_lines)) f.close()
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
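As an aside, the same Windows-to-POSIX path conversion can be done with the standard library's `pathlib`; a minimal sketch is below, assuming the file simply contains Windows-style path strings as `mult2model_info.csv` does. The helper name `to_posix` and the example file name are illustrative, not part of the workflow.

from pathlib import PureWindowsPath

def to_posix(path_str):
    # convert a Windows-style path ('mult\\factor.csv') to POSIX style ('mult/factor.csv')
    return PureWindowsPath(path_str).as_posix()

print(to_posix(r'mult\k_pp_inst0_pilotpoints.csv'))  # hypothetical file name, for illustration only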
and the pest file
if sys.platform == 'win32': f = open(os.path.join(template_ws, 'prior_mc.pst'), "r") lines = f.readlines() f.close() output_lines = [] for line in lines: output_lines.append(line.replace('\\', "/")) f = open(os.path.join(template_ws, 'prior_mc.pst'), "w") f.write(''.join(output_lines)) f.close()
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
and update the forward run command
if sys.platform == 'win32':
    f = open(os.path.join(template_ws, 'forward_run.py'), "r")
    lines = f.readlines()
    f.close()
    output_lines = []
    for line in lines:
        output_lines.append(line.replace('./mf6', "mf6"))
    # fix in run_dir
    f = open(os.path.join(template_ws, 'forward_run.py'), "w")
    f.write(''.join(output_lines))
    f.close()
    # fix in noptmax_0_testing
    f = open(os.path.join(noptmax0_dir, 'forward_run.py'), "w")
    f.write(''.join(output_lines))
    f.close()
_____no_output_____
CC0-1.0
notebooks_workflow_complete/0.0_PEST_parameterization.ipynb
usgs/neversink_workflow
imports
import os import sys sys.path.append('../') from glob import glob import torch import numpy as np import pandas as pd
_____no_output_____
Apache-2.0
notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb
syeehyn/spug
get and split data
from spug.dataset import DatasetGenerator data_root = '../data' sotck_path = os.path.join( data_root, 'raw', 'stock', 'raw.csv' ) sec_path = os.path.join( data_root, 'raw', 'sec' ) output_path = os.path.join( data_root, 'processed' ) data_list = sorted(glob( os.path.join( data_root, 'raw', 'twitter', '*q*.npy' ) )) dg = DatasetGenerator( data_list = data_list, stock_path=sotck_path, sec_path=sec_path, freq='quarter' )
_____no_output_____
Apache-2.0
notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb
syeehyn/spug
model definition
from spug.model import GCN
_____no_output_____
Apache-2.0
notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb
syeehyn/spug
model training
import argparse from torch_geometric_temporal.signal import temporal_signal_split from spug.utils import Trainer dataset = dg.process() train_dataset, test_dataset = temporal_signal_split(dataset, train_ratio=0.8) INPUT_SHAPE = next(iter(train_dataset)).x.shape[1] model = GCN(input_size = INPUT_SHAPE, hidden_dims=64) args = argparse.Namespace( num_epochs = 500, learning_rate = 1e-3, device = "cpu", val_size = .1, verbose = False ) trainer = Trainer(model, train_dataset, args, test_dataset) model = trainer.train()
100%|███████████████████████████████████████████████████████████████████████████| 500/500 [00:08<00:00, 56.69it/s]
Apache-2.0
notebooks/3.1 gcn_twitter_quarterly_prediction.ipynb
syeehyn/spug
Test `second_narrows_current` Module

Render the figure object produced by the `nowcast.figures.fvcom.second_narrows_current` module. Provides data for visual testing to confirm that refactoring has not adversely changed the figure for the web page. The set-up and function call replicate as nearly as possible what is done in the `nowcast.workers.make_plots` worker.

Notebooks like this should be developed in a [Nowcast Figures Development Environment](https://salishsea-nowcast.readthedocs.io/en/latest/figures/fig_dev_env.html) so that all of the necessary dependency packages are installed. The development has to be done on a workstation that has the Vancouver Harbour & Fraser River FVCOM model results `/opp/` partition mounted.
import io
from pathlib import Path
import shlex
import subprocess

import arrow
import xarray
import yaml

from nowcast.figures import website_theme
from nowcast.figures.fvcom.publish import second_narrows_current

%matplotlib inline

# Suppress arrow.get() parser warnings re: changes coming in v0.15.0
# See https://github.com/crsmithdev/arrow/issues/612
# We don't use date strings that aren't included in the supported date tokens set mentioned in issue #612
import warnings
from arrow.factory import ArrowParseWarning
warnings.simplefilter("ignore", ArrowParseWarning)
_____no_output_____
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
The bits of `config/nowcast.yaml` that are required:
config = '''
vhfr fvcom runs:
  stations dataset filename:
    x2: vh_x2_station_timeseries.nc
    r12: vh_r12_station_timeseries.nc
  results archive:
    nowcast x2: /opp/fvcom/nowcast-x2/
    forecast x2: /opp/fvcom/forecast-x2/
    nowcast r12: /opp/fvcom/nowcast-r12/
'''
config = yaml.safe_load(io.StringIO(config))
_____no_output_____
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
The bits that the `make_plots` worker must provide:

Rename the FVCOM dataset layer and level variables because `xarray` won't accept variables and coordinates that have the same name.
def _rename_fvcom_vars(fvcom_dataset_path):
    cmd = (
        f'ncrename -O -v siglay,sigma_layer -v siglev,sigma_level '
        f'{fvcom_dataset_path} /tmp/{fvcom_dataset_path.name}')
    subprocess.check_output(shlex.split(cmd))
_____no_output_____
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
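A possible alternative to shelling out to `ncrename` (not what this worker does, just a sketch) is to let `xarray` skip the conflicting coordinate variables at load time with the `drop_variables` option of `xarray.open_dataset`, assuming the clashing names are `siglay` and `siglev` as in the command above. The helper name `open_fvcom_stations` is hypothetical.

import xarray

def open_fvcom_stations(path):
    # drop the coordinate variables whose names clash with dimension names,
    # so xarray can open the dataset without renaming anything on disk
    return xarray.open_dataset(path, drop_variables=("siglay", "siglev"))

Note that this discards the sigma coordinate values rather than renaming them, so it is only appropriate when those coordinates are not needed downstream.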
Nowcast `X2` Figure
run_date = arrow.get('2019-07-23') model_config = "x2" run_type = 'nowcast' ddmmmyy = run_date.format('DDMMMYY').lower() fvcom_stns_datasets = {} if run_type == 'nowcast': model_configs = ("x2", "r12") if model_config == "r12" else ("x2",) for mdl_cfg in model_configs: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg] results_dir = Path( config['vhfr fvcom runs']['results archive'][f"{run_type} {mdl_cfg}"], ddmmmyy ) fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}') else: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename']["x2"] nowcast_results_dir = Path( config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy ) nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename) forecast_results_dir = Path( config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy ) forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename) fvcom_stns_dataset_path = Path("/tmp", fvcom_stns_dataset_filename) cmd = ( f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} ' f'-o {fvcom_stns_dataset_path}' ) subprocess.check_output(shlex.split(cmd)) _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[model_config] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}') obs_dataset = xarray.open_dataset( "https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1" ) %%timeit -n1 -r1 from importlib import reload reload(website_theme) reload(second_narrows_current) fig = second_narrows_current.make_figure( '2nd Narrows', fvcom_stns_datasets, obs_dataset )
1.41 s ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
Nowcast `R12` Figure
run_date = arrow.get('2019-07-23') model_config = "r12" run_type = 'nowcast' ddmmmyy = run_date.format('DDMMMYY').lower() fvcom_stns_datasets = {} if run_type == 'nowcast': model_configs = ("x2", "r12") if model_config == "r12" else ("x2",) for mdl_cfg in model_configs: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg] results_dir = Path( config['vhfr fvcom runs']['results archive'][f"{run_type} {mdl_cfg}"], ddmmmyy ) fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}') else: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename']["x2"] nowcast_results_dir = Path( config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy ) nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename) forecast_results_dir = Path( config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy ) forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename) fvcom_stns_dataset_path = Path("/tmp", fvcom_stns_dataset_filename) cmd = ( f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} ' f'-o {fvcom_stns_dataset_path}' ) subprocess.check_output(shlex.split(cmd)) _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[model_config] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}') obs_dataset = xarray.open_dataset( "https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1" ) %%timeit -n1 -r1 from importlib import reload reload(website_theme) reload(second_narrows_current) fig = second_narrows_current.make_figure( '2nd Narrows', fvcom_stns_datasets, obs_dataset )
522 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
Forecast `X2` Figure
run_date = arrow.get('2019-07-23') model_config = "x2" run_type = 'forecast' ddmmmyy = run_date.format('DDMMMYY').lower() fvcom_stns_datasets = {} if run_type == 'nowcast': model_configs = ("x2", "r12") if model_config == "r12" else ("x2",) for mdl_cfg in model_configs: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename'][mdl_cfg] results_dir = Path( config['vhfr fvcom runs']['results archive'][f"{run_type} {mdl_cfg}"], ddmmmyy ) fvcom_stns_dataset_path = results_dir / fvcom_stns_dataset_filename _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[mdl_cfg] = xarray.open_dataset(f'/tmp/{fvcom_stns_dataset_path.name}') else: fvcom_stns_dataset_filename = config['vhfr fvcom runs']['stations dataset filename']["x2"] nowcast_results_dir = Path( config['vhfr fvcom runs']['results archive']['nowcast x2'], ddmmmyy ) nowcast_dataset_path = (nowcast_results_dir/fvcom_stns_dataset_filename) forecast_results_dir = Path( config['vhfr fvcom runs']['results archive']['forecast x2'], ddmmmyy ) forecast_dataset_path = (forecast_results_dir/fvcom_stns_dataset_filename) fvcom_stns_dataset_path = Path("/tmp", fvcom_stns_dataset_filename) cmd = ( f'ncrcat -O {nowcast_dataset_path} {forecast_dataset_path} ' f'-o {fvcom_stns_dataset_path}' ) subprocess.check_output(shlex.split(cmd)) _rename_fvcom_vars(fvcom_stns_dataset_path) fvcom_stns_datasets[model_config] = xarray.open_dataset(fvcom_stns_dataset_path) obs_dataset = xarray.open_dataset( "https://salishsea.eos.ubc.ca/erddap/tabledap/ubcVFPA2ndNarrowsCurrent2sV1" ) %%timeit -n1 -r1 from importlib import reload reload(second_narrows_current) fig = second_narrows_current.make_figure( '2nd Narrows', fvcom_stns_datasets, obs_dataset )
565 ms ± 0 ns per loop (mean ± std. dev. of 1 run, 1 loop each)
Apache-2.0
notebooks/figures/fvcom/publish/TestSecondNarrowsCurrent.ipynb
SalishSeaCast/SalishSeaNowcast
TensorFlow Basics
_____no_output_____
MIT
tensorflow_basics.ipynb
sanattaori/Tensorflow-basics
Measurement Grouping

Since current quantum hardware is limited to single-qubit projective measurements, only terms commuting within each individual qubit's subspace can be measured together. These terms are said to be qubit-wise commuting (QWC). Thus, one cannot measure the entire electronic Hamiltonian $\hat H$ at once, and instead needs to separate it into fragments, $$\hat H = \sum_n \hat H_n$$ where each $\hat H_n$ is a QWC fragment.
from utility import *
_____no_output_____
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
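To make the qubit-wise commuting idea concrete, here is a small self-contained sketch, independent of the `utility` module, that checks whether two Pauli words commute qubit by qubit. The helper name `qubit_wise_commute` and the dictionary representation are illustrative only; the example operators are taken from the $H_2$ Hamiltonian printed below.

def qubit_wise_commute(pauli_a, pauli_b):
    """Pauli words are dicts mapping qubit index -> 'X', 'Y' or 'Z'.
    Two words are QWC if, on every shared qubit, they apply the same Pauli."""
    return all(pauli_a[q] == pauli_b[q] for q in set(pauli_a) & set(pauli_b))

# Z0 Z1 and Z0 Z2 act identically (Z) on the shared qubit 0 -> QWC
print(qubit_wise_commute({0: 'Z', 1: 'Z'}, {0: 'Z', 2: 'Z'}))                   # True
# X0 X1 Y2 Y3 and Z0 Z1 differ on qubits 0 and 1 -> not QWC
print(qubit_wise_commute({0: 'X', 1: 'X', 2: 'Y', 3: 'Y'}, {0: 'Z', 1: 'Z'}))   # False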
Here we use $H_2$ as an example for finding QWC fragments. Notice below that each fragment has the same terms on all qubits. To show the differences between QWC and more advanced grouping, we did not use the qubit-tapering technique shown in Step 2.
h2 = get_qubit_hamiltonian(mol='h2', geometry=1, basis='sto3g', qubit_transf='jw') qwc_list = get_qwc_group(h2) print('Fragments 1: \n{}\n'.format(qwc_list[4])) print('Fragments 2:\n{}\n'.format(qwc_list[1])) print('Number of fragments: {}'.format(len(qwc_list)))
Fragments 1: 0.13716572937099508 [Z0] + 0.15660062488237947 [Z0 Z1] + 0.10622904490856075 [Z0 Z2] + 0.15542669077992832 [Z0 Z3] + 0.13716572937099503 [Z1] + 0.15542669077992832 [Z1 Z2] + 0.10622904490856075 [Z1 Z3] + -0.13036292057109117 [Z2] + 0.16326768673564346 [Z2 Z3] + -0.13036292057109117 [Z3] Fragments 2: -0.04919764587136755 [X0 X1 Y2 Y3] Number of fragments: 5
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
By applying extra unitaries, one may rotate more terms of $\hat H$ into a QWC fragment. Recall that in digital quantum computing, the expectation value of $\hat H_n$ given a trial wavefunction $|\psi\rangle$ is $$ E_n = \langle\psi| \hat H_n | \psi\rangle.$$ Inserting a unitary transformation $\hat U_n$ does not change the expectation value: $$ E_n = \langle\psi| \hat U_n^\dagger \hat U_n \hat H_n \hat U_n^\dagger \hat U_n |\psi\rangle.$$ It nonetheless changes the trial wavefunction and the terms to be measured: $$ |\psi\rangle \rightarrow \hat U_n |\psi\rangle = |\phi\rangle,$$ $$ \hat H_n \rightarrow \hat U_n \hat H_n \hat U_n^\dagger = \hat A_n.$$ The transformation of $|\psi \rangle$ can be done on the quantum computer, and the transformation of $\hat H_n$ is possible on the classical computer. Now, although $\hat A_n$ needs to be a QWC fragment to be measurable on a quantum computer, $\hat H_n$ does not. Instead, if we restrict $\hat U_n$ to be a Clifford operation, the terms in $\hat H_n$ need only mutually commute. Here, we obtain measurable parts of $H_2$ by partitioning its terms into mutually commuting fragments.
comm_groups = get_commuting_group(h2) print('Number of mutually commuting fragments: {}'.format(len(comm_groups))) print('The first commuting group') print(comm_groups[1])
Number of mutually commuting fragments: 2 The first commuting group -0.32760818967480887 [] + -0.04919764587136755 [X0 X1 Y2 Y3] + 0.04919764587136755 [X0 Y1 Y2 X3] + 0.04919764587136755 [Y0 X1 X2 Y3] + -0.04919764587136755 [Y0 Y1 X2 X3] + 0.15660062488237947 [Z0 Z1] + 0.10622904490856075 [Z0 Z2] + 0.15542669077992832 [Z0 Z3] + 0.15542669077992832 [Z1 Z2] + 0.10622904490856075 [Z1 Z3] + 0.16326768673564346 [Z2 Z3]
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
To see this fragment is indeed measurable, one can construct the corresponding unitary operator $\hat U_n$.
uqwc = get_qwc_unitary(comm_groups[1]) print('This is unitary, U * U^+ = I ') print(uqwc * uqwc)
This is unitary, U * U^+ = I (0.9999999999999996+0j) []
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
Applying this unitary gives the qubit-wise commuting form of the first mutually commuting group
qwc = remove_complex(uqwc * comm_groups[1] * uqwc) print(qwc)
-0.32760818967480876 [] + 0.1554266907799282 [X0] + 0.1566006248823793 [X0 X1] + 0.04919764587136754 [X0 X1 Z3] + 0.1062290449085607 [X0 X2] + -0.04919764587136754 [X0 Z3] + 0.1062290449085607 [X1] + 0.1554266907799282 [X1 X2] + -0.04919764587136754 [X1 X2 Z3] + 0.16326768673564332 [X2] + 0.04919764587136754 [X2 Z3]
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
In addition, current quantum computers can measure only $z$ operators. Thus, QWC fragments with $x$ or $y$ operators require extra single-qubit unitaries that rotate them into $z$.
uz = get_zform_unitary(qwc) print("Checking whether U * U^+ is identity: {}".format(uz * uz)) allz = remove_complex(uz * qwc * uz) print("\nThe all-z form of qwc fragment:\n{}".format(allz))
Checking whether U * U^+ is identity: 0.9999999999999998 [] The all-z form of qwc fragment: -0.3276081896748086 [] + 0.15542669077992813 [Z0] + 0.15660062488237922 [Z0 Z1] + 0.049197645871367504 [Z0 Z1 Z3] + 0.10622904490856065 [Z0 Z2] + -0.049197645871367504 [Z0 Z3] + 0.10622904490856065 [Z1] + 0.15542669077992813 [Z1 Z2] + -0.049197645871367504 [Z1 Z2 Z3] + 0.16326768673564326 [Z2] + 0.049197645871367504 [Z2 Z3]
MIT
Project_2_VQE_Molecules/S4_Measurement.ipynb
HermanniH/CohortProject_2020_Week2
Overview

In this notebook, I will compare predictions on the 2021 season from my final model against historical odds. Data for the historical odds was gathered from [Sportsbook Reviews Online](https://www.sportsbookreviewsonline.com/scoresoddsarchives/nhl/nhloddsarchives.htm). Per their website: data is sourced from various online sportsbooks including 5dimes, BetOnline, Bookmaker, Heritage, Pinnacle Sports, Sportsbook.com as well as the Westgate Superbook in Las Vegas.

I will look at 2 simple betting strategies to determine profitability:
1. Bet $100 on every game where either the home or away team winning probability from my model is higher than the implied odds
2. Bet to win $100 on every game where either the home or away team winning probability from my model is higher than the implied odds

Data Cleaning and Merging
import pandas as pd import matplotlib.pyplot as plt import seaborn as sns import numpy as np import pickle sns.set_style("darkgrid") sns.set_context("poster") pd.set_option('display.max_columns', None) odds = pd.read_excel('data/nhl odds 2021.xlsx') odds.head() team_conversion = { 'Anaheim': 'ANA', 'Arizona' :'ARI', 'Boston': 'BOS', 'Buffalo':'BUF', 'Calgary': 'CGY', 'Carolina': 'CAR', 'Chicago': 'CHI', 'Colorado': 'COL', 'Columbus': 'CBJ', 'Dallas': 'DAL', 'Detroit': 'DET', 'Edmonton': 'EDM', 'Florida': 'FLA', 'LosAngeles': 'L.A', 'Minnesota': 'MIN', 'Montreal': 'MTL', 'Nashville': 'NSH', 'NewJersey': 'N.J', 'NYIslanders': 'NYI', 'NYRangers': 'NYR', 'Ottawa': 'OTT', 'Philadelphia': 'PHI', 'Pittsburgh': 'PIT', 'SanJose': 'S.J', 'St.Louis': 'STL', 'TampaBay': 'T.B', 'Toronto': 'TOR', 'Vancouver': 'VAN', 'Vegas':'VGK', 'Washington': 'WSH', 'Winnipeg': 'WPG'} #convert date to proper datestring and create team key odds = odds.replace({'Team': team_conversion}) odds['Month'] = odds['Date'].apply(lambda x: '0'+str(x)[0]) odds['Day'] = odds['Date'].apply(lambda x: str(x)[1:]) odds['Year'] = 2021 odds['Datestring'] = odds[['Year','Month','Day']].astype(str).apply('-'.join, 1) odds['Team_Key'] = odds['Team'].astype(str)+'_'+odds['Datestring'].astype(str) #calculate implied odds odds['Implied_odds'] = np.where(odds['Close'] < 0, (odds['Close']*-1)/((odds['Close']*-1)+100) , 100/(odds['Close']+100)) odds.head(5) #import file with predictions predictions = pd.read_csv('data/Predictions_2021b') #merge my predictions with odd df df = predictions.merge(odds.loc[:,['Team_Key', 'Implied_odds', 'Close']].add_prefix('home_'), how = 'left', left_on = 'Home_Team_Key', right_on = 'home_Team_Key').drop(columns = 'home_Team_Key') df = df.merge(odds.loc[:,['Team_Key', 'Implied_odds', 'Close']].add_prefix('away_'), how = 'left', left_on = 'Away_Team_Key', right_on = 'away_Team_Key').drop(columns = 'away_Team_Key') #odds info only contains info for games up to 5/4. These are the 15 missing games below. 
df.isna().sum() #drop missing games from df df = df.dropna() conditions = [df['Home Win Probability'] > df['home_Implied_odds'], df['Away Win Probability'] > df['away_Implied_odds'] ] choices = ['Home', 'Away'] df['Bet'] = np.select(conditions, choices, default = 'No Bet') df['Favorites'] = np.where(df['home_Implied_odds'] >df['away_Implied_odds'], 'Home', 'Away' ) conditions = [df['Bet'] == 'No Bet', df['Bet'] == df['Favorites'], df['Bet'] != df['Favorites'] ] choices = ['No Bet', 'Favorite', 'Underdog' ] df['Bet_For'] = np.select(conditions, choices) #calculate profit for 100$ per game strat conditions = [((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 1) & (df['home_Close'] <0)), ((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 1) & (df['home_Close']>0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 0) & (df['away_Close']<0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 0) & (df['away_Close']>0)), df['Bet'] == 'No Bet' ] choices = [-100 * (100/df['home_Close']), df['home_Close'], -100 * (100/df['away_Close']), df['away_Close'], 0] df['Profit_Strat1'] = np.select(conditions, choices, default = -100) #calculate profit for bet to win 100$ strat conditions = [((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 1) & (df['home_Close'] <0)), ((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 1) & (df['home_Close']>0)), ((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 0) & (df['home_Close']>0)), ((df['Bet'] == 'Home') & (df['Home_Team_Won'] == 0) & (df['home_Close']<0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 0) & (df['away_Close']<0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 0) & (df['away_Close']>0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 1) & (df['away_Close']>0)), ((df['Bet'] == 'Away') & (df['Home_Team_Won'] == 1) & (df['away_Close']<0)), df['Bet'] == 'No Bet' ] choices = [100, 100, (100/df['home_Close'])*-100, df['home_Close'], 100, 100, (100/df['away_Close'])*-100, df['away_Close'], 0] df['Profit_Strat2'] = np.select(conditions, choices) #cost of bet to win 100$ strat conditions = [((df['Bet'] == 'Home') & (df['home_Close']>0)), ((df['Bet'] == 'Home') & (df['home_Close']<0)), ((df['Bet'] == 'Away') & (df['away_Close']>0)), ((df['Bet'] == 'Away') & (df['away_Close']<0)), df['Bet'] == 'No Bet' ] choices = [(100/df['home_Close'])*100, df['home_Close']*-1, (100/df['away_Close'])*100, df['away_Close']*-1, 0] df['Cost_Strat2'] = np.select(conditions, choices) #convert date to pandas datetime df['date'] = pd.to_datetime(df['date']) #calculate cumulative profit for poth strategies df['Profit_Strat2_cumsum'] = df['Profit_Strat2'].cumsum() df['Profit_Strat1_cumsum'] = df['Profit_Strat1'].cumsum() df['Won_Bet'] = np.where(df['Profit_Strat2'] > 0, 1, 0) df.head()
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
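The implied-probability conversion used above (from American moneyline odds) can be summarized as a small standalone function; this is a minimal sketch mirroring the `np.where` logic in the cell above, with the helper name `implied_probability` and the example odds chosen purely for illustration.

def implied_probability(moneyline):
    # negative moneyline (favourite): risk |odds| to win 100
    if moneyline < 0:
        return -moneyline / (-moneyline + 100)
    # positive moneyline (underdog): risk 100 to win the quoted odds
    return 100 / (moneyline + 100)

print(implied_probability(-150))  # 0.6
print(implied_probability(130))   # ~0.435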
---

Evaluation

Let's check the log loss from the implied odds. My model's log loss on the 2021 season was 0.655534, so the book implied odds are still performing slightly better with a log loss of 0.6529.
from sklearn.metrics import log_loss, accuracy_score

io_list = []
for index, row in df.iterrows():
    io_list.append([row['away_Implied_odds'], row['home_Implied_odds']])
log_loss(df['Home_Team_Won'], io_list)
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
How many bets would be for home vs away vs no bet? My model is definitely favoring the home team. From the EDA notebook, the home team won 56.0% of games in the 17-18 season, 53.7% in 18-19, 53.1% in 19-20 and only 52.7% in the 20-21 season. The 20-21 season having no fans may be affecting this outcome for the home team and may have hurt the model slightly for the 20-21 season.
df['Bet'].value_counts()
df['Bet'].value_counts(normalize=True)
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
How many bets were for the favorite vs the underdog? Interestingly, the model liked underdogs more often.
df['Bet_For'].value_counts(normalize=True)
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
The strategy of betting to win $100 resulted in a per-bet ROI of 2.04%.
# ROI per bet
df['Profit_Strat2'].sum() / df['Cost_Strat2'].sum()
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
Total profit for this strategy would have been $1,473.69
# total profit
df['Profit_Strat2'].sum()
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
The strategy was profitable initially and dipped into the red for a short period in mid-March. You would have only needed an initial bankroll of $325 to implement this, and then would have needed to re-up $244 later, for total out-of-pocket costs of $569.
# initial bankroll needed
df[df['date'] == '2021-01-13']['Cost_Strat2'].sum()
df[df['Profit_Strat2_cumsum'] < 0]
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
I would have won only 49.6% of bets, but since the majority of bets were for the underdog, the lower costs benefited profitability.
df[df['Bet'] != 'No Bet']['Won_Bet'].value_counts(normalize = True)
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
Strategy 1, betting $100 on every bettable game, was slightly profitable.
fig, ax = plt.subplots(figsize = (16,12)) ax = sns.lineplot(x = df['date'], y = df['Profit_Strat1_cumsum'], color = 'green') ax.set_title('Cumulative Profit', fontsize = 24) ax.set_ylabel('Cumulative Profit', fontsize =16, ) ax.set_xlabel('Date', fontsize =16) plt.xticks(rotation=45, fontsize = 16) ax.axhline(0, linestyle = 'dashed', color = 'black') ax.set_ylim(-2000,4000) plt.show()
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
Strategy 2, betting to win $100 on every bettable game, was profitable.
fig, ax = plt.subplots(figsize = (16,12)) ax = sns.lineplot(x = df['date'], y = df['Profit_Strat2_cumsum'], color = 'green') ax.set_title('Cumulative Profit', fontsize = 24) ax.set_ylabel('Cumulative Profit', fontsize =18, ) ax.set_xlabel('Date', fontsize =18) plt.xticks(rotation=45, fontsize = 18) ax.axhline(0, linestyle = 'dashed', color = 'black') ax.set_ylim(-1000,4000) plt.show() strat2 = pd.DataFrame(df.groupby('date').agg({'Profit_Strat2': 'sum'})).reset_index() strat2['Cumulative Profit'] = strat2['Profit_Strat2'].cumsum() strat2['date'] = pd.to_datetime(strat2['date']) strat2.head() fig, ax = plt.subplots(figsize = (16,12)) ax = sns.lineplot(x = strat2['date'], y = strat2['Profit_Strat2'], palette = 'Blues') ax.set_title('Daily Profit', fontsize = 18) ax.set_ylabel('Daily Profit', fontsize =12, ) ax.set_xlabel('Date', fontsize =12) plt.xticks(rotation='vertical', fontsize = 12) ax.axhline(0, color = 'black', linestyle = 'dashed') ax.set_ylim(-900,900) plt.show()
_____no_output_____
CC0-1.0
Evaluating Model Profitability.ipynb
gschwaeb/NHL_Game_Prediction
Exploring a Movie Dataset

In this project, you will apply what you have learned, using functions from the `NumPy`, `Pandas`, `matplotlib`, and `seaborn` libraries, to explore a movie dataset.

Download the dataset: [TMDb movie data](https://s3.cn-north-1.amazonaws.com.cn/static-documents/nd101/explore+dataset/tmdb-movies.csv)

Meaning of each column: id (ID), imdb_id (IMDB ID), popularity, budget, revenue, original_title (title), cast, homepage (website), director, tagline, keywords, overview, runtime, genres, production_companies (production companies), release_date (release date), vote_count (vote count), vote_average (vote average), release_year (release year), budget_adj (adjusted budget), revenue_adj (adjusted revenue).

**Please note that you need to submit the `.html`, `.ipynb`, and `.py` files exported from this report.**

------

Section 1: Importing and preprocessing the data

In this part, you need to write code that uses Pandas to read the data and preprocess it.

**Task 1.1:** Import libraries and data
1. Load the required libraries `NumPy`, `Pandas`, `matplotlib`, `seaborn`.
2. Use the `Pandas` library to read the data in `tmdb-movies.csv` and save it as `movie_data`.

Hint: remember to use the notebook magic command `%matplotlib inline`, otherwise you will not be able to display plots later on.
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb

# read the data
movie_data = pd.read_csv('./tmdb-movies.csv')
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
---
**Task 1.2:** Understand the data

You will come across all kinds of data tables, so after reading one in it is worth using a few simple methods to get a feel for what it looks like.
1. Get the number of rows and columns of the table and print them.
2. Use the `.head()`, `.tail()`, and `.sample()` methods to inspect the table.
3. Use the `.dtypes` attribute to check the data type of each column.
4. Use `isnull()` together with `.any()` and similar methods to check whether any column contains null values.
5. Use the `.describe()` method to see how the numeric columns are distributed.
print('1. The movie dataset has (rows, columns):', movie_data.shape)
movie_data.head()    # 2-1 first five rows of the movie dataset
movie_data.tail()    # 2-2 last five rows of the movie dataset
movie_data.sample()  # 2-3 a random sample from the movie dataset
movie_data.dtypes    # 3 data type of each column
movie_data.isnull().any()  # 4 check each column for NaN values
movie_data['id'].describe()            # 5-1 descriptive statistics for the id column (int64)
movie_data['popularity'].describe()    # 5-2 popularity column (float64)
movie_data['budget'].describe()        # 5-3 budget column (int64)
movie_data['revenue'].describe()       # 5-4 revenue column (int64)
movie_data['runtime'].describe()       # 5-5 runtime column (int64)
movie_data['vote_count'].describe()    # 5-6 vote_count column (int64)
movie_data['vote_average'].describe()  # 5-7 vote_average column (float64)
movie_data['release_year'].describe()  # 5-8 release_year column (int64)
movie_data['budget_adj'].describe()    # 5-9 adjusted budget budget_adj column (float64)
movie_data['revenue_adj'].describe()   # 5-10 adjusted revenue revenue_adj column (float64)
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
---
**Task 1.3:** Clean the data

In real-world work, data processing is often the most time-consuming and labour-intensive step. Fortunately, the tmdb dataset we provide is very "clean" and does not need much cleaning or processing. In this step, your main job is to handle the null values in the table. You can use `.fillna()` to fill null values, or `.dropna()` to drop rows or columns that contain them.

Task: use an appropriate method to clean the null values and save the resulting data.
# Replace all NaN values with 0 and save the result to movie_data_adj
print("Before cleaning there are", movie_data.isnull().sum().sum(), "NaN values")
movie_data_adj = movie_data.fillna(0)
print("After cleaning there are", movie_data_adj.isnull().sum().sum(), "NaN values")
处理前NaN值有: 13434 个 处理前NaN值有: 0 个
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
------

Section 2: Reading data according to specified requirements

Compared with data-analysis software such as Excel, one of Pandas' great strengths is that it can easily select the right data based on complex logic. Retrieving the appropriate data from a table according to specified requirements is therefore a very important Pandas skill, and it is the focus of this section.

---
**Task 2.1:** Simple reads
1. Read the columns named `id`, `popularity`, `budget`, `runtime`, `vote_average`.
2. Read rows 1-20 as well as rows 48 and 49.
3. Read the `popularity` column for rows 50-60.

Requirement: each statement must be implemented with a single line of code.
# Note: referenced the course notes and https://blog.csdn.net/u011089523/article/details/60341016
# 2.1.1 read specific columns
# read each column separately with df[['column_name']]
movie_data_id = movie_data[['id']]
movie_data_pop = movie_data[['popularity']]
movie_data_bud = movie_data[['budget']]
movie_data_rt = movie_data[['runtime']]
movie_data_vote_avg = movie_data[['vote_average']]
# read several columns at once with df[['col1', 'col2', ...]]
movie_data_sel = movie_data[['id', 'popularity', 'budget', 'runtime', 'vote_average']]

# 2.1.2 read specific rows
# two ways to read the first 20 rows: df.head(n) or df[m:n]
movie_data_rows_1to20_1 = movie_data.head(20)
movie_data_rows_1to20_2 = movie_data[0:20]
# read rows 48 and 49; note the index starts at 0 and slices are half-open
movie_data_rows_48to49 = movie_data[47:49]
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
---
**Task 2.2:** Logical indexing
1. Read all rows where **`popularity` is greater than 5**.
2. Read all rows where **`popularity` is greater than 5** and the **release year is after 1996**.

Hint: Pandas logical operators such as `&` and `|` stand for "and" and "or" respectively.
Requirement: implement this with logical indexing.
# Reference: https://blog.csdn.net/GeekLeee/article/details/75268762
# 1. read all rows with popularity > 5
movie_data_pop_morethan5 = movie_data.loc[movie_data['popularity'] > 5]
# 2. read all rows with popularity > 5 and release_year > 1996
movie_data_pop5p_rls1996p = movie_data.loc[(movie_data['popularity'] > 5) & (movie_data['release_year'] > 1996)]
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
---
**Task 2.3:** Grouped reads
1. Group by `release_year` and use [`.agg`](http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.core.groupby.DataFrameGroupBy.agg.html) to get the mean of `revenue`.
2. Group by `director` and use [`.agg`](http://pandas.pydata.org/pandas-docs/version/0.22/generated/pandas.core.groupby.DataFrameGroupBy.agg.html) to get the mean of `popularity`, sorted from high to low.

Requirement: use the `Groupby` command.
data = movie_data
# group by release_year and get the mean of revenue
revenue_mean_groupby_rlsyear = data.groupby(['release_year'])['revenue'].agg('mean')
# group by director, get the mean of popularity, and sort from high to low
popularity_mean_groupby_director = data.groupby(['director'])['popularity'].agg('mean').sort_values(ascending=False)
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
------

Section 3: Plotting and visualization

Next, try plotting and visualizing your data. The most important thing in this section is being able to choose an appropriate chart for a specific visualization goal. A visualization goal is the information or change you hope to observe through the visualization, for example how revenue changes over time, or which director is the most popular.

Visualization goals and suitable charts:
- distribution of a single attribute: pie chart, histogram, scatter plot
- how an attribute changes with a variable: bar chart, line chart, heat map
- relationships among multiple attributes: scatter plot, violin plot, stacked bar chart, stacked line chart

In this part, you need to choose an appropriate chart for each question, plot it, and analyse it accordingly. The optional questions are somewhat harder; feel free to take on the challenge!

**Task 3.1:** Plot the `popularity` values of the 20 movies with the highest `popularity`.
base_color = sb.color_palette()[0]  # take the first color
y_count = movie_data_adj['popularity'][:20]
"""
I'm not sure how to plot this part. Should it be a bar chart or a histogram? Neither feels quite right:
a pie chart doesn't suit 20 slices, and histograms/bar charts seem to have x/y issues here.
A bar chart is used below as a workaround, although it doesn't feel ideal.
Another question: how can the exact value of each bar be annotated in sb.barplot?
With countplot there is a way to annotate frequencies, and I assume barplot values can be annotated too,
but I couldn't find documentation or examples... I'm a bit confused, please advise, thanks!!!
"""
# plot
sb.barplot(x = y_count.index.values+1, y = y_count, color = base_color, orient = "v")
"""
From the chart we can see:
The most popular movie (32.98) and the second (28.41) are far more popular than the third and all later
movies, by more than a factor of two. Movies ranked 3rd through 20th differ little in popularity, with
values all in the 5-15 range, fairly stable.
""";
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
---
**Task 3.2:** Analyse how movie net profit (revenue minus budget) changes with the year, and briefly discuss the result.
# We want net profit as a function of time, so a line plot is appropriate
# adjust the bin edges and centers
xbin_edges = np.arange(1960, movie_data_adj['release_year'].max()+2, 2)
xbin_centers = (xbin_edges + 0.25/2)[:-1]
# compute the statistics within each bin
data_xbins = pd.cut(movie_data_adj['release_year'], xbin_edges, right = False, include_lowest = True)
y_means = movie_data_adj['revenue_adj'].groupby(data_xbins).mean() - movie_data_adj['budget_adj'].groupby(data_xbins).mean()
y_sems = movie_data_adj['revenue_adj'].groupby(data_xbins).sem() - movie_data_adj['budget_adj'].groupby(data_xbins).sem()
# plot
plt.errorbar(x = xbin_centers, y = y_means, yerr = y_sems)
plt.xlabel('release year');
plt.ylabel('Net profit');
"""
From the plot we can see:
As the release year (of the movies) increases, net profit first falls, then rises, then falls again over
1960-1970, which is fairly unstable; it then peaks in the 1970-1980 period, when the movie market was
clearly booming; after 1980 net profit shows an overall year-by-year decline and stabilizes as the market
matures. The variation in net profit (the error bars) is larger in 1960-1980, consistent with a market that
was just emerging, and smaller and more stable after 1980.
PS: I'm not sure how to write the analysis - which angles to take, what should be covered and what can be left out...
""";
_____no_output_____
MIT
P2_Explore_Movie_Dataset/Explore Movie Dataset.ipynb
CCCCCaO/My_AIPND_Projects
This Notebook - Goals - FOR EDINA

**What?:**
- Standard classification method example/tutorial

**Who?:**
- Researchers in ML
- Students in computer science
- Teachers in ML/STEM

**Why?:**
- Demonstrate capability/simplicity of core scipy stack.
- Demonstrate common ML concept known to learners and used by researchers.

**Noteable features to exploit:**
- use of pre-installed libraries: numpy, scikit-learn, matplotlib

**How?:**
- clear to understand - minimise assumed knowledge
- clear visualisations - concise explanations
- recognisable/familiar - use standard methods
- effective use of core libraries

Classification - K nearest neighbours

K nearest neighbours is a simple and effective way to deal with classification problems. This method classifies each sample based on the class of the points that are closest to it. This is a supervised learning method, meaning that the data used contains information on the feature that the model should predict. This notebook shows the process of classifying handwritten digits.

Import libraries

On Noteable, all the libraries required for this notebook are pre-installed, so they simply need to be imported:
import numpy as np
import sklearn.datasets as ds
import sklearn.model_selection as ms
from sklearn import decomposition
from sklearn import neighbors
from sklearn import metrics

import matplotlib.pyplot as plt
%matplotlib inline
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
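Before using scikit-learn's implementation below, here is a minimal NumPy sketch of the k-NN idea itself: classification reduces to a majority vote among the k training points closest to the new sample. The helper `knn_predict` and the toy arrays `X_toy`/`y_toy` are purely illustrative and are not used elsewhere in the notebook.

import numpy as np

def knn_predict(X_train, y_train, x_new, k=3):
    # Euclidean distance from the new point to every training point
    dists = np.linalg.norm(X_train - x_new, axis=1)
    # labels of the k nearest neighbours
    nearest_labels = y_train[np.argsort(dists)[:k]]
    # majority vote among those labels
    return np.bincount(nearest_labels).argmax()

X_toy = np.array([[0, 0], [0, 1], [1, 0], [5, 5], [5, 6], [6, 5]])
y_toy = np.array([0, 0, 0, 1, 1, 1])
print(knn_predict(X_toy, y_toy, np.array([0.5, 0.5])))  # 0 - near the first cluster
print(knn_predict(X_toy, y_toy, np.array([5.5, 5.5])))  # 1 - near the second cluster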
Data - Handwritten Digits

In terms of data, [scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html) has a loading function for some data regarding handwritten digits.
# get the digits data from scikit-learn into the notebook
digits = ds.load_digits()
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
The cell above loads the data as a [bunch object](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_digits.html), meaning that the data (in this case images of handwritten digits) and the target (the number that is written) can be split by accessing the attributes of the bunch object:
# store data and targets separately
X = digits.data
y = digits.target

print("The data is of the shape", X.shape)
print("The target data is of the shape", y.shape)
The data is of the shape (1797, 64) The target data is of the shape (1797,)
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
The individual samples in the X array each represent an image. In this representation, 64 numbers are used to represent a greyscale value on an 8\*8 square. The images can be examined by using pyplot's [matshow](https://matplotlib.org/3.3.0/api/_as_gen/matplotlib.pyplot.matshow.html) function.The next cell displays the 17th sample in the dataset as an 8\*8 image.
# create figure to display the 17th sample
fig = plt.matshow(digits.images[17], cmap=plt.cm.gray)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Suppose instead of viewing the 17th sample, we want to see the average of samples corresponding to a certain value. This can be done as follows (using 0 as an example):
- All samples where the target value is 0 are located
- The mean of these samples is taken
- The resulting 64-long array is reshaped to be 8\*8 (for display)
- The image is displayed
# take samples with target=0
izeros = np.where(y == 0)
# take average across samples, reshape to visualise
zeros = np.mean(X[izeros], axis=0).reshape(8,8)
# display
fig = plt.matshow(zeros, cmap=plt.cm.gray)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Fit and test the model

Split the data

Now that you have an understanding of the data, the model can be fitted. Fitting the model involves setting some of the data aside for testing, and allowing the model to "see" the target values corresponding to the training samples. Once the model has been fitted to the training data, the model will be tested on some data it has not seen before.

The next cell uses [train_test_split](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html) to shuffle all data, then set some data aside for testing later. For this example, $\frac{1}{4}$ of the data will be set aside for testing, and the model will be trained on the remaining training set.

As before, X corresponds to data samples, and y corresponds to labels.
# split data to train and test sets
X_train, X_test, y_train, y_test = \
    ms.train_test_split(X, y, test_size=0.25, shuffle=True, random_state=22)
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
The data can be examined - here you can see that 1347 samples have been put into the training set, and 450 have been set aside for testing.
# print shape of data
print("training samples:", X_train.shape)
print("testing samples :", X_test.shape)
print("training targets:", y_train.shape)
print("testing targets :", y_test.shape)
training samples: (1347, 64) testing samples : (450, 64) training targets: (1347,) testing targets : (450,)
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Using PCA to visualise data

Before diving into classifying, it is useful to visualise the data. Since each sample has 64 dimensions, some dimensionality reduction is needed in order to visualise the samples as points on a 2D map. One of the easiest ways of visualising high-dimensional data is by principal component analysis (PCA). This maps the 64-dimensional image data onto a lower-dimensional map (here we will map to 2D) so it can be easily viewed on a screen. In this case, the 2 most important "components" are maintained.
# create PCA model with 2 components
pca = decomposition.PCA(n_components=2)
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
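For intuition, PCA with 2 components amounts to projecting the mean-centred data onto its two leading right singular vectors. The following minimal NumPy sketch, on random stand-in data rather than the digits, mirrors roughly what `decomposition.PCA` does (up to sign conventions); the array names are illustrative only.

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(100, 64))          # stand-in for 100 samples with 64 features
A_centred = A - A.mean(axis=0)          # PCA works on mean-centred data
U, S, Vt = np.linalg.svd(A_centred, full_matrices=False)
A_2d = A_centred @ Vt[:2].T             # project onto the 2 leading principal directions
print(A_2d.shape)                       # (100, 2)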
The next step is to perform the PCA on the samples, and store the results.
# transform training data to 2 principal components
X_pca = pca.fit_transform(X_train)
# transform test data to 2 principal components
T_pca = pca.transform(X_test)
# check shape of result
print(X_pca.shape)
print(T_pca.shape)
(1347, 2) (450, 2)
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
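As an optional aside (not part of the original exercise), you can check how much of the original variation the two retained components capture using the fitted PCA model's explained_variance_ratio_ attribute:

# fraction of total variance captured by each of the two components
print(pca.explained_variance_ratio_)
# total variance retained by the 2D representation
print(pca.explained_variance_ratio_.sum())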
As you can see from the printed shapes above, the X_pca and T_pca data are now represented by only 2 elements per sample, while the number of samples has remained the same.

Now that there is a 2D representation of the data, it can be plotted on a regular scatter graph. Since the labels corresponding to each point are stored in the y_train variable, the plot can be colour-coded by target value: different coloured dots have different target values.
# choose the colours for each digit cmap_digits = plt.cm.tab10 # plot training data with labels plt.figure(figsize = (9,6)) plt.scatter(X_pca[:,0], X_pca[:,1], s=7, c=y_train, cmap=cmap_digits, alpha=0.7) plt.title("Training data coloured by target value") plt.colorbar();
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Create and fit the model

The scikit-learn library allows fitting of a k-NN model just as with PCA above. First, create the classifier:
# create model knn = neighbors.KNeighborsClassifier()
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
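The classifier above uses scikit-learn's defaults (5 nearest neighbours). If you want to experiment, the number of neighbours can be set explicitly; the value 3 below is purely illustrative, and the rest of the notebook continues with the default `knn` model:

# optional: a classifier with an explicit number of neighbours (illustrative only)
knn3 = neighbors.KNeighborsClassifier(n_neighbors=3)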
The next step fits the k-NN model using the training data.
# fit model to training data knn.fit(X_train,y_train);
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Test model

Now use the data that was set aside earlier - this stage involves getting the model to "guess" the samples' classes (this time without seeing their target values). Once the model has predicted each sample's class, a score can be calculated by checking how many samples it guessed correctly.
# predict test data preds = knn.predict(X_test) # test model on test data score = round(knn.score(X_test,y_test)*100, 2) print("Score on test data: " + str(score) + "%")
Score on test data: 98.44%
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
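As an optional sanity check (not part of the original notebook), the score above can be recomputed directly as the fraction of predictions that match the true labels:

# optional sanity check: accuracy is the fraction of correct predictions
manual_score = np.mean(preds == y_test) * 100
print("Manual score on test data: " + str(round(manual_score, 2)) + "%")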
98.44% is a very high score, one that is unlikely to be seen in real-life applications of the method.

It can often be useful to visualise the results of your example. Below are plots showing:
- The labels that the model predicted for the test data
- The actual labels for the test data
- The data points that were incorrectly labelled

In this case, the predicted and actual plots are very similar, so these plots are not very informative. In other cases, this kind of visualisation may reveal patterns for you to explore further.
# plot 3 axes fig, axes = plt.subplots(2,2,figsize=(12,12)) # top left axis for predictions axes[0,0].scatter(T_pca[:,0], T_pca[:,1], s=5, c=preds, cmap=cmap_digits) axes[0,0].set_title("Predicted labels") # top right axis for actual targets axes[0,1].scatter(T_pca[:,0], T_pca[:,1], s=5, c=y_test, cmap=cmap_digits) axes[0,1].set_title("Actual labels") # bottom left axis coloured to show correct and incorrect axes[1,0].scatter(T_pca[:,0], T_pca[:,1], s=5, c=(preds==y_test)) axes[1,0].set_title("Incorrect labels") # bottom right axis not used axes[1,1].set_axis_off()
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
So which samples did the model get wrong? There were 7 samples that were misclassified. These can be displayed alongside their actual and predicted labels using the cell below:
# find the misclassified samples misclass = np.where(preds!=y_test)[0] # display misclassified samples r, c = 1, len(misclass) fig, axes = plt.subplots(r,c,figsize=(10,5)) for i in range(c): ax = axes[i] ax.matshow(X_test[misclass[i]].reshape(8,8),cmap=plt.cm.gray) ax.set_axis_off() act = y_test[misclass[i]] pre = preds[misclass[i]] strng = "actual: {a:.0f} \npredicted: {p:.0f}".format(a=act, p=pre) ax.set_title(strng)
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
Additionally, a confusion matrix can be used to identify which samples are misclassified by the model. This can help you spot whether there are classes that are commonly confused - for example, you may find that 8's are often mistaken for 1's.
# confusion matrix conf = metrics.confusion_matrix(y_test,preds) # figure f, ax = plt.subplots(figsize=(9,5)) im = ax.imshow(conf, cmap=plt.cm.RdBu) # set labels as ticks on axes ax.set_xticks(np.arange(10)) ax.set_yticks(np.arange(10)) ax.set_xticklabels(list(range(0,10))) ax.set_yticklabels(list(range(0,10))) ax.set_ylim(9.5,-0.5) # axes labels ax.set_ylabel("actual value") ax.set_xlabel("predicted value") ax.set_title("Digit classification confusion matrix") # display plt.colorbar(im).set_label(label="number of classifications")
_____no_output_____
BSD-3-Clause
GeneralExemplars/MLExemplars/Classification_k_NN_Notebook.ipynb
jstix/Exemplars2020
The ``FloatInput`` widget allows selecting a floating-point value using a spinbox. It behaves like a slider except that the lower and upper bounds are optional and a specific value can be entered. The value can be changed using the keyboard (up, down, page up, page down), the mouse wheel and the arrow buttons.

For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Param.ipynb).

Parameters:

For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).

Core

* **``value``** (float): The initial value of the spinner
* **``value_throttled``** (float): The initial value of the spinner
* **``step``** (float): The step added or subtracted to the current value on each click
* **``start``** (float): Optional minimum allowable value
* **``end``** (float): Optional maximum allowable value
* **``format``** (str): Optional format to convert the float value into a string, see: http://numbrojs.com/old-format.html
* **``page_step_multiplier``** (int): Defines the multiplication factor applied to step when the page up and page down keys are pressed.

Display

* **``disabled``** (boolean): Whether the widget is editable
* **``name``** (str): The title of the widget
* **``placeholder``** (str): A placeholder string displayed when no value is entered

___
float_input = pn.widgets.FloatInput(name='FloatInput', value=5., step=1e-1, start=0, end=1000) float_input
_____no_output_____
BSD-3-Clause
examples/reference/widgets/FloatInput.ipynb
slamer59/panel
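The optional parameters listed above can be combined in the constructor. The example below is illustrative only: the chosen values are arbitrary, and the format string follows the numbro syntax linked in the parameter list:

# illustrative use of some optional parameters (arbitrary values)
formatted_input = pn.widgets.FloatInput(
    name='Formatted FloatInput', value=5., step=0.1, start=0, end=1000,
    format='0.00', page_step_multiplier=10)
formatted_input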
``FloatInput.value`` returns a float value:
float_input.value
_____no_output_____
BSD-3-Clause
examples/reference/widgets/FloatInput.ipynb
slamer59/panel
Controls

The `FloatInput` widget exposes a number of options which can be changed from both Python and Javascript. Try out the effect of these parameters interactively:
pn.Row(float_input.controls(jslink=True), float_input)
_____no_output_____
BSD-3-Clause
examples/reference/widgets/FloatInput.ipynb
slamer59/panel
Bahdanau Attention
:label:`sec_seq2seq_attention`

We studied the machine translation problem in :numref:`sec_seq2seq`, where we designed an encoder-decoder architecture based on two RNNs for sequence-to-sequence learning. Specifically, the RNN encoder transforms a variable-length sequence into a fixed-shape context variable, then the RNN decoder generates the output (target) sequence token by token based on the generated tokens and the context variable. However, even though not all the input (source) tokens are useful for decoding a certain token, the *same* context variable that encodes the entire input sequence is still used at each decoding step.

In a separate but related challenge of handwriting generation for a given text sequence, Graves designed a differentiable attention model to align text characters with the much longer pen trace, where the alignment moves only in one direction :cite:`Graves.2013`. Inspired by the idea of learning to align, Bahdanau et al. proposed a differentiable attention model without the severe unidirectional alignment limitation :cite:`Bahdanau.Cho.Bengio.2014`. When predicting a token, if not all the input tokens are relevant, the model aligns (or attends) only to the parts of the input sequence that are relevant to the current prediction. This is achieved by treating the context variable as an output of attention pooling.

Model

When describing Bahdanau attention for the RNN encoder-decoder below, we will follow the same notation as in :numref:`sec_seq2seq`. The new attention-based model is the same as that in :numref:`sec_seq2seq` except that the context variable $\mathbf{c}$ in :eqref:`eq_seq2seq_s_t` is replaced by $\mathbf{c}_{t'}$ at any decoding time step $t'$. Suppose that there are $T$ tokens in the input sequence; the context variable at decoding time step $t'$ is then the output of attention pooling:

$$\mathbf{c}_{t'} = \sum_{t=1}^T \alpha(\mathbf{s}_{t' - 1}, \mathbf{h}_t) \mathbf{h}_t,$$

where the decoder hidden state $\mathbf{s}_{t' - 1}$ at time step $t' - 1$ is the query, the encoder hidden states $\mathbf{h}_t$ are both the keys and values, and the attention weight $\alpha$ is computed as in :eqref:`eq_attn-scoring-alpha` using the additive attention scoring function defined by :eqref:`eq_additive-attn`.

Slightly different from the vanilla RNN encoder-decoder architecture in :numref:`fig_seq2seq_details`, the same architecture with Bahdanau attention is depicted in :numref:`fig_s2s_attention_details`.

![Layers in an RNN encoder-decoder model with Bahdanau attention.](../img/seq2seq-attention-details.svg)
:label:`fig_s2s_attention_details`
from d2l import torch as d2l import torch from torch import nn
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
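Before defining the full decoder, it may help to see the attention-pooling equation above in isolation. The following is a minimal sketch with random tensors, reusing d2l's additive attention (the same module the decoder below relies on); all shapes and values are illustrative only.

# minimal sketch of c_{t'} = sum_t alpha(s_{t'-1}, h_t) h_t with random tensors
batch_size, num_steps, num_hiddens = 2, 7, 16
enc_hiddens = torch.randn(batch_size, num_steps, num_hiddens)  # h_1, ..., h_T (keys and values)
dec_hidden = torch.randn(batch_size, 1, num_hiddens)           # s_{t'-1} (query)
attention = d2l.AdditiveAttention(num_hiddens, num_hiddens, num_hiddens, dropout=0)
attention.eval()
# all 7 encoder steps are treated as valid here
context = attention(dec_hidden, enc_hiddens, enc_hiddens,
                    torch.tensor([num_steps] * batch_size))
context.shape  # (batch_size, 1, num_hiddens)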
Defining the Decoder with Attention

To implement the RNN encoder-decoder with Bahdanau attention, we only need to redefine the decoder. To visualize the learned attention weights more conveniently, the following `AttentionDecoder` class defines the base interface for decoders with attention mechanisms.
#@save class AttentionDecoder(d2l.Decoder): """The base attention-based decoder interface.""" def __init__(self, **kwargs): super(AttentionDecoder, self).__init__(**kwargs) @property def attention_weights(self): raise NotImplementedError
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
Now let us implement the RNN decoder with Bahdanau attention in the following `Seq2SeqAttentionDecoder` class. The state of the decoder is initialized with i) the encoder final-layer hidden states at all the time steps (as keys and values of the attention); ii) the encoder all-layer hidden state at the final time step (to initialize the hidden state of the decoder); and iii) the encoder valid length (to exclude the padding tokens in attention pooling). At each decoding time step, the decoder final-layer hidden state at the previous time step is used as the query of the attention. As a result, both the attention output and the input embedding are concatenated as the input of the RNN decoder.
class Seq2SeqAttentionDecoder(AttentionDecoder): def __init__(self, vocab_size, embed_size, num_hiddens, num_layers, dropout=0, **kwargs): super(Seq2SeqAttentionDecoder, self).__init__(**kwargs) self.attention = d2l.AdditiveAttention( num_hiddens, num_hiddens, num_hiddens, dropout) self.embedding = nn.Embedding(vocab_size, embed_size) self.rnn = nn.GRU( embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout) self.dense = nn.Linear(num_hiddens, vocab_size) def init_state(self, enc_outputs, enc_valid_lens, *args): # Shape of `outputs`: (`num_steps`, `batch_size`, `num_hiddens`). # Shape of `hidden_state[0]`: (`num_layers`, `batch_size`, # `num_hiddens`) outputs, hidden_state = enc_outputs return (outputs.permute(1, 0, 2), hidden_state, enc_valid_lens) def forward(self, X, state): # Shape of `enc_outputs`: (`batch_size`, `num_steps`, `num_hiddens`). # Shape of `hidden_state[0]`: (`num_layers`, `batch_size`, # `num_hiddens`) enc_outputs, hidden_state, enc_valid_lens = state # Shape of the output `X`: (`num_steps`, `batch_size`, `embed_size`) X = self.embedding(X).permute(1, 0, 2) outputs, self._attention_weights = [], [] for x in X: # Shape of `query`: (`batch_size`, 1, `num_hiddens`) query = torch.unsqueeze(hidden_state[-1], dim=1) # Shape of `context`: (`batch_size`, 1, `num_hiddens`) context = self.attention( query, enc_outputs, enc_outputs, enc_valid_lens) # Concatenate on the feature dimension x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1) # Reshape `x` as (1, `batch_size`, `embed_size` + `num_hiddens`) out, hidden_state = self.rnn(x.permute(1, 0, 2), hidden_state) outputs.append(out) self._attention_weights.append(self.attention.attention_weights) # After fully-connected layer transformation, shape of `outputs`: # (`num_steps`, `batch_size`, `vocab_size`) outputs = self.dense(torch.cat(outputs, dim=0)) return outputs.permute(1, 0, 2), [enc_outputs, hidden_state, enc_valid_lens] @property def attention_weights(self): return self._attention_weights
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
In the following, we test the implemented decoder with Bahdanau attention using a minibatch of 4 sequence inputs of 7 time steps.
encoder = d2l.Seq2SeqEncoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) encoder.eval() decoder = Seq2SeqAttentionDecoder(vocab_size=10, embed_size=8, num_hiddens=16, num_layers=2) decoder.eval() X = torch.zeros((4, 7), dtype=torch.long) # (`batch_size`, `num_steps`) state = decoder.init_state(encoder(X), None) output, state = decoder(X, state) output.shape, len(state), state[0].shape, len(state[1]), state[1][0].shape
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
Training

Similar to :numref:`sec_seq2seq_training`, here we specify hyperparameters, instantiate an encoder and a decoder with Bahdanau attention, and train this model for machine translation. Due to the newly added attention mechanism, this training is much slower than that in :numref:`sec_seq2seq_training` without attention mechanisms.
embed_size, num_hiddens, num_layers, dropout = 32, 32, 2, 0.1 batch_size, num_steps = 64, 10 lr, num_epochs, device = 0.005, 250, d2l.try_gpu() train_iter, src_vocab, tgt_vocab = d2l.load_data_nmt(batch_size, num_steps) encoder = d2l.Seq2SeqEncoder( len(src_vocab), embed_size, num_hiddens, num_layers, dropout) decoder = Seq2SeqAttentionDecoder( len(tgt_vocab), embed_size, num_hiddens, num_layers, dropout) net = d2l.EncoderDecoder(encoder, decoder) d2l.train_seq2seq(net, train_iter, lr, num_epochs, tgt_vocab, device)
loss 0.020, 4902.7 tokens/sec on cuda:0
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
After the model is trained, we use it to translate a few English sentences into French and compute their BLEU scores.
engs = ['go .', "i lost .", 'he\'s calm .', 'i\'m home .'] fras = ['va !', 'j\'ai perdu .', 'il est calme .', 'je suis chez moi .'] for eng, fra in zip(engs, fras): translation, dec_attention_weight_seq = d2l.predict_seq2seq( net, eng, src_vocab, tgt_vocab, num_steps, device, True) print(f'{eng} => {translation}, ', f'bleu {d2l.bleu(translation, fra, k=2):.3f}') attention_weights = torch.cat( [step[0][0][0] for step in dec_attention_weight_seq], 0).reshape( (1, 1, -1, num_steps))
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
By visualizing the attention weights when translating the last English sentence, we can see that each query assigns non-uniform weights over key-value pairs. This shows that at each decoding step, different parts of the input sequence are selectively aggregated in the attention pooling.
# Plus one to include the end-of-sequence token
d2l.show_heatmaps(
    attention_weights[:, :, :, :len(engs[-1].split()) + 1].cpu(),
    xlabel='Key positions', ylabel='Query positions')
_____no_output_____
MIT
d2l-en/pytorch/chapter_attention-mechanisms/bahdanau-attention.ipynb
gr8khan/d2lai
Assignment 3: CNN Exercise

Load libraries
import pickle import numpy as np import pandas as pd from keras.datasets import mnist from keras.utils import to_categorical from keras import layers from keras import models import matplotlib.pyplot as plt from numpy.random import seed from keras.utils import plot_model import csv import json from keras.callbacks import EarlyStopping
C:\ProgramData\Anaconda3\lib\site-packages\h5py\__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. from ._conv import register_converters as _register_converters Using TensorFlow backend.
Apache-2.0
experiment.ipynb
JeyDi/Digits-ConvNeuralNet
Load the dataset
#Load the dataset
try:
    #Load the MNIST data
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()

    #Reshape and transform the input
    train_images = train_images.reshape((60000, 28, 28, 1))
    train_images = train_images.astype('float32') / 255

    test_images = test_images.reshape((10000, 28, 28, 1))
    test_images = test_images.astype('float32') / 255

    train_labels = to_categorical(train_labels)
    test_labels = to_categorical(test_labels)

    print("Data Loaded")
except:
    print("Error Loading Data")
Data Loaded
Apache-2.0
experiment.ipynb
JeyDi/Digits-ConvNeuralNet
Create the network, train and evaluate
seed(42) model = models.Sequential() #Create the network model.add(layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(16, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Flatten()) model.add(layers.Dense(16, activation='relu')) model.add(layers.Dense(10, activation='softmax')) #Create early stop callback earlystop = EarlyStopping(monitor='loss', min_delta=0.0001, patience=5, \ verbose=1, mode='auto') callbacks_list = [earlystop] #Compile the model model.compile( optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy']) #Fit the model to the data history = model.fit( train_images, train_labels, epochs=10, callbacks=callbacks_list, batch_size=254, validation_data=(test_images,test_labels)) print("\nTraining completed") #Evaluate the test set history_evaluation = model.evaluate(test_images, test_labels) print("\nEvaluation completed")
Train on 60000 samples, validate on 10000 samples Epoch 1/10 60000/60000 [==============================] - 19s 322us/step - loss: 0.6149 - acc: 0.8169 - val_loss: 0.3241 - val_acc: 0.8938 Epoch 2/10 60000/60000 [==============================] - 22s 361us/step - loss: 0.2056 - acc: 0.9380 - val_loss: 0.1750 - val_acc: 0.9438 Epoch 3/10 60000/60000 [==============================] - 19s 314us/step - loss: 0.1427 - acc: 0.9563 - val_loss: 0.1713 - val_acc: 0.9451 Epoch 4/10 60000/60000 [==============================] - 18s 296us/step - loss: 0.1115 - acc: 0.9659 - val_loss: 0.1271 - val_acc: 0.9624 Epoch 5/10 60000/60000 [==============================] - 18s 292us/step - loss: 0.0933 - acc: 0.9715 - val_loss: 0.1065 - val_acc: 0.9670 Epoch 6/10 60000/60000 [==============================] - 18s 292us/step - loss: 0.0807 - acc: 0.9753 - val_loss: 0.1044 - val_acc: 0.9650 Epoch 7/10 60000/60000 [==============================] - 18s 293us/step - loss: 0.0723 - acc: 0.9777 - val_loss: 0.0735 - val_acc: 0.9772 Epoch 8/10 60000/60000 [==============================] - 18s 295us/step - loss: 0.0649 - acc: 0.9803 - val_loss: 0.0742 - val_acc: 0.9777 Epoch 9/10 60000/60000 [==============================] - 18s 292us/step - loss: 0.0597 - acc: 0.9815 - val_loss: 0.0579 - val_acc: 0.9813 Epoch 10/10 60000/60000 [==============================] - 18s 292us/step - loss: 0.0544 - acc: 0.9835 - val_loss: 0.0572 - val_acc: 0.9825 Training completed 10000/10000 [==============================] - 1s 124us/step Evaluation completed History <keras.callbacks.History object at 0x00000179D8980160>
Apache-2.0
experiment.ipynb
JeyDi/Digits-ConvNeuralNet
Visualize results
model.summary() # Get training and test loss histories training_loss = history.history['loss'] test_loss = history.history['val_loss'] # Get training and test accuracy histories training_accuracy = history.history['acc'] test_accuracy = history.history['val_acc'] #print(history_evaluation) print("Training Accuracy " + str(training_accuracy[-1])) print("Training Loss: " + str(training_loss[-1])) print("Test Accuracy: " + str(test_accuracy[-1])) print("Test Loss: " + str(test_loss[-1])) print("Model Parameters: " + str(model.count_params())) # Plot the accuracy and cost summaries f, (ax1, ax2) = plt.subplots(2, 1, sharex=False, figsize=(13,13)) # Create count of the number of epochs epoch_count = range(1, len(training_loss) + 1) # Visualize loss history ax1.plot(epoch_count, training_loss, 'r--') ax1.plot(epoch_count, test_loss, 'b-') ax1.legend(['Training Loss', 'Test Loss']) ax1.set_ylabel('Loss') ax1.set_xlabel('Epoch') # Create count of the number of epochs epoch_count = range(1, len(training_accuracy) + 1) # Visualize accuracy history ax2.plot(epoch_count, training_accuracy, 'r--') ax2.plot(epoch_count, test_accuracy, 'b-') ax2.legend(['Training Accuracy', 'Test Accuracy']) ax2.set_ylabel('Accuracy Score') ax1.set_xlabel('Epoch') plt.xlabel('Epoch') plt.show();
_________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d_10 (Conv2D) (None, 26, 26, 16) 160 _________________________________________________________________ max_pooling2d_9 (MaxPooling2 (None, 13, 13, 16) 0 _________________________________________________________________ conv2d_11 (Conv2D) (None, 11, 11, 16) 2320 _________________________________________________________________ max_pooling2d_10 (MaxPooling (None, 5, 5, 16) 0 _________________________________________________________________ flatten_5 (Flatten) (None, 400) 0 _________________________________________________________________ dense_9 (Dense) (None, 16) 6416 _________________________________________________________________ dense_10 (Dense) (None, 10) 170 ================================================================= Total params: 9,066 Trainable params: 9,066 Non-trainable params: 0 _________________________________________________________________ Training Accuracy 0.9834500078916549 Training Loss: 0.05444586953427642 Test Accuracy: 0.9825000083446502 Test Loss: 0.05715448258873075 Model Parameters: 9066
Apache-2.0
experiment.ipynb
JeyDi/Digits-ConvNeuralNet
TF-IDF

Joining registry and license data using the TF-IDF string matching algorithm
import mwdsbe import mwdsbe.datasets.licenses as licenses import schuylkill as skool import time registry = mwdsbe.load_registry() # geopandas df license = licenses.CommercialActivityLicenses().download() # clean data ignore_words = ['inc', 'group', 'llc', 'corp', 'pc', 'incorporated', 'ltd', 'co', 'associates', 'services', 'company', 'enterprises', 'enterprise', 'service', 'corporation'] cleaned_registry = skool.clean_strings(registry, ['company_name', 'dba_name'], True, ignore_words) cleaned_license = skool.clean_strings(license, ['company_name'], True, ignore_words) print('Total number of cleaned registry:', len(cleaned_registry)) print('Total number of cleaned license:', len(cleaned_license))
Total number of cleaned license: 203578
MIT
mwdsbe/Notebooks/Matching Algorithms/TF-IDF/tf-idf.ipynb
BinnyDaBin/MWDSBE
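The `skool.tf_idf_merge` calls below wrap the matching logic; as a rough illustration only (not the schuylkill implementation), TF-IDF string matching can be sketched with scikit-learn by vectorizing character n-grams and comparing names with cosine similarity:

# rough illustration of TF-IDF string matching (not the schuylkill implementation)
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

names_a = ['acme consulting', 'phila paving']  # e.g. cleaned registry names
names_b = ['acme consulting group', 'philadelphia paving', 'unrelated business']

# character 3-grams capture partial-word overlap between company names
vectorizer = TfidfVectorizer(analyzer='char_wb', ngram_range=(3, 3))
tfidf = vectorizer.fit_transform(names_a + names_b)

# similarity of each registry name against each candidate license name
scores = cosine_similarity(tfidf[:len(names_a)], tfidf[len(names_a):])
print(scores)  # keep the best match per row above a chosen score cutoff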
1. Score-cutoff 90
t1 = time.time() merged = ( skool.tf_idf_merge(cleaned_registry, cleaned_license, on="company_name", score_cutoff=90) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_license, left_on="dba_name", right_on="company_name", score_cutoff=90) ) t = time.time() - t1 print('Execution time:', t, 'sec') matched = merged.dropna(subset=['company_name_y']) print('Match:', len(matched), 'out of', len(cleaned_registry)) non_exact_match = matched[matched.match_probability < 0.999999] non_exact_match = non_exact_match[['company_name_x', 'match_probability', 'company_name_y']] print('Non-exact match above 90:', len(non_exact_match), 'out of', len(matched)) # non_exact_match.to_excel (r'C:\Users\dabinlee\Desktop\mwdsbe\data\tf-idf\tf-idf-90.xlsx', index = None, header=True)
_____no_output_____
MIT
mwdsbe/Notebooks/Matching Algorithms/TF-IDF/tf-idf.ipynb
BinnyDaBin/MWDSBE
2. Score-cutoff 85
t1 = time.time() merged = ( skool.tf_idf_merge(cleaned_registry, cleaned_license, on="company_name", score_cutoff=85) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_license, left_on="dba_name", right_on="company_name", score_cutoff=85) ) t = time.time() - t1 print('Execution time:', t, 'sec') matched = merged.dropna(subset=['company_name_y']) print('Match:', len(matched), 'out of', len(cleaned_registry)) match_to_check = matched[matched.match_probability < 0.9] match_to_check = match_to_check[['company_name_x', 'match_probability', 'company_name_y']] print('Match between 85 and 90:', len(match_to_check), 'out of', len(matched)) # match_to_check.to_excel (r'C:\Users\dabinlee\Desktop\mwdsbe\data\tf-idf\tf-idf-85.xlsx', index = None, header=True)
_____no_output_____
MIT
mwdsbe/Notebooks/Matching Algorithms/TF-IDF/tf-idf.ipynb
BinnyDaBin/MWDSBE
3. Score-cutoff 80
t1 = time.time() merged = ( skool.tf_idf_merge(cleaned_registry, cleaned_license, on="company_name", score_cutoff=80) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_license, left_on="dba_name", right_on="company_name", score_cutoff=80) ) t = time.time() - t1 print('Execution time:', t, 'sec') matched = merged.dropna(subset=['company_name_y']) print('Match:', len(matched), 'out of', len(cleaned_registry)) match_to_check = matched[matched.match_probability < 0.85] match_to_check = match_to_check[['company_name_x', 'match_probability', 'company_name_y']] print('Match between 80 and 85:', len(match_to_check), 'out of', len(matched)) # match_to_check.to_excel (r'C:\Users\dabinlee\Desktop\mwdsbe\data\tf-idf\tf-idf-80.xlsx', index = None, header=True)
_____no_output_____
MIT
mwdsbe/Notebooks/Matching Algorithms/TF-IDF/tf-idf.ipynb
BinnyDaBin/MWDSBE
4. Score-cutoff 75
t1 = time.time() merged = ( skool.tf_idf_merge(cleaned_registry, cleaned_license, on="company_name", score_cutoff=75) .pipe(skool.tf_idf_merge, cleaned_registry, cleaned_license, left_on="dba_name", right_on="company_name", score_cutoff=75) ) t = time.time() - t1 print('Execution time:', t, 'sec') matched = merged.dropna(subset=['company_name_y']) print('Match:', len(matched), 'out of', len(cleaned_registry)) match_to_check = matched[matched.match_probability < 0.8] match_to_check = match_to_check[['company_name_x', 'match_probability', 'company_name_y']] print('Match between 75 and 80:', len(match_to_check), 'out of', len(matched)) # match_to_check.to_excel (r'C:\Users\dabinlee\Desktop\mwdsbe\data\tf-idf\tf-idf-75.xlsx', index = None, header=True)
_____no_output_____
MIT
mwdsbe/Notebooks/Matching Algorithms/TF-IDF/tf-idf.ipynb
BinnyDaBin/MWDSBE
Run Data Bias Analysis with SageMaker Clarify (Pre-Training) Using SageMaker Processing Jobs
import boto3 import sagemaker import pandas as pd import numpy as np sess = sagemaker.Session() bucket = sess.default_bucket() role = sagemaker.get_execution_role() region = boto3.Session().region_name sm = boto3.Session().client(service_name="sagemaker", region_name=region)
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Get Data from S3
%store -r bias_data_s3_uri print(bias_data_s3_uri) !aws s3 cp $bias_data_s3_uri ./data-clarify import pandas as pd data = pd.read_csv("./data-clarify/amazon_reviews_us_giftcards_software_videogames.csv") data.head()
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Analyze Unbalanced Data

Plotting histograms of the distribution of the different features is a good way to visualize the data.
import seaborn as sns sns.countplot(data=data, x="star_rating", hue="product_category")
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
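Before running the Clarify processing job below, one of its pre-training metrics can be approximated by hand as a rough cross-check (this is not the Clarify implementation): the class imbalance (CI) for a facet value a versus everything else is (n_a - n_d) / (n_a + n_d), where n_a and n_d are the two group sizes.

# rough hand-computed class imbalance (CI) per product_category facet value
counts = data['product_category'].value_counts()
total = counts.sum()
for category, n_a in counts.items():
    n_d = total - n_a  # all rows outside this facet value
    ci = (n_a - n_d) / (n_a + n_d)
    print(category, round(ci, 3))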
Calculate Bias Metrics on Unbalanced Data

SageMaker Clarify helps you detect possible pre- and post-training biases using a variety of metrics.
from sagemaker import clarify clarify_processor = clarify.SageMakerClarifyProcessor( role=role, instance_count=1, instance_type="ml.c5.2xlarge", sagemaker_session=sess )
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Pre-training Bias

Bias can be present in your data before any model training occurs. Inspecting your data for bias before training begins can help detect data collection gaps, inform your feature engineering, and help you understand what societal biases the data may reflect. Computing pre-training bias metrics does not require a trained model.

Writing DataConfig

A `DataConfig` object communicates some basic information about data I/O to Clarify. We specify where to find the input dataset, where to store the output, the target column (`label`), the header names, and the dataset type.
bias_report_output_path = "s3://{}/clarify".format(bucket) bias_data_config = clarify.DataConfig( s3_data_input_path=bias_data_s3_uri, s3_output_path=bias_report_output_path, label="star_rating", headers=data.columns.to_list(), dataset_type="text/csv", )
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Configure `BiasConfig`

SageMaker Clarify also needs the sensitive columns (`facets`) and the desirable outcomes (`label_values_or_threshold`). We specify this information in the `BiasConfig` API. Here, the positive outcome is either `star_rating==5` or `star_rating==4`, and `product_category` is the sensitive facet that we analyze in this run.
bias_config = clarify.BiasConfig( label_values_or_threshold=[5, 4], facet_name="product_category", group_name="product_category" )
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Detect Bias with a SageMaker Processing Job and Clarify
clarify_processor.run_pre_training_bias( data_config=bias_data_config, data_bias_config=bias_config, methods="all", wait=False, logs=False ) run_pre_training_bias_processing_job_name = clarify_processor.latest_job.job_name run_pre_training_bias_processing_job_name from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/sagemaker/home?region={}#/processing-jobs/{}">Processing Job</a></b>'.format( region, run_pre_training_bias_processing_job_name ) ) ) from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://console.aws.amazon.com/cloudwatch/home?region={}#logStream:group=/aws/sagemaker/ProcessingJobs;prefix={};streamFilter=typeLogStreamPrefix">CloudWatch Logs</a> After About 5 Minutes</b>'.format( region, run_pre_training_bias_processing_job_name ) ) ) from IPython.core.display import display, HTML display( HTML( '<b>Review <a target="blank" href="https://s3.console.aws.amazon.com/s3/buckets/{}/{}/?region={}&tab=overview">S3 Output Data</a> After The Processing Job Has Completed</b>'.format( bucket, run_pre_training_bias_processing_job_name, region ) ) ) running_processor = sagemaker.processing.ProcessingJob.from_processing_name( processing_job_name=run_pre_training_bias_processing_job_name, sagemaker_session=sess ) processing_job_description = running_processor.describe() print(processing_job_description) running_processor.wait(logs=False)
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Download Report From S3

The class-imbalance metric should match the value calculated for the unbalanced dataset using the open source version above.
!aws s3 ls $bias_report_output_path/ !aws s3 cp --recursive $bias_report_output_path ./generated_bias_report/ from IPython.core.display import display, HTML display(HTML('<b>Review <a target="blank" href="./generated_bias_report/report.html">Bias Report</a></b>'))
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop
Release Resources
%%html <p><b>Shutting down your kernel for this notebook to release resources.</b></p> <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button> <script> try { els = document.getElementsByClassName("sm-command-button"); els[0].click(); } catch(err) { // NoOp } </script>
_____no_output_____
Apache-2.0
00_quickstart/09_Run_Data_Bias_Analysis_ProcessingJob.ipynb
NRauschmayr/workshop