```
import urllib.request
import os
import geopandas as gpd
import rasterio
from rasterio.plot import show
import zipfile
import matplotlib.pyplot as plt
```
# GIS visualizations with geopandas
```
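# Download and extract the GADM 3.6 administrative boundaries for Colombia
# (level 0 = country outline, level 1 = departments)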
url = 'https://biogeo.ucdavis.edu/data/gadm3.6/shp/gadm36_COL_shp.zip'
dest = os.path.join('data', 'admin')
os.makedirs(dest, exist_ok=True)
urllib.request.urlretrieve(url, os.path.join(dest, 'gadm36_COL_shp.zip'))
with zipfile.ZipFile(os.path.join(dest, 'gadm36_COL_shp.zip'), 'r') as zip_ref:
zip_ref.extractall(dest)
gdf_adm0 = gpd.read_file(os.path.join(dest, 'gadm36_COL_0.shp'))
gdf_adm1 = gpd.read_file(os.path.join(dest, 'gadm36_COL_1.shp'))
gdf_adm1
gdf_adm0.plot()
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
gdf_adm0.plot(color='white', edgecolor='black', ax=ax)
gdf_adm1.plot(column='NAME_1', ax=ax, cmap='Set2',
legend=True,
legend_kwds={'loc': "upper right",
'bbox_to_anchor': (1.4, 1)})
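# Download OSM extracts for Colombia from Geofabrik (water bodies and populated places)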
url = 'https://download.geofabrik.de/south-america/colombia-latest-free.shp.zip'
dest = os.path.join('data', 'places')
os.makedirs(dest, exist_ok=True)
urllib.request.urlretrieve(url, os.path.join(dest, 'colombia-latest-free.shp.zip'))
with zipfile.ZipFile(os.path.join(dest, 'colombia-latest-free.shp.zip'), 'r') as zip_ref:
zip_ref.extractall(dest)
gdf_water = gpd.read_file(os.path.join(dest, 'gis_osm_water_a_free_1.shp'))
gdf_places = gpd.read_file(os.path.join(dest, 'gis_osm_places_free_1.shp'))
gdf_cities = gdf_places.loc[gdf_places['fclass']=='city'].copy()
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
gdf_adm0.plot(color='white', edgecolor='black', ax=ax)
gdf_adm1.plot(color='white', ax=ax)
gdf_water.plot(edgecolor='blue', ax=ax)
gdf_cities.plot(column='population', ax=ax, legend=True)
gdf_cities['size'] = gdf_cities['population'] / gdf_cities['population'].max() * 500
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig, ax = plt.subplots(1, 1, figsize=(12, 12))
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.1)
gdf_adm0.plot(color='white', edgecolor='black', ax=ax)
gdf_adm1.plot(color='white', edgecolor='gray', ax=ax)
gdf_water.plot(edgecolor='lightblue', ax=ax)
gdf_cities.plot(markersize='size', column='population',
cmap='viridis', edgecolor='white',
ax=ax, cax=cax, legend=True,
legend_kwds={'label': "Population by city"})
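# Overlay the WorldPop 2020 population-density raster (1 km, UN-adjusted) with the department boundaries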
url = 'https://data.worldpop.org/GIS/Population_Density/Global_2000_2020_1km_UNadj/2020/COL/col_pd_2020_1km_UNadj.tif'
dest = os.path.join('data', 'pop')
os.makedirs(dest, exist_ok=True)
urllib.request.urlretrieve(url, os.path.join(dest, 'col_pd_2020_1km_UNadj.tif'))
with rasterio.open(os.path.join(dest, 'col_pd_2020_1km_UNadj.tif')) as src:
fig, ax = plt.subplots(figsize=(12, 12))
show(src, ax=ax, cmap='viridis_r')
gdf_adm1.boundary.plot(edgecolor='gray', linewidth=0.5, ax=ax)
```
---
```
import cobra
import copy
import mackinac
mackinac.modelseed.ms_client.url = 'http://p3.theseed.org/services/ProbModelSEED/'
mackinac.workspace.ws_client.url = 'http://p3.theseed.org/services/Workspace'
mackinac.genome.patric_url = 'https://www.patricbrc.org/api/'
# PATRIC user information
mackinac.get_token('mljenior')
# password: matrix54
```
### Generate models
```
# Barnesiella intestinihominis
genome_id = '742726.3'
template_id = '/chenry/public/modelsupport/templates/GramNegModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/barn_int.draft.json'
strain_id = 'Barnesiella intestinihominis YIT 11860'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
# Lactobacillus reuteri
genome_id = '863369.3'
template_id = '/chenry/public/modelsupport/templates/GramPosModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/lact_reut.draft.json'
strain_id = 'Lactobacillus reuteri mlc3'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
# Enterococcus hirae
genome_id = '768486.3'
template_id = '/chenry/public/modelsupport/templates/GramPosModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/ent_hir.draft.json'
strain_id = 'Enterococcus hirae ATCC 9790'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
# Anaerostipes caccae
genome_id = '411490.6'
template_id = '/chenry/public/modelsupport/templates/GramPosModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/ana_stip.draft.json'
strain_id = 'Anaerostipes caccae DSM 14662'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
# Staphylococcus warneri
genome_id = '596319.3'
template_id = '/chenry/public/modelsupport/templates/GramPosModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/staph_warn.draft.json'
strain_id = 'Staphylococcus warneri L37603'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
# Adlercreutzia equolifaciens
genome_id = '1384484.3'
template_id = '/chenry/public/modelsupport/templates/GramPosModelTemplate'
media_id = '/chenry/public/modelsupport/media/Complete'
file_id = '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/adl_equ.draft.json'
strain_id = 'Adlercreutzia equolifaciens DSM 19450'
mackinac.reconstruct_modelseed_model(genome_id, template_reference=template_id)
mackinac.gapfill_modelseed_model(genome_id, media_reference=media_id)
mackinac.optimize_modelseed_model(genome_id)
model = mackinac.create_cobra_model_from_modelseed_model(genome_id)
model.id = strain_id
cobra.io.save_json_model(model, file_id)
```
### Curate Draft Models
```
# Read in draft models
mixB1 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/barn_int.draft.json')
mixB2 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/lact_reut.draft.json')
mixB3 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/ent_hir.draft.json')
mixB4 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/ana_stip.draft.json')
mixB5 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/staph_warn.draft.json')
mixB6 = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/adl_equ.draft.json')
# Quality check functions
# Identify potentially gapfilled reactions
def _findGapfilledRxn(model, exclude):
gapfilled = []
transport = _findTransports(model)
if not type(exclude) is list:
exclude = [exclude]
for index in model.reactions:
if len(list(index.genes)) == 0:
if not index in model.boundary:
if not index.id in exclude and not index.id in transport:
gapfilled.append(index.id)
if len(gapfilled) > 0:
print(str(len(gapfilled)) + ' metabolic reactions not associated with genes')
return gapfilled
# Check for missing transport and exchange reactions
def _missingRxns(model, extracellular):
transporters = set(_findTransports(model))
exchanges = set([x.id for x in model.exchanges])
missing_exchanges = []
missing_transports = []
for metabolite in model.metabolites:
if not metabolite.compartment == extracellular:
continue
curr_rxns = set([x.id for x in list(metabolite.reactions)])
if not bool(curr_rxns & transporters):
missing_transports.append(metabolite.id)
if not bool(curr_rxns & exchanges):
missing_exchanges.append(metabolite.id)
if len(missing_transports) != 0:
print(str(len(missing_transports)) + ' extracellular metabolites are missing transport reactions')
if len(missing_exchanges) != 0:
print(str(len(missing_exchanges)) + ' extracellular metabolites are missing exchange reactions')
return missing_transports, missing_exchanges
# Checks which cytosolic metabolites are generated for free (bacteria only)
def _checkFreeMass(raw_model, cytosol):
model = copy.deepcopy(raw_model)
# Close all exchanges
for index in model.boundary:
model.reactions.get_by_id(index.id).lower_bound = 0.
# Identify all metabolites that are produced within the network
demand_metabolites = [x.reactants[0].id for x in model.demands if len(x.reactants) > 0] + [x.products[0].id for x in model.demands if len(x.products) > 0]
free = []
for index in model.metabolites:
if index.id in demand_metabolites:
continue
elif not index.compartment in cytosol:
continue
else:
demand = model.add_boundary(index, type='demand')
model.objective = demand
obj_val = model.slim_optimize(error_value=0.)
if obj_val > 1e-8:
free.append(index.id)
model.remove_reactions([demand])
if len(free) > 0:
print(str(len(free)) + ' metabolites are generated for free')
return free
# Check for mass and charge balance in reactions
def _checkBalance(model, exclude=[]):
imbalanced = []
mass_imbal = 0
charge_imbal = 0
elem_set = set()
for metabolite in model.metabolites:
try:
elem_set |= set(metabolite.elements.keys())
except:
pass
if len(elem_set) == 0:
imbalanced = model.reactions
mass_imbal = len(model.reactions)
charge_imbal = len(model.reactions)
print('No elemental data associated with metabolites!')
else:
if not type(exclude) is list: exclude = [exclude]
for index in model.reactions:
if index in model.boundary or index.id in exclude:
continue
else:
try:
test = index.check_mass_balance()
except ValueError:
continue
if len(list(test)) > 0:
imbalanced.append(index.id)
if 'charge' in test.keys():
charge_imbal += 1
if len(set(test.keys()).intersection(elem_set)) > 0:
mass_imbal += 1
if mass_imbal != 0:
print(str(mass_imbal) + ' reactions are mass imbalanced')
if charge_imbal != 0:
print(str(charge_imbal) + ' reactions are charge imbalanced')
return imbalanced
# Identify transport reactions (for any number compartments)
def _findTransports(model):
transporters = []
compartments = set(list(model.compartments))
if len(compartments) == 1:
raise Exception('Model only has one compartment!')
for reaction in model.reactions:
reactant_compartments = set([x.compartment for x in reaction.reactants])
product_compartments = set([x.compartment for x in reaction.products])
reactant_baseID = set([x.id.split('_')[0] for x in reaction.reactants])
product_baseID = set([x.id.split('_')[0] for x in reaction.products])
if reactant_compartments == product_compartments and reactant_baseID != product_baseID:
continue
elif bool(compartments & reactant_compartments) == True and bool(compartments & product_compartments) == True:
transporters.append(reaction.id)
return transporters
# Checks the quality of models by a couple metrics and returns problems
def checkQuality(model, exclude=[], cytosol='c', extracellular='e'):
gaps = _findGapfilledRxn(model, exclude)
freemass = _checkFreeMass(model, cytosol)
balance = _checkBalance(model, exclude)
trans, exch = _missingRxns(model, extracellular)
test = gaps + freemass + balance
if len(test) == 0: print('No inconsistencies detected')
# Create reporting data structure
quality = {}
quality['gaps'] = gaps
quality['freemass'] = freemass
quality['balance'] = balance
quality['trans'] = trans
quality['exch'] = exch
return quality
mixB1
mixB1_errors = checkQuality(mixB1)
mixB2
mixB2_errors = checkQuality(mixB2)
mixB3
mixB3_errors = checkQuality(mixB3)
mixB4
mixB4_errors = checkQuality(mixB4)
mixB5
mixB5_errors = checkQuality(mixB5)
mixB6
mixB6_errors = checkQuality(mixB6)
# Remove old bio1 (generic Gram-positive Biomass function) and macromolecule demand reactions
mixB1.remove_reactions(['rxn13783_c', 'rxn13784_c', 'rxn13782_c', 'bio1', 'SK_cpd11416_c'])
# Make sure all the models can grow anaerobically
model.reactions.get_by_id('EX_cpd00007_e').lower_bound = 0.
# Universal reaction bag
universal = cobra.io.load_json_model('/home/mjenior/Desktop/repos/Jenior_Cdifficile_2019/data/universal.json')
# Fix compartments
compartment_dict = {'Cytosol': 'cytosol', 'Extracellular': 'extracellular', 'c': 'cytosol', 'e': 'extracellular',
'cytosol': 'cytosol', 'extracellular': 'extracellular'}
for cpd in universal.metabolites:
cpd.compartment = compartment_dict[cpd.compartment]
import copy
import cobra
import symengine
# pFBA gapfiller
def fast_gapfill(model, objective, universal, extracellular='extracellular', media=[], transport=False):
'''
Parameters
----------
model : cobra.Model
Model to be gapfilled
objective : str
Reaction ID for the objective function
universal : cobra.Model
Universal reaction bag used as the gapfilling reference
extracellular : str
Label for extracellular compartment of model
media : list
list of metabolite IDs in media condition
transport : bool
Determine if passive transporters should be added in defined media
'''
# Define overlapping components
target_rxns = set([str(x.id) for x in model.reactions])
target_cpds = set([str(y.id) for y in model.metabolites])
ref_rxns = set([str(z.id) for z in universal.reactions])
shared_rxns = ref_rxns.intersection(target_rxns)
# Remove overlapping reactions from universal bag, add model reactions to universal bag
temp_universal = copy.deepcopy(universal)
for rxn in shared_rxns: temp_universal.reactions.get_by_id(rxn).remove_from_model()
temp_universal.add_reactions(list(copy.deepcopy(model.reactions)))
# Define minimum objective value
temp_universal.objective = objective
obj_constraint = temp_universal.problem.Constraint(temp_universal.objective.expression, lb=1.0, ub=1000.0)
temp_universal.add_cons_vars([obj_constraint])
temp_universal.solver.update()
# Set up pFBA objective
pfba_expr = symengine.RealDouble(0)
for rxn in temp_universal.reactions:
if not rxn.id in target_rxns:
pfba_expr += 1.0 * rxn.forward_variable
pfba_expr += 1.0 * rxn.reverse_variable
else:
pfba_expr += 0.0 * rxn.forward_variable
pfba_expr += 0.0 * rxn.reverse_variable
temp_universal.objective = temp_universal.problem.Objective(pfba_expr, direction='min', sloppy=True)
temp_universal.solver.update()
# Set media condition
for rxn in temp_universal.reactions:
if len(rxn.reactants) == 0 or len(rxn.products) == 0:
substrates = set([x.id for x in rxn.metabolites])
if len(media) == 0 or bool(substrates & set(media)) == True:
rxn.bounds = (max(rxn.lower_bound, -1000.), min(rxn.upper_bound, 1000.))
else:
rxn.bounds = (0.0, min(rxn.upper_bound, 1000.))
# Run FBA and save solution
solution = temp_universal.optimize()
active_rxns = set([rxn.id for rxn in temp_universal.reactions if abs(solution.fluxes[rxn.id]) > 1e-6])
# Screen new reaction IDs
new_rxns = active_rxns.difference(target_rxns)
# Get reactions and metabolites to be added to the model
new_rxns = copy.deepcopy([universal.reactions.get_by_id(rxn) for rxn in new_rxns])
new_cpds = set()
for rxn in new_rxns: new_cpds |= set([str(x.id) for x in list(rxn.metabolites)]).difference(target_cpds)
new_cpds = copy.deepcopy([universal.metabolites.get_by_id(cpd) for cpd in new_cpds])
# Create gapfilled model
new_model = copy.deepcopy(model)
new_model.add_metabolites(new_cpds)
new_model.add_reactions(new_rxns)
# Identify extracellular metabolites that need new exchanges
new_exchs = 0
model_exchanges = set()
rxns = set([str(rxn.id) for rxn in model.reactions])
for rxn in new_model.reactions:
if len(rxn.reactants) == 0 or len(rxn.products) == 0:
if extracellular in [str(cpd.compartment) for cpd in rxn.metabolites]:
model_exchanges |= set([rxn.id])
for cpd in new_model.metabolites:
if cpd.compartment != extracellular: continue
current_rxns = set([x.id for x in cpd.reactions])
if bool(current_rxns & model_exchanges) == False:
new_id = 'EX_' + cpd.id
new_model.add_boundary(cpd, type='exchange', reaction_id=new_id, lb=-1000.0, ub=1000.0)
new_exchs += 1
# Report to user
print('Gapfilled ' + str(len(new_rxns) + new_exchs) + ' reactions and ' + str(len(new_cpds)) + ' metabolites')
if new_model.slim_optimize() <= 1e-6: print('WARNING: Objective does not carry flux')
return new_model
# Load in models
model = cobra.io.load_json_model('/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/lact_reut.draft.json')
# Fix compartments
compartment_dict = {'Cytosol': 'cytosol', 'Extracellular': 'extracellular', 'c': 'cytosol', 'e': 'extracellular',
'cytosol': 'cytosol', 'extracellular': 'extracellular'}
for cpd2 in model.metabolites:
cpd2.compartment = compartment_dict[cpd2.compartment]
# Thoroughly remove orphan reactions and metabolites
def all_orphan_prune(model):
pruned_cpd = 0
pruned_rxn = 0
removed = 1
while removed == 1:
removed = 0
# Metabolites
for cpd in list(model.metabolites):
if len(cpd.reactions) == 0:
cpd.remove_from_model()
pruned_cpd += 1
removed = 1
# Reactions
for rxn in list(model.reactions):
if len(rxn.metabolites) == 0:
rxn.remove_from_model()
pruned_rxn += 1
removed = 1
if pruned_cpd > 0: print('Pruned ' + str(pruned_cpd) + ' orphan metabolites')
if pruned_rxn > 0: print('Pruned ' + str(pruned_rxn) + ' orphan reactions')
return model
# Remove incorrect biomass-related components
# Unwanted reactions
rm_reactions = ['bio1']
for x in rm_reactions:
model.reactions.get_by_id(x).remove_from_model()
# Unwanted metabolites
rm_metabolites = ['cpd15666_c','cpd17041_c','cpd17042_c','cpd17043_c']
for y in rm_metabolites:
for z in model.metabolites.get_by_id(y).reactions:
z.remove_from_model()
model.metabolites.get_by_id(y).remove_from_model()
# Remove gap-filled reactions
# Gram-positive Biomass formulation
# DNA replication
cpd00115_c = universal.metabolites.get_by_id('cpd00115_c') # dATP
cpd00356_c = universal.metabolites.get_by_id('cpd00356_c') # dCTP
cpd00357_c = universal.metabolites.get_by_id('cpd00357_c') # TTP
cpd00241_c = universal.metabolites.get_by_id('cpd00241_c') # dGTP
cpd00002_c = universal.metabolites.get_by_id('cpd00002_c') # ATP
cpd00001_c = universal.metabolites.get_by_id('cpd00001_c') # H2O
cpd00008_c = universal.metabolites.get_by_id('cpd00008_c') # ADP
cpd00009_c = universal.metabolites.get_by_id('cpd00009_c') # Phosphate
cpd00012_c = universal.metabolites.get_by_id('cpd00012_c') # PPi
cpd17042_c = cobra.Metabolite(
'cpd17042_c',
formula='',
name='DNA polymer',
compartment='cytosol')
dna_rxn = cobra.Reaction('dna_rxn')
dna_rxn.name = 'DNA replication'
dna_rxn.lower_bound = 0.
dna_rxn.upper_bound = 1000.
dna_rxn.add_metabolites({
cpd00115_c: -1.0,
cpd00356_c: -0.5,
cpd00357_c: -1.0,
cpd00241_c: -0.5,
cpd00002_c: -4.0,
cpd00001_c: -1.0,
cpd17042_c: 1.0,
cpd00008_c: 4.0,
cpd00009_c: 4.0,
cpd00012_c: 1.0
})
#--------------------------------------------------------------------------------#
# RNA transcription
cpd00002_c = universal.metabolites.get_by_id('cpd00002_c') # ATP
cpd00052_c = universal.metabolites.get_by_id('cpd00052_c') # CTP
cpd00062_c = universal.metabolites.get_by_id('cpd00062_c') # UTP
cpd00038_c = universal.metabolites.get_by_id('cpd00038_c') # GTP
cpd00001_c = universal.metabolites.get_by_id('cpd00001_c') # H2O
cpd00008_c = universal.metabolites.get_by_id('cpd00008_c') # ADP
cpd00009_c = universal.metabolites.get_by_id('cpd00009_c') # Phosphate
cpd00012_c = universal.metabolites.get_by_id('cpd00012_c') # PPi
cpd17043_c = cobra.Metabolite(
'cpd17043_c',
formula='',
name='RNA polymer',
compartment='cytosol')
rna_rxn = cobra.Reaction('rna_rxn')
rna_rxn.name = 'RNA transcription'
rna_rxn.lower_bound = 0.
rna_rxn.upper_bound = 1000.
rna_rxn.add_metabolites({
cpd00002_c: -2.0,
cpd00052_c: -0.5,
cpd00062_c: -0.5,
cpd00038_c: -0.5,
cpd00001_c: -1.0,
cpd17043_c: 1.0,
cpd00008_c: 2.0,
cpd00009_c: 2.0,
cpd00012_c: 1.0
})
#--------------------------------------------------------------------------------#
# Protein biosynthesis
cpd00035_c = universal.metabolites.get_by_id('cpd00035_c') # L-Alanine
cpd00051_c = universal.metabolites.get_by_id('cpd00051_c') # L-Arginine
cpd00132_c = universal.metabolites.get_by_id('cpd00132_c') # L-Asparagine
cpd00041_c = universal.metabolites.get_by_id('cpd00041_c') # L-Aspartate
cpd00084_c = universal.metabolites.get_by_id('cpd00084_c') # L-Cysteine
cpd00053_c = universal.metabolites.get_by_id('cpd00053_c') # L-Glutamine
cpd00023_c = universal.metabolites.get_by_id('cpd00023_c') # L-Glutamate
cpd00033_c = universal.metabolites.get_by_id('cpd00033_c') # Glycine
cpd00119_c = universal.metabolites.get_by_id('cpd00119_c') # L-Histidine
cpd00322_c = universal.metabolites.get_by_id('cpd00322_c') # L-Isoleucine
cpd00107_c = universal.metabolites.get_by_id('cpd00107_c') # L-Leucine
cpd00039_c = universal.metabolites.get_by_id('cpd00039_c') # L-Lysine
cpd00060_c = universal.metabolites.get_by_id('cpd00060_c') # L-Methionine
cpd00066_c = universal.metabolites.get_by_id('cpd00066_c') # L-Phenylalanine
cpd00129_c = universal.metabolites.get_by_id('cpd00129_c') # L-Proline
cpd00054_c = universal.metabolites.get_by_id('cpd00054_c') # L-Serine
cpd00161_c = universal.metabolites.get_by_id('cpd00161_c') # L-Threonine
cpd00065_c = universal.metabolites.get_by_id('cpd00065_c') # L-Tryptophan
cpd00069_c = universal.metabolites.get_by_id('cpd00069_c') # L-Tyrosine
cpd00156_c = universal.metabolites.get_by_id('cpd00156_c') # L-Valine
cpd00002_c = universal.metabolites.get_by_id('cpd00002_c') # ATP
cpd00001_c = universal.metabolites.get_by_id('cpd00001_c') # H2O
cpd00008_c = universal.metabolites.get_by_id('cpd00008_c') # ADP
cpd00009_c = universal.metabolites.get_by_id('cpd00009_c') # Phosphate
cpd17041_c = cobra.Metabolite(
'cpd17041_c',
formula='',
name='Protein polymer',
compartment='cytosol')
protein_rxn = cobra.Reaction('protein_rxn')
protein_rxn.name = 'Protein biosynthesis'
protein_rxn.lower_bound = 0.
protein_rxn.upper_bound = 1000.
protein_rxn.add_metabolites({
cpd00035_c: -0.5,
cpd00051_c: -0.25,
cpd00132_c: -0.5,
cpd00041_c: -0.5,
cpd00084_c: -0.05,
cpd00053_c: -0.25,
cpd00023_c: -0.5,
cpd00033_c: -0.5,
cpd00119_c: -0.05,
cpd00322_c: -0.5,
cpd00107_c: -0.5,
cpd00039_c: -0.5,
cpd00060_c: -0.25,
cpd00066_c: -0.5,
cpd00129_c: -0.25,
cpd00054_c: -0.5,
cpd00161_c: -0.5,
cpd00065_c: -0.05,
cpd00069_c: -0.25,
cpd00156_c: -0.5,
cpd00002_c: -20.0,
cpd00001_c: -1.0,
cpd17041_c: 1.0,
cpd00008_c: 20.0,
cpd00009_c: 20.0
})
#--------------------------------------------------------------------------------#
# Cell wall synthesis
cpd02967_c = universal.metabolites.get_by_id('cpd02967_c') # N-Acetyl-beta-D-mannosaminyl-1,4-N-acetyl-D-glucosaminyldiphosphoundecaprenol
cpd00402_c = universal.metabolites.get_by_id('cpd00402_c') # CDPglycerol
cpd00046_c = universal.metabolites.get_by_id('cpd00046_c') # CMP
cpd12894_c = cobra.Metabolite(
'cpd12894_c',
formula='',
name='Teichoic acid',
compartment='cytosol')
teichoicacid_rxn = cobra.Reaction('teichoicacid_rxn')
teichoicacid_rxn.name = 'Teichoic acid biosynthesis'
teichoicacid_rxn.lower_bound = 0.
teichoicacid_rxn.upper_bound = 1000.
teichoicacid_rxn.add_metabolites({
cpd02967_c: -1.0,
cpd00402_c: -1.0,
cpd00046_c: 1.0,
cpd12894_c: 1.0
})
# Peptidoglycan subunits
# Undecaprenyl-diphospho-N-acetylmuramoyl--N-acetylglucosamine-L-ala-D-glu-meso-2-6-diaminopimeloyl-D-ala-D-ala (right)
cpd03495_c = universal.metabolites.get_by_id('cpd03495_c')
# Undecaprenyl-diphospho-N-acetylmuramoyl-(N-acetylglucosamine)-L-alanyl-gamma-D-glutamyl-L-lysyl-D-alanyl-D-alanine (left)
cpd03491_c = universal.metabolites.get_by_id('cpd03491_c')
cpd00002_c = universal.metabolites.get_by_id('cpd00002_c') # ATP
cpd00001_c = universal.metabolites.get_by_id('cpd00001_c') # H2O
cpd02229_c = universal.metabolites.get_by_id('cpd02229_c') # Bactoprenyl diphosphate
cpd00117_c = universal.metabolites.get_by_id('cpd00117_c') # D-Alanine
cpd00008_c = universal.metabolites.get_by_id('cpd00008_c') # ADP
cpd00009_c = universal.metabolites.get_by_id('cpd00009_c') # Phosphate
cpd16661_c = cobra.Metabolite(
'cpd16661_c',
formula='',
name='Peptidoglycan polymer',
compartment='cytosol')
peptidoglycan_rxn = cobra.Reaction('peptidoglycan_rxn')
peptidoglycan_rxn.name = 'Peptidoglycan biosynthesis'
peptidoglycan_rxn.lower_bound = 0.
peptidoglycan_rxn.upper_bound = 1000.
peptidoglycan_rxn.add_metabolites({
cpd03491_c: -1.0,
cpd03495_c: -1.0,
cpd00002_c: -4.0,
cpd00001_c: -1.0,
cpd16661_c: 1.0,
cpd02229_c: 1.0,
cpd00117_c: 0.5, # D-Alanine
cpd00008_c: 4.0, # ADP
cpd00009_c: 4.0 # Phosphate
})
cellwall_c = cobra.Metabolite(
'cellwall_c',
formula='',
name='Cell Wall polymer',
compartment='cytosol')
cellwall_rxn = cobra.Reaction('cellwall_rxn')
cellwall_rxn.name = 'Cell wall biosynthesis'
cellwall_rxn.lower_bound = 0.
cellwall_rxn.upper_bound = 1000.
cellwall_rxn.add_metabolites({
cpd16661_c: -1.5,
cpd12894_c: -0.05,
cellwall_c: 1.0
})
#--------------------------------------------------------------------------------#
# Lipid pool
cpd15543_c = universal.metabolites.get_by_id('cpd15543_c') # Phosphatidylglycerophosphate ditetradecanoyl
cpd15545_c = universal.metabolites.get_by_id('cpd15545_c') # Phosphatidylglycerophosphate dihexadecanoyl
cpd15540_c = universal.metabolites.get_by_id('cpd15540_c') # Phosphatidylglycerol dioctadecanoyl
cpd15728_c = universal.metabolites.get_by_id('cpd15728_c') # Diglucosyl-1,2 dipalmitoylglycerol
cpd15729_c = universal.metabolites.get_by_id('cpd15729_c') # Diglucosyl-1,2 dimyristoylglycerol
cpd15737_c = universal.metabolites.get_by_id('cpd15737_c') # Monoglucosyl-1,2 dipalmitoylglycerol
cpd15738_c = universal.metabolites.get_by_id('cpd15738_c') # Monoglucosyl-1,2 dimyristoylglycerol
cpd11852_c = cobra.Metabolite(
'cpd11852_c',
formula='',
name='Lipid Pool',
compartment='cytosol')
lipid_rxn = cobra.Reaction('lipid_rxn')
lipid_rxn.name = 'Lipid composition'
lipid_rxn.lower_bound = 0.
lipid_rxn.upper_bound = 1000.
lipid_rxn.add_metabolites({
cpd15543_c: -0.005,
cpd15545_c: -0.005,
cpd15540_c: -0.005,
cpd15728_c: -0.005,
cpd15729_c: -0.005,
cpd15737_c: -0.005,
cpd15738_c: -0.005,
cpd11852_c: 1.0
})
#--------------------------------------------------------------------------------#
# Ions, Vitamins, & Cofactors
# Vitamins
cpd00104_c = universal.metabolites.get_by_id('cpd00104_c') # Biotin MDM
cpd00644_c = universal.metabolites.get_by_id('cpd00644_c') # Pantothenate MDM
cpd00263_c = universal.metabolites.get_by_id('cpd00263_c') # Pyridoxine MDM
cpd00393_c = universal.metabolites.get_by_id('cpd00393_c') # folate
cpd00133_c = universal.metabolites.get_by_id('cpd00133_c') # nicotinamide
cpd00443_c = universal.metabolites.get_by_id('cpd00443_c') # p-aminobenzoic acid
cpd00220_c = universal.metabolites.get_by_id('cpd00220_c') # riboflavin
cpd00305_c = universal.metabolites.get_by_id('cpd00305_c') # thiamin
# Ions
cpd00149_c = universal.metabolites.get_by_id('cpd00149_c') # Cobalt
cpd00030_c = universal.metabolites.get_by_id('cpd00030_c') # Manganese
cpd00254_c = universal.metabolites.get_by_id('cpd00254_c') # Magnesium
cpd00971_c = universal.metabolites.get_by_id('cpd00971_c') # Sodium
cpd00063_c = universal.metabolites.get_by_id('cpd00063_c') # Calcium
cpd10515_c = universal.metabolites.get_by_id('cpd10515_c') # Iron
cpd00205_c = universal.metabolites.get_by_id('cpd00205_c') # Potassium
cpd00099_c = universal.metabolites.get_by_id('cpd00099_c') # Chloride
# Cofactors
cpd00022_c = universal.metabolites.get_by_id('cpd00022_c') # Acetyl-CoA
cpd00010_c = universal.metabolites.get_by_id('cpd00010_c') # CoA
cpd00015_c = universal.metabolites.get_by_id('cpd00015_c') # FAD
cpd00003_c = universal.metabolites.get_by_id('cpd00003_c') # NAD
cpd00004_c = universal.metabolites.get_by_id('cpd00004_c') # NADH
cpd00006_c = universal.metabolites.get_by_id('cpd00006_c') # NADP
cpd00005_c = universal.metabolites.get_by_id('cpd00005_c') # NADPH
# Energy molecules
cpd00002_c = universal.metabolites.get_by_id('cpd00002_c') # ATP
cpd00008_c = universal.metabolites.get_by_id('cpd00008_c') # ADP
cpd00009_c = universal.metabolites.get_by_id('cpd00009_c') # Phosphate
cpd00012_c = universal.metabolites.get_by_id('cpd00012_c') # PPi
cpd00038_c = universal.metabolites.get_by_id('cpd00038_c') # GTP
cpd00031_c = universal.metabolites.get_by_id('cpd00031_c') # GDP
cpd00274_c = universal.metabolites.get_by_id('cpd00274_c') # Citrulline
cofactor_c = cobra.Metabolite(
'cofactor_c',
formula='',
name='Cofactor Pool',
compartment='cytosol')
cofactor_rxn = cobra.Reaction('cofactor_rxn')
cofactor_rxn.name = 'Cofactor Pool'
cofactor_rxn.lower_bound = 0.
cofactor_rxn.upper_bound = 1000.
cofactor_rxn.add_metabolites({
cpd00104_c: -0.005,
cpd00644_c: -0.005,
cpd00263_c: -0.005,
cpd00393_c: -0.005,
cpd00133_c: -0.005,
cpd00443_c: -0.005,
cpd00220_c: -0.005,
cpd00305_c: -0.005,
cpd00149_c: -0.005,
cpd00030_c: -0.005,
cpd00254_c: -0.005,
cpd00971_c: -0.005,
cpd00063_c: -0.005,
cpd10515_c: -0.005,
cpd00205_c: -0.005,
cpd00099_c: -0.005,
cpd00022_c: -0.005,
cpd00010_c: -0.0005,
cpd00015_c: -0.0005,
cpd00003_c: -0.005,
cpd00004_c: -0.005,
cpd00006_c: -0.005,
cpd00005_c: -0.005,
cpd00002_c: -0.005,
cpd00008_c: -0.005,
cpd00009_c: -0.5,
cpd00012_c: -0.005,
cpd00038_c: -0.005,
cpd00031_c: -0.005,
cofactor_c: 1.0
})
#--------------------------------------------------------------------------------#
# Final Biomass
cpd11416_c = cobra.Metabolite(
'cpd11416_c',
formula='',
name='Biomass',
compartment='cytosol')
biomass_rxn = cobra.Reaction('biomass')
biomass_rxn.name = 'Gram-positive Biomass Reaction'
biomass_rxn.lower_bound = 0.
biomass_rxn.upper_bound = 1000.
biomass_rxn.add_metabolites({
cpd17041_c: -0.4, # Protein
cpd17043_c: -0.15, # RNA
cpd17042_c: -0.05, # DNA
cpd11852_c: -0.05, # Lipid
cellwall_c: -0.2,
cofactor_c: -0.2,
cpd00001_c: -20.0,
cpd00002_c: -20.0,
cpd00008_c: 20.0,
cpd00009_c: 20.0,
cpd11416_c: 1.0 # Biomass
})
grampos_biomass_components = [dna_rxn,rna_rxn, protein_rxn, teichoicacid_rxn, peptidoglycan_rxn, cellwall_rxn, lipid_rxn, cofactor_rxn, biomass_rxn]
# Add components to new model
model.add_reactions(grampos_biomass_components)
model.add_boundary(cpd11416_c, type='sink', reaction_id='EX_biomass', lb=0.0, ub=1000.0)
# Set new objective
model.objective = 'biomass'
model
model.slim_optimize()
# Gapfill
new_model = fast_gapfill(model, universal=universal, objective='biomass')
new_model
# Define minimal components and add to minimal media formulation
from cobra.medium import minimal_medium
ov = new_model.slim_optimize() * 0.1
components = minimal_medium(new_model, ov) # pick out necessary cofactors
essential = ['cpd00063_e','cpd00393_e','cpd00048_e','cpd00305_e','cpd00205_e','cpd00104_e','cpd00099_e',
'cpd00099_e','cpd00099_e','cpd00149_e','cpd00030_e','cpd10516_e','cpd00254_e','cpd00220_e',
'cpd00355_e','cpd00064_e','cpd00971_e','cpd00067_e']
# Wegkamp et al. 2009. Applied Microbiology.
# Lactobacillus minimal media
pmm5 = ['cpd00001_e','cpd00009_e','cpd00026_e','cpd00029_e','cpd00059_e','cpd00051_e','cpd00023_e',
'cpd00107_e','cpd00322_e','cpd00060_e','cpd00066_e','cpd00161_e','cpd00065_e','cpd00069_e',
'cpd00156_e','cpd00218_e','cpd02201_e','cpd00220_e','cpd00220_e','cpd04877_e','cpd28790_e',
'cpd00355_e']
minimal = essential + pmm5
new_model = fast_gapfill(new_model, universal=universal, objective='biomass', media=minimal)
new_model
new_model = all_orphan_prune(new_model)
new_model
print(len(new_model.genes))
new_model.slim_optimize()
new_model.name = 'Lactobacillus reuteri mlc3'
new_model.id = 'iLr488'
cobra.io.save_json_model(new_model, '/home/mjenior/Desktop/Lawley_MixB/draft_reconstructions/lact_reut.curated.json')
```
---
# SVM
```
import numpy as np
import sympy as sym
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
%matplotlib inline
np.random.seed(1)
```
## Simple Example Application
For a simple example dataset (i.e. one that is linearly separable and contains no noise points):
**Algorithm:**
Input: a linearly separable training set $T=\{(x_1,y_1),(x_2,y_2),...,(x_N,y_N)\}$, where $x_i \in \textit{X}=\mathbb{R}^n$, $y_i \in \textit{Y}=\{+1,-1\}$, $i=1,2,...,N$
Output: the separating hyperplane and the classification decision function
(1) Construct and solve the constrained optimization problem
$\underset{\alpha}{min}$ $\frac{1}{2}\sum_{i=1}^{N}\sum_{j=1}^{N}\alpha_i \alpha_j y_i y_j <x_i \cdot x_j>-\sum_{i=1}^{N}\alpha_i$
s.t. $\sum_{i=1}^{N}\alpha_i y_i=0$
$\alpha_i \geq 0,i=1,2,...,N$
to obtain the optimal solution $\alpha^{*}=(\alpha_1^{*},\alpha_2^{*},...,\alpha_n^{*})$.
The positive components $\alpha_j^{*}>0$ correspond to the support vectors.
(2) Compute
$w^{*} = \sum_{i=1}^{N}\alpha_i^{*}y_ix_i$
Choose a positive component $\alpha_j^{*}>0$ of $\alpha^{*}$ and compute
$b^{*}=y_j-\sum_{i=1}^{N}\alpha_i^{*}y_i<x_i \cdot x_j>$
(3) The separating hyperplane is
$w^{*}\cdot x + b^{*}=0$
and the classification decision function is:
$f(x)=sign(w^{*}\cdot x + b^{*})$
Here sign maps values greater than 0 to 1 and values less than 0 to -1.
```
def loadSimpleDataSet():
"""
从文本加载数据集
返回:
数据集和标签集
"""
train_x = np.array([[3,3],[4,3],[1,1]]).T
train_y = np.array([[1,1,-1]]).T
return train_x,train_y
train_x,train_y = loadSimpleDataSet()
print("train_x shape is : ",train_x.shape)
print("train_y shape is : ",train_y.shape)
plt.scatter(train_x[0,:],train_x[1,:],c=np.squeeze(train_y))
```
To make it easier to evaluate $\sum_{i=1}^{N}\sum_{j=1}^{N}\alpha_i \alpha_j y_i y_j <x_i \cdot x_j>$,
we first compute the inner (Gram) products of train_x, train_y, and alphas, then multiply them element-wise and sum.
Compute the inner product of train_x:
```
Inner_train_x = np.dot(train_x.T,train_x)
print("Train_x is:\n",train_x)
print("Inner train x is:\n",Inner_train_x)
```
Compute the inner product of train_y:
```
Inner_train_y = np.dot(train_y,train_y.T)
print("Train y is:\n",train_y)
print("Inner train y is:\n",Inner_train_y)
```
Next we compute the inner product of alphas (the Lagrange multipliers). Note that in general we would fix all multipliers except a chosen pair and iteratively update only that pair; since this example is very simple and has only 3 sample points (in fact $\alpha_1,\alpha_3$ are the support vectors), we can handle it directly.
Substituting the constraint:
$\sum_{i=1}^3\alpha_i y_i=\alpha_1y_1+\alpha_2y_2+\alpha_3y_3 =0 \Rightarrow \alpha_3 = -(\alpha_1y_1+\alpha_2y_2)/y_3$
```
alphas_sym = sym.symbols('alpha1:4')
alphas = np.array([alphas_sym]).T
alphas[-1]= -np.sum(alphas[:-1,:]*train_y[:-1,:]) / train_y[-1,:]
Inner_alphas = np.dot(alphas,alphas.T)
print("alphas is: \n",alphas)
print("Inner alphas is:\n",Inner_alphas)
```
Now we find the optimal $\alpha^{*}=(\alpha_1^{*},\alpha_2^{*},...,\alpha_n^{*})$ by minimizing
$\underset{\alpha}{min}$ $\frac{1}{2}\sum_{i=1}^{N}\sum_{j=1}^{N}\alpha_i \alpha_j y_i y_j <x_i \cdot x_j>-\sum_{i=1}^{N}\alpha_i$
**Note:**
This step uses the sympy library; for details see [柚子皮-Sympy符号计算库](https://blog.csdn.net/pipisorry/article/details/39123247)
or the official [Sympy](https://www.sympy.org/en/index.html) documentation.
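For orientation, here is a minimal, self-contained sympy sketch of the three operations used below (creating symbols, differentiating, and solving); the expression is a toy stand-in, not the actual dual function:
```
import sympy as sym

a1, a2 = sym.symbols('alpha1 alpha2')
# A toy quadratic standing in for the dual objective
expr = a1**2 + a2**2 - 2*a1 - 4*a2 + a1*a2
# Set both partial derivatives to zero and solve for the stationary point
solution = sym.solve([expr.diff(a1), expr.diff(a2)], [a1, a2])
print(solution)  # {alpha1: 0, alpha2: 2}
```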
```
def compute_dual_function(alphas,Inner_alphas,Inner_train_x,Inner_train_y):
"""
Parameters:
alphas: initialization lagrange multiplier,shape is (n,1).
n:number of example.
Inner_alphas: Inner product of alphas.
Inner_train_x: Inner product of train x set.
Inner_train_y: Inner product of train y set.
simplify : simplify compute result of dual function.
return:
s_alpha: result of dual function
"""
s_alpha = sym.simplify(1/2*np.sum(Inner_alphas * Inner_train_x*Inner_train_y) - (np.sum(alphas)))
return s_alpha
s_alpha = compute_dual_function(alphas,Inner_alphas,Inner_train_x,Inner_train_y)
print('s_alpha is:\n ',s_alpha)
```
Now take the partial derivative of the dual function with respect to each alpha and set it to zero.
```
def Derivative_alphas(alphas,s_alpha):
"""
Parameters:
alphas: lagrange multiplier.
s_alpha: dual function
return:
bool value.
True: Meet all constraints,means,all lagrange multiplier >0
False:Does not satisfy all constraints,means some lagrange multiplier <0.
"""
cache_derivative_alpha = []
for alpha in alphas.squeeze()[:-1]: # remove the last element.
derivative = s_alpha.diff(alpha) # diff: derivative
cache_derivative_alpha.append(derivative)
derivative_alpha = sym.solve(cache_derivative_alpha,set=True) # calculate alphas.
print('derivative_alpha is: ',derivative_alpha)
# check alpha > 0
check_alpha_np = np.array(list(derivative_alpha[1])) > 0
return check_alpha_np.all()
check_alpha = Derivative_alphas(alphas,s_alpha)
print("Constraint lagrange multiplier is: ",check_alpha)
```
We can see that the stationary point gives $\alpha_2<0$, which violates the constraint $\alpha_2 \geqslant 0$, so we cannot use this extremum.
-------------
Since setting the partial derivatives to zero violates the constraints on the Lagrange multipliers, we instead fix one $\alpha_i$ at a time, set the other $\alpha$ to 0, solve the zero-derivative condition for the current $\alpha_i$, and substitute it back into the dual function to get a value. We then compare all the resulting values, pick the $\alpha_i$ that gives the smallest one, and from the candidates with $\alpha_i>0$ recover the alpha that we fixed at the very beginning.
**Algorithm:**
Input: the array of Lagrange multipliers, excluding the alpha that was fixed at the start
Output: the optimal Lagrange multipliers, i.e. the support vectors
(1) Append a column to the input array and initialize it to 0
- alphas_zeros = np.zeros((alphas.shape[0],1))[:-1]
- alphas_add_zeros = np.c_[alphas[:-1],alphas_zeros]
(2) Apply a "mask" to the augmented array so that one $\alpha$ is kept and all the other $\alpha$ are set to 0.
- mask_alpha = np.ma.array(alphas_add_zeros, mask=False) # create mask array.
- mask_alpha.mask[i] = True # masked alpha
- Using a masked array with sympy raises a warning (the masked value is treated as None); this is harmless and should not change the alpha objects in the dual expression
(3) Substitute the masked array into the dual function, take the derivative with respect to $\alpha_i$, and set it to zero to solve for $\alpha_i$
(4) Substitute the solved $\alpha_i$ (with all other alphas equal to 0) into the dual function and evaluate it
(5) Compare all the resulting values and keep the alpha corresponding to the smallest one; then compute the alpha that was fixed at the start.
```
def choose_best_alphas(alphas,s_alpha):
"""
Parameters:
alphas: Lagrange multiplier.
s_alpha: dual function
return:
best_vector: best support vector machine.
"""
# add col in alphas,and initialize value equal 0. about 2 lines.
alphas_zeros = np.zeros((alphas.shape[0],1))[:-1]
alphas_add_zeros = np.c_[alphas[:-1],alphas_zeros]
# cache some parameters.
cache_alphas_add = np.zeros((alphas.shape[0],1))[:-1] # cache derivative alphas.
cache_alphas_compute_result = np.zeros((alphas.shape[0],1))[:-1] # cache value in dual function result
cache_alphas_to_compute = alphas_add_zeros.copy() # get minmux dual function value,cache this values.
for i in range(alphas_add_zeros.shape[0]):
mask_alpha = np.ma.array(alphas_add_zeros, mask=False) # create mask array.
mask_alpha.mask[i] = True # masked alpha
value = sym.solve(s_alpha.subs(mask_alpha).diff())[0] # calculate alpha_i
cache_alphas_add[i] = value
cache_alphas_to_compute[i][1] = value
cache_alphas_compute_result[i][0] = s_alpha.subs(cache_alphas_to_compute) # calculate finally dual function result.
cache_alphas_to_compute[i][1] = 0 # make sure other alphas equal 0.
min_alpha_value_index = cache_alphas_compute_result.argmin()
best_vector =np.array([cache_alphas_add[min_alpha_value_index]] + [- cache_alphas_add[min_alpha_value_index] / train_y[-1]])
return [min_alpha_value_index]+[2],best_vector
min_alpha_value_index,best_vector = choose_best_alphas(alphas,s_alpha)
print(min_alpha_value_index)
print('support vector machine is:',alphas[min_alpha_value_index])
```
$w^{*} = \sum_{i=1}^{N}\alpha_i^{*}y_ix_i$
```
w = np.sum(np.multiply(best_vector , train_y[min_alpha_value_index].T) * train_x[:,min_alpha_value_index],axis=1)
print("W is: ",w)
```
Choose a positive component $\alpha_j^{*}>0$ of $\alpha^{*}$ and compute
$b^{*}=y_j-\sum_{i=1}^{N}\alpha_i^{*}y_i<x_i \cdot x_j>$
Here I choose alpha1.
```
b = train_y[0]-np.sum(best_vector.T * np.dot(train_x[:,min_alpha_value_index].T,train_x[:,min_alpha_value_index])[0]
* train_y[min_alpha_value_index].T)
print("b is: ",b)
```
So the separating hyperplane gives the decision function:
$f(x)=sign[wx+b]$
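As a quick sanity check (a minimal sketch reusing the `w`, `b`, `train_x`, and `train_y` computed above), we can evaluate the decision function on the three training points and compare against the labels:
```
# f(x) = sign(w.x + b), evaluated column-wise on train_x (shape (2, 3))
decision_values = np.dot(w, train_x) + b
print("decision values:", decision_values)
print("predictions:", np.sign(decision_values))
print("labels:", train_y.ravel())
```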
# SMO
Here we implement a simplified version of the SMO algorithm. "Simplified" means it is slower than SVC and its parameter handling is not as good as SVC's, but with some tuning it can reach results comparable to SVC.
### Algorithm:
#### 1. Choosing the first variable: SMO picks the sample point that violates the KKT conditions most severely as $\alpha_1$, i.e. a point violating one of:
$\alpha_i=0\Leftrightarrow y_ig(x_i)\geqslant1$
$0<\alpha_i<C\Leftrightarrow y_ig(x_i)=1$
$\alpha_i=C \Leftrightarrow y_ig(x_i)\leqslant1$
where:
$g(x_i)=\sum_{j=1}^{N}\alpha_jy_jK(x_i,x_j)+b$
**Notes:**
- Initially every $\alpha_i$ is set to 0, with one multiplier per training sample.
- The check is carried out within a tolerance $\varepsilon$.
- During the check we first loop over all points satisfying $0<\alpha_i<C$, i.e. the support vectors on the margin boundary, looking for the most severe KKT violator.
- If no point satisfies $0<\alpha_i<C$, we loop over all sample points to find the most severe KKT violator.
- Here the *most severe KKT violator* can simply be taken as the point with the smallest $y_ig(x_i)$, which becomes $\alpha_1$.
#### 2. Choosing the second variable: SMO wants $\alpha_2$ to change as much as possible
Since $\alpha_2^{new}$ depends on $|E_1-E_2|$, we want $|E_1-E_2|$ to be as large as possible. A simple way to speed this up is:
if $E_1$ is positive, choose the smallest $E_i$ as $E_2$; if $E_1$ is negative, choose the largest $E_i$ as $E_2$. To save computation, the $E_i$ values are cached in a list.
**Notes:**
- If the $\alpha_2$ chosen this way does not decrease the objective enough, a heuristic fallback is to try each support vector on the margin boundary in turn as $\alpha_2$ until the objective decreases sufficiently; if that still fails, discard $\alpha_1$ and choose a new $\alpha_1$.
- This simplified SMO implementation does not handle that special case.
#### 3. Compute $\alpha_1^{new},\alpha_2^{new}$
Computing $\alpha_1^{new},\alpha_2^{new}$ prepares us for computing $b$ and $E_i$.
3.1 Compute the bounds for $\alpha_2$:
- if $y_1 \neq y_2$: $L=max(0,\alpha_2^{old}-\alpha_1^{old})$, $H=min(C,C+\alpha_2^{old}-\alpha_1^{old})$
- if $y_1 = y_2$: $L=max(0,\alpha_2^{old}+\alpha_1^{old}-C)$, $H=min(C,\alpha_2^{old}+\alpha_1^{old})$
3.2 Compute $\alpha_2^{new,unc} = \alpha_2^{old}+\frac{y_2(E_1-E_2)}{\eta}$
where:
$\eta = K_{11}+K_{22}-2K_{12}$, with $K_{ij}$ denoting the kernel function (Gaussian, polynomial, etc.).
3.3 Clip $\alpha_2$
$\alpha_2^{new}=\left\{\begin{matrix}
H, &\alpha_2^{new,unc}>H \\
\alpha_2^{new,unc},& L\leqslant \alpha_2^{new,unc}\leqslant H \\
L,& \alpha_2^{new,unc}<L
\end{matrix}\right.$
3.4 Compute $\alpha_1^{new}$
$\alpha_1^{new}=\alpha_1^{old}+y_1y_2(\alpha_2^{old}-\alpha_2^{new})$
#### 4. Compute the threshold b and the errors $E_i$
$b_1^{new}=-E_1-y_1K_{11}(\alpha_1^{new}-\alpha_1^{old})-y_2K_{21}(\alpha_2^{new}-\alpha_2^{old})+b^{old}$
$b_2^{new}=-E_2-y_1K_{12}(\alpha_1^{new}-\alpha_1^{old})-y_2K_{22}(\alpha_2^{new}-\alpha_2^{old})+b^{old}$
If $\alpha_1^{new},\alpha_2^{new}$ both satisfy $0<\alpha_i^{new}<C,i=1,2$,
then $b_1^{new}=b_2^{new}=b^{new}$.
If $\alpha_1^{new},\alpha_2^{new}$ are 0 or C, then every value between $b_1^{new}$ and $b_2^{new}$
satisfies the KKT conditions, and we take the midpoint as $b^{new}$.
$E_i^{new}=\left(\sum_{j \in S}y_j\alpha_jK(x_i,x_j)\right)+b^{new}-y_i$
where $S$ is the set of all support vectors $x_j$.
#### 5. Update the parameters
Update $\alpha_i$, $E_i$, and $b$.
#### Note:
After training, the vast majority of the $\alpha_i$ components are 0; only a few are nonzero, and those nonzero components correspond to the support vectors.
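The pair update described in steps 3.1-3.4 and step 4 can be collected into one small helper. Below is a minimal sketch (a standalone function, not used by the class that follows) which assumes a precomputed kernel matrix `K`, cached errors `E`, current multipliers `alpha`, labels `y`, threshold `b`, and penalty `C`:
```
def smo_pair_update(alpha, y, E, K, b, i1, i2, C):
    """One SMO update of the pair (i1, i2); returns the new alpha1, alpha2 and b."""
    # 3.1 bounds on alpha2
    if y[i1] == y[i2]:
        L, H = max(0.0, alpha[i2] + alpha[i1] - C), min(C, alpha[i2] + alpha[i1])
    else:
        L, H = max(0.0, alpha[i2] - alpha[i1]), min(C, C + alpha[i2] - alpha[i1])
    eta = K[i1, i1] + K[i2, i2] - 2 * K[i1, i2]
    if eta <= 0:                       # degenerate pair, skip the update
        return alpha[i1], alpha[i2], b
    # 3.2 + 3.3 unconstrained update, then clip to [L, H]
    a2 = alpha[i2] + y[i2] * (E[i1] - E[i2]) / eta
    a2 = min(max(a2, L), H)
    # 3.4 update alpha1
    a1 = alpha[i1] + y[i1] * y[i2] * (alpha[i2] - a2)
    # 4. threshold
    b1 = -E[i1] - y[i1]*K[i1, i1]*(a1 - alpha[i1]) - y[i2]*K[i2, i1]*(a2 - alpha[i2]) + b
    b2 = -E[i2] - y[i1]*K[i1, i2]*(a1 - alpha[i1]) - y[i2]*K[i2, i2]*(a2 - alpha[i2]) + b
    if 0 < a1 < C:
        b_new = b1
    elif 0 < a2 < C:
        b_new = b2
    else:
        b_new = (b1 + b2) / 2
    return a1, a2, b_new
```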
### A simple SMO example
Load the data: the iris dataset from scikit-learn. Because the train/test split below is random, the data differs on every run.
```
# data
def create_data():
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['label'] = iris.target
df.columns = ['sepal length', 'sepal width', 'petal length', 'petal width', 'label']
data = np.array(df.iloc[:100, [0, 1, -1]])
for i in range(len(data)):
if data[i,-1] == 0:
data[i,-1] = -1
return data[:,:2], data[:,-1]
X, y = create_data()
# Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
plt.scatter(X[:,0],X[:,1],c=y)
```
### Building the SMO algorithm
```
class SVM:
def __init__(self,max_iter = 100,kernel = 'linear',C=1.,is_print=False,sigma=1):
"""
Parameters:
max_iter: maximum number of iterations
kernel: kernel function; only "linear" and "Gaussion" (Gaussian) are implemented here
sigma: parameter of the Gaussian kernel
C: penalty parameter (slack variable weight)
is_print: whether to print progress
"""
self.max_iter = max_iter
self.kernel = kernel
self.C = C # penalty / slack parameter C
self.is_print = is_print
self.sigma = sigma
def init_args(self,features,labels):
"""
self.m: number of samples
self.n: number of features
"""
self.m,self.n = features.shape
self.X = features
self.Y = labels
self.b = 0.
# Initialize the alphas and cache the E_i values in a list
self.alpha = np.zeros(self.m) + 0.0001
self.E = [self._E(i) for i in range(self.m)]
def _g(self,i):
"""
Prediction g(x_i)
"""
g_x = np.sum(self.alpha*self.Y*self._kernel(self.X[i],self.X)) + self.b
return g_x
def _E(self,i):
"""
E(x) is the difference between the prediction g(x) for input x and the label y
"""
g_x = self._g(i) - self.Y[i]
return g_x
def _kernel(self,x1,x2):
"""
Compute the kernel
"""
if self.kernel == "linear":
return np.sum(np.multiply(x1,x2),axis=1)
if self.kernel == "Gaussion":
return np.sum(np.exp(-((x1-x2)**2)/(2*self.sigma)),axis=1)
def _KKT(self,i):
"""
Check the KKT conditions
"""
y_g = np.round(np.float64(np.multiply(self._g(i),self.Y[i]))) # the check only holds within a tolerance epsilon, so round here
if self.alpha[i] == 0:
return y_g >= 1,y_g
elif 0<self.alpha[i]<self.C:
return y_g == 1,y_g
elif self.alpha[i] == self.C:
return y_g <=1,y_g
else:
return ValueError
def _init_alpha(self):
"""
The outer loop first iterates over all points with 0<a<C and checks whether they satisfy the KKT conditions.
Points with 0<a<C are the support vectors lying on the margin boundary.
"""
index_array = np.where(np.logical_and(self.alpha>0,self.alpha<self.C))[0] # np.where here requires alpha to have shape (m,)
if len(index_array) !=0:
cache_list = []
for i in index_array:
bool_,y_g = self._KKT(i)
if not bool_:
cache_list.append((y_g,i))
# If there are none, iterate over the whole sample set
else:
cache_list = []
for i in range(self.m):
bool_,y_g = self._KKT(i)
if not bool_:
cache_list.append((y_g,i))
# Take the most severe KKT violator, i.e. the point with the smallest g(x_i)*y_i
min_i = sorted(cache_list,key=lambda x:x[0])[0][1]
# Choose the second variable alpha2
E1 = self.E[min_i]
if E1 > 0:
j = np.argmin(self.E)
else:
j = np.argmax(self.E)
return min_i,j
def _prune(self,alpha,L,H):
"""
Clip alpha to the interval [L, H]
"""
if alpha > H:
return H
elif L<=alpha<=H:
return alpha
elif alpha < L:
return L
else:
return ValueError
def fit(self,features, labels):
self.init_args(features, labels)
for t in range(self.max_iter):
# Select alpha1 and alpha2
i1,i2 = self._init_alpha()
# Compute the bounds L and H
if self.Y[i1] == self.Y[i2]: # same label
L = max(0,self.alpha[i2]+self.alpha[i1]-self.C)
H = min(self.C,self.alpha[i2]+self.alpha[i1])
else:
L = max(0,self.alpha[i2]-self.alpha[i1])
H = min(self.C,self.C+self.alpha[i2]-self.alpha[i1])
# Look up the cached errors E_i and compute eta
E1 = self.E[i1]
E2 = self.E[i2]
eta = self._kernel(self.X[np.newaxis,i1],self.X[np.newaxis,i1]) + \
self._kernel(self.X[np.newaxis,i2],self.X[np.newaxis,i2]) - \
2 * self._kernel(self.X[np.newaxis,i1],self.X[np.newaxis,i2])
if eta <=0:
continue
alpha2_new_nuc = self.alpha[i2] + (self.Y[i2] * (E1-E2) /eta)
# Clip alpha2_new_nuc
alpha2_new = self._prune(alpha2_new_nuc,L,H)
alpha1_new = self.alpha[i1] + self.Y[i1] * self.Y[i2] * (self.alpha[i2]-alpha2_new)
# Compute b1 and b2
b1_new = -E1-self.Y[i1]*self._kernel(self.X[np.newaxis,i1],self.X[np.newaxis,i1])*(alpha1_new - self.alpha[i1])\
- self.Y[i2] * self._kernel(self.X[np.newaxis,i2],self.X[np.newaxis,i1])*(alpha2_new - self.alpha[i2]) + self.b
b2_new = -E2-self.Y[i1]*self._kernel(self.X[np.newaxis,i1],self.X[np.newaxis,i2])*(alpha1_new - self.alpha[i1])\
- self.Y[i2] * self._kernel(self.X[np.newaxis,i2],self.X[np.newaxis,i2])*(alpha2_new - self.alpha[i2]) + self.b
if 0 < alpha1_new < self.C:
b_new = b1_new
elif 0 < alpha2_new < self.C:
b_new = b2_new
else:
# Take the midpoint
b_new = (b1_new + b2_new) / 2
# Update the parameters
self.alpha[i1] = alpha1_new
self.alpha[i2] = alpha2_new
self.b = b_new
self.E[i1] = self._E(i1)
self.E[i2] = self._E(i2)
if self.is_print:
print("Train Done!")
def predict(self,data):
predict_y = np.sum(self.alpha*self.Y*self._kernel(data,self.X)) + self.b
return np.sign(predict_y)[0]
def score(self,test_X,test_Y):
m,n = test_X.shape
count = 0
for i in range(m):
predict_i = self.predict(test_X[i])
if predict_i == float(test_Y[i]):
count +=1
return count / m
```
Because the iris train/test split changes on every run, we average the accuracy over several runs and compare it with SVC.
```
count = 0
failed2 = []
for i in range(20):
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
svm = SVM(max_iter=200,C=2,kernel='linear')
svm.fit(X_train,y_train)
test_accourate = svm.score(X_test,y_test)
train_accourate = svm.score(X_train,y_train)
if test_accourate < 0.8:
failed2.append((X_train, X_test, y_train, y_test)) # keep the splits with low test accuracy
print("Test accourate:",test_accourate)
print("Train accourate:",train_accourate)
print('--------------------------')
count += test_accourate
print("Test average accourate is: ",count/20)
```
We can see that some splits give high accuracy while others give very low accuracy. We saved the low-accuracy splits above, so let's take one out and experiment with it.
```
failed2X_train, failed2X_test, failed2y_train, failed2y_test= failed2[2]
```
We can see that after changing C the accuracy is respectable again, which shows that the simplified SMO algorithm is workable. When we measured the average
accuracy, C was held fixed, so that value of C was probably just not suitable for some of the splits.
```
svm = SVM(max_iter=200,C=5,kernel='linear')
svm.fit(failed2X_train,failed2y_train)
accourate = svm.score(failed2X_test,failed2y_test)
accourate
```
Test with scikit-learn's SVC.
### Scikit-SVC
Based on scikit-learn's [SVM](https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html#sklearn.svm.SVC.decision_function)
Example 1:
```
from sklearn.svm import SVC
count = 0
for i in range(10):
X, y = create_data()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
clf = SVC(kernel="linear",C=2)
clf.fit(X_train, y_train)
accourate = clf.score(X_test, y_test)
print("accourate",accourate)
count += accourate
print("average accourate is: ",count/10)
```
Of course, since this is a simplified SMO implementation, its average accuracy is not as high as SVC's, but we can tune C and the kernel to improve it.
## Multilabel classification
Multi-label: a single instance can carry several labels; for example, a movie can be both action and romance.
Multi-class classification: there are multiple classes to choose from, but each sample belongs to exactly one class.
Multi-label classification: each sample can have several labels.
For multi-class classification, the last layer uses a softmax for prediction and training uses categorical_crossentropy as the loss function.
For multi-label classification, the last layer uses a sigmoid for prediction and training uses binary_crossentropy as the loss function.
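To make the last two points concrete, here is a minimal Keras sketch (illustrative only; `n_features`, `n_classes`, and the layer sizes are placeholders, and Keras is not otherwise used in this notebook):
```
from tensorflow import keras

n_features, n_classes = 20, 5

# Multi-class: softmax output, exactly one class per sample
multiclass_model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(n_features,)),
    keras.layers.Dense(n_classes, activation='softmax'),
])
multiclass_model.compile(optimizer='adam', loss='categorical_crossentropy')

# Multi-label: independent sigmoid outputs, any subset of classes per sample
multilabel_model = keras.Sequential([
    keras.layers.Dense(32, activation='relu', input_shape=(n_features,)),
    keras.layers.Dense(n_classes, activation='sigmoid'),
])
multilabel_model.compile(optimizer='adam', loss='binary_crossentropy')
```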
This example simulates a multi-label document classification problem. The dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is more than 2, and that the document length is never zero. Likewise, we reject classes which have already been chosen. The documents that are assigned to both classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal components found by [PCA](http://www.cnblogs.com/jerrylead/archive/2011/04/18/2020209.html) and [CCA](https://files-cdn.cnblogs.com/files/jerrylead/%E5%85%B8%E5%9E%8B%E5%85%B3%E8%81%94%E5%88%86%E6%9E%90.pdf) for visualisation purposes, followed by using the [sklearn.multiclass.OneVsRestClassifier](https://scikit-learn.org/stable/modules/generated/sklearn.multiclass.OneVsRestClassifier.html#sklearn.multiclass.OneVsRestClassifier) metaclassifier using two SVCs with linear kernels to learn a discriminative model for each class. Note that PCA is used to perform an unsupervised dimensionality reduction, while CCA is used to perform a supervised one.
Note: in the plot, “unlabeled samples” does not mean that we don’t know the labels (as in semi-supervised learning) but that the samples simply do not have a label.
```
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
def plot_hyperplance(clf,min_x,max_x,linestyle,label):
# get the separating heyperplance
# 0 = w0*x0 + w1*x1 +b
w = clf.coef_[0]
a = -w[0] /w[1]
xx = np.linspace(min_x -5,max_x + 5)
yy = a * xx -(clf.intercept_[0]) / w[1] # clf.intercept_[0] get parameter b,
plt.plot(xx,yy,linestyle,label=label)
def plot_subfigure(X,Y,subplot,title,transform):
if transform == "pca": # PCA performs an unsupervised projection (ignores the labels)
X = PCA(n_components=2).fit_transform(X)
print("PCA",X.shape)
elif transform == "cca": # CCA performs a supervised projection, i.e. it uses the relationship between X and the labels
X = CCA(n_components=2).fit(X, Y).transform(X)
print("CCA",X.shape)
else:
raise ValueError
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear')) # train one linear SVM per class (one-vs-rest)
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0]) # indices of samples carrying the first label
one_class = np.where(Y[:, 1]) # indices of samples carrying the second label
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray', edgecolors=(0, 0, 0))
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
# classif.estimators_[0]: the first estimator, giving the decision boundary for class 1
plot_hyperplance(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
# classif.estimators_[1]: the second estimator, giving the decision boundary for class 2
plot_hyperplance(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
```
**make_multilabel_classification:**
make_multilabel_classification(n_samples=100, n_features=20, n_classes=5, n_labels=2, length=50, allow_unlabeled=True, sparse=False, return_indicator='dense', return_distributions=False, random_state=None)
```
plt.figure(figsize=(8, 6))
# If ``True``, some instances might not belong to any class, i.e. their one-hot label row is all zeros ([0, 0])
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
print("Original:",X.shape)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
print("Original:",X.shape)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
```
Because this is a multi-label setting (an instance may carry label 1, label 2, both, or no label at all, the "unlabeled samples"), we would intuitively expect CCA to do better than PCA in these plots (with or without the unlabeled samples), since CCA takes the correlation with the labels into account.
Because each instance has 2 possible labels, we can draw 2 decision boundaries (obtained via classif.estimators_[index]), plotting each one with $x_1 = -\frac{w_0}{w_1}x_0-\frac{b}{w_1}$.
---
This notebook finetunes VGG16 by adding a couple of Dense layers and trains it to classify between cats and dogs.
This gives better classification, around 95% accuracy on the validation dataset.
```
%load_ext autoreload
%autoreload 2
import numpy as np
import tensorflow as tf
from tensorflow.contrib.keras import layers
from tensorflow.contrib.keras import models
from tensorflow.contrib.keras import optimizers
from tensorflow.contrib.keras import applications
from tensorflow.contrib.keras.python.keras.preprocessing import image
from tensorflow.contrib.keras.python.keras.applications import imagenet_utils
def get_batches(dirpath, gen=image.ImageDataGenerator(), shuffle=True, batch_size=64, class_mode='categorical'):
return gen.flow_from_directory(dirpath, target_size=(224,224), class_mode=class_mode, shuffle=shuffle, batch_size=batch_size)
batch_size = 64
train_batches = get_batches('./data/train', batch_size=batch_size)
val_batches = get_batches('./data/valid', batch_size=batch_size)
```
Model creation
```
vgg16 = applications.VGG16(weights="imagenet", include_top=False, input_shape=(224, 224, 3))
##
finetune_in = vgg16.output
x = layers.Flatten(name='flatten')(finetune_in)
x = layers.Dense(4096, activation='relu', name='fc1')(x)
x = layers.BatchNormalization()(x)
x = layers.Dropout(0.5)(x)
x = layers.Dense(4096, activation='relu', name='fc2')(x)
x = layers.BatchNormalization()(x)
x = layers.Dropout(0.5)(x)
predictions = layers.Dense(train_batches.num_class, activation='softmax', name='predictions')(x)
model = models.Model(inputs=vgg16.input, outputs=predictions)
##
```
We freeze everything except the newly added classification layers (the last three Dense layers together with their batch-normalization and dropout layers), so only they are trained.
```
for layer in model.layers[:-7]:
layer.trainable = False
model.summary()
for i, layer in enumerate(model.layers):
print(i, layer.name, layer.trainable)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
epochs = 1
steps_per_epoch = train_batches.samples // train_batches.batch_size
validation_steps = val_batches.samples // val_batches.batch_size
model.fit_generator(train_batches, validation_data=val_batches, epochs=epochs,
steps_per_epoch=steps_per_epoch,validation_steps=validation_steps)
```
This gives us a validation score of around: `val_loss: 0.2865 - val_acc: 0.9536`
## Gen submission file
```
import submission
test_batches, steps = submission.test_batches()
preds = model.predict_generator(test_batches, steps)
preds.shape
submission.gen_file(preds, test_batches)
```
This gave a score of around `0.39` on the public leaderboard
---
## Introduction
**Offer Recommender example:**
___
In this example we will show how to:
- Set up the required environment for accessing the ecosystem prediction server.
- View and track business performance of the Offer Recommender.
## Setup
**Setting up import path:**
___
Add the path of the ecosystem notebook wrappers. It needs to point to the ecosystem notebook repository so that the packages required for accessing the prediction server via Python can be imported.
- **notebook_path:** Path to notebook repository.
```
notebook_path = "/path/to/the/ecosystem/notebook/repository"
# ---- Uneditible ----
import sys
sys.path.append(notebook_path)
# ---- Uneditible ----
```
**Import required packages:**
___
Import and load all packages required for the following use case.
```
# ---- Uneditible ----
import pymongo
from bson.son import SON
import pprint
import pandas as pd
import json
import numpy
import operator
import datetime
import time
import os
import matplotlib.pyplot as plt
from prediction import jwt_access
from prediction import notebook_functions
from prediction.apis import functions
from prediction.apis import data_munging_engine
from prediction.apis import data_management_engine
from prediction.apis import worker_h2o
from prediction.apis import prediction_engine
from prediction.apis import worker_file_service
%matplotlib inline
# ---- Uneditible ----
```
**Setup prediction server access:**
___
Create access token for prediction server.
- **url:** Url for the prediction server to access.
- **username:** Username for prediction server.
- **password:** Password for prediction server.
```
url = "http://demo.ecosystem.ai:3001/api"
username = "[email protected]"
password = "cd486be3-9955-4364-8ccc-a9ab3ffbc168"
# ---- Uneditible ----
auth = jwt_access.Authenticate(url, username, password)
# ---- Uneditible ----
database = "master"
collection = "bank_customer"
field = "{}"
limit = 100
projections = "{}"
skip = 0
output = data_management_engine.get_data(auth, database, collection, field, limit, projections, skip)
df = pd.DataFrame(output)
df.head()
counts = df["education"].value_counts()
counts.plot(kind="bar")
counts = df["gender"].value_counts()
counts.plot(kind="bar")
counts = df["changeIndicatorThree"].value_counts()
counts.plot(kind="bar")
counts = df["language"].value_counts()
counts.plot(kind="bar")
counts = df["numberOfProducts"].value_counts()
counts.plot(kind="bar")
counts = df["changeIndicatorSix"].value_counts()
counts.plot(kind="bar")
counts = df["numberOfChildren"].value_counts()
counts.plot(kind="bar")
counts = df["changeIndicatorSix"].value_counts()
counts.plot(kind="bar")
counts = df["numberOfChildren"].value_counts()
counts.plot(kind="bar")
counts = df["numberOfAddresses"].value_counts()
counts.plot(kind="bar")
counts = df["segment_enum"].value_counts()
counts.plot(kind="bar")
counts = df["region"].value_counts()
counts.plot(kind="bar")
counts = df["age"].value_counts()
counts.plot(kind="bar")
counts = df["proprtyOwnership"].value_counts()
counts.plot(kind="bar")
```
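The repeated `value_counts` bar charts above can also be generated in a single loop over the columns of interest. A minimal sketch, assuming `df` was loaded as in the cell above (column names copied verbatim, including the `proprtyOwnership` spelling used in the data):
```
import matplotlib.pyplot as plt

columns_to_plot = [
    "education", "gender", "changeIndicatorThree", "language",
    "numberOfProducts", "changeIndicatorSix", "numberOfChildren",
    "numberOfAddresses", "segment_enum", "region", "age", "proprtyOwnership",
]
for col in columns_to_plot:
    # One bar chart of category counts per column
    df[col].value_counts().plot(kind="bar", title=col)
    plt.show()
```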
|
github_jupyter
|
> Code to accompany **Chapter 10: Defending Against Adversarial Inputs**
# Fashion-MNIST - Generating Adversarial Examples on a Drop-out Network
This notebook demonstrates how to generate adversarial examples using a network that incorporates randomised drop-out.
```
import tensorflow as tf
from tensorflow import keras
import numpy as np
fashion_mnist = tf.keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
train_images = train_images/255.0
test_images = test_images/255.0
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
```
## Create a Simple Network with drop-out for Image Classification
We need to use the Keras __functional API__ (rather than the sequential API) to access the
dropout capability with `training = True` at test time.
The cell below has drop-out enabled at training time only. You can experiment by moving the drop-out layer
or adding drop-out to test time by replacing the `Dropout` line as indicated in the comments.
```
from tensorflow.keras.layers import Input, Dense, Flatten, Dropout
from tensorflow.keras.models import Model
inputs = Input(shape=(28,28))
x = Flatten()(inputs)
x = Dense(56, activation='relu')(x)
x = Dropout(0.2)(x) # Use this line for drop-out at training time only
# x = Dropout(0.2)(x, training=True) # Use this line instead for drop-out at test and training time
x = Dense(56, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
print(model)
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.summary()
```
Train the model and evaluate it.
If drop-out is included at test time, the evaluation results will vary from run to run because the model's predictions are stochastic.
```
model.fit(train_images, train_labels, epochs=6)
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Model accuracy based on test data:', test_acc)
```
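To see this variability in action, you can run `predict` several times on the same images when the test-time drop-out variant is enabled; the predicted probabilities will differ between passes. A minimal sketch, assuming the model above was built with the `Dropout(0.2)(x, training=True)` line:
```
# Run several stochastic forward passes over the same images
sample = test_images[:5]
passes = np.stack([model.predict(sample) for _ in range(10)])
print('Std of predicted probabilities across passes:\n', passes.std(axis=0))
print('Predicted class per pass:\n', np.argmax(passes, axis=2))
```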
## Create Some Adversarial Examples Using the Model
```
# Import helper function
import sys
sys.path.append('..')
from strengtheningdnns.adversarial_utils import generate_adversarial_data
import foolbox
fmodel = foolbox.models.TensorFlowModel.from_keras(model, bounds=(0, 255))
num_images = 1000
x_images = train_images[0:num_images, :]
attack_criterion = foolbox.criteria.Misclassification()
attack_fn = foolbox.attacks.GradientSignAttack(fmodel, criterion=attack_criterion)
x_adv_images, x_adv_perturbs, x_labels = generate_adversarial_data(original_images = x_images,
predictions = model.predict(x_images),
attack_fn = attack_fn)
```
## Take a Peek at some Results
The adversarial examples plotted should all be misclassified. However, if the model is running with drop-out at test
time as well (see the model creation above), some may be classified correctly because the model's predictions are stochastic.
```
images_to_plot = x_adv_images
import matplotlib.pyplot as plt
adversarial_predictions = model.predict(images_to_plot)
plt.figure(figsize=(15, 30))
for i in range(30):
plt.subplot(10,5,i+1)
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(images_to_plot[i], cmap=plt.cm.binary)
predicted_label = np.argmax(adversarial_predictions[i])
original_label = x_labels[i]
if predicted_label == original_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} ({})".format(class_names[predicted_label],
class_names[original_label]),
color=color)
```
Save the images if you wish so you can load them later.
```
np.save('../resources/test_images_GSAttack_dropout', x_adv_images)
```
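The saved array can then be reloaded in a later session (a minimal sketch; note that `np.save` appends a `.npy` extension when the filename has none):
```
# Reload the adversarial images saved above
x_adv_images = np.load('../resources/test_images_GSAttack_dropout.npy')
print(x_adv_images.shape)
```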
|
github_jupyter
|
```
# Install TensorFlow
# !pip install -q tensorflow-gpu==2.0.0-beta1
try:
%tensorflow_version 2.x # Colab only.
except Exception:
pass
import tensorflow as tf
print(tf.__version__)
# Load in the data
from sklearn.datasets import load_breast_cancer
# load the data
data = load_breast_cancer()
# check the type of 'data'
type(data)
# note: it is a Bunch object
# this basically acts like a dictionary where you can treat the keys like attributes
data.keys()
# 'data' (the attribute) means the input data
data.data.shape
# it has 569 samples, 30 features
# 'targets'
data.target
# note how the targets are just 0s and 1s
# normally, when you have K targets, they are labeled 0..K-1
# their meaning is not lost
data.target_names
# there are also 569 corresponding targets
data.target.shape
# you can also determine the meaning of each feature
data.feature_names
# normally we would put all of our imports at the top
# but this lets us tell a story
from sklearn.model_selection import train_test_split
# split the data into train and test sets
# this lets us simulate how our model will perform in the future
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, test_size=0.33)
N, D = X_train.shape
# Scale the data
# you'll learn why scaling is needed in a later course
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Now all the fun Tensorflow stuff
# Build the model
model = tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(D,)),
tf.keras.layers.Dense(1, activation='sigmoid')
])
# Alternatively, you can do:
# model = tf.keras.models.Sequential()
# model.add(tf.keras.layers.Dense(1, input_shape=(D,), activation='sigmoid'))
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Train the model
r = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=100)
# Evaluate the model - evaluate() returns loss and accuracy
print("Train score:", model.evaluate(X_train, y_train))
print("Test score:", model.evaluate(X_test, y_test))
# Plot what's returned by model.fit()
import matplotlib.pyplot as plt
plt.plot(r.history['loss'], label='loss')
plt.plot(r.history['val_loss'], label='val_loss')
plt.legend()
# Plot the accuracy too
plt.plot(r.history['accuracy'], label='acc')
plt.plot(r.history['val_accuracy'], label='val_acc')
plt.legend()
```
# Part 2: Making Predictions
This goes with the lecture "Making Predictions"
```
# Make predictions
P = model.predict(X_test)
print(P) # they are outputs of the sigmoid, interpreted as probabilities p(y = 1 | x)
# Round to get the actual predictions
# Note: has to be flattened since the targets are size (N,) while the predictions are size (N,1)
import numpy as np
P = np.round(P).flatten()
print(P)
# Calculate the accuracy, compare it to evaluate() output
print("Manually calculated accuracy:", np.mean(P == y_test))
print("Evaluate output:", model.evaluate(X_test, y_test))
```
# Part 3: Saving and Loading a Model
This goes with the lecture "Saving and Loading a Model"
```
# Let's now save our model to a file
model.save('linearclassifier.h5')
# Check that the model file exists
!ls -lh
# Let's load the model and confirm that it still works
# Note: there is a bug in Keras where load/save only works if you DON'T use the Input() layer explicitly
# So, make sure you define the model with ONLY Dense(1, input_shape=(D,))
# At least, until the bug is fixed
# https://github.com/keras-team/keras/issues/10417
model = tf.keras.models.load_model('linearclassifier.h5')
print(model.layers)
model.evaluate(X_test, y_test)
# Download the file - requires Chrome (at this point)
from google.colab import files
files.download('linearclassifier.h5')
```
|
github_jupyter
|
# Writing OER sets to file
---
### Import Modules
```
import os
print(os.getcwd())
import sys
import time; ti = time.time()
import json
import pandas as pd
import numpy as np
# #########################################################
from methods import (
get_df_features_targets,
get_df_jobs,
get_df_jobs_paths,
get_df_atoms_sorted_ind,
)
from methods import create_name_str_from_tup
from methods import get_df_jobs_paths, get_df_jobs_data
# #########################################################
from local_methods import write_other_jobs_in_set
from methods import isnotebook
isnotebook_i = isnotebook()
if isnotebook_i:
from tqdm.notebook import tqdm
verbose = True
else:
from tqdm import tqdm
verbose = False
```
### Read Data
```
df_jobs = get_df_jobs()
df_jobs_paths = get_df_jobs_paths()
df_features_targets = get_df_features_targets()
df_atoms = get_df_atoms_sorted_ind()
df_jobs_paths = get_df_jobs_paths()
df_jobs_data = get_df_jobs_data()
df_atoms = df_atoms.set_index("job_id")
```
### Main loop | writing OER sets
```
# # TEMP
# name_i = ('slac', 'wufulafe_03', 58.0)
# df_features_targets = df_features_targets.loc[[name_i]]
# # TEMP
# print(111 * "TEMP | ")
# # df_features_targets.index[329]
# indices = [
# ('slac', 'relovalu_12', 24.0),
# ]
# df_features_targets = df_features_targets.loc[indices]
# for name_i, row_i in df_features_targets.iterrows():
iterator = tqdm(df_features_targets.index, desc="1st loop")
for i_cnt, index_i in enumerate(iterator):
row_i = df_features_targets.loc[index_i]
# if verbose:
# print(name_i)
# #####################################################
job_id_o_i = row_i.data.job_id_o.iloc[0]
job_id_bare_i = row_i.data.job_id_bare.iloc[0]
job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# #####################################################
if job_id_bare_i is None:
continue
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# #####################################################
df_atoms__o = df_atoms.loc[job_id_o_i]
df_atoms__bare = df_atoms.loc[job_id_bare_i]
# #####################################################
atoms__o = df_atoms__o.atoms_sorted_good
atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
# dir_name = create_name_str_from_tup(name_i)
dir_name = create_name_str_from_tup(index_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/oer_group_files",
dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# #####################################################
atoms__o.write(
os.path.join(dir_path, "atoms__o.traj"))
atoms__o.write(
os.path.join(dir_path, "atoms__o.cif"))
atoms__bare.write(
os.path.join(dir_path, "atoms__bare.traj"))
atoms__bare.write(
os.path.join(dir_path, "atoms__bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, "atoms__oh.traj"))
atoms__oh.write(
os.path.join(dir_path, "atoms__oh.cif"))
# #####################################################
data_dict_to_write = dict(
job_id_o=job_id_o_i,
job_id_bare=job_id_bare_i,
job_id_oh=job_id_oh_i,
)
data_path = os.path.join(dir_path, "data.json")
with open(data_path, "w") as outfile:
json.dump(data_dict_to_write, outfile, indent=2)
# #####################################################
# Write other jobs in OER set
write_other_jobs_in_set(
job_id_bare_i,
dir_path=dir_path,
df_jobs=df_jobs, df_atoms=df_atoms,
df_jobs_paths=df_jobs_paths,
df_jobs_data=df_jobs_data,
)
```
# Writing top systems to file ROUGH TEMP
```
# TOP SYSTEMS
if False:
# if True:
df_features_targets = df_features_targets.loc[
[
("slac", "tefovuto_94", 16.0),
# slac__nifupidu_92__032
# sherlock__bihetofu_24__036
('slac', 'hobukuno_29', 16.0),
('sherlock', 'ramufalu_44', 56.0),
('slac', 'nifupidu_92', 32.0),
('sherlock', 'bihetofu_24', 36.0),
('slac', 'dotivela_46', 32.0),
('slac', 'vovumota_03', 33.0),
('slac', 'ralutiwa_59', 32.0),
('sherlock', 'bebodira_65', 16.0),
('sherlock', 'soregawu_05', 62.0),
('slac', 'hivovaru_77', 26.0),
('sherlock', 'vegarebo_06', 50.0),
('slac', 'ralutiwa_59', 30.0),
('sherlock', 'kamevuse_75', 49.0),
('nersc', 'hesegula_40', 94.0),
('slac', 'fewirefe_11', 39.0),
('sherlock', 'vipikema_98', 60.0),
('slac', 'gulipita_22', 48.0),
('sherlock', 'rofetaso_24', 48.0),
('slac', 'runopeno_56', 32.0),
('slac', 'magiwuni_58', 26.0),
]
]
for name_i, row_i in df_features_targets.iterrows():
# #####################################################
job_id_o_i = row_i.data.job_id_o.iloc[0]
job_id_bare_i = row_i.data.job_id_bare.iloc[0]
job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# #####################################################
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# #####################################################
df_atoms__o = df_atoms.loc[job_id_o_i]
df_atoms__bare = df_atoms.loc[job_id_bare_i]
# #####################################################
atoms__o = df_atoms__o.atoms_sorted_good
atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
dir_name = create_name_str_from_tup(name_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/top_overpot_sys")
# dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# atoms__o.write(
# os.path.join(dir_path, dir_name + "_o.cif"))
# atoms__bare.write(
# os.path.join(dir_path, dir_name + "_bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, dir_name + "_oh.cif"))
```
# MISC | Writing random cifs to file to open in VESTA
```
df_subset = df_features_targets.sample(n=6)
if False:
for name_i, row_i in df_subset.iterrows():
tmp = 42
job_id_oh_i = row_i[("data", "job_id_oh", "", )]
# # #####################################################
# job_id_o_i = row_i.data.job_id_o.iloc[0]
# job_id_bare_i = row_i.data.job_id_bare.iloc[0]
# job_id_oh_i = row_i.data.job_id_oh.iloc[0]
# # #####################################################
# if job_id_bare_i is None:
# continue
oh_exists = False
if job_id_oh_i is not None:
oh_exists = True
# # #####################################################
# df_atoms__o = df_atoms.loc[job_id_o_i]
# df_atoms__bare = df_atoms.loc[job_id_bare_i]
# # #####################################################
# atoms__o = df_atoms__o.atoms_sorted_good
# atoms__bare = df_atoms__bare.atoms_sorted_good
if oh_exists:
df_atoms__oh = df_atoms.loc[job_id_oh_i]
atoms__oh = df_atoms__oh.atoms_sorted_good
# #########################################################
# #########################################################
file_name_i = create_name_str_from_tup(name_i)
print(file_name_i)
dir_path = os.path.join(
os.environ["PROJ_irox_oer"],
"dft_workflow/job_analysis/prepare_oer_sets",
"out_data/misc_cif_files_oh")
# dir_name)
if not os.path.exists(dir_path):
os.makedirs(dir_path)
# #####################################################
# atoms__o.write(
# os.path.join(dir_path, "atoms__o.traj"))
# atoms__o.write(
# os.path.join(dir_path, "atoms__o.cif"))
# atoms__bare.write(
# os.path.join(dir_path, "atoms__bare.traj"))
# atoms__bare.write(
# os.path.join(dir_path, "atoms__bare.cif"))
if oh_exists:
atoms__oh.write(
os.path.join(dir_path, file_name_i + ".cif"))
# os.path.join(dir_path, "atoms__oh.traj"))
# atoms__oh.write(
# os.path.join(dir_path, "atoms__oh.cif"))
# #########################################################
print(20 * "# # ")
print("All done!")
print("Run time:", np.round((time.time() - ti) / 60, 3), "min")
print("write_oer_sets.ipynb")
print(20 * "# # ")
# #########################################################
```
```
# import os
# print(os.getcwd())
# import sys
# import pickle
# pd.set_option('display.max_columns', None)
# # pd.set_option('display.max_rows', None)
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/jantic/DeOldify/blob/master/ImageColorizerColabStable.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
### **<font color='blue'> Stable Colorizer </font>**
#◢ DeOldify - Colorize your own photos!
####**Credits:**
Special thanks to:
Matt Robinson and María Benavente for pioneering the DeOldify image colab notebook.
Dana Kelley for doing things, breaking stuff & having an opinion on everything.
---
#◢ Verify Correct Runtime Settings
**<font color='#FF000'> IMPORTANT </font>**
In the "Runtime" menu for the notebook window, select "Change runtime type." Ensure that the following are selected:
* Runtime Type = Python 3
* Hardware Accelerator = GPU
#◢ Git clone and install DeOldify
```
!git clone https://github.com/jantic/DeOldify.git DeOldify
cd DeOldify
```
#◢ Setup
```
#NOTE: This must be the first call in order to work properly!
from deoldify import device
from deoldify.device_id import DeviceId
#choices: CPU, GPU0...GPU7
device.set(device=DeviceId.GPU0)
import torch
if not torch.cuda.is_available():
print('GPU not available.')
!pip install -r colab_requirements.txt
import fastai
from deoldify.visualize import *
torch.backends.cudnn.benchmark = True
!mkdir 'models'
!wget https://www.dropbox.com/s/mwjep3vyqk5mkjc/ColorizeStable_gen.pth?dl=0 -O ./models/ColorizeStable_gen.pth
!wget https://media.githubusercontent.com/media/jantic/DeOldify/master/resource_images/watermark.png -O ./resource_images/watermark.png
colorizer = get_image_colorizer(artistic=False)
```
#◢ Instructions
### source_url
Type in a url to a direct link of an image. Usually that means they'll end in .png, .jpg, etc. NOTE: If you want to use your own image, upload it first to a site like Imgur.
### render_factor
The default value of 35 has been carefully chosen and should work -ok- for most scenarios (but probably won't be the -best-). This determines resolution at which the color portion of the image is rendered. Lower resolution will render faster, and colors also tend to look more vibrant. Older and lower quality images in particular will generally benefit by lowering the render factor. Higher render factors are often better for higher quality images, but the colors may get slightly washed out.
### watermarked
Selected by default, this places a watermark icon of a palette at the bottom left corner of the image. This is intended to be a standard way to convey to others viewing the image that it is colorized by AI. We want to help promote this as a standard, especially as the technology continues to improve and the distinction between real and fake becomes harder to discern. This palette watermark practice was initiated and led by the company MyHeritage in the MyHeritage In Color feature (which uses a newer version of DeOldify than what you're using here).
#### How to Download a Copy
Simply right click on the displayed image and click "Save image as..."!
## Pro Tips
You can evaluate how well the image is rendered at each render_factor by using the code at the bottom (that cell under "See how well render_factor values perform on a frame here").
## Troubleshooting
If you get a 'CUDA out of memory' error, you probably have the render_factor too high.
#◢ Colorize!!
```
source_url = '' #@param {type:"string"}
render_factor = 35 #@param {type: "slider", min: 7, max: 45}
watermarked = True #@param {type:"boolean"}
if source_url is not None and source_url !='':
image_path = colorizer.plot_transformed_image_from_url(url=source_url, render_factor=render_factor, compare=True, watermarked=watermarked)
show_image_in_notebook(image_path)
else:
print('Provide an image url and try again.')
```
## See how well render_factor values perform on the image here
```
for i in range(10,45,2):
colorizer.plot_transformed_image('test_images/image.png', render_factor=i, display_render_factor=True, figsize=(8,8))
```
---
#⚙ Recommended image sources
* [/r/TheWayWeWere](https://www.reddit.com/r/TheWayWeWere/)
|
github_jupyter
|
```
# testing scRFE
pip list
from scRFE import scRFE
from scRFE import scRFEimplot
from scRFE.scRFE import makeOneForest
import numpy as np
import pandas as pd
from anndata import read_h5ad
adata = read_h5ad('/Users/madelinepark/Downloads/Liver_droplet.h5ad')
madeForest = makeOneForest(dataMatrix=adata, classOfInterest='age', labelOfInterest='3m', nEstimators=10,
randomState=0, min_cells=15, keep_small_categories=True,
nJobs=-1, oobScore=True, Step=0.2, Cv=3, verbosity=True)
type(madeForest[4])
from scRFE.scRFE import scRFEimplot
scRFEimplot(X_new=madeForest[3], y = madeForest[4])
from scRFE.scRFE import scRFE
from scRFE.scRFE import scRFEimplot
from scRFE.scRFE import makeOneForest
scRFE(adata, classOfInterest = 'age', nEstimators = 10, Cv = 3)
```
# scRFE
```
# Imports
import numpy as np
import pandas as pd
import scanpy as sc
import random
from anndata import read_h5ad
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import accuracy_score
from sklearn.feature_selection import RFE
from sklearn.feature_selection import RFECV
import seaborn as sns
import matplotlib.pyplot as plt
import scanpy.external as sce
import logging as logg
adata = read_h5ad('/Users/madelinepark/Downloads/Liver_droplet.h5ad')
def columnToString (dataMatrix):
cat_columns = dataMatrix.obs.select_dtypes(['category']).columns
dataMatrix.obs[cat_columns] = dataMatrix.obs[cat_columns].astype(str)
return dataMatrix
def filterNormalize (dataMatrix, classOfInterest, verbosity):
np.random.seed(644685)
# sc.pp.filter_cells(dataMatrix, min_genes=0)
# sc.pp.filter_genes(dataMatrix, min_cells=0)
dataMatrix = dataMatrix[dataMatrix.obs[classOfInterest]!='nan']
dataMatrix = dataMatrix[~dataMatrix.obs[classOfInterest].isna()]
if verbosity == True:
print ('na data removed')
return dataMatrix
filterNormalize(dataMatrix = adata, classOfInterest = 'age', verbosity = True)
def labelSplit (dataMatrix, classOfInterest, labelOfInterest, verbosity):
dataMatrix = filterNormalize (dataMatrix, classOfInterest, verbosity)
dataMatrix.obs['classification_group'] = 'B'
dataMatrix.obs.loc[dataMatrix.obs[dataMatrix.obs[classOfInterest]==labelOfInterest]
.index,'classification_group'] = 'A' #make labels based on A/B of
# classofInterest
return dataMatrix
def downsampleToSmallestCategory(dataMatrix, random_state, min_cells,
keep_small_categories, verbosity,
classOfInterest = 'classification_group'
) -> sc.AnnData:
"""
returns an annData object in which all categories in 'classOfInterest' have
the same size
classOfInterest
column with the categories to downsample
min_cells
Minimum number of cells to downsample.
Categories having less than `min_cells` are discarded unless
keep_small_categories is True
keep_small_categories
        By default categories with less than min_cells are discarded.
Set to true to keep them
"""
counts = dataMatrix.obs[classOfInterest].value_counts(sort=False)
if len(counts[counts < min_cells]) > 0 and keep_small_categories is False:
logg.warning(
"The following categories have less than {} cells and will be "
"ignored: {}".format(min_cells, dict(counts[counts < min_cells]))
)
min_size = min(counts[counts >= min_cells])
sample_selection = None
for sample, num_cells in counts.items():
if num_cells <= min_cells:
if keep_small_categories:
sel = dataMatrix.obs.index.isin(
dataMatrix.obs[dataMatrix.obs[classOfInterest] == sample].index)
else:
continue
else:
sel = dataMatrix.obs.index.isin(
dataMatrix.obs[dataMatrix.obs[classOfInterest] == sample]
.sample(min_size, random_state=random_state)
.index
)
if sample_selection is None:
sample_selection = sel
else:
sample_selection |= sel
logg.info(
"The cells in category {!r} had been down-sampled to have each {} cells. "
"The original counts where {}".format(classOfInterest, min_size, dict(counts))
)
return dataMatrix[sample_selection].copy()
def makeOneForest (dataMatrix, classOfInterest, labelOfInterest, nEstimators,
randomState, min_cells, keep_small_categories,
nJobs, oobScore, Step, Cv, verbosity):
"""
Builds and runs a random forest for one label in a class of interest
Parameters
----------
dataMatrix : anndata object
The data file of interest
classOfInterest : str
The class you will split the data by in the set of dataMatrix.obs
labelOfInterest : str
        The specific label within the class that the random forest will run a
"one vs all" classification on
nEstimators : int
The number of trees in the forest
randomState : int
Controls random number being used
nJobs : int
The number of jobs to run in parallel
oobScore : bool
Whether to use out-of-bag samples to estimate the generalization accuracy
Step : float
Corresponds to percentage of features to remove at each iteration
Cv : int
Determines the cross-validation splitting strategy
Returns
-------
feature_selected : list
list of top features from random forest
selector.estimator_.feature_importances_ : list
list of top ginis corresponding to to features
"""
splitDataMatrix = labelSplit (dataMatrix, classOfInterest, labelOfInterest, verbosity)
downsampledMatrix = downsampleToSmallestCategory (dataMatrix = splitDataMatrix,
random_state = randomState, min_cells = min_cells,
keep_small_categories = keep_small_categories, verbosity = verbosity,
classOfInterest = 'classification_group', )
feat_labels = downsampledMatrix.var_names
X = downsampledMatrix.X
y = downsampledMatrix.obs['classification_group'] #'A' or 'B' labels from labelSplit
clf = RandomForestClassifier(n_estimators = nEstimators, random_state = randomState,
n_jobs = nJobs, oob_score = oobScore)
selector = RFECV(clf, step = Step, cv = Cv)
clf.fit(X, y)
selector.fit(X, y)
feature_selected = feat_labels[selector.support_]
dataMatrix.obs['classification_group'] = 'B'
return feature_selected, selector.estimator_.feature_importances_
def resultWrite (classOfInterest, results_df, labelOfInterest,
feature_selected, feature_importance):
column_headings = []
column_headings.append(labelOfInterest)
column_headings.append(labelOfInterest + '_gini')
resaux = pd.DataFrame(columns = column_headings)
resaux[labelOfInterest] = feature_selected
resaux[labelOfInterest + '_gini'] = feature_importance
resaux = resaux.sort_values(by = [labelOfInterest + '_gini'], ascending = False)
resaux.reset_index(drop = True, inplace = True)
results_df = pd.concat([results_df, resaux], axis=1)
return results_df
def scRFE (adata, classOfInterest, nEstimators = 5000, randomState = 0, min_cells = 15,
keep_small_categories = True, nJobs = -1, oobScore = True, Step = 0.2, Cv = 5,
verbosity = True):
"""
Builds and runs a random forest with one vs all classification for each label
for one class of interest
Parameters
----------
adata : anndata object
The data file of interest
classOfInterest : str
The class you will split the data by in the set of dataMatrix.obs
nEstimators : int
The number of trees in the forest
randomState : int
Controls random number being used
min_cells : int
Minimum number of cells in a given class to downsample.
keep_small_categories : bool
Whether to keep classes with small number of observations, or to remove.
nJobs : int
The number of jobs to run in parallel
oobScore : bool
Whether to use out-of-bag samples to estimate the generalization accuracy
Step : float
Corresponds to percentage of features to remove at each iteration
Cv : int
Determines the cross-validation splitting strategy
Returns
-------
results_df : pd.DataFrame
Dataframe with results for each label in the class, formatted as
"label" for one column, then "label + gini" for the corresponding column
"""
dataMatrix = adata.copy()
dataMatrix = columnToString (dataMatrix)
dataMatrix = filterNormalize (dataMatrix, classOfInterest, verbosity)
results_df = pd.DataFrame()
for labelOfInterest in np.unique(dataMatrix.obs[classOfInterest]):
dataMatrix_labelOfInterest = dataMatrix.copy()
feature_selected, feature_importance = makeOneForest(
dataMatrix = dataMatrix_labelOfInterest, classOfInterest = classOfInterest,
labelOfInterest = labelOfInterest,
nEstimators = nEstimators, randomState = randomState, min_cells = min_cells,
keep_small_categories = keep_small_categories, nJobs = nJobs,
oobScore = oobScore, Step = Step, Cv = Cv, verbosity=verbosity)
results_df = resultWrite (classOfInterest, results_df,
labelOfInterest = labelOfInterest,
feature_selected = feature_selected,
feature_importance = feature_importance)
return results_df
adata = read_h5ad('/Users/madelinepark/Downloads/Liver_droplet.h5ad')
scRFE (adata, classOfInterest = 'age', nEstimators = 10, Cv = 3)
import logging
logging.info('%s before you %s', 'Look', 'leap!')
def logprint (verbosity):
if verbosity == True:
print('hi')
logprint(verbosity=True)
```
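Each label in the class gets one column of selected features plus a paired `_gini` column in the returned DataFrame. A minimal sketch of pulling out the top-ranked features for a single label, assuming the call above and that `'3m'` is one of the age labels (as in the earlier `makeOneForest` test):
```
results_df = scRFE(adata, classOfInterest='age', nEstimators=10, Cv=3)
label = '3m'  # adjust to a label actually present in adata.obs['age']
# Columns come pre-sorted by gini, so head() gives the top-ranked features
top_features = results_df[[label, label + '_gini']].dropna().head(10)
print(top_features)
```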
|
github_jupyter
|
```
import pickle
PIK = 'data/sirt6/final/20191217_m87e_counts.pkl'
with open(PIK, 'rb') as f:
m87e_clobs = pickle.load(f)
m87e_clobs
import pandas as pd
def extract_panda(clob_list):
dictlist = []
for i in range(len(clob_list)):
dictlist += [clob_list[i].to_dict()]
DF = pd.DataFrame(dictlist)
return DF
m87e_clobs[1].to_dict()
m87df = extract_panda(m87e_clobs)
m87df
check = ['ctx', 'hip']
for idx, row in m87df.iterrows():
for c in check:
if c in row['name']:
            m87df.loc[idx, 'brain_loc'] = c
m87df
tn = m87df.name[20]
tn
'ctx' in tn
m87df.brain_loc[20]
ctx_m87df = m87df[m87df.brain_loc == 'ctx']
hip_m87df = m87df[m87df.brain_loc == 'hip']
ctx_m87df
ctx_m87_mean_cells_per_um = ctx_m87df.cells_per_area.mean()
hip_m87_mean_cells_per_um = hip_m87df.cells_per_area.mean()
print('87E CTX mean cells per um:', ctx_m87_mean_cells_per_um)
print('87E HIP mean cells per um:', hip_m87_mean_cells_per_um)
ctx_m87_mean_cells_per_mm2 = ctx_m87_mean_cells_per_um * 10e5
hip_m87_mean_cells_per_mm2 = hip_m87_mean_cells_per_um * 10e5
print('87E CTX mean cells per mm2:', ctx_m87_mean_cells_per_mm2)
print('87E HIP mean cells per mm2:', hip_m87_mean_cells_per_mm2)
PIK = 'data/sirt6/final/20191217_m91e_counts.pik'
with open(PIK, 'rb') as f:
m91e_clobs = pickle.load(f)
m91df = extract_panda(m91e_clobs)
m91df
m91e_clobs[1].to_dict()
check = ['Ctx', 'Hip']
for idx, row in m91df.iterrows():
for c in check:
if c in row['name']:
            m91df.loc[idx, 'brain_loc'] = c
m91df
ctx_m91df = m91df[m91df.brain_loc == 'Ctx']
hip_m91df = m91df[m91df.brain_loc == 'Hip']
ctx_m91_mean_cells_per_um = ctx_m91df.cells_per_area.mean()
hip_m91_mean_cells_per_um = hip_m91df.cells_per_area.mean()
print('91E CTX mean cells per um:', ctx_m91_mean_cells_per_um)
print('91E HIP mean cells per um:', hip_m91_mean_cells_per_um)
ctx_m91_mean_cells_per_mm2 = ctx_m91_mean_cells_per_um * 10e5
hip_m91_mean_cells_per_mm2 = hip_m91_mean_cells_per_um * 10e5
print('91E CTX mean cells per mm2:', ctx_m91_mean_cells_per_mm2)
print('91E HIP mean cells per mm2:', hip_m91_mean_cells_per_mm2)
print('87E CTX mean cells per mm2:', ctx_m87_mean_cells_per_mm2)
print('87E HIP mean cells per mm2:', hip_m87_mean_cells_per_mm2)
print('91E CTX mean cells per mm2:', ctx_m91_mean_cells_per_mm2)
print('91E HIP mean cells per mm2:', hip_m91_mean_cells_per_mm2)
ctx_m87_sd = ctx_m87df.cells_per_area.std() * 10e5
hip_m87_sd = hip_m87df.cells_per_area.std() * 10e5
ctx_m91_sd = ctx_m91df.cells_per_area.std() * 10e5
hip_m91_sd = hip_m91df.cells_per_area.std() * 10e5
print('87E CTX mean cells per mm2:', ctx_m87_mean_cells_per_mm2)
print('87E CTX std cells per mm2: ', ctx_m87_sd)
print('')
print('91E CTX mean cells per mm2:', ctx_m91_mean_cells_per_mm2)
print('91E CTX std cells per mm2: ', ctx_m91_sd)
print('')
print('')
print('87E HIP mean cells per mm2:', hip_m87_mean_cells_per_mm2)
print('87E HIP std cells per mm2: ', hip_m87_sd)
print('')
print('91E HIP mean cells per mm2:', hip_m91_mean_cells_per_mm2)
print('91E HIP std cells per mm2: ', hip_m91_sd)
print('')
m87e_ctx = ctx_m87df.cells_per_area * 10e5
m87e_hip = hip_m87df.cells_per_area * 10e5
m91e_ctx = ctx_m91df.cells_per_area * 10e5
m91e_hip = hip_m91df.cells_per_area * 10e5
combined = pd.DataFrame({'m87e_ctx': m87e_ctx.reset_index(drop=True),
                         'm87e_hip': m87e_hip.reset_index(drop=True),
                         'm91e_ctx': m91e_ctx.reset_index(drop=True),
                         'm91e_hip': m91e_hip.reset_index(drop=True)})
combined
m91e_ctx.plot.box()
m91e_hip.plot.box()
import matplotlib.pyplot as plt
data_to_plot = [m87e_ctx, m87e_hip, m91e_ctx, m91e_hip]
# Create a figure instance
fig = plt.figure(1, figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# Create the boxplot
bp = ax.boxplot(data_to_plot)
# Save the figure
fig.savefig('fig1.png', bbox_inches='tight')
ax.set_xticklabels(['Control Cortex', 'Control Hip', 'S6cKO Cortex', 'S6cKO Hip'])
ax.set_ylabel('Average Cell Count per mm^2')
ax.set_title('Average Cell Counts Per mm^2 in Sirt6cKO vs Littermate Control', fontsize = 12, fontweight = 'bold')
```
|
github_jupyter
|
# Performing the Hyperparameter tuning
**Learning Objectives**
1. Learn how to use `cloudml-hypertune` to report the results for Cloud hyperparameter tuning trial runs
2. Learn how to configure the `.yaml` file for submitting a Cloud hyperparameter tuning job
3. Submit a hyperparameter tuning job to Cloud AI Platform
## Introduction
Let's see if we can improve upon that by tuning our hyperparameters.
Hyperparameters are parameters that are set *prior* to training a model, as opposed to parameters which are learned *during* training.
These include learning rate and batch size, but also model design parameters such as type of activation function and number of hidden units.
Here are the four most common ways to find the ideal hyperparameters:
1. Manual
2. Grid Search
3. Random Search
4. Bayesian Optimization
**1. Manual**
Traditionally, hyperparameter tuning is a manual trial-and-error process. A data scientist has some intuition about suitable hyperparameters, uses them as a starting point, observes the result, and then uses that information to choose a new set of hyperparameters in an attempt to beat the existing performance.
Pros
- Educational, builds up your intuition as a data scientist
- Inexpensive because only one trial is conducted at a time
Cons
- Requires a lot of time and patience
**2. Grid Search**
On the other extreme we can use grid search. Define a discrete set of values to try for each hyperparameter then try every possible combination.
Pros
- Can run hundreds of trials in parallel using the cloud
- Guaranteed to find the best solution within the search space
Cons
- Expensive
**3. Random Search**
Alternatively, define a range for each hyperparameter (e.g. 0-256) and sample uniformly at random from that range.
Pros
- Can run hundreds of trials in parallel using the cloud
- Requires fewer trials than Grid Search to find a good solution
Cons
- Expensive (but less so than Grid Search)
**4. Bayesian Optimization**
Unlike Grid Search and Random Search, Bayesian Optimization takes into account information from past trials to select parameters for future trials. The details of how this is done are beyond the scope of this notebook, but if you're interested you can read about how it works [here](https://cloud.google.com/blog/products/gcp/hyperparameter-tuning-cloud-machine-learning-engine-using-bayesian-optimization).
Pros
- Picks values intelligently based on results from past trials
- Less expensive because requires fewer trials to get a good result
Cons
- Requires sequential trials for best results, takes longer
**AI Platform HyperTune**
AI Platform HyperTune, powered by [Google Vizier](https://ai.google/research/pubs/pub46180), uses Bayesian Optimization by default, but [also supports](https://cloud.google.com/ml-engine/docs/tensorflow/hyperparameter-tuning-overview#search_algorithms) Grid Search and Random Search.
When tuning just a few hyperparameters (say, fewer than 4), Grid Search and Random Search work well, but when tuning several hyperparameters over a large search space, Bayesian Optimization is best.
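Before moving on to the managed service, here is a purely illustrative sketch (not part of the AI Platform workflow that follows) of what a random search over two of the hyperparameters tuned later in this notebook could look like; `train_and_score` is a hypothetical stand-in for a single training trial that returns a validation RMSE:
```
import random

random.seed(0)
best = None
for trial in range(10):
    lr = 10 ** random.uniform(-4, -1)         # log-uniform sample in [1e-4, 1e-1]
    batch_size = random.choice([15, 30, 50])  # same discrete values used in hptuning_config.yaml below
    rmse = train_and_score(lr=lr, batch_size=batch_size)  # hypothetical helper: trains once, returns val RMSE
    if best is None or rmse < best[0]:
        best = (rmse, lr, batch_size)
print("Best (rmse, lr, batch_size):", best)
```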
```
# Use the chown command to change the ownership of the repository
!sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst
# Installing the latest version of the package
!pip install --user google-cloud-bigquery==1.25.0
```
**Note**: Restart your kernel to use updated packages.
Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage.
```
# Importing the necessary module
import os
from google.cloud import bigquery
# Change with your own bucket and project below:
BUCKET = "<BUCKET>"
PROJECT = "<PROJECT>"
REGION = "<YOUR REGION>"
OUTDIR = "gs://{bucket}/taxifare/data".format(bucket=BUCKET)
os.environ['BUCKET'] = BUCKET
os.environ['OUTDIR'] = OUTDIR
os.environ['PROJECT'] = PROJECT
os.environ['REGION'] = REGION
os.environ['TFVERSION'] = "2.6"
%%bash
# Setting up cloud SDK properties
gcloud config set project $PROJECT
gcloud config set compute/region $REGION
```
## Make code compatible with AI Platform Training Service
In order to make our code compatible with AI Platform Training Service we need to make the following changes:
1. Upload data to Google Cloud Storage
2. Move code into a trainer Python package
3. Submit training job with `gcloud` to train on AI Platform
## Upload data to Google Cloud Storage (GCS)
Cloud services don't have access to our local files, so we need to upload them to a location the Cloud servers can read from. In this case we'll use GCS.
## Create BigQuery tables
If you have not already created a BigQuery dataset for our data, run the following cell:
```
bq = bigquery.Client(project = PROJECT)
dataset = bigquery.Dataset(bq.dataset("taxifare"))
# Creating a dataset
try:
bq.create_dataset(dataset)
print("Dataset created")
except:
print("Dataset already exists")
```
Let's create a table with 1 million examples.
Note that the order of columns is exactly what was in our CSV files.
```
%%bigquery
CREATE OR REPLACE TABLE taxifare.feateng_training_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 1000)) = 1
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
```
Make the validation dataset 1/10 the size of the training dataset.
```
%%bigquery
CREATE OR REPLACE TABLE taxifare.feateng_valid_data AS
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
```
## Export the tables as CSV files
```
%%bash
echo "Deleting current contents of $OUTDIR"
gsutil -m -q rm -rf $OUTDIR
echo "Extracting training data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_training_data \
$OUTDIR/taxi-train-*.csv
echo "Extracting validation data to $OUTDIR"
bq --location=US extract \
--destination_format CSV \
--field_delimiter "," --noprint_header \
taxifare.feateng_valid_data \
$OUTDIR/taxi-valid-*.csv
# List the files of the bucket
gsutil ls -l $OUTDIR
# Preview the first two rows of one of the exported CSV files (no header row was written)
!gsutil cat gs://$BUCKET/taxifare/data/taxi-train-000000000000.csv | head -2
```
If all ran smoothly, you should be able to list the data bucket by running the following command:
```
# List the files of the bucket
!gsutil ls gs://$BUCKET/taxifare/data
```
## Move code into python package
Here, we moved our code into a python package for training on Cloud AI Platform. Let's just check that the files are there. You should see the following files in the `taxifare/trainer` directory:
- `__init__.py`
- `model.py`
- `task.py`
```
# It will list all the files in the mentioned directory with a long listing format
!ls -la taxifare/trainer
```
To use hyperparameter tuning in your training job you must perform the following steps:
1. Specify the hyperparameter tuning configuration for your training job by including a HyperparameterSpec in your TrainingInput object.
2. Include the following code in your training application:
- Parse the command-line arguments representing the hyperparameters you want to tune, and use the values to set the hyperparameters for your training trial.
- Add your hyperparameter metric to the summary for your graph.
- To submit a hyperparameter tuning job, we must modify `model.py` and `task.py` to expose any variables we want to tune as command line arguments.
### Modify model.py
```
%%writefile ./taxifare/trainer/model.py
# Importing the necessary modules
import datetime
import hypertune
import logging
import os
import shutil
import numpy as np
import tensorflow as tf
from tensorflow.keras import activations
from tensorflow.keras import callbacks
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow import feature_column as fc
logging.info(tf.version.VERSION)
CSV_COLUMNS = [
'fare_amount',
'pickup_datetime',
'pickup_longitude',
'pickup_latitude',
'dropoff_longitude',
'dropoff_latitude',
'passenger_count',
'key',
]
LABEL_COLUMN = 'fare_amount'
DEFAULTS = [[0.0], ['na'], [0.0], [0.0], [0.0], [0.0], [0.0], ['na']]
DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
# Splits features and labels from feature dictionary
def features_and_labels(row_data):
for unwanted_col in ['key']:
row_data.pop(unwanted_col)
label = row_data.pop(LABEL_COLUMN)
return row_data, label
# Loads dataset using the tf.data API from CSV files
def load_dataset(pattern, batch_size, num_repeat):
dataset = tf.data.experimental.make_csv_dataset(
file_pattern=pattern,
batch_size=batch_size,
column_names=CSV_COLUMNS,
column_defaults=DEFAULTS,
num_epochs=num_repeat,
)
return dataset.map(features_and_labels)
# Prefetch overlaps the preprocessing and model execution of a training step
def create_train_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=None)
return dataset.prefetch(1)
def create_eval_dataset(pattern, batch_size):
dataset = load_dataset(pattern, batch_size, num_repeat=1)
return dataset.prefetch(1)
# Parse a string and return a datetime.datetime
def parse_datetime(s):
if type(s) is not str:
s = s.numpy().decode('utf-8')
return datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
# Here, tf.sqrt Computes element-wise square root of the input tensor
def euclidean(params):
lon1, lat1, lon2, lat2 = params
londiff = lon2 - lon1
latdiff = lat2 - lat1
return tf.sqrt(londiff*londiff + latdiff*latdiff)
# Timestamp.weekday() function return the day of the week represented by the date in the given Timestamp object
def get_dayofweek(s):
ts = parse_datetime(s)
return DAYS[ts.weekday()]
# It wraps a python function into a TensorFlow op that executes it eagerly
@tf.function
def dayofweek(ts_in):
return tf.map_fn(
lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
ts_in
)
def transform(inputs, NUMERIC_COLS, STRING_COLS, nbuckets):
# Pass-through columns
transformed = inputs.copy()
del transformed['pickup_datetime']
feature_columns = {
colname: fc.numeric_column(colname)
for colname in NUMERIC_COLS
}
    # Scaling longitude from range [-78, -70] to [0, 1]
for lon_col in ['pickup_longitude', 'dropoff_longitude']:
transformed[lon_col] = layers.Lambda(
lambda x: (x + 78)/8.0,
name='scale_{}'.format(lon_col)
)(inputs[lon_col])
# Scaling latitude from range [37, 45] to [0, 1]
for lat_col in ['pickup_latitude', 'dropoff_latitude']:
transformed[lat_col] = layers.Lambda(
lambda x: (x - 37)/8.0,
name='scale_{}'.format(lat_col)
)(inputs[lat_col])
# Adding Euclidean dist (no need to be accurate: NN will calibrate it)
transformed['euclidean'] = layers.Lambda(euclidean, name='euclidean')([
inputs['pickup_longitude'],
inputs['pickup_latitude'],
inputs['dropoff_longitude'],
inputs['dropoff_latitude']
])
feature_columns['euclidean'] = fc.numeric_column('euclidean')
# hour of day from timestamp of form '2010-02-08 09:17:00+00:00'
transformed['hourofday'] = layers.Lambda(
lambda x: tf.strings.to_number(
tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
name='hourofday'
)(inputs['pickup_datetime'])
feature_columns['hourofday'] = fc.indicator_column(
fc.categorical_column_with_identity(
'hourofday', num_buckets=24))
latbuckets = np.linspace(0, 1, nbuckets).tolist()
lonbuckets = np.linspace(0, 1, nbuckets).tolist()
b_plat = fc.bucketized_column(
feature_columns['pickup_latitude'], latbuckets)
b_dlat = fc.bucketized_column(
feature_columns['dropoff_latitude'], latbuckets)
b_plon = fc.bucketized_column(
feature_columns['pickup_longitude'], lonbuckets)
b_dlon = fc.bucketized_column(
feature_columns['dropoff_longitude'], lonbuckets)
ploc = fc.crossed_column(
[b_plat, b_plon], nbuckets * nbuckets)
dloc = fc.crossed_column(
[b_dlat, b_dlon], nbuckets * nbuckets)
pd_pair = fc.crossed_column([ploc, dloc], nbuckets ** 4)
feature_columns['pickup_and_dropoff'] = fc.embedding_column(
pd_pair, 100)
return transformed, feature_columns
# Here, tf.sqrt Computes element-wise square root of the input tensor
def rmse(y_true, y_pred):
return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))
def build_dnn_model(nbuckets, nnsize, lr):
# input layer is all float except for pickup_datetime which is a string
STRING_COLS = ['pickup_datetime']
NUMERIC_COLS = (
set(CSV_COLUMNS) - set([LABEL_COLUMN, 'key']) - set(STRING_COLS)
)
inputs = {
colname: layers.Input(name=colname, shape=(), dtype='float32')
for colname in NUMERIC_COLS
}
inputs.update({
colname: layers.Input(name=colname, shape=(), dtype='string')
for colname in STRING_COLS
})
# transforms
transformed, feature_columns = transform(
inputs, NUMERIC_COLS, STRING_COLS, nbuckets=nbuckets)
dnn_inputs = layers.DenseFeatures(feature_columns.values())(transformed)
x = dnn_inputs
for layer, nodes in enumerate(nnsize):
x = layers.Dense(nodes, activation='relu', name='h{}'.format(layer))(x)
output = layers.Dense(1, name='fare')(x)
model = models.Model(inputs, output)
lr_optimizer = tf.keras.optimizers.Adam(learning_rate=lr)
model.compile(optimizer=lr_optimizer, loss='mse', metrics=[rmse, 'mse'])
return model
# Define train and evaluate method to evaluate performance of the model
def train_and_evaluate(hparams):
batch_size = hparams['batch_size']
eval_data_path = hparams['eval_data_path']
nnsize = hparams['nnsize']
nbuckets = hparams['nbuckets']
lr = hparams['lr']
num_evals = hparams['num_evals']
num_examples_to_train_on = hparams['num_examples_to_train_on']
output_dir = hparams['output_dir']
train_data_path = hparams['train_data_path']
if tf.io.gfile.exists(output_dir):
tf.io.gfile.rmtree(output_dir)
timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
savedmodel_dir = os.path.join(output_dir, 'savedmodel')
model_export_path = os.path.join(savedmodel_dir, timestamp)
checkpoint_path = os.path.join(output_dir, 'checkpoints')
tensorboard_path = os.path.join(output_dir, 'tensorboard')
dnn_model = build_dnn_model(nbuckets, nnsize, lr)
logging.info(dnn_model.summary())
trainds = create_train_dataset(train_data_path, batch_size)
evalds = create_eval_dataset(eval_data_path, batch_size)
steps_per_epoch = num_examples_to_train_on // (batch_size * num_evals)
checkpoint_cb = callbacks.ModelCheckpoint(checkpoint_path,
save_weights_only=True,
verbose=1)
tensorboard_cb = callbacks.TensorBoard(tensorboard_path,
histogram_freq=1)
history = dnn_model.fit(
trainds,
validation_data=evalds,
epochs=num_evals,
steps_per_epoch=max(1, steps_per_epoch),
verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch
callbacks=[checkpoint_cb, tensorboard_cb]
)
# Exporting the model with default serving function.
tf.saved_model.save(dnn_model, model_export_path)
# TODO 1
hp_metric = history.history['val_rmse'][num_evals-1]
# TODO 1
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='rmse',
metric_value=hp_metric,
global_step=num_evals
)
return history
```
### Modify task.py
```
%%writefile taxifare/trainer/task.py
# Importing the necessary module
import argparse
import json
import os
from trainer import model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--batch_size",
help = "Batch size for training steps",
type = int,
default = 32
)
parser.add_argument(
"--eval_data_path",
help = "GCS location pattern of eval files",
required = True
)
parser.add_argument(
"--nnsize",
help = "Hidden layer sizes (provide space-separated sizes)",
nargs = "+",
type = int,
default=[32, 8]
)
parser.add_argument(
"--nbuckets",
help = "Number of buckets to divide lat and lon with",
type = int,
default = 10
)
parser.add_argument(
"--lr",
help = "learning rate for optimizer",
type = float,
default = 0.001
)
parser.add_argument(
"--num_evals",
help = "Number of times to evaluate model on eval data training.",
type = int,
default = 5
)
parser.add_argument(
"--num_examples_to_train_on",
help = "Number of examples to train on.",
type = int,
default = 100
)
parser.add_argument(
"--output_dir",
help = "GCS location to write checkpoints and export models",
required = True
)
parser.add_argument(
"--train_data_path",
help = "GCS location pattern of train files containing eval URLs",
required = True
)
parser.add_argument(
"--job-dir",
help = "this model ignores this field, but it is required by gcloud",
default = "junk"
)
args, _ = parser.parse_known_args()
hparams = args.__dict__
hparams["output_dir"] = os.path.join(
hparams["output_dir"],
json.loads(
os.environ.get("TF_CONFIG", "{}")
).get("task", {}).get("trial", "")
)
print("output_dir", hparams["output_dir"])
model.train_and_evaluate(hparams)
```
### Create config.yaml file
Specify the hyperparameter tuning configuration for your training job
Create a HyperparameterSpec object to hold the hyperparameter tuning configuration for your training job, and add the HyperparameterSpec as the hyperparameters object in your TrainingInput object.
In your HyperparameterSpec, set the hyperparameterMetricTag to a value representing your chosen metric. If you don't specify a hyperparameterMetricTag, AI Platform Training looks for a metric with the name training/hptuning/metric. The following example shows how to create such a configuration, here for the metric named `rmse` that is reported from `model.py`:
```
%%writefile hptuning_config.yaml
# Setting parameters for hptuning_config.yaml
trainingInput:
scaleTier: BASIC
hyperparameters:
goal: MINIMIZE
maxTrials: 10 # TODO 2
maxParallelTrials: 2 # TODO 2
hyperparameterMetricTag: rmse # TODO 2
enableTrialEarlyStopping: True
params:
- parameterName: lr
# TODO 2
type: DOUBLE
minValue: 0.0001
maxValue: 0.1
scaleType: UNIT_LOG_SCALE
- parameterName: nbuckets
# TODO 2
type: INTEGER
minValue: 10
maxValue: 25
scaleType: UNIT_LINEAR_SCALE
- parameterName: batch_size
# TODO 2
type: DISCRETE
discreteValues:
- 15
- 30
- 50
```
#### Report your hyperparameter metric to AI Platform Training
The way to report your hyperparameter metric to the AI Platform Training service depends on whether you are using TensorFlow for training or not. It also depends on whether you are using a runtime version or a custom container for training.
We recommend that your training code reports your hyperparameter metric to AI Platform Training frequently in order to take advantage of early stopping.
**TensorFlow with a runtime version**
If you use an AI Platform Training runtime version and train with TensorFlow, then you can report your hyperparameter metric to AI Platform Training by writing the metric to a TensorFlow summary. Use one of the following functions.
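For example, with the `cloudml-hypertune` package the metric can be reported like this (a minimal sketch mirroring the call already used in `model.py` above; `final_val_rmse` is a placeholder for the metric value computed in your trial):
```
import hypertune

hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
    hyperparameter_metric_tag='rmse',  # must match hyperparameterMetricTag in hptuning_config.yaml
    metric_value=final_val_rmse,       # placeholder: the validation RMSE computed for this trial
    global_step=num_evals)
```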
You may need to install `cloudml-hypertune` on your machine to run this code locally.
```
# Installing the latest version of the package
!pip install cloudml-hypertune
```
Kindly ignore any version warnings related to the pip install command.
```
%%bash
# Testing our training code locally
EVAL_DATA_PATH=./taxifare/tests/data/taxi-valid*
TRAIN_DATA_PATH=./taxifare/tests/data/taxi-train*
OUTPUT_DIR=./taxifare-model
rm -rf ${OUTPUT_DIR}
export PYTHONPATH=${PYTHONPATH}:${PWD}/taxifare
python3 -m trainer.task \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTPUT_DIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size 5 \
--num_examples_to_train_on 100 \
--num_evals 1 \
--nbuckets 10 \
--lr 0.001 \
--nnsize 32 8
ls taxifare-model/tensorboard
```
The below hyperparameter training job step will take **up to 45 minutes** to complete.
```
%%bash
PROJECT_ID=$(gcloud config list project --format "value(core.project)")
BUCKET=$PROJECT_ID
REGION="us-central1"
TFVERSION="2.4"
# Output directory and jobID
OUTDIR=gs://${BUCKET}/taxifare/trained_model_$(date -u +%y%m%d_%H%M%S)
JOBID=taxifare_$(date -u +%y%m%d_%H%M%S)
echo ${OUTDIR} ${REGION} ${JOBID}
gsutil -m rm -rf ${OUTDIR}
# Model and training hyperparameters
BATCH_SIZE=15
NUM_EXAMPLES_TO_TRAIN_ON=100
NUM_EVALS=10
NBUCKETS=10
LR=0.001
NNSIZE="32 8"
# GCS paths
GCS_PROJECT_PATH=gs://$BUCKET/taxifare
DATA_PATH=$GCS_PROJECT_PATH/data
TRAIN_DATA_PATH=$DATA_PATH/taxi-train*
EVAL_DATA_PATH=$DATA_PATH/taxi-valid*
# TODO 3
gcloud ai-platform jobs submit training $JOBID \
--module-name=trainer.task \
--package-path=taxifare/trainer \
--staging-bucket=gs://${BUCKET} \
--config=hptuning_config.yaml \
--python-version=3.7 \
--runtime-version=${TFVERSION} \
--region=${REGION} \
-- \
--eval_data_path $EVAL_DATA_PATH \
--output_dir $OUTDIR \
--train_data_path $TRAIN_DATA_PATH \
--batch_size $BATCH_SIZE \
--num_examples_to_train_on $NUM_EXAMPLES_TO_TRAIN_ON \
--num_evals $NUM_EVALS \
--nbuckets $NBUCKETS \
--lr $LR \
--nnsize $NNSIZE
```
Copyright 2021 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
github_jupyter
|
## _*Using Qiskit Aqua for exact cover problems*_
In mathematics, given a collection $S$ of subsets of a set $X$, an exact cover is a subcollection $S_{ec} \subseteq S$ such that each element in $X$ is contained in exactly one subset of $S_{ec}$. For example, with $X = \{1, 2, 3, 4\}$ and $S = \{\{1, 2\}, \{3, 4\}, \{2, 3\}\}$, the subcollection $\{\{1, 2\}, \{3, 4\}\}$ is an exact cover.
We will go through three examples to show (1) how to run the optimization in the non-programming way, (2) how to run the optimization in the programming way, (3) how to run the optimization with the VQE.
We will omit the details for the support of CPLEX, which are explained in other notebooks such as maxcut.
### The problem and the brute-force method.
First, let us take a look at the list of subsets.
```
import numpy as np
import json
from qiskit import Aer
from qiskit_aqua import run_algorithm
from qiskit_aqua.input import EnergyInput
from qiskit_aqua.translators.ising import exactcover
from qiskit_aqua.algorithms import ExactEigensolver
input_file = 'sample.exactcover'
with open(input_file) as f:
list_of_subsets = json.load(f)
print(list_of_subsets)
qubitOp, offset = exactcover.get_exactcover_qubitops(list_of_subsets)
algo_input = EnergyInput(qubitOp)
```
Then we apply the brute-force method. Basically, we exhaustively try all the binary assignments. In each binary assignment, the entry of a subset is either 0 (meaning the subset is not in the cover) or 1 (meaning the subset is in the cover). We print the binary assignment that satisfies the definition of the exact cover.
```
def brute_force():
# brute-force way: try every possible assignment!
has_sol = False
def bitfield(n, L):
result = np.binary_repr(n, L)
return [int(digit) for digit in result] # [2:] to chop off the "0b" part
L = len(list_of_subsets)
max = 2**L
for i in range(max):
cur = bitfield(i, L)
cur_v = exactcover.check_solution_satisfiability(cur, list_of_subsets)
if cur_v:
has_sol = True
break
return has_sol, cur
has_sol, cur = brute_force()
if has_sol:
print("solution is", cur)
else:
print("no solution is found")
```
### Part I: run the optimization in the non-programming way
```
params = {
'problem': {'name': 'ising'},
'algorithm': {'name': 'ExactEigensolver'}
}
result = run_algorithm(params, algo_input)
x = exactcover.sample_most_likely(len(list_of_subsets), result['eigvecs'][0])
ising_sol = exactcover.get_solution(x)
np.testing.assert_array_equal(ising_sol, [0, 1, 1, 0])
if exactcover.check_solution_satisfiability(ising_sol, list_of_subsets):
print("solution is", ising_sol)
else:
print("no solution is found")
```
### Part II: run the optimization in the programming way
```
algo = ExactEigensolver(algo_input.qubit_op, k=1, aux_operators=[])
result = algo.run()
x = exactcover.sample_most_likely(len(list_of_subsets), result['eigvecs'][0])
ising_sol = exactcover.get_solution(x)
np.testing.assert_array_equal(ising_sol, [0, 1, 1, 0])
if exactcover.check_solution_satisfiability(ising_sol, list_of_subsets):
print("solution is", ising_sol)
else:
print("no solution is found")
```
### Part III: run the optimization with VQE
```
algorithm_cfg = {
'name': 'VQE',
'operator_mode': 'matrix'
}
optimizer_cfg = {
'name': 'COBYLA'
}
var_form_cfg = {
'name': 'RYRZ',
'depth': 5
}
params = {
'problem': {'name': 'ising', 'random_seed': 10598},
'algorithm': algorithm_cfg,
'optimizer': optimizer_cfg,
'variational_form': var_form_cfg
}
backend = Aer.get_backend('statevector_simulator')
result = run_algorithm(params, algo_input, backend=backend)
x = exactcover.sample_most_likely(len(list_of_subsets), result['eigvecs'][0])
ising_sol = exactcover.get_solution(x)
if exactcover.check_solution_satisfiability(ising_sol, list_of_subsets):
print("solution is", ising_sol)
else:
print("no solution is found")
```
|
github_jupyter
|
# Overlays
Spatial overlays allow you to compare two GeoDataFrames containing polygon or multipolygon geometries
and create a new GeoDataFrame with the new geometries representing the spatial combination *and*
merged properties. This allows you to answer questions like
> What are the demographics of the census tracts within 1000 ft of the highway?
The basic idea is demonstrated by the graphic below, but keep in mind that overlays operate at the dataframe level, not on individual geometries, and the properties from both are retained.

Now we can load up two GeoDataFrames containing (multi)polygon geometries...
```
%matplotlib inline
from shapely.geometry import Point
from geopandas import datasets, GeoDataFrame, read_file
from geopandas.tools import overlay
# NYC Boros
zippath = datasets.get_path('nybb')
polydf = read_file(zippath)
# Generate some circles
b = [int(x) for x in polydf.total_bounds]
N = 10
polydf2 = GeoDataFrame([
{'geometry': Point(x, y).buffer(10000), 'value1': x + y, 'value2': x - y}
for x, y in zip(range(b[0], b[2], int((b[2] - b[0]) / N)),
range(b[1], b[3], int((b[3] - b[1]) / N)))])
```
The first dataframe contains multipolygons of the NYC boros
```
polydf.plot()
```
And the second GeoDataFrame is a sequentially generated set of circles in the same geographic space. We'll plot these with a [different color palette](https://matplotlib.org/examples/color/colormaps_reference.html).
```
polydf2.plot(cmap='tab20b')
```
The `geopandas.tools.overlay` function takes three arguments:
* df1
* df2
* how
Where `how` can be one of:
['intersection',
'union',
'identity',
'symmetric_difference',
'difference']
So let's identify the areas (and attributes) where both dataframes intersect using the `overlay` method.
```
newdf = polydf.overlay(polydf2, how="intersection")
newdf.plot(cmap='tab20b')
```
And take a look at the attributes; we see that the attributes from both of the original GeoDataFrames are retained.
```
polydf.head()
polydf2.head()
newdf.head()
```
Now let's look at the other `how` operations:
```
newdf = polydf.overlay(polydf2, how="union")
newdf.plot(cmap='tab20b')
newdf = polydf.overlay(polydf2, how="identity")
newdf.plot(cmap='tab20b')
newdf = polydf.overlay(polydf2, how="symmetric_difference")
newdf.plot(cmap='tab20b')
newdf = polydf.overlay(polydf2, how="difference")
newdf.plot(cmap='tab20b')
```
|
github_jupyter
|
# Calling RES with Python in SPARK
## Pre-Requisite
* Python 3.5 for Spark
## Initializing Python environment with ODM Jars files and ODM Model archive
* Create a Spark Session
* Initialize the Python environment
```
from io import StringIO
import requests
import json
import pandas as pd
#from pyspark.sql import SQLContext
import os
import os
cwd = os.getcwd()
cwd = "/home/spark/shared/user-libs/spark2"
print(cwd)
# Download Material for Rule Exection
!curl -o {cwd}/miniloan-xom.jar https://raw.githubusercontent.com/ODMDev/decisions-on-spark/master/data/miniloan/miniloan-xom.jar
!curl -o {cwd}/miniloan-ruleapp.jar https://raw.githubusercontent.com/ODMDev/decisions-on-spark/master/data/miniloan/miniloan-ruleapp.jar
# Download ODM Library
!curl -o {cwd}/j2ee_connector-1_5-fr.jar http://xxx.xxx.xxx.xxx:xxxxx/download/lib/ODM8920/j2ee_connector-1_5-fr.jar
!curl -o {cwd}/jrules-engine.jar http://xxx.xxx.xxx.xxx:xxxxx/download/lib/ODM8920/jrules-engine.jar
!curl -o {cwd}/jrules-res-execution.jar http://xxx.xxx.xxx.xxx:xxxxx/download/lib/ODM8920/jrules-res-execution-memory.jar
os.environ['PYSPARK_SUBMIT_ARGS'] = "--jars local:"+cwd+"/miniloan-ruleapp.jar,local:"+cwd+"/miniloan-xom.jar,local:"+cwd+"/jrules-engine.jar,local:"+cwd+"/j2ee_connector-1_5-fr.jar,local:"+cwd+"/jrules-res-execution.jar pyspark-shell"
#import pyspark # only run after findspark.init()
#from pyspark.sql import SparkSession
# Create a Spark Session
#sc = SparkSession.builder.getOrCreate()
sc
```
## Load Dataset
```
# Create a SParkSQL Context to load the data in a dataframe
from pyspark.sql import SQLContext
sql = SQLContext(sc)
new_decisions_pd = pd.read_csv("https://raw.githubusercontent.com/ODMDev/decisions-on-spark/master/data/miniloan/miniloan-decisions-defaultly-1K.csv")
request_df = sql.createDataFrame(new_decisions_pd)
request_df.printSchema()
request_df.show(10)
from pyspark.sql import Row
from pyspark.sql import SparkSession
def CreateODMSession(sc):
if not hasattr(CreateODMSession, "fac"):
sc = SparkSession.builder.getOrCreate()
factoryConfig = sc._jvm.ilog.rules.res.session.IlrJ2SESessionFactory.createDefaultConfig()
xuConfig = factoryConfig.getXUConfig();
xuConfig.setLogAutoFlushEnabled(True);
xuConfig.getPersistenceConfig().setPersistenceType(sc._jvm.ilog.rules.res.session.config.IlrPersistenceType.MEMORY);
xuConfig.getManagedXOMPersistenceConfig().setPersistenceType(sc._jvm.ilog.rules.res.session.config.IlrPersistenceType.MEMORY);
CreateODMSession.fac=sc._jvm.ilog.rules.res.session.IlrJ2SESessionFactory(factoryConfig)
return CreateODMSession.fac
def execute (row):
sc = SparkSession.builder.getOrCreate()
factory=CreateODMSession(sc)
sessionRequest = factory.createRequest()
sessionRequest.setRulesetPath(sc._jvm.ilog.rules.res.model.IlrPath.parsePath("/miniloanruleapp/miniloanrules"))
# Ensure latest version of the ruleset is taken into account
sessionRequest.setForceUptodate(True)
# Set the input parameters for the execution of the rules
inputParameters = sc._jvm.java.util.HashMap()
borrower = sc._jvm.miniloan.Borrower(row.name, row.creditScore,row.income)
loan = sc._jvm.miniloan.Loan()
loan.setAmount(row.loanAmount)
loan.setDuration(row.monthDuration)
loan.setYearlyInterestRate(row.rate)
# Set parameters
inputParameters["loan"]=loan
inputParameters["borrower"]=borrower
sessionRequest.setInputParameters(inputParameters)
session = factory.createStatelessSession()
# Perform ODM execution
response = session.execute(sessionRequest)
col= response.getOutputParameters()
# for key in col:
# print (key, "corresponds to", col[key])
loanResult= response.getOutputParameters().get("loan")
return Row(isApproved=loanResult.isApproved(),firedRulesCount=col['ilog.rules.firedRulesCount'])
#execute("dd")
dfResult = request_df.rdd.map(execute).toDF()
#count= dfResult.count()
print("Execution fininsh")
#rddResult
# Count the nb of Loan approved
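# Illustrative follow-up (not in the original notebook): count the approved loans
# using the 'isApproved' column produced by execute() above.
approved_count = dfResult.filter(dfResult.isApproved == True).count()
print("Number of approved loans:", approved_count)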
```
# We can query execution Results
```
dfResult.createOrReplaceTempView("loan")
sql = SQLContext(sc)
sql.sql("SELECT isApproved FROM loan").show()
```
|
github_jupyter
|
Before you turn this problem in, make sure everything runs as expected. First, **restart the kernel** (in the menubar, select Kernel$\rightarrow$Restart) and then **run all cells** (in the menubar, select Cell$\rightarrow$Run All).
Make sure you fill in any place that says `YOUR CODE HERE` or "YOUR ANSWER HERE", as well as your name and collaborators below:
```
NAME = ""
COLLABORATORS = ""
```
---
<!--NOTEBOOK_HEADER-->
*This notebook contains material from [PyRosetta](https://RosettaCommons.github.io/PyRosetta.notebooks);
content is available [on Github](https://github.com/RosettaCommons/PyRosetta.notebooks.git).*
<!--NAVIGATION-->
< [Introduction to Folding](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.00-Introduction-to-Folding.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Low-Res Scoring and Fragments](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.02-Low-Res-Scoring-and-Fragments.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.01-Basic-Folding-Algorithm.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
# Basic Folding Algorithm
Keywords: pose_from_sequence(), random move, scoring move, Metropolis, assign(), Pose()
```
# Notebook setup
import sys
if 'google.colab' in sys.modules:
!pip install pyrosettacolabsetup
import pyrosettacolabsetup
pyrosettacolabsetup.setup()
print ("Notebook is set for PyRosetta use in Colab. Have fun!")
from pyrosetta import *
from pyrosetta.teaching import *
init()
```
## Building the Pose
In this workshop, you will be folding a 10 residue protein by building a simple de novo folding algorithm. Start by initializing PyRosetta as usual.
Create a simple poly-alanine `pose` with 10 residues for testing your folding algorithm. Store the pose in a variable called "polyA."
```
# YOUR CODE HERE
raise NotImplementedError()
polyA.pdb_info().name("polyA")
```
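If you get stuck, here is one possible sketch using `pose_from_sequence()` (one of the keywords listed above); treat it as an illustration rather than the official solution:
```
# Illustrative only: build a 10-residue poly-alanine pose.
polyA = pose_from_sequence('A' * 10)
```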
__Question:__
Check the backbone dihedrals of a few residues (except the first and last) using the `.phi()` and `.psi()` methods in `Pose`. What are the values of $\phi$ and $\psi$ dihedrals? You should see ideal bond lengths and angles, but the dihedrals may not be as realistic.
```
# YOUR CODE HERE
raise NotImplementedError()
```
OPTIONAL:
We may want to visualize folding as it happens. Before starting with the folding protocol, instantiate a PyMOL mover and use a UNIQUE port number between 10,000 and 65,535. We will retain history in order to view the entire folding process by utilizing the `.keep_history()` method. Make sure it says `PyMOL <---> PyRosetta link started!` on its command line.
```
pmm = PyMOLMover()
pmm.keep_history(True)
```
Use the PyMOL mover to view the `polyA` `Pose`. You should see a long thread-like structure in PyMOL.
```
pmm.apply(polyA)
```
## Building A Basic *de Novo* Folding Algorithm
Now, write a program that implements a Monte Carlo algorithm to optimize the protein conformation. You can do this here in the notebook, or you may use a code editor to write a `.py` file and execute in a Python or iPython shell.
Our main program will include 100 iterations of making a random trial move, scoring the protein, and accepting/rejecting the move. Therefore, we can break this algorithm down into three smaller subroutines: **random, score, and decision.**
### Step 1: Random Move
For the **random** trial move, write a subroutine to choose one residue at random using `random.randint()` and then randomly perturb either the φ or ψ angles by a random number chosen from a Gaussian distribution. Use the Python built-in function `random.gauss()` from the `random` library with a mean of the current angle and a standard deviation of 25°. After changing the torsion angle, use `pmm.apply(polyA)` to update the structure in PyMOL.
```
import math
import random
def randTrial(your_pose):
# YOUR CODE HERE
raise NotImplementedError()
return your_pose
```
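One possible sketch of the trial move, assuming the `pmm` PyMOL mover defined above is available (illustrative, not the official solution):
```
# Illustrative sketch: pick a random interior residue and perturb phi or psi.
def randTrial(your_pose):
    res = random.randint(2, your_pose.total_residue() - 1)  # skip the termini
    if random.random() < 0.5:
        your_pose.set_phi(res, random.gauss(your_pose.phi(res), 25))
    else:
        your_pose.set_psi(res, random.gauss(your_pose.psi(res), 25))
    pmm.apply(your_pose)  # update the structure in PyMOL
    return your_pose
```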
### Step 2: Scoring Move
For the **scoring** step, we need to create a scoring function and make a subroutine that simply returns the numerical energy score of the pose.
```
sfxn = get_fa_scorefxn()
def score(your_pose):
# YOUR CODE HERE
raise NotImplementedError()
```
### Step 3: Accepting/Rejecting Move
For the **decision** step, we need to make a subroutine that either accepts or rejects the new conformation based on the Metropolis criterion. When $\Delta E \geq 0$, the Metropolis criterion accepts the move with probability $P = \exp( -\Delta E / kT )$; when $\Delta E < 0$, the move is always accepted ($P = 1$). Use $kT = 1$ Rosetta Energy Unit (REU).
```
def decision(before_pose, after_pose):
# YOUR CODE HERE
raise NotImplementedError()
```
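A minimal sketch of the Metropolis decision, assuming the `score()` function from Step 2 (illustrative only, not the official solution):
```
# Illustrative sketch: accept or reject per the Metropolis criterion with kT = 1 REU.
def decision(before_pose, after_pose):
    delta_E = score(after_pose) - score(before_pose)
    if delta_E < 0 or random.uniform(0, 1) < math.exp(-delta_E / 1.0):
        return after_pose
    return before_pose
```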
### Step 4: Execution
Now we can put these three subroutines together in our main program! Write a loop in the main program so that it performs 100 iterations of: making a random trial move, scoring the protein, and accepting/rejecting the move.
After each iteration of the search, output the current pose energy and the lowest energy ever observed. **The final output of this program should be the lowest energy conformation that is achieved at *any* point during the simulation.** Be sure to use `low_pose.assign(pose)` rather than `low_pose = pose`, since the latter will only copy a pointer to the original pose.
```
def basic_folding(your_pose):
"""Your basic folding algorithm that completes 100 Monte-Carlo iterations on a given pose"""
lowest_pose = Pose() # Create an empty pose for tracking the lowest energy pose.
# YOUR CODE HERE
raise NotImplementedError()
return lowest_pose
```
Finally, output the last pose and the lowest-scoring pose observed and view them in PyMOL. Plot the energy and lowest-energy observed vs. cycle number. What are the energies of the initial, last, and lowest-scoring pose? Is your program working? Has it converged to a good solution?
```
basic_folding(polyA)
```
Here's an example of the PyMOL view:
```
from IPython.display import Image
Image('./Media/folding.gif',width='300')
```
### Exercise 1: Comparing to Alpha Helices
Using the program you wrote for Workshop #2, force the $A_{10}$ sequence into an ideal α-helix.
**Questions:** Does this helical structure have a lower score than that produced by your folding algorithm above? What does this mean about your sampling or discrimination?
### Exercise 2: Optimizing Algorithm
Since your program is a stochastic search algorithm, it may not produce an ideal structure consistently, so try running the simulation multiple times or with a different number of cycles (if necessary). Using a kT of 1, your program may need to make up to 500,000 iterations.
<!--NAVIGATION-->
< [Introduction to Folding](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.00-Introduction-to-Folding.ipynb) | [Contents](toc.ipynb) | [Index](index.ipynb) | [Low-Res Scoring and Fragments](http://nbviewer.jupyter.org/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.02-Low-Res-Scoring-and-Fragments.ipynb) ><p><a href="https://colab.research.google.com/github/RosettaCommons/PyRosetta.notebooks/blob/master/notebooks/04.01-Basic-Folding-Algorithm.ipynb"><img align="left" src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open in Colab" title="Open in Google Colaboratory"></a>
|
github_jupyter
|
##### Copyright 2020 The Cirq Developers
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Circuits
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/circuits"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
```
try:
import cirq
except ImportError:
print("installing cirq...")
!pip install --quiet cirq
print("installed cirq.")
```
## Conceptual overview
The primary representation of quantum programs in Cirq is the `Circuit` class. A `Circuit` is a collection of `Moments`. A `Moment` is a collection of `Operations` that all act during the same abstract time slice. An `Operation` is some effect that operates on a specific subset of qubits; the most common type of `Operation` is a `GateOperation`.

Let's unpack this.
At the base of this construction is the notion of a qubit. In Cirq, qubits and other quantum objects are identified by instances of subclasses of the `Qid` base class. Different subclasses of `Qid` can be used for different purposes. For example, the qubits that Google's Xmon devices use are often arranged on the vertices of a square grid. For this, the class `GridQubit` subclasses `Qid`. We can create a 3 by 3 grid of such qubits using:
```
import cirq
qubits = [cirq.GridQubit(x, y) for x in range(3) for y in range(3)]
print(qubits[0])
```
The next level up is the notion of a `Gate`. A `Gate` represents a physical process that occurs on a `Qubit`. The important property of a `Gate` is that it can be applied to one or more qubits. This can be done via the `Gate.on` method itself or via `()`, and doing this turns the `Gate` into a `GateOperation`.
```
# This is a Pauli X gate. It is an object instance.
x_gate = cirq.X
# Applying it to the qubit at location (0, 0) (defined above)
# turns it into an operation.
x_op = x_gate(qubits[0])
print(x_op)
```
A `Moment` is simply a collection of operations, each of which operates on a different set of qubits, and which conceptually represents these operations as occurring during this abstract time slice. The `Moment` structure itself is not required to be related to the actual scheduling of the operations on a quantum computer, or via a simulator, though it can be. For example, here is a `Moment` in which **Pauli** `X` and a `CZ` gate operate on three qubits:
```
cz = cirq.CZ(qubits[0], qubits[1])
x = cirq.X(qubits[2])
moment = cirq.Moment([x, cz])
print(moment)
```
The above is not the only way one can construct moments, nor even the typical method, but illustrates that a `Moment` is just a collection of operations on disjoint sets of qubits.
Finally, at the top level a `Circuit` is an ordered series of `Moment` objects. The first `Moment` in this series contains the first `Operations` that will be applied. Here, for example, is a simple circuit made up of two moments:
```
cz01 = cirq.CZ(qubits[0], qubits[1])
x2 = cirq.X(qubits[2])
cz12 = cirq.CZ(qubits[1], qubits[2])
moment0 = cirq.Moment([cz01, x2])
moment1 = cirq.Moment([cz12])
circuit = cirq.Circuit((moment0, moment1))
print(circuit)
```
Note that the above is one of the many ways to construct a `Circuit`, which illustrates the concept that a `Circuit` is an iterable of `Moment` objects.
## Constructing circuits
Constructing Circuits as a series of `Moment` objects, with each `Moment` being hand-crafted, is tedious. Instead, we provide a variety of different ways to create a `Circuit`.
One of the most useful ways to construct a `Circuit` is by appending onto the `Circuit` with the `Circuit.append` method.
```
from cirq.ops import CZ, H
q0, q1, q2 = [cirq.GridQubit(i, 0) for i in range(3)]
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1), H(q2)])
print(circuit)
```
This appended a new moment to the circuit, which we can continue to do:
```
circuit.append([H(q0), CZ(q1, q2)])
print(circuit)
```
In these two examples, we appended full moments. What happens when we append all of these operations at once?
```
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1), H(q2), H(q0), CZ(q1, q2)])
print(circuit)
```
We see that here we have again created two `Moment` objects. How did `Circuit` know how to do this? `Circuit`'s `Circuit.append` method (and its cousin, `Circuit.insert`) both take an argument called the `InsertStrategy`. By default, `InsertStrategy` is `InsertStrategy.NEW_THEN_INLINE`.
### InsertStrategies
`InsertStrategy` defines how `Operations` are placed in a `Circuit` when requested to be inserted at a given location. Here, a location is identified by the index of the `Moment` (in the `Circuit`) where the insertion is requested to be placed at (in the case of `Circuit.append`, this means inserting at the `Moment`, at an index one greater than the maximum moment index in the `Circuit`).
There are four such strategies: `InsertStrategy.EARLIEST`, `InsertStrategy.NEW`, `InsertStrategy.INLINE` and `InsertStrategy.NEW_THEN_INLINE`.
`InsertStrategy.EARLIEST` is defined as:
*Scans backward from the insert location until a moment with operations touching qubits affected by the operation to insert is found. The operation is added to the moment just after that location.*
For example, if we first create an `Operation` in a single moment and then use `InsertStrategy.EARLIEST`, the `Operation` can slide back into this first `Moment` if there is space:
```
from cirq.circuits import InsertStrategy
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1)])
circuit.append([H(q0), H(q2)], strategy=InsertStrategy.EARLIEST)
print(circuit)
```
After creating the first moment with a `CZ` gate, the second append uses the `InsertStrategy.EARLIEST` strategy. The `H` on `q0` cannot slide back, while the `H` on `q2` can and so ends up in the first `Moment`.
Contrast this with the `InsertStrategy.NEW` `InsertStrategy`:
*Every operation that is inserted is created in a new moment.*
```
circuit = cirq.Circuit()
circuit.append([H(q0), H(q1), H(q2)], strategy=InsertStrategy.NEW)
print(circuit)
```
Here every operation processed by the append ends up in a new moment. `InsertStrategy.NEW` is most useful when you are inserting a single operation and do not want it to interfere with other `Moments`.
Another strategy is `InsertStrategy.INLINE`:
*Attempts to add the operation to insert into the moment just before the desired insert location. But, if there’s already an existing operation affecting any of the qubits touched by the operation to insert, a new moment is created instead.*
```
circuit = cirq.Circuit()
circuit.append([CZ(q1, q2)])
circuit.append([CZ(q1, q2)])
circuit.append([H(q0), H(q1), H(q2)], strategy=InsertStrategy.INLINE)
print(circuit)
```
After two initial `CZ` between the second and third qubit, we try to insert three `H` `Operations`. We see that the `H` on the first qubit is inserted into the previous `Moment`, but the `H` on the second and third qubits cannot be inserted into the previous `Moment`, so a new `Moment` is created.
Finally, we turn to the default strategy:
*Creates a new moment at the desired insert location for the first operation, but then switches to inserting operations according to `InsertStrategy.INLINE`.*
```
circuit = cirq.Circuit()
circuit.append([H(q0)])
circuit.append([CZ(q1,q2), H(q0)], strategy=InsertStrategy.NEW_THEN_INLINE)
print(circuit)
```
The first append creates a single moment with an `H` on the first qubit. Then, the append with the `InsertStrategy.NEW_THEN_INLINE` strategy begins by inserting the `CZ` in a new `Moment` (the `InsertStrategy.NEW` part of `InsertStrategy.NEW_THEN_INLINE`). Subsequent appending is done with `InsertStrategy.INLINE`, so the next `H` on the first qubit is appended in that just-created `Moment`.
### Patterns for arguments to append and insert
In the above examples, we used a series of `Circuit.append` calls with a list of different `Operations` added to the circuit. However, the argument where we have supplied a list can also take more than just list values. For instance:
```
def my_layer():
yield CZ(q0, q1)
yield [H(q) for q in (q0, q1, q2)]
yield [CZ(q1, q2)]
yield [H(q0), [CZ(q1, q2)]]
circuit = cirq.Circuit()
circuit.append(my_layer())
for x in my_layer():
print(x)
print(circuit)
```
Recall that Python functions with a `yield` are generators. Generators are functions that act as iterators. In the above example, we see that we can iterate over `my_layer()`. In this case, each `yield` produces what was yielded, and here these are:
* `Operations`,
* lists of `Operations`,
* or lists of `Operations` mixed with lists of `Operations`.
When we pass an iterator to the `append` method, `Circuit` is able to flatten all of these and pass them as one giant list to `Circuit.append` (this also works for `Circuit.insert`).
The above idea uses the concept of `OP_TREE`. An `OP_TREE` is not a class, but a *contract*. The basic idea is that, if the input can be iteratively flattened into a list of operations, then the input is an `OP_TREE`.
A very nice pattern emerges from this structure: define generators for sub-circuits, which can vary by size or `Operation` parameters.
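As a small illustration of that pattern (the generator name here is made up for this example), a layer generator can be appended directly and flattened as an `OP_TREE`:
```
# Illustrative sub-circuit generator: a layer of Hadamards on any set of qubits.
def h_layer(qubits):
    for q in qubits:
        yield H(q)

circuit = cirq.Circuit()
circuit.append(h_layer([q0, q1, q2]))
print(circuit)
```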
Another useful method to construct a `Circuit` fully formed from an `OP_TREE` is to pass the `OP_TREE` into `Circuit` when initializing it:
```
circuit = cirq.Circuit(H(q0), H(q1))
print(circuit)
```
### Slicing and iterating over circuits
Circuits can be iterated over and sliced. When they are iterated, each item in the iteration is a moment:
```
circuit = cirq.Circuit(H(q0), CZ(q0, q1))
for moment in circuit:
print(moment)
```
Slicing a `Circuit`, on the other hand, produces a new `Circuit` with only the moments corresponding to the slice:
```
circuit = cirq.Circuit(H(q0), CZ(q0, q1), H(q1), CZ(q0, q1))
print(circuit[1:3])
```
Especially useful is dropping the last moment (which often contains just measurements): `circuit[:-1]`, or reversing a circuit: `circuit[::-1]`.
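For example, continuing with the circuit defined in the previous cell:
```
# Drop the final moment and reverse the circuit (both return new Circuits).
print(circuit[:-1])
print(circuit[::-1])
```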
### Related
- [Transform circuits](transform.ipynb) - features related to circuit optimization and compilation
- [Devices](devices.ipynb) - validate circuits against device constraints
- [Import/export circuits](interop.ipynb) - features to serialize/deserialize circuits into/from different formats
|
github_jupyter
|

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/115) 03:42*
With a numerical variable, you work with the mean and make inferences about averages and their differences. With a categorical variable, you work with proportions (frequencies), and you may want to summarize them in a contingency table. Studies that report percentages (e.g. XX% support vs. XX% oppose same-sex marriage) are typically dealing with categorical variables.
In this blog post we first look at a single categorical variable, starting with two levels (binary) and then more than two levels. Then we compare two categorical variables, again first with two levels and then with more than two.
<!--TEASER_END-->
## Sampling Distribution

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/115) 05:10*
Recall that a sampling distribution is what you get when you take (in principle) all possible samples of a particular size, compute the summary statistic for each sample, and build a distribution out of those statistics.
For example, suppose we observe a categorical variable: smoker vs. non-smoker. Because we don't know the population proportion, we estimate it for each country: we take samples of size 1000 per country and calculate the proportion of smokers in each sample. Those proportions form a sampling distribution, and the average of the proportions approximates the proportion in the whole population. So you see, **we start with a categorical variable, but by observing one level and summarizing it as a sample proportion, the statistic we work with is numerical.**

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/115) 07:20*
The CLT works for a proportion the same way it does for a mean. From the CLT we want to know the shape, center, and spread of the sampling distribution. The CLT requires random sampling/assignment; the center is simply the population proportion, and the spread (standard error) is computed from the proportion and its complement divided by the sample size. The sample-size condition is similar to the binomial requirement, involving the sample size, the success proportion, and the failure proportion. If the population proportion $p$ is unknown, use the point estimate $\hat{p}$.
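In symbols, under these conditions the sampling distribution of the sample proportion is approximately
$$\hat{p} \sim N\left(\text{mean} = p,\ SE = \sqrt{\frac{p(1-p)}{n}}\right)$$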

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/115) 11:01*
Take a look at this example. We want the probability that at least 95% of 200 randomly sampled plants are flowering plants, given that 90% of the population flowers. This is the kind of probability we usually calculate with the *pnorm* function in R. First check whether the CLT conditions are satisfied, and also the success-failure condition for this sample size. If both are satisfied, we can shade the corresponding area of the distribution and compute it in R. Remember that we are asking for *at least* 95%, not *exactly* 95%, because there is no meaningful probability of hitting an exact cut point in a continuous distribution.
```
pnorm(0.95,mean=0.9,sd=0.0212,lower.tail=F)
```
We can also do this with the binomial distribution. First we find the count corresponding to 95% of the 200 plants, i.e., the sample size times the proportion of interest:
```
n = 200
p = 0.95
n*p
```
So using just the binomial distribution, we sum the probabilities from 190 to 200, because we are interested in the probability of getting at least 190 flowering plants:
P(190) + P(191) + ... + P(200)
```
sum(dbinom(190:200,200,0.90))
dbinom(30*0.12,250,0.08)
```
##### It's not exactly the same as what we calculated previously, but it nevertheless looks similar.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/115) 15:40*
So what if the success-failure conditions ($np > 10$ and $n(1-p) > 10$) are not met?
The center of the sampling distribution will still be at the population proportion. Notice that proportions are bounded by 0 and 1; this is intuitive, as there is no such thing as a proportion above 100% or below 0%. In the example we have one proportion close to zero and another close to one, and each sampling distribution is centered at its sample proportion.
The spread can still be calculated using the standard error formula, which uses the proportion, its complement, and the sample size.
But the shape is what differs. When the proportion is close to zero, like 0.20 in the example, the natural boundary at 0 means the distribution cannot extend below 0, so it ends up with a long right tail (towards 1). Similarly, a proportion around 0.8 produces a long left tail (towards 0). So depending on the proportion, the sampling distribution can be skewed.
Intuitively, using the earlier example with a random sample of 50 and $p = 0.90$, we would get a left-skewed sampling distribution.
# Confidence Interval

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 01:38*
Don't focus too much on this question; it just serves as a basis for the next research question. We know that a controlled experiment, with subjects divided into treatment and control groups, is the better study design, so (b) is the better choice. This question was asked of 670 people, and we focus on the proportion.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 02:17*
So the parameter of interest is the population proportion, and the point estimate is what we can calculate from our sample.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 09:59*
Earlier, we calculated the standard error with the population proportion. But the population proportion is almost never available, so instead we compute the standard error of the point estimate using the sample proportion.
So how do you calculate the confidence interval? Use a 95% confidence level.
First check the conditions:
* Independence. 670 is less than 10% of the population, and the GSS samples randomly, so whether one sampled American has good intuition about experimental design is independent of another.
* Sample size/skew. We could verify the sample size with the success-failure formula, but we can also just eyeball it: there are 571 successes and 99 failures, both greater than 10. Since the success-failure condition is met, we know the sampling distribution of the proportion will be nearly normal.
```
p = 0.85
n = 670
CL = 0.95
SE = sqrt(p*(1-p)/n)
z_star = round(qnorm((1-CL)/2,lower.tail=F),digits=2)
ME = z_star * SE
c(p-ME, p+ME)
```
Based on this data, we can interpret the confidence interval as:
* We are 95% confident that 83% to 87% of all Americans have good intuition about experimental design.
* 95% of random samples of 670 Americans will yield confidence intervals that capture the true proportion of Americans who have good intuition about experimental design.
## Required sample size of desired ME

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 08:41*
We previously computed the required sample size for a desired margin of error for a mean. For a categorical variable, we instead manipulate the parameters in the standard error for a proportion. Since this is the same example, we use the same proportion. Specifying the desired ME and solving for n gives the result. Because this is a minimum requirement, the fractional part is rounded up to the next whole person (you can't sample 0.04 of a person).
```
#Required sample size proportion for desired ME
p = 0.85
z_star = 1.96
ME = 0.01
z_star**2*p*(1-p)/ME**2
```
If we have a $\hat{p}$ from a previous study, we can plug that value into the calculation. If we don't have one, we use 0.5, which is picked for two reasons (see the formula after this list):
* 50:50 is the fairest prior estimate for a categorical variable with two levels.
* 0.5 will give us the largest possible sample size.
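The second point follows directly from the sample-size formula: for a fixed margin of error,
$$n = \frac{z^{*2}\, p(1-p)}{ME^2},$$
and $p(1-p)$ is largest at $p = 0.5$ (where it equals 0.25), so using 0.5 yields the largest, most conservative $n$.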

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 09:27*
The hypothesis-testing framework for a proportion is very similar to the one for a mean.
First we set the null and alternative hypotheses. Remember that both are stated in terms of the population parameter, just as in a CI, because both aim to make inferences about the population. The data will then lead us to reject or fail to reject the null hypothesis.
Our point estimate is the sample proportion.
Next we check the conditions. As with a mean, we want a random sample/assignment and less than 10% of the population. The difference is in the sample-size condition: instead of n > 30 (as for a mean), we need more than 10 successes and more than 10 failures. These are expected counts, as in the binomial, so they are computed with the null value p rather than with $\hat{p}$.
Then we draw the sampling distribution and (ALWAYS) shade the p-value area, and calculate the Z statistic; again, the standard error uses the true proportion whenever it is available.
Finally we make a decision: if the p-value is smaller than the significance level, reject the null hypothesis; otherwise, fail to reject it.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 04:33*
Notice the difference between CI and HT. For a CI, you calculate the standard error from the observed proportion. For a HT, you are given a null value, a hypothesized true proportion for the population, so you use that instead. We always use the true proportion whenever possible: in the CI case the true proportion is unknown, so we fall back to the point estimate; in HT, because a true proportion is given (as the null value), we compute the expected successes, failures, and SE based on the null value.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 07:00*
In this example we use a hypothesis test to determine whether a majority of Americans believe in evolution, where a majority means a proportion greater than 50%. We test this using the given data.
Since the question describes the alternative hypothesis, we can infer the null hypothesis, and we have a sample proportion of 0.6. Then we check the conditions:
1. Independence. 1983 is less than 10% of the population, and the sample is random, so whether one sampled American believes in evolution is independent of another.
2. Sample size/skew. 1983 * 0.5 = 991.5 > 10. We don't have to compute the complement separately, because 0.5 applies to both successes and failures. With the success-failure condition met, we know the sampling distribution will be nearly normal.
Having validated the conditions, we proceed to the next step.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/119) 09:24*
```
#Hypothesis testing, one categorical variable, given null value(p)
p = 0.5
p_hat = 0.6
n = 1983
SL = 0.05
SE = sqrt(p*(1-p)/n)
z_star = round(qnorm((1-CL)/2,lower.tail=F),digits=2)
pnorm(p_hat,mean=p,sd=SE,lower.tail=p_hat < p)
```
The p-value is practically zero, so we reject the null hypothesis. There is almost a 0% chance of observing 1983 randomly selected Americans in which 60% or more believe in evolution, if in fact only 50% of Americans believe in evolution. So the data provide convincing evidence that a majority of all Americans believe in evolution.
### Summary
The population proportion is denoted p, and the sample proportion is denoted $\hat{p}$. The standard error formula calls for the population proportion, but since it is almost never known, we plug in the sample proportion.
For a proportion, the CLT states that the sampling distribution of the sample proportion will be nearly normal, centered at the true population proportion, with standard error $\sqrt{p(1-p)/n}$, as long as:
* Observations in the sample are independent of one another.
* There are at least 10 expected successes and 10 expected failures in the sample.
For a confidence interval, we use the sample proportion (if we already knew the true population proportion, there would be no point in building an interval to capture it). For a hypothesis test, we are handed a hypothesized true proportion, and we plug it into the standard error calculation. For a numerical variable, the standard error does not involve the mean at all (it uses the standard deviation), so there is no such discrepancy between confidence intervals and hypothesis tests.
When calculating the required sample size for a given margin of error, if the sample proportion is unknown we use 0.5. This has two advantages: first, for a categorical variable with two levels, 50:50 is the fairest (uniform) prior guess; second, 0.5 gives the largest, most conservative sample size.
# Estimating difference between two proportions (confidence interval)
In this section we want to estimate the difference between the proportions of two groups of a categorical variable.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 04:05*
Here we have two samples, one from a Gallup survey and one from Coursera. The success proportion is the proportion of respondents who answered yes, there should be a law banning all handgun possession except by police officers. The proportions differ between the US and Coursera samples; it could be that an issue that is contentious in the US matters less to Coursera's largely international students.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 04:19*
Given this question, we distinguish between the parameter of interest and the point estimate.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 05:55*
Again we are building a confidence interval, so we calculate the point estimate of the difference and the standard error of the difference. The standard error will be larger than for a single proportion, since it includes the variability of both $\hat{p}_1$ and $\hat{p}_2$. Note that we use $\hat{p}$ because the population parameters are unknown; later, for the hypothesis test, we will replace it with a value based on the hypothesized population parameter.
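Explicitly, the standard error of the difference between two sample proportions is
$$SE_{(\hat{p}_1 - \hat{p}_2)} = \sqrt{\frac{\hat{p}_1(1-\hat{p}_1)}{n_1} + \frac{\hat{p}_2(1-\hat{p}_2)}{n_2}}$$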

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 08:00*
Again, the conditions for a confidence interval for two groups must be checked:
* **Independence**. Within groups, observations must be independent (random sampling/assignment, and if sampling without replacement, less than 10% of the population). Between groups, the two samples must be independent as well (non-paired).
* **Sample size/skew**. Each group must satisfy the success-failure condition (at least 10 successes and 10 failures). Remember that we use the sample proportions, since the population proportions are unknown.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 10:48*
So, continuing the example, check the conditions:
* Each sample is smaller than 10% of its respective population. The Gallup survey does a good job of random sampling, **but the Coursera survey does not; it is a voluntary-response survey**. So we can say that the sampled Americans are independent of one another, but the sampled Courserians may not be.
* Working out the counts from the table, we have 257 successes and 771 failures in the US sample, and 59 successes and 24 failures among Courserians. Because all of these are greater than 10, we can say that the **sampling distributions of both proportions are nearly normal**.
So let's put it into the equation.
```
#Observe one level in categorical variable, of categorical of two levels.
#1 = Coursera, 2 = US
n_1 = 83
p_1 = 0.71
n_2 = 1028
p_2 = 0.25
CL = 0.95
SE = sqrt( (p_1*(1-p_1)/n_1)+(p_2*(1-p_2)/n_2) )
z_star = round(qnorm((1-CL)/2, lower.tail=F),digits=2)
ME = z_star*SE
c((p_1-p_2)-ME, (p_1-p_2)+ME)
```
Since the difference in proportions is Coursera - US, we can say that **we are 95% confident that the proportion of Courserians who believe there should be a law banning gun possession is 36% to 56% higher than the proportion of Americans**. If we reverse the order, we get the equivalent result: the US proportion is that much lower than the Coursera proportion, which is the same statement.
Should we expect a significant difference if we run a hypothesis test? Of course! We know the Coursera proportion is 36% to 56% higher than the US proportion, so the difference will be significant compared to the null hypothesis (a null value of 0% difference). Equivalently, 0% is not in the (36%, 56%) interval.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 02:31*
In this example we perform a hypothesis test on the success proportion (yes, their kid has been bullied) between parents who are male and female (each child represented by one parent). The null hypothesis is that there is no difference, while the alternative states that there is a difference. Remember that hypothesis testing is always about the true proportions. In a CI you use the observed proportions (if you already knew the true proportion, there would be no point building an interval to capture it).
For a CI we use the observed proportions, but HT is a little trickier: under the null hypothesis we don't know what common value proportion 1 and proportion 2 are both equal to. So what do we do? We construct one. The idea is that if the proportions are equal, the best estimate of that common proportion comes from pooling the two groups into one (both female and male are just levels of one categorical variable). This gives
$$\hat{p}_{pool} = \frac{\#successes_1 + \#successes_2}{n_1+n_2}$$
Calculating the pooled proportion, we get:
```
np_1 = 34
np_2 = 61
n_1 = 90
n_2 = 122
(np_1+np_2)/(n_1+n_2)
```
So wherever $\hat{p}$ appears in the hypothesis-test calculation, we replace it with the pooled proportion.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 07:20*
Calculating the pooled proportion, we see that it is closer to the female proportion than to the male proportion.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 08:36*
So what is different from the mean case? For a mean, the mean itself never enters the standard error formula (the SE uses the standard deviation), so the null value plays no role there. For a proportion, however, the proportion itself appears in the standard error, which is why the hypothesis test uses the pooled proportion.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 11:01*
After recalculating with the pooled proportion, we check the conditions for the hypothesis test.
* **Independence**. Within groups, both the female and male samples are less than 10% of their populations. Between groups, the data are not paired (if they were paired, we would expect equal numbers of females and males). Therefore the sampled males are independent of each other, the sampled females as well, and we also expect males and females to be independent of one another.
* **Sample size/skew**. We plug each sample size into the pooled proportion and check the expected success and failure counts for both females and males. If all four values are at least 10, we can assume the sampling distribution of the difference in proportions is nearly normal.
Next, we proceed to the hypothesis test calculation.

*Screenshot taken from [Coursera](https://class.coursera.org/statistics-003/lecture/123) 12:25*
You may notice something different here. Yes, **the pooled proportion is not the null value!** It only represents the common proportion assumed for both females and males under the null hypothesis. But since that pooled proportion is the same for both groups, the hypothesized difference is still zero, hence the null value is zero. Calculating everything,
```
#1 = Male, 2 = Female
n_1 = 90
p_1 = 0.38
n_2 = 122
p_2 = 0.5
p_pool = 0.45
null = 0
SE = sqrt((p_pool*(1-p_pool)/n_1) + (p_pool*(1-p_pool)/n_2))
pe = p_1 - p_2
pnorm(pe,mean=null,sd=SE, lower.tail=pe < null) * 2
SE
```
Based on the p-value and a 5% significance level, we fail to reject the null hypothesis and conclude that **the data do not provide convincing evidence of a difference between males and females with respect to the likelihood of reporting that their kids are being bullied**.
### Summary
Calculating the standard error when testing the difference between two proportions differs depending on whether we are building a confidence interval (or a hypothesis test with a null value other than zero) or a hypothesis test with a null value of zero. In the former case we combine the standard errors of the two sample proportions. In the latter case, the common proportion under the null hypothesis is unknown, so we use the pooled proportion: the combined successes divided by the combined sample sizes. The reason for this extra discrepancy is that, having assumed the proportions are equal across the levels of the categorical variable, we need a single common proportion that fits both levels.
> **REFERENCES**:
> Dr. Mine Çetinkaya-Rundel, [Cousera](https://class.coursera.org/statistics-003/lecture)
|
github_jupyter
|
# Question repository
A list of open questions and possibly ambiguous stuff encountered throughout the material.
TODO: Tag exam-related ones appropriately, to differentiate them from (exclusively) curiosity-related ones.
**Note:** An alternative design would consist of adding a questions section to every notebook, tagging it appropriately using IPython metadata, and then using something like a Python/shell script to print all open questions in a centralized way. However...

## 2. Approximate retrieval
* Why perform first step of hashing if we only have a small number of features (e.g. 100)? If many features, why not just do a PCA first?
- might be because we want the shingle representation (0s and 1s) for the nice properties that Jaccard similarity offers us
## 3. Classification
* When transitioning from the first SVM formulation (with slack variables), to the second one aren't we loosening any constraint by fixing $\xi$?
* (tentative) It seems we're not, since we're taking multiple cases into consideration and merging them together into a single formulation using max.
* Slide 04:18: Is the first (primal) SVM formulation a (ii)-type one (since it has a minimization and its constraint as separate equation), or is it not eligible for this categorization?
* Slide 06:15: How do we go from step 1 to 2? Isn't the $\lambda \| w \|_2^2$ term outside the sum?
- yes it is, but the sum has a convenient $\frac{1}{T}$ in front of it, so we're safe to add the regularization term into the sum.
* Why do some SVM OCP implementations *always* regularize, even when the model was not updated at that stage?
## 4. Non-linear classification
* How exactly is the Lagrangian dual reformulation step (SVMs) different from the first time we reformulated the SVM problem statement to get rid of the slack variables?
- it's different because we changed the objective! We no longer have $\min_w$ or $\min_{w, \xi}$, it's now a maximization of the Lagrance coefficients: $\max_\alpha$; it's not a *reformulation*, but an *equivalent problem*
## 5. Active learning
* When doing active learning based on uncertainty sampling, how exactly do we know when we can safely infer some labels?
## 6. Clustering
* Homework 5 solution, 2.2: Why is:
\begin{equation}
\operatorname{Var}_{\hat{x}_i \sim q}\left [ \frac{1}{m} \sum_{i=1}^{m} \frac{d(\hat{x}_i; \mu)}{q(\hat{x}_i)} \right ] = \frac{1}{m^2} \sum_{i=1}^m \operatorname{Var}_{x_i \sim q} \left[ \frac{d(x_i; \mu)}{q(x_i)} \right]
\end{equation}
* And why do we still have the $i$ subscript in the variance formulation? Can't we just write $x \sim q$?
* Have to discuss this with friends!
## 7. Bandits
## 8. Exam-specific (and/or for review session on Jan 20)
* Exam 2014 Problem 6 (Submodular functions)
- solved by Syd in The Notes. Yay!
|
github_jupyter
|
# Model Checking
After running an MCMC simulation, `sample` returns a `MultiTrace` object containing the samples for all the stochastic and deterministic random variables. The final step in Bayesian computation is model checking, in order to ensure that inferences derived from your sample are valid. There are two components to model checking:
1. Convergence diagnostics
2. Goodness of fit
Convergence diagnostics are intended to detect lack of convergence in the Markov chain Monte Carlo sample; it is used to ensure that you have not halted your sampling too early. However, a converged model is not guaranteed to be a good model. The second component of model checking, goodness of fit, is used to check the internal validity of the model, by comparing predictions from the model to the data used to fit the model.
## Convergence Diagnostics
Valid inferences from sequences of MCMC samples are based on the
assumption that the samples are derived from the true posterior
distribution of interest. Theory guarantees this condition as the number
of iterations approaches infinity. It is important, therefore, to
determine the **minimum number of samples** required to ensure a reasonable
approximation to the target posterior density. Unfortunately, no
universal threshold exists across all problems, so convergence must be
assessed independently each time MCMC estimation is performed. The
procedures for verifying convergence are collectively known as
*convergence diagnostics*.
One approach to analyzing convergence is **analytical**, whereby the
variance of the sample at different sections of the chain are compared
to that of the limiting distribution. These methods use distance metrics
to analyze convergence, or place theoretical bounds on the sample
variance, and though they are promising, they are generally difficult to
use and are not prominent in the MCMC literature. More common is a
**statistical** approach to assessing convergence. With this approach,
rather than considering the properties of the theoretical target
distribution, only the statistical properties of the observed chain are
analyzed. Reliance on the sample alone restricts such convergence
criteria to **heuristics**. As a result, convergence cannot be guaranteed.
Although evidence for lack of convergence using statistical convergence
diagnostics will correctly imply lack of convergence in the chain, the
absence of such evidence will not *guarantee* convergence in the chain.
Nevertheless, negative results for one or more criteria may provide some
measure of assurance to users that their sample will provide valid
inferences.
For most simple models, convergence will occur quickly, sometimes within
the first several hundred iterations, after which all remaining
samples of the chain may be used to calculate posterior quantities. For
more complex models, convergence requires a significantly longer burn-in
period; sometimes orders of magnitude more samples are needed.
Frequently, lack of convergence will be caused by **poor mixing**.
Recall that *mixing* refers to the degree to which the Markov
chain explores the support of the posterior distribution. Poor mixing
may stem from inappropriate proposals (if one is using the
Metropolis-Hastings sampler) or from attempting to estimate models with
highly correlated variables.
```
%matplotlib inline
import numpy as np
import seaborn as sns; sns.set_context('notebook')
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from pymc3 import Normal, Binomial, sample, Model
from pymc3.math import invlogit
# Samples for each dose level
n = 5 * np.ones(4, dtype=int)
# Log-dose
dose = np.array([-.86, -.3, -.05, .73])
deaths = np.array([0, 1, 3, 5])
with Model() as bioassay_model:
# Logit-linear model parameters
alpha = Normal('alpha', 0, sd=100)
beta = Normal('beta', 0, sd=100)
# Calculate probabilities of death
theta = invlogit(alpha + beta * dose)
# Data likelihood
obs_deaths = Binomial('obs_deaths', n=n, p=theta, observed=deaths)
with bioassay_model:
bioassay_trace = sample(1000)
from pymc3 import traceplot
traceplot(bioassay_trace, varnames=['alpha'])
```
### Informal Methods
The most straightforward approach for assessing convergence is based on
simply **plotting and inspecting traces and histograms** of the observed
MCMC sample. If the trace of values for each of the stochastics exhibits
asymptotic behavior over the last $m$ iterations, this may be
satisfactory evidence for convergence.
```
traceplot(bioassay_trace, varnames=['beta'])
```
A similar approach involves
plotting a histogram for every set of $k$ iterations (perhaps 50-100)
beyond some burn in threshold $n$; if the histograms are not visibly
different among the sample intervals, this may be considered some evidence for
convergence. Note that such diagnostics should be carried out for each
stochastic estimated by the MCMC algorithm, because convergent behavior
by one variable does not imply evidence for convergence for other
variables in the analysis.
```
import matplotlib.pyplot as plt
beta_trace = bioassay_trace['beta']
fig, axes = plt.subplots(2, 5, figsize=(14,6))
axes = axes.ravel()
for i in range(10):
axes[i].hist(beta_trace[100*i:100*(i+1)])
plt.tight_layout()
```
An extension of this approach can be taken
when multiple parallel chains are run, rather than just a single, long
chain. In this case, the final values of $c$ chains run for $n$
iterations are plotted in a histogram; just as above, this is repeated
every $k$ iterations thereafter, and the histograms of the endpoints are
plotted again and compared to the previous histogram. This is repeated
until consecutive histograms are indistinguishable.
Another *ad hoc* method for detecting lack of convergence is to examine
the traces of several MCMC chains initialized with different starting
values. Overlaying these traces on the same set of axes should (if
convergence has occurred) show each chain tending toward the same
equilibrium value, with approximately the same variance. Recall that the
tendency for some Markov chains to converge to the true (unknown) value
from diverse initial values is called *ergodicity*. This property is
guaranteed by the reversible chains constructed using MCMC, and should
be observable using this technique. Again, however, this approach is
only a heuristic method, and cannot always detect lack of convergence,
even though chains may appear ergodic.
```
with bioassay_model:
bioassay_trace = sample(1000, chains=2, start=[{'alpha':0.5}, {'alpha':5}])
bioassay_trace.get_values('alpha', chains=0)[0]
plt.plot(bioassay_trace.get_values('alpha', chains=0)[:200], 'r--')
plt.plot(bioassay_trace.get_values('alpha', chains=1)[:200], 'k--')
```
A principal reason that evidence from informal techniques cannot
guarantee convergence is a phenomenon called ***metastability***. Chains may
appear to have converged to the true equilibrium value, displaying
excellent qualities by any of the methods described above. However,
after some period of stability around this value, the chain may suddenly
move to another region of the parameter space. This period
of metastability can sometimes be very long, and therefore escape
detection by these convergence diagnostics. Unfortunately, there is no
statistical technique available for detecting metastability.
### Formal Methods
Along with the *ad hoc* techniques described above, a number of more
formal methods exist which are prevalent in the literature. These are
considered more formal because they are based on existing statistical
methods, such as time series analysis.
PyMC currently includes three formal convergence diagnostic methods. The
first, proposed by [Geweke (1992)](http://projecteuclid.org/DPubS?service=UI&version=1.0&verb=Display&handle=euclid.ss/1177011446), is a time-series approach that
compares the mean and variance of segments from the beginning and end of
a single chain.
$$z = \frac{\bar{\theta}_a - \bar{\theta}_b}{\sqrt{S_a(0) + S_b(0)}}$$
where $a$ is the early interval and $b$ the late interval, and $S_i(0)$ is the spectral density estimate at zero frequency for chain segment $i$. If the
z-scores (theoretically distributed as standard normal variates) of
these two segments are similar, it can provide evidence for convergence.
PyMC calculates z-scores of the difference between various initial
segments along the chain, and the last 50% of the remaining chain. If
the chain has converged, the majority of points should fall within 2
standard deviations of zero.
In PyMC, diagnostic z-scores can be obtained by calling the `geweke` function. It
accepts either (1) a single trace, (2) a Node or Stochastic object, or
(3) an entire Model object:
```
from pymc3 import geweke
with bioassay_model:
tr = sample(2000, tune=1000)
z = geweke(tr, intervals=15)
plt.scatter(*z[0]['alpha'].T)
plt.hlines([-1,1], 0, 1000, linestyles='dotted')
plt.xlim(0, 1000)
```
The arguments expected are the following:
- `x` : The trace of a variable.
- `first` : The fraction of series at the beginning of the trace.
- `last` : The fraction of series at the end to be compared with the section at the beginning.
- `intervals` : The number of segments.
Plotting the output displays the scores in series, making it easy to
see departures from the standard normal assumption.
A second convergence diagnostic provided by PyMC is the Gelman-Rubin
statistic [Gelman and Rubin (1992)](http://projecteuclid.org/DPubS?service=UI&version=1.0&verb=Display&handle=euclid.ss/1177011136). This diagnostic uses multiple chains to
check for lack of convergence, and is based on the notion that if
multiple chains have converged, by definition they should appear very
similar to one another; if not, one or more of the chains has failed to
converge.
The Gelman-Rubin diagnostic uses an analysis of variance approach to
assessing convergence. That is, it calculates both the between-chain
variance (B) and within-chain variance (W), and assesses whether they
are different enough to worry about convergence. Assuming $m$ chains,
each of length $n$, quantities are calculated by:
$$\begin{align}B &= \frac{n}{m-1} \sum_{j=1}^m (\bar{\theta}_{.j} - \bar{\theta}_{..})^2 \\
W &= \frac{1}{m} \sum_{j=1}^m \left[ \frac{1}{n-1} \sum_{i=1}^n (\theta_{ij} - \bar{\theta}_{.j})^2 \right]
\end{align}$$
for each scalar estimand $\theta$. Using these values, an estimate of
the marginal posterior variance of $\theta$ can be calculated:
$$\hat{\text{Var}}(\theta | y) = \frac{n-1}{n} W + \frac{1}{n} B$$
Assuming $\theta$ was initialized to arbitrary starting points in each
chain, this quantity will overestimate the true marginal posterior
variance. At the same time, $W$ will tend to underestimate the
within-chain variance early in the sampling run. However, in the limit
as $n \rightarrow
\infty$, both quantities will converge to the true variance of $\theta$.
In light of this, the Gelman-Rubin statistic monitors convergence using
the ratio:
$$\hat{R} = \sqrt{\frac{\hat{\text{Var}}(\theta | y)}{W}}$$
This is called the potential scale reduction, since it is an estimate of
the potential reduction in the scale of $\theta$ as the number of
simulations tends to infinity. In practice, we look for values of
$\hat{R}$ close to one (say, less than 1.1) to be confident that a
particular estimand has converged. In PyMC, the function
`gelman_rubin` will calculate $\hat{R}$ for each stochastic node in
the passed model:
```
from pymc3 import gelman_rubin
gelman_rubin(bioassay_trace)
```
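For intuition, $\hat{R}$ can also be computed by hand from the per-chain draws using the formulas above; a minimal sketch for `alpha`, assuming the two-chain `bioassay_trace` sampled earlier:
```
alpha_chains = bioassay_trace.get_values('alpha', combine=False)
m = len(alpha_chains)             # number of chains
n = len(alpha_chains[0])          # draws per chain
chain_means = np.array([c.mean() for c in alpha_chains])

B = n / (m - 1) * np.sum((chain_means - chain_means.mean())**2)  # between-chain variance
W = np.mean([c.var(ddof=1) for c in alpha_chains])               # within-chain variance
var_hat = (n - 1) / n * W + B / n                                 # marginal posterior variance estimate
print(np.sqrt(var_hat / W))                                       # R-hat
```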
For the best results, each chain should be initialized to highly
dispersed starting values for each stochastic node.
By default, when calling the `forestplot` function using nodes with
multiple chains, the $\hat{R}$ values will be plotted alongside the
posterior intervals.
```
from pymc3 import forestplot
forestplot(bioassay_trace)
```
## Autocorrelation
In general, samples drawn from MCMC algorithms will be autocorrelated. This is not a big deal, other than the fact that autocorrelated chains may require longer sampling in order to adequately characterize posterior quantities of interest. The calculation of autocorrelation is performed for each lag $i=1,2,\ldots,k$ (the correlation at lag 0 is, of course, 1) by:
$$\hat{\rho}_i = 1 - \frac{V_i}{2\hat{\text{Var}}(\theta | y)}$$
where $\hat{\text{Var}}(\theta | y)$ is the same estimated variance as calculated for the Gelman-Rubin statistic, and $V_i$ is the variogram at lag $i$ for $\theta$:
$$\text{V}_i = \frac{1}{m(n-i)}\sum_{j=1}^m \sum_{k=i+1}^n (\theta_{jk} - \theta_{j(k-i)})^2$$
This autocorrelation can be visualized using the `autocorrplot` function in PyMC3:
```
from pymc3 import autocorrplot
autocorrplot(tr);
```
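For intuition, the lag-$k$ autocorrelation can also be estimated directly from the draws; a simple sketch using the ordinary sample autocorrelation (rather than the variogram-based estimator above) for a single chain:
```
alpha_samples = tr.get_values('alpha', chains=0)
lags = range(1, 51)
acf = [np.corrcoef(alpha_samples[:-lag], alpha_samples[lag:])[0, 1] for lag in lags]

plt.bar(lags, acf)
plt.xlabel('lag')
plt.ylabel('autocorrelation')
```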
### Effective sample size
The effective sample size is estimated using the partial sum:
$$\hat{n}_{eff} = \frac{mn}{1 + 2\sum_{i=1}^T \hat{\rho}_i}$$
where $T$ is the first odd integer such that $\hat{\rho}_{T+1} + \hat{\rho}_{T+2}$ is negative.
The issue here is related to the fact that we are **estimating** the effective sample size from the fit output. Values of $n_{eff} / n_{iter} < 0.001$ indicate a biased estimator, resulting in an overestimate of the true effective sample size.
```
from pymc3 import effective_n
effective_n(bioassay_trace)
```
Both low $n_{eff}$ and high $\hat{R}$ indicate **poor mixing**.
It is tempting to want to **thin** the chain to eliminate the autocorrelation (*e.g.* taking every 20th sample from the traces above), but this is a waste of time. Since thinning deliberately throws out the majority of the samples, no efficiency is gained; you ultimately require more samples to achieve a particular desired sample size.
## Diagnostics for Gradient-based Samplers
Hamiltonian Monte Carlo is a powerful and efficient MCMC sampler when set up appropriately. However, this typically requires careful tuning of the sampler parameters, such as tree depth, leapfrog step size and target acceptance rate. Fortunately, the NUTS algorithm takes care of some of this for us. Nevertheless, tuning must be carefully monitored for failures that frequently arise. This is particularly the case when fitting challenging models, such as those with high curvature or heavy tails.
Fortunately, however, gradient-based sampling provides the ability to diagnose these pathologies. PyMC makes several diagnostic statistics available as attributes of the `MultiTrace` object returned by the `sample` function.
```
bioassay_trace.stat_names
```
- `mean_tree_accept`: The mean acceptance probability for the tree that generated this sample. The mean of these values across all samples, excluding the burn-in, should be approximately `target_accept` (the default is 0.8).
- `diverging`: Whether the trajectory for this sample diverged. If there are many diverging samples, this usually indicates that a region of the posterior has high curvature. Reparametrization can often help, but you can also try to increase `target_accept` to something like 0.9 or 0.95.
- `energy`: The energy at the point in phase-space where the sample was accepted. This can be used to identify posteriors with problematically long tails. See below for an example.
- `energy_error`: The difference in energy between the start and the end of the trajectory. For a perfect integrator this would always be zero.
- `max_energy_error`: The maximum difference in energy along the whole trajectory.
- `depth`: The depth of the tree that was used to generate this sample
- `tree_size`: The number of leaves of the sampling tree when the sample was accepted. This is usually a bit less than $2 ^ \text{depth}$. If the tree size is large, the sampler is using a lot of leapfrog steps to find the next sample. This can happen, for example, if there are strong correlations in the posterior, if the posterior has long tails, if there are regions of high curvature ("funnels"), or if the variance estimates in the mass matrix are inaccurate. Reparametrization of the model or estimating the posterior variances from past samples might help.
- `tune`: This is `True`, if step size adaptation was turned on when this sample was generated.
- `step_size`: The step size used for this sample.
- `step_size_bar`: The current best known step-size. After the tuning samples, the step size is set to this value. This should converge during tuning.
If the name of the statistic does not clash with the name of one of the variables, we can use indexing to get the values. The values for the chains will be concatenated.
We can see that the step sizes for both chains converged to about the same value after the 2000 tuning samples. The first 3000 values in the plot below are from the first chain, the second 3000 from the second.
```
with bioassay_model:
trace = sample(1000, tune=2000, init=None, chains=2, discard_tuned_samples=False)
plt.plot(trace['step_size_bar'])
```
The `get_sampler_stats` method provides more control over which values should be returned, and it also works if the name of the statistic is the same as the name of one of the variables. We can use the `chains` option, to control values from which chain should be returned, or we can set `combine=False` to get the values for the individual chains:
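For example, to pull the values of a statistic from a single chain (a small illustration; the statistic and the chain index are arbitrary):
```
# Step-size statistic from the first chain only
trace.get_sampler_stats('step_size_bar', chains=0).shape
```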
The `NUTS` step method has a maximum tree depth parameter so that infinite loops (which can occur for non-identified models) are avoided. When the maximum tree depth is reached (the default value is 10), the trajectory is stopped. However, complex (but identifiable) models can saturate this threshold, which reduces sampling efficiency.
The `MultiTrace` stores the tree depth for each iteration, so inspecting these traces can reveal saturation if it is occurring.
```
sizes1, sizes2 = trace.get_sampler_stats('depth', combine=False)
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True, sharey=True)
ax1.plot(sizes1)
ax2.plot(sizes2)
```
We can also check the acceptance for the trees that generated this sample. The mean of these values across all samples (except the tuning stage) is expected to be the same as `target_accept`, which is 0.8 by default.
```
accept = trace.get_sampler_stats('mean_tree_accept', burn=1000)
sns.distplot(accept, kde=False)
```
### Divergent transitions
Recall that simulating Hamiltonian dynamics via a symplectic integrator uses a discrete approximation of a continuous function. This is only a reasonable approximation when the step sizes of the integrator are suitably small. A divergent transition may indicate that the approximation is poor.
If there are too many divergent transitions, then samples are not being drawn from the full posterior, and inferences based on the resulting sample will be biased.
If there are diverging transitions, PyMC3 will issue warnings indicating how many were discovered. We can obtain the indices of them from the trace.
```
trace['diverging'].nonzero()
```
### Bayesian Fraction of Missing Information
The Bayesian fraction of missing information (BFMI) is a measure of how hard it is to
sample level sets of the posterior at each iteration. Specifically, it quantifies how well momentum resampling matches the marginal energy distribution. A small value indicates that the adaptation phase of the sampler was unsuccessful, and invoking the central limit theorem may not be valid. It indicates whether the sampler is able to adequately explore the posterior distribution.
Though there is not an established rule of thumb for an adequate threshold, values close to one are optimal. Reparameterizing the model is sometimes helpful for improving this statistic.
```
from pymc3 import bfmi
bfmi(trace)
```
Another way of diagnosing this phenomenon is by comparing the overall distribution of
energy levels with the *change* of energy between successive samples. Ideally, they should be very similar.
If the distribution of energy transitions is narrow relative to the marginal energy distribution, this is a sign of inefficient sampling, as many transitions are required to completely explore the posterior. On the other hand, if the energy transition distribution is similar to that of the marginal energy, this is evidence of efficient sampling, resulting in near-independent samples from the posterior.
```
energy = trace['energy']
energy_diff = np.diff(energy)
sns.distplot(energy - energy.mean(), label='energy')
sns.distplot(energy_diff, label='energy diff')
plt.legend()
```
If the overall distribution of energy levels has longer tails, the efficiency of the sampler will deteriorate quickly.
## Goodness of Fit
Checking for model convergence is only the first step in the evaluation
of MCMC model outputs. It is possible for an entirely unsuitable model
to converge, so additional steps are needed to ensure that the estimated
model adequately fits the data. One intuitive way of evaluating model
fit is to compare model predictions with the observations used to fit
the model. In other words, the fitted model can be used to simulate
data, and the distribution of the simulated data should resemble the
distribution of the actual data.
Fortunately, simulating data from the model is a natural component of
the Bayesian modelling framework. Recall, from the discussion on
imputation of missing data, the posterior predictive distribution:
$$p(\tilde{y}|y) = \int p(\tilde{y}|\theta) f(\theta|y) d\theta$$
Here, $\tilde{y}$ represents some hypothetical new data that would be
expected, taking into account the posterior uncertainty in the model
parameters.
Sampling from the posterior predictive distribution is easy
in PyMC. The `sample_ppc` function draws posterior predictive samples from all of the data likelihoods. Consider the `gelman_bioassay` example,
where deaths are modeled as a binomial random variable for which
the probability of death is a logit-linear function of the dose of a
particular drug.
The posterior predictive distribution of deaths uses the same functional
form as the data likelihood, in this case a binomial stochastic. Here is
the corresponding sample from the posterior predictive distribution (we typically need very few samples relative to the MCMC sample):
```
from pymc3 import sample_ppc
with bioassay_model:
deaths_sim = sample_ppc(bioassay_trace, samples=500)
```
The degree to which simulated data correspond to observations can be evaluated in at least two ways. First, these quantities can simply be compared visually. This allows for a qualitative comparison of model-based replicates and observations. If there is poor fit, the true value of the data may appear in the tails of the histogram of replicated data, while a good fit will tend to show the true data in high-probability regions of the posterior predictive distribution. The Matplot package in PyMC provides an easy way of producing such plots, via the `gof_plot` function.
```
fig, axes = plt.subplots(1, 4, figsize=(14, 4))
for obs, sim, ax in zip(deaths, deaths_sim['obs_deaths'].T, axes):
ax.hist(sim, bins=range(7))
ax.plot(obs+0.5, 1, 'ro')
```
## Exercise: Meta-analysis of beta blocker effectiveness
Carlin (1992) considers a Bayesian approach to meta-analysis, and includes the following examples of 22 trials of beta-blockers to prevent mortality after myocardial infarction.
In a random effects meta-analysis we assume the true effect (on a log-odds scale) $d_i$ in a trial $i$
is drawn from some population distribution. Let $r^C_i$ denote number of events in the control group in trial $i$,
and $r^T_i$ denote events under active treatment in trial $i$. Our model is:
$$\begin{aligned}
r^C_i &\sim \text{Binomial}\left(p^C_i, n^C_i\right) \\
r^T_i &\sim \text{Binomial}\left(p^T_i, n^T_i\right) \\
\text{logit}\left(p^C_i\right) &= \mu_i \\
\text{logit}\left(p^T_i\right) &= \mu_i + \delta_i \\
\delta_i &\sim \text{Normal}(d, t) \\
\mu_i &\sim \text{Normal}(m, s)
\end{aligned}$$
We want to make inferences about the population effect $d$, and the predictive distribution for the effect $\delta_{\text{new}}$ in a new trial. Build a model to estimate these quantities in PyMC, and (1) use convergence diagnostics to check for convergence and (2) use posterior predictive checks to assess goodness-of-fit.
Here are the data:
```
r_t_obs = [3, 7, 5, 102, 28, 4, 98, 60, 25, 138, 64, 45, 9, 57,
25, 33, 28, 8, 6, 32, 27, 22]
n_t_obs = [38, 114, 69, 1533, 355, 59, 945, 632, 278,1916, 873, 263,
291, 858, 154, 207, 251, 151, 174, 209, 391, 680]
r_c_obs = [3, 14, 11, 127, 27, 6, 152, 48, 37, 188, 52, 47, 16, 45,
31, 38, 12, 6, 3, 40, 43, 39]
n_c_obs = [39, 116, 93, 1520, 365, 52, 939, 471, 282, 1921, 583, 266,
293, 883, 147, 213, 122, 154, 134, 218, 364, 674]
N = len(n_c_obs)
# Write your answer here
```
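One possible starting point for the model is sketched below; it follows the specification above, with assumed weakly-informative hyperpriors (`HalfCauchy` scales, wide normals), and it is not the only reasonable parameterization:
```
from pymc3 import HalfCauchy

with Model() as meta_model:
    # Hyperpriors for the population effect and the trial baselines (assumed choices)
    d = Normal('d', 0, sd=10)
    tau = HalfCauchy('tau', 5)
    m = Normal('m', 0, sd=10)
    s = HalfCauchy('s', 5)

    # Trial-level baselines and treatment effects
    mu = Normal('mu', m, sd=s, shape=N)
    delta = Normal('delta', d, sd=tau, shape=N)

    # Data likelihoods for the control and treatment arms
    r_c = Binomial('r_c', n=n_c_obs, p=invlogit(mu), observed=r_c_obs)
    r_t = Binomial('r_t', n=n_t_obs, p=invlogit(mu + delta), observed=r_t_obs)

    # Predicted effect in a new trial
    delta_new = Normal('delta_new', d, sd=tau)

    meta_trace = sample(1000, tune=1000, chains=2)
```
From here, `gelman_rubin`, `traceplot`, and `sample_ppc` can be applied to `meta_trace` as in the sections above.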
## References
Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical Science. A Review Journal of the Institute of Mathematical Statistics, 457–472.
Geweke, J., Berger, J. O., & Dawid, A. P. (1992). Evaluating the accuracy of sampling-based approaches to the calculation of posterior moments. In Bayesian Statistics 4.
Brooks, S. P., Catchpole, E. A., & Morgan, B. J. T. (2000). Bayesian Animal Survival Estimation. Statistical Science. A Review Journal of the Institute of Mathematical Statistics, 15(4), 357–376. doi:10.1214/ss/1177010123
Gelman, A., Meng, X., & Stern, H. (1996). Posterior predicitive assessment of model fitness via realized discrepencies with discussion. Statistica Sinica, 6, 733–807.
Betancourt, M. (2017). A Conceptual Introduction to Hamiltonian Monte Carlo. arXiv.org.
|
github_jupyter
|
# Chapter 16. Logistic Regression Assignment
```
import matplotlib.pyplot as plt
import os
from typing import List, Tuple
import csv
from scratch.linear_algebra import Vector, get_column
```
## 1. Dataset
### 1.1 Downloading the dataset
```
import requests
data = requests.get("https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data")
dataset_path = os.path.join('data', 'wdbc.data')
with open(dataset_path, "w") as f:
f.write(data.text)
```
### 1.2 Parsing the data
```
def parse_cancer_row(row: List[str]) -> Tuple[Vector, int]:
measurements = [float(value) for value in row[2:]]
label = row[1]
label = 1 if label == 'M' else 0
return measurements, label
```
### 1.3 Reading the data
Wisconsin Breast Cancer Diagnostic dataset
https://www.kaggle.com/uciml/breast-cancer-wisconsin-data
```
X_cancer : List[Vector] = []
y_cancer : List[int] = []
with open(dataset_path) as f:
reader = csv.reader(f)
for row in reader:
x, y = parse_cancer_row(row)
X_cancer.append(x)
y_cancer.append(y)
print(X_cancer[0])
print(y_cancer[0])
```
### 1.4 Column names
```
columns = [
"radius_mean", "texture_mean", "perimeter_mean", "area_mean", "smoothness_mean",
"compactness_mean", "concavity_mean", "points_mean", "symmetry_mean", "dimension_mean",
"radius_se", "texture_se", "perimeter_se", "area_se", "smoothness_se",
"compactness_se", "concavity_se", "points_se", "symmetry_se", "dimension_se",
"radius_worst", "texture_worst", "perimeter_worst", "area_worst", "smoothness_worst",
"compactness_worst", "concavity_worst", "points_worst", "symmetry_worst", "dimension_worst",
]
```
## 2. Data exploration
### 2.1 Checking the class balance
```
from collections import defaultdict
label_type = defaultdict(int)
for y in y_cancer:
label = 'M' if y == 1 else 'B'
label_type[label] += 1
plt.figure(figsize=(8,4))
plt.subplot(1, 2, 1)
plt.bar(label_type.keys(),
label_type.values(),
0.5,
facecolor="#2E495E",
edgecolor=(0, 0, 0)) # Black edges for each bar
plt.xlabel("Diagnosis")
plt.ylabel("# of diagnosis")
plt.title("Cancer diagnosis")
plt.subplot(1, 2, 2)
pies = plt.pie(label_type.values(),
labels=label_type.keys(),
startangle=90)
plt.legend()
plt.show()
```
### 2.2 Histograms for each feature
```
def histogram(ax, col : int):
n, bins, patches = ax.hist(get_column(X_cancer, col),
8,
facecolor="#2E495E",
edgecolor=(0, 0, 0))
ax.set_title(columns[col], fontsize=8)
from matplotlib import pyplot as plt
num_rows = 6
num_cols = 5
fig, ax = plt.subplots(num_rows, num_cols, figsize=(num_cols*4, num_rows*4))
for row in range(num_rows):
for col in range(num_cols):
histogram(ax[row][col], num_cols * row + col)
plt.show()
```
### 2.3 Scatter plots for feature pairs
```
from typing import Dict
points_by_diagnosis: Dict[str, List[Vector]] = defaultdict(list)
for i, x in enumerate(X_cancer):
y = y_cancer[i]
label = 'M' if y == 1 else 'B'
points_by_diagnosis[label].append(x)
start = 0
end = start + 10
pairs = [(i, j) for i in range(start, end) for j in range(i+1, end) if i < j]
print(pairs)
marks = ['+', '.']
from matplotlib import pyplot as plt
num_rows = 9
num_cols = 5
fig, ax = plt.subplots(num_rows, num_cols, figsize=(num_cols*3, num_rows*3))
for row in range(num_rows):
for col in range(num_cols):
i, j = pairs[num_cols * row + col]
ax[row][col].set_title(f"{columns[i]} vs {columns[j]}", fontsize=8)
ax[row][col].set_xticks([])
ax[row][col].set_yticks([])
for mark, (diagnosis, points) in zip(marks, points_by_diagnosis.items()):
xs = [point[i] for point in points]
ys = [point[j] for point in points]
ax[row][col].scatter(xs, ys, marker=mark, label=diagnosis)
ax[-1][-1].legend(loc='lower right', prop={'size': 6})
plt.show()
```
## 3. Data preprocessing
### 3.1 Splitting the dataset
#### Add a constant input of 1 to each row for the intercept term
```
X_cancer = [[1.0] + row for row in X_cancer]
import random
from scratch.machine_learning import train_test_split
random.seed(12)
X_train, X_test, y_train, y_test = train_test_split(X_cancer, y_cancer, 0.25)
print('train dataset :', len(X_train))
print('test dataset :', len(X_test))
```
### 3.2 Data standardization (Q1)
Write a normalization() function that standardizes the test data using the mean and standard deviation of the training data.
```
from scratch.working_with_data import scale, rescale
def normalization(data: List[Vector],
means : Vector = None,
stdevs : Vector = None) -> List[Vector]:
# your code
dim = len(data[0])
if (means == None or stdevs == None):
means, stdevs = scale(data)
# Make a copy of each vector
rescaled = [v[:] for v in data]
for v in rescaled:
for i in range(dim):
if stdevs[i] > 0:
v[i] = (v[i] - means[i]) / stdevs[i]
return rescaled, means, stdevs
X_train_normed, X_train_means, X_train_stdevs = normalization(X_train)
X_test_normed, _, _ = normalization(X_test, X_train_means, X_train_stdevs)
```
## 4. Logistic regression
### 4.1 Logistic function (Q2)
Implement the logistic function and its derivative.
```
import math
# your code
# logistic function
def logistic(x: float) -> float:
return 1.0/(1 + math.exp(-x))
# derivative of the logistic function
def logistic_prime(x: float) -> float:
y = logistic(x)
return y * (1 - y)
```
### 4.2 Loss function (Q3)
Implement the loss function defined as the negative log-likelihood (NLL) of the Bernoulli distribution.
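For reference, the loss to implement is

$$\text{NLL}(\beta) = -\sum_{i}\Big[\, y_i \log \sigma(x_i \cdot \beta) + (1 - y_i)\log\big(1 - \sigma(x_i \cdot \beta)\big) \,\Big]$$

where $\sigma$ is the logistic function from Q2.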
```
from scratch.linear_algebra import Vector, dot
from typing import List
# your code
# negative log-likelihood for a single example
def _negative_log_likelihood(x: Vector, y: float, beta: Vector) -> float:
if y == 1:
return -math.log(logistic(dot(x, beta)))
else:
return -math.log(1 - logistic(dot(x, beta)))
# sum of the NLL over the whole dataset
def negative_log_likelihood(xs: List[Vector],
ys: List[float],
beta: Vector) -> float:
return sum(_negative_log_likelihood(x, y, beta)
for x, y in zip(xs, ys))
```
### 4.3 Gradient of the loss function (Q4)
Implement the gradient of the NLL with respect to beta.
```
from scratch.linear_algebra import vector_sum
# your code
def _negative_log_partial_j(x: Vector, y: float, beta: Vector, j: int) -> float:
return -(y - logistic(dot(x, beta))) * x[j]
def _negative_log_gradient(x: Vector, y: float, beta: Vector) -> Vector:
return [_negative_log_partial_j(x, y, beta, j) for j in range(len(beta))]
def negative_log_gradient(xs: List[Vector],
ys: List[float],
beta: Vector) -> Vector:
return vector_sum([_negative_log_gradient(x, y, beta)
for x, y in zip(xs, ys)])
```
### 4.4 Model training (Q5)
Implement logistic regression training using gradient descent.
```
import random
import tqdm
import IPython.display as display
from scratch.linear_algebra import vector_mean
from scratch.gradient_descent import gradient_step
def minibatches(xs: List[Vector], ys: List[float], batch_size=20):
    """Yield successive (batch_xs, batch_ys) slices of the data."""
    for start in range(0, len(xs), batch_size):
        yield xs[start:start+batch_size], ys[start:start+batch_size]
def logistic_regression(xs: List[Vector],
ys: List[float],
learning_rate: float = 0.001,
num_steps: int = 1000,
batch_size: int = 1) -> Vector:
# your code
    # initialize beta randomly, one weight per input dimension (including the constant term)
    beta = [random.random() for _ in range(len(xs[0]))]
history = []
    with tqdm.trange(5000) as t:
        for epoch in t:
            for batch_xs, batch_ys in minibatches(xs, ys, batch_size=20):
                gradient = negative_log_gradient(batch_xs, batch_ys, beta)
                beta = gradient_step(beta, gradient, -learning_rate)
loss = negative_log_likelihood(xs, ys, beta)
t.set_description(f"loss: {loss:.3f} beta: {beta}")
history.append(loss)
if epoch and epoch % 100 == 0:
display.clear_output(wait=True)
plt.plot(history)
plt.show()
return beta
beta = logistic_regression(X_train_normed, y_train)
```
#### Inspecting 𝜷
```
plt.plot(beta)
plt.show()
```
### 4.5 Model testing (Q6)
Make predictions on the test data and compute TP, FP, FN, and TN.
```
# your code
true_positives = false_positives = true_negatives = false_negatives = 0
for x_i, y_i in zip(X_test_normed, y_test):
prediction = logistic(dot(beta, x_i))
    if y_i == 1 and prediction >= 0.5:  # TP: malignant and we predict malignant
        true_positives += 1
    elif y_i == 1:                      # FN: malignant and we predict benign
        false_negatives += 1
    elif prediction >= 0.5:             # FP: benign and we predict malignant
        false_positives += 1
    else:                               # TN: benign and we predict benign
        true_negatives += 1
TP = true_positives
FN = false_negatives
FP = false_positives
TN = true_negatives
confusion_matrix = [[TP, FP], [FN, TN]]
```
### 4.6 Model performance
```
from scratch.machine_learning import accuracy, precision, recall, f1_score
print(confusion_matrix)
print("accuracy :", accuracy(TP, FP, FN, TN))
print("precision :", precision(TP, FP, FN, TN))
print("recall :", recall(TP, FP, FN, TN))
print("f1_score :", f1_score(TP, FP, FN, TN))
predictions = [logistic(dot(beta, x)) for x in X_test_normed]
plt.scatter(predictions, y_test, marker='+')
plt.xlabel("predicted probability")
plt.ylabel("actual outcome")
plt.title("Logistic Regression Predicted vs. Actual")
plt.show()
```
|
github_jupyter
|
```
%pylab inline
import os
import glob
import pandas as pd
import re
from collections import OrderedDict
import seaborn as sns
sns.set_context('paper', font_scale=2)
sns.set_style('white')
def clean_tx(tx):
return re.sub(r'\.[0-9]+', '', tx)
root_dir = '/staging/as/skchoudh/re-ribo-analysis/hg38/SRP010679/ribocop_results_Feb2019_longest/'
def get_uorf(df):
return df.loc[df.ORF_type=='uORF',]
def get_translating(df):
return df.loc[df.status=='translating']
def get_tx_from_orfid(orfid):
splitted = orfid.split('_')
if len(splitted) == 4:
txid = splitted[0]
elif len(splitted)==5:
txid = splitted[0] + '_' + splitted[1]
else:
raise RuntimeError('Found:{}'.format(splitted))
return txid
annotation = pd.read_table('/staging/as/skchoudh/ribocop_hg38_feb2019_annotation_candidate_orfs.tsv').set_index('ORF_ID')
annotation
orf_df = OrderedDict()
for f in glob.glob('{}/*_translating_ORFs.tsv'.format(root_dir)):
filename = os.path.basename(f).replace('_translating_ORFs.tsv', '')
orf_df[filename] = pd.read_table(f)
orf_df_annotated = OrderedDict()
for key, df in orf_df.items():
df = df[df.ORF_type=='annotated'].sort_values(by=['transcript_id', 'ORF_ID', 'gene_id'])
orf_df_annotated[key] = df
print(df.shape)
def eval_profile(profile):
profile = eval(profile)
return sum(profile)
orf_profiles_sum = OrderedDict()
for key, df in orf_df_annotated.items():
df['profile_sum'] = df.profile.apply(eval_profile)
orf_profiles_sum[key] = df[['transcript_id', 'profile_sum']]
for key,df in orf_profiles_sum.items():
df.to_csv('/staging/as/skchoudh/SRP010679_tx_counts/{}.tsv'.format(key), sep='\t', header=None, index=False)
def get_start_end_coordinates(orf_id):
#orf_id = row['ORF_ID']
splitted = orf_id.split('_')
stop = splitted[-2]
start = splitted[-3]
return pd.Series([int(start), int(stop)])
```
# Approach
We need the transcript-level counts to process in riborex. We take the simple approach of first grouping
everything at the gene level. We only care about the protein_coding genes and the annotated ones.
```
selected_df = orf_df[filename]
selected_df[['start', 'stop']] =selected_df['ORF_ID'].apply(get_start_end_coordinates)
selected_df_grouped = selected_df.groupby(['gene_id', 'transcript_id'])
for key, group_df in selected_df_grouped:
profile_series_sum = pd.Series([])
index_series_sum = pd.Series([])
for index, row in group_df.iterrows():
profile = eval(row['profile'])
profile_series = pd.Series(profile, index=range(int(row['start']), int(row['stop'])+1 ))
index_series = pd.Series([1]*(row['stop']-row['start']+1), index=range(int(row['start']), int(row['stop'])+1 ))
profile_series_sum = profile_series_sum.add(profile_series)
index_series_sum = index_series_sum.add(index_series)
break
orf_id_range_index = {}
for orf_id, row in annotation.iterrows():
intervals_string = row['coordinate']
intervals = intervals_string.split(',')
interval_range_index = []
for interval in intervals:
start, end = interval.split('-')
interval_range_index = interval_range_index + list(range(int(start), int(end)+1))
orf_id_range_index[orf_id] = interval_range_index
metadata = pd.read_table('/staging/as/skchoudh/SRP010679_tx_counts/metadata_ribo.tsv')
metadata
metadata = metadata.loc[metadata.treatment.isin(['vehicle', 'pp242']),]
metadata
vehicle1 = orf_df['SRX118286']
vehicle2 = orf_df['SRX118292']
treatment1 = orf_df['SRX118290']
treatment2 = orf_df['SRX118296']
vehicle1 = get_uorf(vehicle1)
vehicle2 = get_uorf(vehicle2)
treatment1 = get_uorf(treatment1)
treatment2 = get_uorf(treatment2)
vehicle1['vehicle1_profile_sum'] = vehicle1.profile.apply(eval_profile)
vehicle2['vehicle2_profile_sum'] = vehicle2.profile.apply(eval_profile)
treatment1['treatment1_profile_sum'] = treatment1.profile.apply(eval_profile)
treatment2['treatment2_profile_sum'] = treatment2.profile.apply(eval_profile)
vehicle1_sum_df = vehicle1[['ORF_ID', 'vehicle1_profile_sum']]
vehicle2_sum_df = vehicle2[['ORF_ID', 'vehicle2_profile_sum']]
treatment1_sum_df = treatment1[['ORF_ID', 'treatment1_profile_sum']]
treatment2_sum_df = treatment2[['ORF_ID', 'treatment2_profile_sum']]
vehicle1_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_differential_analysis/SRX118286.tsv', header=False, index=False, sep='\t')
vehicle2_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_differential_analysis/SRX118292.tsv', header=False, index=False, sep='\t')
treatment1_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_differential_analysis/SRX118290.tsv', header=False, index=False, sep='\t')
treatment2_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_differential_analysis/SRX118296.tsv', header=False, index=False, sep='\t')
vehicle1 = orf_df['SRX118286']
vehicle2 = orf_df['SRX118292']
treatment1 = orf_df['SRX118290']
treatment2 = orf_df['SRX118296']
vehicle1 = get_uorf(vehicle1)
vehicle2 = get_uorf(vehicle2)
treatment1 = get_uorf(treatment1)
treatment2 = get_uorf(treatment2)
vehicle1 = get_translating(vehicle1)
vehicle2 = get_translating(vehicle2)
treatment1 = get_translating(treatment1)
treatment2 = get_translating(treatment2)
vehicle1['vehicle1_profile_sum'] = vehicle1.profile.apply(eval_profile)
vehicle2['vehicle2_profile_sum'] = vehicle2.profile.apply(eval_profile)
treatment1['treatment1_profile_sum'] = treatment1.profile.apply(eval_profile)
treatment2['treatment2_profile_sum'] = treatment2.profile.apply(eval_profile)
vehicle1_sum_df = vehicle1[['ORF_ID', 'vehicle1_profile_sum']]
vehicle2_sum_df = vehicle2[['ORF_ID', 'vehicle2_profile_sum']]
treatment1_sum_df = treatment1[['ORF_ID', 'treatment1_profile_sum']]
treatment2_sum_df = treatment2[['ORF_ID', 'treatment2_profile_sum']]
vehicle_both = list(sorted(set(vehicle1.ORF_ID).intersection(set(vehicle2.ORF_ID))))
treatment_both = list(sorted(set(treatment1.ORF_ID).intersection(set(treatment2.ORF_ID))))
combined_list = list(sorted(set(vehicle_both).intersection(treatment_both)))
vehicle1_sum_df = vehicle1_sum_df[vehicle1_sum_df.ORF_ID.isin(combined_list)].set_index('ORF_ID')
vehicle2_sum_df = vehicle2_sum_df[vehicle2_sum_df.ORF_ID.isin(combined_list)].set_index('ORF_ID')
treatment1_sum_df = treatment1_sum_df[treatment1_sum_df.ORF_ID.isin(combined_list)].set_index('ORF_ID')
treatment2_sum_df = treatment2_sum_df[treatment2_sum_df.ORF_ID.isin(combined_list)].set_index('ORF_ID')
vehicle1_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/SRX118286.tsv', header=False, index=True, sep='\t')
vehicle2_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/SRX118292.tsv', header=False, index=True, sep='\t')
treatment1_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/SRX118290.tsv', header=False, index=True, sep='\t')
treatment2_sum_df.to_csv('/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/SRX118296.tsv', header=False, index=True, sep='\t')
vehicle_tx_list = [clean_tx(get_tx_from_orfid(x)) for x in vehicle_both]
treatment_tx_list = [clean_tx(get_tx_from_orfid(x)) for x in treatment_both]
cds_pp242_vs_vehicle_sig_down = pd.read_table('/staging/as/skchoudh/SRP010679_tx_differential_analysis/ribo_pp242_vs_vehicle.sig.down.tsv')
cds_pp242_vs_vehicle_sig_up = pd.read_table('/staging/as/skchoudh/SRP010679_tx_differential_analysis/ribo_pp242_vs_vehicle.sig.up.tsv')
cds_pp242_vs_vehicle_sig_up
cds_pp242_vs_vehicle_sig = pd.read_table('/staging/as/skchoudh/SRP010679_tx_differential_analysis/ribo_pp242_vs_vehicle.sig.tsv').reset_index().rename(columns={'index': 'txid', 'log2FoldChange': 'log2FC_CDS'})
cds_pp242_vs_vehicle_sig = cds_pp242_vs_vehicle_sig[['txid', 'log2FC_CDS']].sort_values(by=['txid']).set_index('txid')
cds_pp242_vs_vehicle_sig
uorf_pp242_vs_vehicle = pd.read_table('/staging/as/skchoudh/SRP010679_uORF_translating_only_differential_analysis/ribo_pp242_vs_vehicle.tsv').reset_index()
uorf_pp242_vs_vehicle['txid'] = uorf_pp242_vs_vehicle['index'].apply(get_tx_from_orfid)
uorf_pp242_vs_vehicle = uorf_pp242_vs_vehicle.sort_values(by='txid')
uorf_tx_level_fc = uorf_pp242_vs_vehicle[['log2FoldChange', 'txid']].groupby('txid').max().reset_index().sort_values(by=['txid']).rename(columns={'log2FoldChange': 'log2FC_uorf'}).set_index('txid')
uorf_cds_fc_df = uorf_tx_level_fc.join(cds_pp242_vs_vehicle_sig, how='inner')
fig, ax = plt.subplots(figsize=(8,8))
ax.scatter(uorf_cds_fc_df.log2FC_CDS, uorf_cds_fc_df.log2FC_uorf)
ax.set_xlabel('log2FC_CDS')
ax.set_ylabel('log2FC_uORF')
fig.tight_layout()
ax = sns.lmplot(x='log2FC_CDS', y= 'log2FC_uorf', data=uorf_cds_fc_df)
ax.set(xlabel='log2FC_CDS', ylabel = 'log2FC_uORF')
plt.savefig('uORF_CDS_log2FC.pdf')
```
# uORF-vs-TE Final results
```
annotation = pd.read_table('/staging/as/skchoudh/ribocop_hg38_feb2019_annotation_longest_candidate_orfs.tsv').set_index('ORF_ID')
t2g = annotation[['transcript_id', 'gene_id']].drop_duplicates().set_index('transcript_id')
diff_uorf_sig = pd.read_table('/home/cmb-panasas2/wenzhenl/github/ribocop-results/real/foldchange.txt')
diff_uorf_sig['transcript_id'] = diff_uorf_sig.ORF_ID.apply(get_tx_from_orfid)
diff_uorf_sig['gene_id'] = t2g.loc[diff_uorf_sig['transcript_id'], 'gene_id'].values
diff_uorf_sig['transcript_id'] = diff_uorf_sig['transcript_id'].apply(clean_tx)
diff_uorf_sig['gene_id'] = diff_uorf_sig['gene_id'].apply(clean_tx)
diff_uorf_sig.to_csv('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/foldchange_annotated.tsv', index=False, sep='\t')
diff_T_sig = pd.read_table('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_T_sig.txt', sep=' ').reset_index().rename(columns={'index': 'ORF_ID'})
diff_T_sig['transcript_id'] = diff_T_sig.ORF_ID.apply(get_tx_from_orfid)
diff_T_sig['gene_id'] = t2g.loc[diff_T_sig['transcript_id'], 'gene_id'].values
diff_T_sig['transcript_id'] = diff_T_sig['transcript_id'].apply(clean_tx)
diff_T_sig['gene_id'] = diff_T_sig['gene_id'].apply(clean_tx)
diff_T_sig.to_csv('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_T_sig_annotated.tsv', index=False, sep='\t')
diff_T_sig = pd.read_table('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_T_sig.txt', sep=' ').reset_index().rename(columns={'index': 'ORF_ID'})
diff_T_sig['transcript_id'] = diff_T_sig.ORF_ID.apply(get_tx_from_orfid)
diff_T_sig['gene_id'] = t2g.loc[diff_T_sig['transcript_id'], 'gene_id'].values
diff_T_sig['transcript_id'] = diff_T_sig['transcript_id'].apply(clean_tx)
diff_T_sig['gene_id'] = diff_T_sig['gene_id'].apply(clean_tx)
diff_T_sig.to_csv('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_T_sig_annotated.tsv', index=False, sep='\t')
diff_U_sig = pd.read_table('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_U_sig.txt', sep=' ').reset_index().rename(columns={'index': 'ORF_ID'})
diff_U_sig['transcript_id'] = diff_U_sig.ORF_ID.apply(get_tx_from_orfid)
diff_U_sig['gene_id'] = t2g.loc[diff_U_sig['transcript_id'], 'gene_id'].values
diff_U_sig['transcript_id'] = diff_U_sig['transcript_id'].apply(clean_tx)
diff_U_sig['gene_id'] = diff_U_sig['gene_id'].apply(clean_tx)
diff_U_sig.to_csv('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_U_sig_annotated.tsv', index=False, sep='\t')
with open('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_U_sig_genelist.txt', 'w') as fh:
for gene in diff_U_sig.gene_id.unique():
fh.write('{}\n'.format(gene))
with open('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/diff_T_sig_genelist.txt', 'w') as fh:
for gene in diff_T_sig.gene_id.unique():
fh.write('{}\n'.format(gene))
with open('/home/cmb-panasas2/skchoudh/github_projects/ribocop-results/real/foldchange_genelist.txt', 'w') as fh:
for gene in diff_uorf_sig.gene_id.unique():
fh.write('{}\n'.format(gene))
```
|
github_jupyter
|
# Nearest Centroid Classification with MInMaxScaler & PowerTransformer
This code template is for a classification task using a simple NearestCentroid classifier, with MinMaxScaler for feature rescaling and PowerTransformer for feature transformation, combined in a pipeline.
### Required Packages
```
!pip install imblearn
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as se
from imblearn.over_sampling import RandomOverSampler
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import LabelEncoder,MinMaxScaler, PowerTransformer
from sklearn.model_selection import train_test_split
from sklearn.neighbors import NearestCentroid
from sklearn.metrics import classification_report,plot_confusion_matrix
warnings.filterwarnings('ignore')
```
### Initialization
Filepath of CSV file
```
#filepath
file_path= ""
```
List of features required for model training.
```
#x_values
features = []
```
Target feature for prediction.
```
#y_value
target=''
```
### Data Fetching
Pandas is an open-source, BSD-licensed library providing high-performance, easy-to-use data manipulation and data analysis tools.
We will use the pandas library to read the CSV file from its storage path, and the head function to display the first few rows.
```
df=pd.read_csv(file_path)
df.head()
```
### Feature Selections
Feature selection is the process of reducing the number of input variables when developing a predictive model, both to reduce the computational cost of modelling and, in some cases, to improve the performance of the model.
We will assign all the required input features to X and target/outcome to Y.
```
X = df[features]
Y = df[target]
```
### Data Preprocessing
Since most of the machine learning models in the sklearn library do not handle string categorical data or null values, we have to explicitly remove or replace them. The snippet below defines functions that remove null values, if any exist, and encode string classes in the dataset as integer classes.
```
def NullClearner(df):
if(isinstance(df, pd.Series) and (df.dtype in ["float64","int64"])):
df.fillna(df.mean(),inplace=True)
return df
elif(isinstance(df, pd.Series)):
df.fillna(df.mode()[0],inplace=True)
return df
else:return df
def EncodeX(df):
return pd.get_dummies(df)
def EncodeY(df):
if len(df.unique())<=2:
return df
else:
un_EncodedT=np.sort(pd.unique(df), axis=-1, kind='mergesort')
df=LabelEncoder().fit_transform(df)
EncodedT=[xi for xi in range(len(un_EncodedT))]
print("Encoded Target: {} to {}".format(un_EncodedT,EncodedT))
return df
x=X.columns.to_list()
for i in x:
X[i]=NullClearner(X[i])
X=EncodeX(X)
Y=EncodeY(NullClearner(Y))
X.head()
```
#### Correlation Map
In order to check the correlation between the features, we will plot a correlation matrix. It is effective in summarizing a large amount of data where the goal is to see patterns.
```
f,ax = plt.subplots(figsize=(18, 18))
matrix = np.triu(X.corr())
se.heatmap(X.corr(), annot=True, linewidths=.5, fmt= '.1f',ax=ax, mask=matrix)
plt.show()
```
#### Distribution Of Target Variable
```
plt.figure(figsize = (10,6))
se.countplot(Y)
```
### Data Splitting
The train-test split is a procedure for evaluating the performance of an algorithm. The procedure involves taking a dataset and dividing it into two subsets. The first subset is utilized to fit/train the model. The second subset is used for prediction. The main motive is to estimate the performance of the model on new data.
```
x_train,x_test,y_train,y_test=train_test_split(X,Y,test_size=0.2,random_state=123)
```
#### Handling Target Imbalance
The challenge of working with imbalanced datasets is that most machine learning techniques will ignore, and in turn have poor performance on, the minority class, although typically it is performance on the minority class that is most important.
One approach to addressing imbalanced datasets is to oversample the minority class. The simplest approach involves duplicating examples in the minority class. We will perform oversampling using the imblearn library.
```
x_train,y_train = RandomOverSampler(random_state=123).fit_resample(x_train, y_train)
```
### Data Scaling
Used sklearn.preprocessing.MinMaxScaler
This estimator scales and translates each feature individually such that it is in the given range on the training set, e.g. between zero and one.
Read more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.MinMaxScaler.html)
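As a quick illustration of the rescaling on toy values (the numbers below are arbitrary):
```
import numpy as np
from sklearn.preprocessing import MinMaxScaler

toy = np.array([[1.0], [5.0], [10.0]])
# Each feature is mapped to (x - min) / (max - min), i.e. into [0, 1]
print(MinMaxScaler().fit_transform(toy))
```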
### Feature Transformation
Used sklearn.preprocessing.PowerTransformer
Apply a power transform featurewise to make data more Gaussian-like.
Power transforms are a family of parametric, monotonic transformations that are applied to make data more Gaussian-like. This is useful for modeling issues related to heteroscedasticity (non-constant variance), or other situations where normality is desired.
Read more at [scikit-learn.org](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.PowerTransformer.html)
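A quick illustration on a skewed toy column (the default Yeo-Johnson method also standardizes the output; the values below are arbitrary):
```
import numpy as np
from sklearn.preprocessing import PowerTransformer

skewed = np.array([[1.0], [2.0], [3.0], [4.0], [100.0]])
# The transform pulls in the long right tail, making the column more Gaussian-like
print(PowerTransformer().fit_transform(skewed))
```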
### Model
The NearestCentroid classifier is a simple algorithm that represents each class by the centroid of its members. In effect, this makes it similar to the label updating phase of the KMeans algorithm. It also has no parameters to choose, making it a good baseline classifier. It does, however, suffer on non-convex classes, as well as when classes have drastically different variances, as equal variance in all dimensions is assumed.
#### Tuning Parameter
> **metric** : The metric to use when calculating distance between instances in a feature array. If metric is a string or callable, it must be one of the options allowed by metrics.pairwise.pairwise_distances for its metric parameter. The centroids for the samples corresponding to each class is the point from which the sum of the distances of all samples that belong to that particular class are minimized. If the “manhattan” metric is provided, this centroid is the median and for all other metrics, the centroid is now set to be the mean.
> **shrink_threshold** :Threshold for shrinking centroids to remove features.
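For intuition, a minimal sketch of the idea behind the classifier on toy data (Euclidean metric assumed; this illustrates the concept rather than scikit-learn's internal implementation):
```
import numpy as np

X_toy = np.array([[0.0, 0.0], [1.0, 1.0], [9.0, 9.0], [10.0, 10.0]])
y_toy = np.array([0, 0, 1, 1])

# Each class is represented by the mean (centroid) of its members
centroids = {c: X_toy[y_toy == c].mean(axis=0) for c in np.unique(y_toy)}

# A new point is assigned to the class whose centroid is nearest
point = np.array([2.0, 2.0])
print(min(centroids, key=lambda c: np.linalg.norm(point - centroids[c])))  # -> 0
```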
```
# Build Model here
model = make_pipeline(MinMaxScaler(),PowerTransformer(),NearestCentroid())
model.fit(x_train, y_train)
```
#### Model Accuracy
score() method return the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy which is a harsh metric since you require for each sample that each label set be correctly predicted.
```
print("Accuracy score {:.2f} %\n".format(model.score(x_test,y_test)*100))
```
#### Confusion Matrix
A confusion matrix is utilized to understand the performance of the classification model or algorithm in machine learning for a given test set where results are known.
```
plot_confusion_matrix(model,x_test,y_test,cmap=plt.cm.Blues)
```
#### Classification Report
A classification report is used to measure the quality of predictions from a classification algorithm: how many predictions are true and how many are false.
* **where**:
- Precision:- Accuracy of positive predictions.
- Recall:- Fraction of positives that were correctly identified.
    - f1-score:- Harmonic mean of precision and recall.
- support:- Support is the number of actual occurrences of the class in the specified dataset.
```
print(classification_report(y_test,model.predict(x_test)))
```
#### Creator: Snehaan Bhawal , Github: [Profile](https://github.com/Sbhawal)
|
github_jupyter
|
```
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms, models
from torch.autograd import Variable
data_dir = 'Cat_Dog_data'
train_transforms = transforms.Compose([transforms.RandomRotation(30),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
test_transforms = transforms.Compose([transforms.Resize(255),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225])])
train_data = datasets.ImageFolder(data_dir + '/train', transform=train_transforms)
test_data = datasets.ImageFolder(data_dir + '/test', transform=test_transforms)
test_data
train_data
train_loader = torch.utils.data.DataLoader(train_data,batch_size=128,shuffle=True)
test_loader = torch.utils.data.DataLoader(test_data,batch_size=128)
model = models.densenet121(pretrained=True)
print(model)
for param in model.parameters():
param.requires_grad = False
from collections import OrderedDict
classifier = nn.Sequential(OrderedDict([('fc1',nn.Linear(1024,500)),
('relu',nn.ReLU()),
('fc2',nn.Linear(500,2)),
('output',nn.LogSoftmax(dim=1))]))
model.classifier = classifier
torch.cuda.is_available()
import time
#for cuda in [True, False]:
criterion = nn.NLLLoss()
optimizer = optim.Adam(model.classifier.parameters(),lr=0.003)
# if cuda:
# model.cuda()
# else:
# model.cpu()
for ii, (inputs, labels) in enumerate(train_loader):
inputs, labels = Variable(inputs), Variable(labels)
# if cuda:
# inputs, labels = inputs.cuda(), labels.cuda()
# else:
# inputs, labels = inputs.cpu(), labels.cpu()
start = time.time()
outputs = model.forward(inputs)
loss = criterion(outputs,labels)
loss.backward()
optimizer.step()
if ii==1:
break
print(f"Cuda = {cuda}; Time per batch: {(time.time()-start)/3:.3f} seconds")
```
### Full Model
```
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = models.densenet121(pretrained=True)
# Freeze parameters so we don't backprop through them
for param in model.parameters():
param.requires_grad = False
model.classifier = nn.Sequential(nn.Linear(1024, 256),
nn.ReLU(),
nn.Dropout(0.2),
nn.Linear(256, 2),
nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
# Only train the classifier parameters, feature parameters are frozen
optimizer = optim.Adam(model.classifier.parameters(), lr=0.003)
model.to(device);
epochs = 1
steps = 0
running_loss = 0
print_every = 5
for epoch in range(epochs):
    for inputs, labels in train_loader:
steps += 1
# Move input and label tensors to the default device
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
logps = model.forward(inputs)
loss = criterion(logps, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if steps % print_every == 0:
test_loss = 0
accuracy = 0
model.eval()
with torch.no_grad():
                for inputs, labels in test_loader:
inputs, labels = inputs.to(device), labels.to(device)
logps = model.forward(inputs)
batch_loss = criterion(logps, labels)
test_loss += batch_loss.item()
# Calculate accuracy
ps = torch.exp(logps)
top_p, top_class = ps.topk(1, dim=1)
equals = top_class == labels.view(*top_class.shape)
accuracy += torch.mean(equals.type(torch.FloatTensor)).item()
print(f"Epoch {epoch+1}/{epochs}.. "
f"Train loss: {running_loss/print_every:.3f}.. "
f"Test loss: {test_loss/len(testloader):.3f}.. "
f"Test accuracy: {accuracy/len(testloader):.3f}")
running_loss = 0
model.train()
```
|
github_jupyter
|
# IEEE MEGA PROJECT
**Team Name: BetaTech**
**Team Leader: Mollika Garg**
**Email Id: [email protected]**
**Team Member: Shreya Sharma**
**Email Id: [email protected]**
**Team Member: Koushiki Chakrabarti**
**Email Id: [email protected]**
### PROJECT DETAILS
**Domain: Machine Learning**
**Project Name: Tackling Dengue Cases**
### PROJECT DESCRIPTION
Predict dengue cases from climate data and identify potential dengue hotspots by detecting stagnant-water areas in satellite imagery. ML algorithms are used to predict the number of dengue cases from climate factors, and thresholding techniques applied to satellite data are used to flag stagnant-water hotspots.
```
##IMPORTS
# used for manipulating directory paths
import os
# used to analyze data
import pandas as pd
# scientific and vector computation for python
import numpy as np
# for image visualisation
from matplotlib import pyplot as plt
# encode target labels
from sklearn.preprocessing import LabelEncoder
# performs the task of Standardization
from sklearn.preprocessing import StandardScaler
# to find the error
from sklearn.metrics import mean_absolute_error
# used for training SVM
from sklearn.svm import SVR
# used for training KNN
from sklearn.neighbors import KNeighborsRegressor
# used for training Random Forest
from sklearn.ensemble import RandomForestRegressor
```
### READING DATA
```
## Read Data
malaria_features = pd.read_excel("C:\\Users\\molli\\OneDrive\\Desktop\\Data Set\\Malaria_Data.xlsx")
malaria_labels= malaria_features["No. of cases"]
malaria_features=malaria_features.drop(labels="No. of cases",axis=1)
## Displaying head of the data
malaria_features.head()
## Encoding labels
lmap={"Jan":0,"Feb":1,"Mar":2,"Apr":3,"May":4,"Jun":5, "Jul":6, "Aug":7, "Sep":8, "Oct":9, "Nov":10, "Dec":11}
malaria_features["Month "]=malaria_features["Month "].map(lmap)
## Dropping the feature 'City'
malaria_features=malaria_features.drop("City", axis=1)
## Displaying head of the data
malaria_features.head()
## storing feature values in X and labels in Y
X=malaria_features.values
Y=malaria_labels.values
## splitting the data into training and testing sets
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X,Y,test_size = 0.2,random_state = 0)
print(y_test)
## scaling the data
scaler = StandardScaler()
x_train = scaler.fit_transform(x_train)
x_test = scaler.transform(x_test)
```
### Training the Data and Choosing the Best Hyperparameters
#### 1) K Nearest Neighbours
```
knn = KNeighborsRegressor(n_neighbors=4)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
print(mean_absolute_error(y_test, y_pred))
```
#### 2) Random Forest
```
rf = RandomForestRegressor(n_estimators=200)
rf.fit(x_train, y_train)
y_pred1 = rf.predict(x_test)
print(mean_absolute_error(y_test, y_pred1))
```
#### 3) Support Vector Machine
```
clf = SVR(C=3000, tol=1e-3)
clf.fit(x_train, y_train)
y_pred2 = clf.predict(x_test)
print(mean_absolute_error(y_test, y_pred2))
```
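The hyperparameter values above (`n_neighbors=4`, `n_estimators=200`, `C=3000`) are fixed by hand in this notebook; a sketch of how such values could be selected more systematically with a cross-validated grid search (the parameter grid below is an illustrative assumption):
```
from sklearn.model_selection import GridSearchCV

param_grid = {'C': [100, 1000, 3000, 5000], 'epsilon': [0.1, 1.0, 10.0]}
search = GridSearchCV(SVR(), param_grid, scoring='neg_mean_absolute_error', cv=5)
search.fit(x_train, y_train)
print(search.best_params_, -search.best_score_)
```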
### Comparing Predicted and Test data values Trained on SVM
```
## printing predicted and test data values
print(y_pred)
print(y_test)
## In case of negative values, converting them to 0 to obtain better accuracy
for i in range(0,len(y_pred)):
if y_pred[i]<0:
y_pred[i]=0
print(mean_absolute_error(y_test, y_pred))
```
### Conclusion
We tried different types of models: KNN, Random Forest, and SVM. In the end, we found that the model trained with KNN gave the best test result, with a mean absolute error of 16.7.
|
github_jupyter
|
```
from os import listdir
from numpy import array
from keras.preprocessing.text import Tokenizer, one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Model
from keras.utils import to_categorical
from keras.layers import Embedding, TimeDistributed, RepeatVector, LSTM, concatenate , Input, Reshape, Dense, Flatten
from keras.preprocessing.image import array_to_img, img_to_array, load_img
from keras.applications.inception_resnet_v2 import InceptionResNetV2, preprocess_input
import numpy as np
# Load the images and preprocess them for inception-resnet
images = []
all_filenames = listdir('images/')
all_filenames.sort()
for filename in all_filenames:
images.append(img_to_array(load_img('images/'+filename, target_size=(299, 299))))
images = np.array(images, dtype=float)
images = preprocess_input(images)
# Run the images through inception-resnet and extract the features without the classification layer
IR2 = InceptionResNetV2(weights='imagenet', include_top=False)
features = IR2.predict(images)
# We will cap each input sequence to 100 tokens
max_caption_len = 100
# Initialize the function that will create our vocabulary
tokenizer = Tokenizer(filters='', split=" ", lower=False)
# Read a document and return a string
def load_doc(filename):
file = open(filename, 'r')
text = file.read()
file.close()
return text
# Load all the HTML files
X = []
all_filenames = listdir('html/')
all_filenames.sort()
for filename in all_filenames:
X.append(load_doc('html/'+filename))
# Create the vocabulary from the html files
tokenizer.fit_on_texts(X)
# Add +1 to leave space for empty words
vocab_size = len(tokenizer.word_index) + 1
# Translate each word in text file to the matching vocabulary index
sequences = tokenizer.texts_to_sequences(X)
# The longest HTML file
max_length = max(len(s) for s in sequences)
# Initialize our final input to the model
X, y, image_data = list(), list(), list()
for img_no, seq in enumerate(sequences):
for i in range(1, len(seq)):
# Add the entire sequence to the input and only keep the next word for the output
in_seq, out_seq = seq[:i], seq[i]
# If the sentence is shorter than max_length, fill it up with empty words
in_seq = pad_sequences([in_seq], maxlen=max_length)[0]
# Map the output to one-hot encoding
out_seq = to_categorical([out_seq], num_classes=vocab_size)[0]
# Add the image corresponding to the HTML file
image_data.append(features[img_no])
# Cut the input sentence to 100 tokens, and add it to the input data
X.append(in_seq[-100:])
y.append(out_seq)
X, y, image_data = np.array(X), np.array(y), np.array(image_data)
# Create the encoder
image_features = Input(shape=(8, 8, 1536,))
image_flat = Flatten()(image_features)
image_flat = Dense(128, activation='relu')(image_flat)
ir2_out = RepeatVector(max_caption_len)(image_flat)
language_input = Input(shape=(max_caption_len,))
language_model = Embedding(vocab_size, 200, input_length=max_caption_len)(language_input)
language_model = LSTM(256, return_sequences=True)(language_model)
language_model = LSTM(256, return_sequences=True)(language_model)
language_model = TimeDistributed(Dense(128, activation='relu'))(language_model)
# Create the decoder
decoder = concatenate([ir2_out, language_model])
decoder = LSTM(512, return_sequences=False)(decoder)
decoder_output = Dense(vocab_size, activation='softmax')(decoder)
# Compile the model
model = Model(inputs=[image_features, language_input], outputs=decoder_output)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Train the neural network
model.fit([image_data, X], y, batch_size=64, shuffle=False, epochs=2)
# map an integer to a word
def word_for_id(integer, tokenizer):
for word, index in tokenizer.word_index.items():
if index == integer:
return word
return None
# generate a description for an image
def generate_desc(model, tokenizer, photo, max_length):
# seed the generation process
in_text = 'START'
# iterate over the whole length of the sequence
for i in range(900):
# integer encode input sequence
sequence = tokenizer.texts_to_sequences([in_text])[0][-100:]
# pad input
sequence = pad_sequences([sequence], maxlen=max_length)
# predict next word
yhat = model.predict([photo,sequence], verbose=0)
# convert probability to integer
yhat = np.argmax(yhat)
# map integer to word
word = word_for_id(yhat, tokenizer)
# stop if we cannot map the word
if word is None:
break
# append as input for generating the next word
in_text += ' ' + word
# Print the prediction
print(' ' + word, end='')
# stop if we predict the end of the sequence
if word == 'END':
break
return
# Load an image, preprocess it for IR2, extract features and generate the HTML
test_image = img_to_array(load_img('images/87.jpg', target_size=(299, 299)))
test_image = np.array(test_image, dtype=float)
test_image = preprocess_input(test_image)
test_features = IR2.predict(np.array([test_image]))
generate_desc(model, tokenizer, np.array(test_features), 100)
# Load an image, preprocess it for IR2, extract features and generate the HTML
test_image = img_to_array(load_img('images/86.jpg', target_size=(299, 299)))
test_image = np.array(test_image, dtype=float)
test_image = preprocess_input(test_image)
test_features = IR2.predict(np.array([test_image]))
generate_desc(model, tokenizer, np.array(test_features), 100)
```
|
github_jupyter
|
# Modeling and Simulation in Python
Chapter 13
Copyright 2017 Allen Downey
License: [Creative Commons Attribution 4.0 International](https://creativecommons.org/licenses/by/4.0)
```
# Configure Jupyter so figures appear in the notebook
%matplotlib inline
# Configure Jupyter to display the assigned value after an assignment
%config InteractiveShell.ast_node_interactivity='last_expr_or_assign'
# import functions from the modsim.py module
from modsim import *
```
### Code from previous chapters
`make_system`, `plot_results`, and `calc_total_infected` are unchanged.
```
def make_system(beta, gamma):
"""Make a system object for the SIR model.
beta: contact rate in days
gamma: recovery rate in days
returns: System object
"""
init = State(S=89, I=1, R=0)
init /= np.sum(init)
t0 = 0
t_end = 7 * 14
return System(init=init, t0=t0, t_end=t_end,
beta=beta, gamma=gamma)
def plot_results(S, I, R):
"""Plot the results of a SIR model.
S: TimeSeries
I: TimeSeries
R: TimeSeries
"""
plot(S, '--', label='Susceptible')
plot(I, '-', label='Infected')
plot(R, ':', label='Recovered')
decorate(xlabel='Time (days)',
ylabel='Fraction of population')
def calc_total_infected(results):
"""Fraction of population infected during the simulation.
results: DataFrame with columns S, I, R
returns: fraction of population
"""
return get_first_value(results.S) - get_last_value(results.S)
```
Here's an updated version of `run_simulation` that uses `unpack`.
```
def run_simulation(system, update_func):
"""Runs a simulation of the system.
system: System object
update_func: function that updates state
returns: TimeFrame
"""
unpack(system)
frame = TimeFrame(columns=init.index)
frame.row[t0] = init
for t in linrange(t0, t_end):
frame.row[t+1] = update_func(frame.row[t], t, system)
return frame
```
**Exercise:** Write a version of `update_func` that uses `unpack`.
```
# Original
def update_func(state, t, system):
"""Update the SIR model.
state: State (s, i, r)
t: time
system: System object
returns: State (sir)
"""
s, i, r = state
infected = system.beta * i * s
recovered = system.gamma * i
s -= infected
i += infected - recovered
r += recovered
return State(S=s, I=i, R=r)
def update_func(state, t, system):
"""Update the SIR model.
state: State (s, i, r)
t: time
system: System object
returns: State (sir)
"""
unpack(system)
s, i, r = state
infected = beta * i * s
recovered = gamma * i
s -= infected
i += infected - recovered
r += recovered
return State(S=s, I=i, R=r)
```
Test the updated code with this example.
```
system = make_system(0.333, 0.25)
results = run_simulation(system, update_func)
results.head()
plot_results(results.S, results.I, results.R)
```
### Sweeping beta
Make a range of values for `beta`, with constant `gamma`.
```
beta_array = linspace(0.1, 1.1, 11)
gamma = 0.25
```
Run the simulation once for each value of `beta` and print total infections.
```
for beta in beta_array:
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
print(system.beta, calc_total_infected(results))
```
Wrap that loop in a function and return a `SweepSeries` object.
```
def sweep_beta(beta_array, gamma):
"""Sweep a range of values for beta.
beta_array: array of beta values
gamma: recovery rate
returns: SweepSeries that maps from beta to total infected
"""
sweep = SweepSeries()
for beta in beta_array:
system = make_system(beta, gamma)
results = run_simulation(system, update_func)
sweep[system.beta] = calc_total_infected(results)
return sweep
```
Sweep `beta` and plot the results.
```
infected_sweep = sweep_beta(beta_array, gamma)
label = 'gamma = ' + str(gamma)
plot(infected_sweep, label=label)
decorate(xlabel='Contacts per day (beta)',
ylabel='Fraction infected')
savefig('figs/chap06-fig01.pdf')
```
### Sweeping gamma
Using the same array of values for `beta`
```
beta_array
```
And now an array of values for `gamma`
```
gamma_array = [0.2, 0.4, 0.6, 0.8]
```
For each value of `gamma`, sweep `beta` and plot the results.
```
for gamma in gamma_array:
infected_sweep = sweep_beta(beta_array, gamma)
label = 'γ = ' + str(gamma)
plot(infected_sweep, label=label)
decorate(xlabel='Contacts per day (beta)',
ylabel='Fraction infected',
loc='upper left')
savefig('figs/chap06-fig02.pdf')
```
**Exercise:** Suppose the infectious period for the Freshman Plague is known to be 2 days on average, and suppose during one particularly bad year, 40% of the class is infected at some point.  Estimate the time between contacts.
```
beta_array = linspace(0.4, 0.5, 100)
gamma = 0.5
infected_sweep = sweep_beta(beta_array, gamma)
# Solution goes here
```
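One possible way to finish the exercise above (a sketch, not the book's official solution): pick the `beta` whose total infected fraction is closest to 40% and invert it to estimate the average time between contacts. This assumes `infected_sweep` behaves like a pandas Series indexed by `beta`.
```
# Sketch of one possible solution: find the beta giving ~40% infected,
# then the time between contacts is 1/beta (in days)
import numpy as np

betas = np.array(infected_sweep.index)
fractions = np.array(infected_sweep.values)
closest = np.argmin(np.abs(fractions - 0.4))
beta_estimate = betas[closest]
time_between_contacts = 1 / beta_estimate
print(beta_estimate, time_between_contacts)
```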
|
github_jupyter
|
# Writing Your Own Graph Algorithms
The analytical engine in GraphScope derives from [GRAPE](https://dl.acm.org/doi/10.1145/3282488), a graph processing system proposed at SIGMOD 2017. GRAPE differs from prior systems in its ability to parallelize sequential graph algorithms as a whole. In GRAPE, sequential algorithms can be easily **plugged into** the system with only minor changes and get parallelized to handle large graphs efficiently.
In this tutorial, we will show how to define and run your own algorithm in PIE and Pregel models.
Sounds like fun? Excellent, here we go!
## Writing algorithm in PIE model
GraphScope enables users to write algorithms in the [PIE](https://dl.acm.org/doi/10.1145/3282488) programming model in a pure Python mode. First of all, you should import the **graphscope** package and the **pie** decorator.
```
import graphscope
from graphscope.framework.app import AppAssets
from graphscope.analytical.udf.decorators import pie
```
We use the single source shortest path ([SSSP](https://en.wikipedia.org/wiki/Shortest_path_problem)) algorithm as an example. To implement the PIE model, you just need to **fulfill this class**
```
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
@staticmethod
def Init(frag, context):
pass
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
```
The **pie** decorator takes two parameters named `vd_type` and `md_type`, which represent the vertex data type and message type respectively.
You may specify the types for your own algorithms; the available options are `int`, `double`, and `string`.
In our **SSSP** case, we compute the shortest distance to the source for all nodes, so we use `double` for both `vd_type` and `md_type`.
`Init`, `PEval`, and `IncEval` each take **frag** and **context** as parameters. You can use these two parameters to access the fragment data and intermediate results. For detailed usage, please refer to the [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html).
### Fulfill Init Function
```
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
@staticmethod
def Init(frag, context):
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
nodes = frag.nodes(v_label_id)
context.init_value(
nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
)
context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
@staticmethod
def PEval(frag, context):
pass
@staticmethod
def IncEval(frag, context):
pass
```
The `Init` function is responsible for 1) setting the initial value for each node; 2) defining the message-passing strategy; and 3) specifying the aggregator for handling received messages in each round.
Note that the algorithm you define will run on a property graph, so we first get the number of vertex labels with `v_label_num = frag.vertex_label_num()`. We can then traverse all nodes with the same label
and set their initial values via `nodes = frag.nodes(v_label_id)` and `context.init_value(nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate)`.
Since we are computing the shortest path between the source node and the other nodes, we use `PIEAggregateType.kMinAggregate` as the aggregator for message aggregation, which means it will
perform a `min` operation over all received messages. Other available aggregators are `kMaxAggregate`, `kSumAggregate`, `kProductAggregate`, and `kOverwriteAggregate`.
At the end of the `Init` function, we register the sync buffer for each node with `MessageStrategy.kSyncOnOuterVertex`, which tells the engine how to pass messages.
### Fulfill PEval Function
```
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
@staticmethod
def Init(frag, context):
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
nodes = frag.nodes(v_label_id)
context.init_value(
nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
)
context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
@staticmethod
def PEval(frag, context):
src = int(context.get_config(b"src"))
graphscope.declare(graphscope.Vertex, source)
native_source = False
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
if frag.get_inner_node(v_label_id, src, source):
native_source = True
break
if native_source:
context.set_node_value(source, 0)
else:
return
e_label_num = frag.edge_label_num()
for e_label_id in range(e_label_num):
edges = frag.get_outgoing_edges(source, e_label_id)
for e in edges:
dst = e.neighbor()
distv = e.get_int(2)
if context.get_node_value(dst) > distv:
context.set_node_value(dst, distv)
@staticmethod
def IncEval(frag, context):
pass
```
In `PEval` of **SSSP**, the queried source node is obtained with `context.get_config(b"src")`.
`PEval` checks whether each fragment contains the source node via `frag.get_inner_node(v_label_id, src, source)`. Note that the `get_inner_node` method needs a `source` parameter of type `Vertex`, which you can declare with `graphscope.declare(graphscope.Vertex, source)`.
If a fragment contains the source node, it traverses the outgoing edges of the source with `frag.get_outgoing_edges(source, e_label_id)`. For each neighbor, it computes the distance from the source and updates the value if it is less than the initial value.
### Fulfill IncEval Function
```
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
@staticmethod
def Init(frag, context):
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
nodes = frag.nodes(v_label_id)
context.init_value(
nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
)
context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
@staticmethod
def PEval(frag, context):
src = int(context.get_config(b"src"))
graphscope.declare(graphscope.Vertex, source)
native_source = False
v_label_num = frag.vertex_label_num()
for v_label_id in range(v_label_num):
if frag.get_inner_node(v_label_id, src, source):
native_source = True
break
if native_source:
context.set_node_value(source, 0)
else:
return
e_label_num = frag.edge_label_num()
for e_label_id in range(e_label_num):
edges = frag.get_outgoing_edges(source, e_label_id)
for e in edges:
dst = e.neighbor()
distv = e.get_int(2)
if context.get_node_value(dst) > distv:
context.set_node_value(dst, distv)
@staticmethod
def IncEval(frag, context):
v_label_num = frag.vertex_label_num()
e_label_num = frag.edge_label_num()
for v_label_id in range(v_label_num):
iv = frag.inner_nodes(v_label_id)
for v in iv:
v_dist = context.get_node_value(v)
for e_label_id in range(e_label_num):
es = frag.get_outgoing_edges(v, e_label_id)
for e in es:
u = e.neighbor()
u_dist = v_dist + e.get_int(2)
if context.get_node_value(u) > u_dist:
context.set_node_value(u, u_dist)
```
The only difference between `IncEval` and `PEval` of the **SSSP** algorithm is that `IncEval` is invoked
on every fragment, rather than only on the fragment containing the source node. A fragment repeats `IncEval` until no more messages are received. When all fragments have finished computing, the algorithm terminates.
### Run Your Algorithm on A Graph.
First, let's establish a session and load a graph for testing.
```
from graphscope.framework.loader import Loader
# the location of the property graph for testing
property_dir = '/home/jovyan/datasets/property'
graphscope.set_option(show_log=True)
k8s_volumes = {
"data": {
"type": "hostPath",
"field": {
"path": "/testingdata",
"type": "Directory"
},
"mounts": {
"mountPath": "/home/jovyan/datasets",
"readOnly": True
}
}
}
sess = graphscope.session(k8s_volumes=k8s_volumes)
graph = sess.g(directed=False)
graph = graph.add_vertices("/home/jovyan/datasets/property/p2p-31_property_v_0", label="person")
graph = graph.add_edges("/home/jovyan/datasets/property/p2p-31_property_e_0", label="knows")
```
Then initialize your algorithm and query the shortest path from vertex `6` over the graph.
```
sssp = SSSP_PIE()
ctx = sssp(graph, src=6)
```
Running this cell, your algorithm should evaluate successfully. The results are stored in vineyard on the distributed machines. Let's fetch and check the results.
```
r1 = (
ctx.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r1
```
### Dump and Reload Your Algorithm
You can dump and save your defined algorithm for future use.
```
import os
# specify the path you want to dump
dump_path = os.path.expanduser("~/Workspace/sssp_pie.gar")
# dump
SSSP_PIE.to_gar(dump_path)
```
Now, you can find a package named `sssp_pie.gar` in your `~/Workspace`. Reload this algorithm with the following code.
```
from graphscope.framework.app import load_app
# specify the path of the previously dumped package
dump_path = os.path.expanduser("~/Workspace/sssp_pie.gar")
sssp2 = load_app("SSSP_PIE", dump_path)
```
### Write Algorithm in Pregel Model
In addition to the subgraph-based PIE model, GraphScope supports the vertex-centric Pregel model. To define a Pregel algorithm, you should import the **pregel** decorator and fulfill the functions defined on the vertex.
```
import graphscope
from graphscope.framework.app import AppAssets
from graphscope.analytical.udf.decorators import pregel
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
@staticmethod
def Init(v, context):
pass
@staticmethod
def Compute(messages, v, context):
pass
```
The **pregel** decorator has two parameters named `vd_type` and `md_type`, which represent the vertex data type and message type respectively.
You can specify the types for your algorithm; the options are `int`, `double`, and `string`. For **SSSP**, we set both to `double`.
Since the Pregel model is defined on vertices, the `Init` and `Compute` functions have a parameter `v` to access the vertex data. See more details in the [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html).
### Fulfill Init Function
```
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(1000000000.0)
@staticmethod
def Compute(messages, v, context):
pass
```
The `Init` function sets the initial value for each node with `v.set_value(1000000000.0)`.
### Fulfill Compute Function
```
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(1000000000.0)
@staticmethod
def Compute(messages, v, context):
src_id = context.get_config(b"src")
cur_dist = v.value()
new_dist = 1000000000.0
if v.id() == src_id:
new_dist = 0
for message in messages:
new_dist = min(message, new_dist)
if new_dist < cur_dist:
v.set_value(new_dist)
for e_label_id in range(context.edge_label_num()):
edges = v.outgoing_edges(e_label_id)
for e in edges:
v.send(e.vertex(), new_dist + e.get_int(2))
v.vote_to_halt()
```
The `Compute` function for **SSSP** computes the new distance for each node with the following steps:
1) Initialize the new distance to 1000000000.
2) If the vertex is the source node, set its distance to 0.
3) Compute the `min` of the received messages, and update the vertex value if it is less than the current value.
These steps repeat until no new messages (shorter distances) are generated.
### Optional Combiner
Optionally, we can define a combiner to reduce the message communication overhead.
```
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
@staticmethod
def Init(v, context):
v.set_value(1000000000.0)
@staticmethod
def Compute(messages, v, context):
src_id = context.get_config(b"src")
cur_dist = v.value()
new_dist = 1000000000.0
if v.id() == src_id:
new_dist = 0
for message in messages:
new_dist = min(message, new_dist)
if new_dist < cur_dist:
v.set_value(new_dist)
for e_label_id in range(context.edge_label_num()):
edges = v.outgoing_edges(e_label_id)
for e in edges:
v.send(e.vertex(), new_dist + e.get_int(2))
v.vote_to_halt()
@staticmethod
def Combine(messages):
ret = 1000000000.0
for m in messages:
ret = min(ret, m)
return ret
```
### Run Your Pregel Algorithm on Graph.
Next, let's run your Pregel algorithm on the graph, and check the results.
```
sssp_pregel = SSSP_Pregel()
ctx = sssp_pregel(graph, src=6)
r2 = (
ctx.to_dataframe({"node": "v:person.id", "r": "r:person"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r2
```
It is important to release resources when they are no longer used.
```
sess.close()
```
### Aggregator in Pregel
Pregel aggregators are a mechanism for global communication, monitoring, and counting. Each vertex can provide a value to an aggregator in superstep `S`; the system combines these
values using a reducing operator, and the resulting value is made available to all vertices in superstep `S+1`. GraphScope provides a number of predefined aggregators for Pregel algorithms, such as `min`, `max`, or `sum` operations on data types.
Here is an example that uses a built-in aggregator; more details can be found in the [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html).
```
@pregel(vd_type="double", md_type="double")
class Aggregators_Pregel_Test(AppAssets):
@staticmethod
def Init(v, context):
# int
context.register_aggregator(
b"int_sum_aggregator", PregelAggregatorType.kInt64SumAggregator
)
context.register_aggregator(
b"int_max_aggregator", PregelAggregatorType.kInt64MaxAggregator
)
context.register_aggregator(
b"int_min_aggregator", PregelAggregatorType.kInt64MinAggregator
)
# double
context.register_aggregator(
b"double_product_aggregator", PregelAggregatorType.kDoubleProductAggregator
)
context.register_aggregator(
b"double_overwrite_aggregator",
PregelAggregatorType.kDoubleOverwriteAggregator,
)
# bool
context.register_aggregator(
b"bool_and_aggregator", PregelAggregatorType.kBoolAndAggregator
)
context.register_aggregator(
b"bool_or_aggregator", PregelAggregatorType.kBoolOrAggregator
)
context.register_aggregator(
b"bool_overwrite_aggregator", PregelAggregatorType.kBoolOverwriteAggregator
)
# text
context.register_aggregator(
b"text_append_aggregator", PregelAggregatorType.kTextAppendAggregator
)
@staticmethod
def Compute(messages, v, context):
if context.superstep() == 0:
context.aggregate(b"int_sum_aggregator", 1)
context.aggregate(b"int_max_aggregator", int(v.id()))
context.aggregate(b"int_min_aggregator", int(v.id()))
context.aggregate(b"double_product_aggregator", 1.0)
context.aggregate(b"double_overwrite_aggregator", 1.0)
context.aggregate(b"bool_and_aggregator", True)
context.aggregate(b"bool_or_aggregator", False)
context.aggregate(b"bool_overwrite_aggregator", True)
context.aggregate(b"text_append_aggregator", v.id() + b",")
else:
if v.id() == b"1":
assert context.get_aggregated_value(b"int_sum_aggregator") == 62586
assert context.get_aggregated_value(b"int_max_aggregator") == 62586
assert context.get_aggregated_value(b"int_min_aggregator") == 1
assert context.get_aggregated_value(b"double_product_aggregator") == 1.0
assert (
context.get_aggregated_value(b"double_overwrite_aggregator") == 1.0
)
assert context.get_aggregated_value(b"bool_and_aggregator") == True
assert context.get_aggregated_value(b"bool_or_aggregator") == False
assert (
context.get_aggregated_value(b"bool_overwrite_aggregator") == True
)
context.get_aggregated_value(b"text_append_aggregator")
v.vote_to_halt()
```
|
github_jupyter
|
# SuStaIn tutorial using simulated data
Written by Alex Young in April 2020, updated in April 2021. Please email [email protected] with any questions.
This tutorial demonstrates how to run Subtype and Stage Inference (SuStaIn) using simulated data. SuStaIn is an unsupervised learning algorithm that identifies subgroups of individuals with distinct biomarker progression patterns. See [Young et al. Nature Communications 2018](https://doi.org/10.1038/s41467-018-05892-0) for more details.
SuStaIn is a generalisable algorithm in which you can choose how to model the progression of biomarkers within a subtype. In this tutorial I use the linear z-score model I used in [Young et al. 2018](https://doi.org/10.1038/s41467-018-05892-0), but it is possible to use other models of biomarker evolution, such as the event-based model.
For a demonstration of the 'event-based'/mixture modelling version of SuStaIn (i.e. MixtureSustain), see simrun.py, available in the /sim subdirectory of the pySuStaIn package.
## Installing SuStaIn and setting it up to run in a notebook
To get SuStaIn up and running first you need to install the package. I'm using Anaconda and had some conflicts with existing packages, so I had to create a new environment and set it up to be accessible from a jupyter notebook. For me the whole setup process looked like this...
Step 1: Create a new environment "sustain_env" in anaconda that uses python 3.7 and activate the environment ready to install pySuStaIn.
```console
conda create --name sustain_env python=3.7
conda activate sustain_env
```
Step 2: Install pySuStaIn within the environment. The first line installs the kde-ebm (not needed for this tutorial), and the second line installs pySuStaIn. Using the -e option enables you to edit the code (most users won't need to do this so they can remove the -e).
```console
pip install -e ./awkde
pip install -e .
```
Step 3: To get the new environment to run inside a notebook I had to install ipykernel and use that to add the environment as a new kernel.
```console
conda install ipykernel
python3.7 -m ipykernel install --user --name sustain_env
```
When running the notebook you then need to make sure you're using the new kernel (it should appear as sustain_env in the top right hand corner, or you need to select it using Kernel>Change kernel>sustain_env).
## Importing packages
```
# import the python packages needed to generate simulated data for the tutorial
import os
import shutil
import numpy as np
import matplotlib.pyplot as plt
import pickle
from pathlib import Path
import sklearn.model_selection
import pandas as pd
import pylab
import sys
import pySuStaIn
sys.path.insert(0,'../sim/')
from simfuncs import generate_random_Zscore_sustain_model, generate_data_Zscore_sustain
```
## The linear z-score model
The linear z-score model I use in this tutorial describes a subtype progression pattern as the linear evolution of biomarkers between different z-scores. Figure 1 below shows an example of what this model looks like for a single subtype. The model is indexed by a set of discrete stages. Each stage corresponds to a biomarker reaching a new z-score from the set of z-scores for each biomarker, Z_vals. Each biomarker starts with a minimum value of 0 at stage 0 and reaches a maximum of Z_max at the final stage of the progression. The number of stages is determined by the number of biomarkers and z-scores in Z_vals. The SuStaIn algorithm identifies subgroups of individuals and their progression patterns; for the linear z-score model, the progression pattern is the ordering of the different biomarker z-scores in Z_vals (gt_sequence below).
You can play around generating different sequences and altering the settings of the linear z-score model using the code below.
```
N = 5 # number of biomarkers
SuStaInLabels = []
for i in range(N):
SuStaInLabels.append( 'Biomarker '+str(i)) # labels of biomarkers for plotting
Z_vals = np.array([[1,2,3]]*N) # Z-scores for each biomarker
Z_max = np.array([5]*N) # maximum z-score
# To demonstrate how to set different biomarkers to have different z-scores,
# set biomarker 0 to have z-scores of 1 and 2 only and a maximum of 3
# to do this change the corresponding row of Z_vals to read 1 2 0
# and change the corresponding row of Z_max to 3
Z_vals[np.array(0),np.array(2)] = 0
Z_max[np.array(0)] = 3
# and set biomarker 2 to have a z-score of 1 only and a maximum of 2
# to do this change the corresponding row of Z_vals to read 1 0 0
# and change the corresponding row of Z_max to 2
Z_vals[np.array(2),np.array([1,2])] = 0
Z_max[np.array(2)] = 2
# generate a random sequence for the linear z-score model
gt_sequence = generate_random_Zscore_sustain_model(Z_vals,
1)
# ignore this part, it's only necessary so that the generate_data_sustain function
# can be used in this demo setting
gt_stages = np.array([0])
gt_subtypes = np.array([0])
# this code generates data from z-score sustain
# - here i've just output the z-score model itself rather than any datapoints
_, _, gt_stage_value = generate_data_Zscore_sustain(gt_subtypes,
gt_stages,
gt_sequence,
Z_vals,
Z_max)
# ignore this part, just calculates some parameters of sustain to output below
stage_zscore = np.array([y for x in Z_vals.T for y in x])
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
IX_select = stage_zscore>0
stage_zscore = stage_zscore[IX_select]
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
num_zscores = Z_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_zscores).T
stage_biomarker_index = np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
# print out some of the values and plot a picture of the model
print('Simulated sequence:',(gt_sequence.astype(int).flatten()))
print('At the beginning of the progression (stage 0) the biomarkers have scores of 0')
print('At the stages:',1+np.arange(np.array(stage_zscore).shape[1]))
print('the biomarkers:',stage_biomarker_index[:,gt_sequence.astype(int).flatten()].flatten())
print('reach z-scores of:',stage_zscore[:,gt_sequence.astype(int).flatten()].flatten())
print('At the end of the progression (stage',np.array(stage_zscore).shape[1]+2,') the biomarkers reach scores of:',Z_max)
print('The z-score model assumes individuals belong to some unknown stage of this progression,')
print('with gaussian noise with a standard deviation of 1 for each biomarker')
temp_stages = np.array(range(np.array(stage_zscore).shape[1]+2))
for b in range(N):
ax = plt.plot(temp_stages, gt_stage_value[b,:,:])
_ = plt.xlabel('SuStaIn stage')
_ = plt.ylabel('Z-score')
_ = plt.legend(SuStaInLabels)
_ = plt.title('Figure 1')
```
## Important note on the linear z-score model
It's natural to think of the progression pattern in Figure 1 as linear in time but this isn't necessarily the case. For example, the time between stages 2 and 3 may be much longer than between stages 8 and 9. This means that the shape of the trajectories may look quite different if indexed by time (although the general order in which the biomarkers progress to different z-scores would remain the same). The linear z-score model simply describes the patient snapshots you would expect to see in a cross-sectional dataset for any particular subtype at a particular stage. The subtypes and stages are considered as hidden variables, which the SuStaIn algorithm identifies directly from the data.
## Generating simulated data
This section of code generates simulated data for the tutorial. Any variables labelled as 'gt' (ground truth) are for generating the simulated data only and would typically not be known beforehand in a real dataset.
You can ignore many of the settings below for now, but in particular
- N_S_gt is the number of subtypes to simulate
- gt_f is the proportion of individuals belonging to each subtype
- gt_sequence is the order in which each biomarker approaches each z-score for each subtype
- gt_subtypes is the subtype of each individual
- gt_stages is the stage of each individual along the progression pattern of their subtype
You can alter these to get a feel for how SuStaIn works on different simulated datasets.
```
N = 5 # number of biomarkers
M = 500 # number of observations ( e.g. subjects )
M_control = 100 # number of these that are control subjects
N_S_gt = 2 # number of ground truth subtypes
SuStaInLabels = []
for i in range(N):
SuStaInLabels.append( 'Biomarker '+str(i)) # labels of biomarkers for plotting
Z_vals = np.array([[1,2,3]]*N) # Z-scores for each biomarker
Z_max = np.array([5]*N) # maximum z-score
# ground truth proportion of individuals belonging to each subtype
gt_f = [1+0.5*x for x in range(N_S_gt)]
gt_f = [x/sum(gt_f) for x in gt_f][::-1]
# ground truth sequence for each subtype
gt_sequence = generate_random_Zscore_sustain_model(Z_vals,
N_S_gt)
# simulate subtypes and stages for individuals, including a control population at stage 0
N_k = np.sum(Z_vals>0)+1
gt_subtypes = np.random.choice(range(N_S_gt), M, replace=True, p=gt_f)
gt_stages_control = np.zeros((M_control,1))
gt_stages = np.concatenate((gt_stages_control,
np.ceil(np.random.rand(M-M_control,1)*N_k)),
axis=0)
# generate simulated data
data, gt_data_denoised, gt_stage_value = generate_data_Zscore_sustain(gt_subtypes,
gt_stages,
gt_sequence,
Z_vals,
Z_max)
# ignore this part, just calculates some parameters of sustain to output below
stage_zscore = np.array([y for x in Z_vals.T for y in x])
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
IX_select = stage_zscore>0
stage_zscore = stage_zscore[IX_select]
stage_zscore = stage_zscore.reshape(1,len(stage_zscore))
num_zscores = Z_vals.shape[1]
IX_vals = np.array([[x for x in range(N)]] * num_zscores).T
stage_biomarker_index = np.array([y for x in IX_vals.T for y in x])
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
stage_biomarker_index = stage_biomarker_index[IX_select]
stage_biomarker_index = stage_biomarker_index.reshape(1,len(stage_biomarker_index))
for s in range (N_S_gt):
# print out the parameters
print('For subtype',s,'(',gt_f[s]*100,'% of individuals)')
print('Simulated sequence:',(gt_sequence[s,:].astype(int).flatten()))
print('At the beginning of the progression (stage 0) the biomarkers have scores of 0')
print('At the stages:',1+np.arange(np.array(stage_zscore).shape[1]))
print('the biomarkers:',stage_biomarker_index[:,gt_sequence[s,:].astype(int).flatten()].flatten())
print('reach z-scores of:',stage_zscore[:,gt_sequence[s,:].astype(int).flatten()].flatten())
print('At the end of the progression (stage',np.array(stage_zscore).shape[1]+2,') the biomarkers reach scores of:',Z_max)
print('')
```
## The SuStaIn algorithm
SuStaIn identifies subtypes with distinct progression patterns from cross-sectional data. The algorithm proceeds hierarchically, first fitting a single subtype to the data, then two, then three, etc., up to a maximum number of subtypes chosen by the user. The fitting of the nth subtype model works by splitting each of the previous n-1 clusters into two and then using this as an initialisation to fit the n subtype model. For each of the n subtype models, SuStaIn uses MCMC sampling to estimate the uncertainty in the subtype progression patterns and the proportion of individuals that belong to each subtype. The optimal number of subtypes is selected by using cross-validation to compute the cross-validation information criterion (CVIC).
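To make the hierarchical strategy concrete, here is a schematic sketch of that fitting loop. This is not the pySuStaIn implementation; `fit_model` and `split_subtype` are hypothetical placeholders standing in for the real optimisation and cluster-splitting routines, and the MCMC sampling step is omitted.
```
# Schematic sketch of the hierarchical fitting strategy (NOT the pySuStaIn code).
# fit_model and split_subtype are hypothetical placeholder callables.
def fit_hierarchically(data, N_S_max, fit_model, split_subtype):
    models = {1: fit_model(data, init=None)}               # single-subtype model
    for n in range(2, N_S_max + 1):
        candidates = []
        for s in range(n - 1):                             # split each existing subtype in turn
            init = split_subtype(models[n - 1], s)         # split subtype s into two
            candidates.append(fit_model(data, init=init))  # use the split as initialisation
        models[n] = max(candidates, key=lambda m: m["log_likelihood"])
    return models
```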
## Preparing data for SuStaIn
The data for SuStaIn needs to be z-scored relative to a control population such that the mean of the control population is 0 and the standard deviation of the control population is 1. To do this simply subtract the mean of the control population from your data and divide by the standard deviation of the control population. Double check that if you apply this transformation to the control population only that the control population has a mean of 0 and a standard deviation of 1. The data further needs to be transformed to increase in z-score with disease progression. If the biomarkers you are using decrease with disease progression you need to multiply the data for those biomarkers by -1.
I'd suggest the following workflow for getting your data ready to run SuStaIn on.
1. Regress out the effects of covariates. Learn the effects of covariates in a control population and use this model to regress out the effect of covariates for all the subjects. Learning the model in the control population will avoid regressing out disease effects, which you want to keep in your dataset. A minimal sketch of this step is shown after this list.
2. Calculate the mean and standard deviation of each biomarker in your control dataset, mean_control and std_control.
3. Z-score your data by taking (data-mean_control)/std_control.
4. Identify any biomarkers that decrease with disease progression, these will have mean_data < mean_control. Multiply the data for these biomarkers by -1.
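Step 1 isn't needed for the simulated data in this tutorial (there are no covariates), but a minimal sketch of it might look like the code below. Here `covariates` is a hypothetical placeholder for a real covariate matrix (e.g. age and sex), the control mask matches the stage-0 definition used later, and the model is fit on controls only so that disease effects aren't regressed out.
```
# Sketch of step 1 (not needed for this tutorial's simulated data):
# regress out covariate effects learned in the control population
from sklearn.linear_model import LinearRegression

covariates = np.random.randn(M, 2)            # hypothetical placeholder covariates (e.g. age, sex)
is_control = np.reshape(gt_stages, (M)) == 0  # controls are the stage-0 individuals in this simulation
reg = LinearRegression().fit(covariates[is_control], data[is_control])
data_adjusted = data - reg.predict(covariates)  # data with the covariate-predicted component removed
```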
Steps 2-4 are illustrated in the section below but will have little effect on the simulated data because it is generated as z-scores already.
```
# extract data for control subjects
data_control = data[np.tile(gt_stages,(1,N))==0].reshape(M_control,N)
# compute the mean and standard deviation of the control population
mean_control = np.mean(data_control,axis=0)
std_control = np.std(data_control,axis=0)
# z-score the data
data = (data-mean_control)/std_control
data_control = (data_control-mean_control)/std_control
# multiply data for decreasing biomarkers by -1
IS_decreasing = np.mean(data,axis=0)<np.mean(data_control,axis=0)
data[np.tile(IS_decreasing,(M,1))] = -1*data[np.tile(IS_decreasing,(M,1))]
data_control[np.tile(IS_decreasing,(M_control,1))] = -1*data_control[np.tile(IS_decreasing,(M_control,1))]
# Check that the mean of the control population is 0
print('Mean of controls is ',np.mean(data_control,axis=0))
# Check that the standard deviation of the control population is 1
print('Standard deviation of controls is ',np.std(data_control,axis=0))
# Check that the mean of the whole dataset is positive
print('Mean of whole dataset is ',np.mean(data,axis=0))
# Check that the standard deviation of the whole dataset is greater than 1
print('Standard deviation of whole dataset is ',np.std(data,axis=0))
```
## Choosing the settings
The SuStaIn algorithm requires the following inputs.
### data
The data you want to run SuStaIn on, of size M subjects by N biomarkers. This needs to be z-scored in the way described in the previous section.
### Z_vals
This is the set of z-scores you want to include for each biomarker. The more z-scores you use the longer the SuStaIn algorithm will take to run. Z_vals has size N biomarkers by Z z-scores. If you have more z-scores for some biomarkers than others you can simply leave zeros at the end of biomarker rows with fewer z-scores.
### Z_max
The maximum z-score reached at the end of the progression, with size N biomarkers by 1. I'd suggest choosing a value around the 95th percentile of your data but you can experiment with different values. I typically choose an integer for interpretability but you don't have to.
### SuStaInLabels
The names of the biomarkers you are using, for plotting purposes.
### N_startpoints
The number of startpoints to use when fitting the subtypes hierarchically. I'd suggest using 25.
### N_S_max
The maximum number of subtypes to fit. I'd suggest starting with a lower number - maybe three - and then increasing that if you're getting a significantly better fit with the maximum number of subtypes. You can judge this roughly from the MCMC plot. To properly evaluate the optimal number of subtypes you need to run cross-validation.
### N_iterations_MCMC
The number of iterations for the MCMC sampling of the uncertainty in the progression pattern. I'd recommend using 1x10^5 or 1x10^6.
### output_folder
Choose an output folder for the results.
### dataset_name
Name the results files outputted by SuStaIn.
### use_parallel_startpoints
Boolean for whether or not to parallelize the startpoints.
### Additional note
There are approximate and exact versions of the computation of the data likelihood for the linear z-score model. Currently the python version only supports the approximate version. If you want to use the exact version please see the Matlab version at https://github.com/ucl-pond/SuStaInMatlab.
```
# Input the settings for z-score SuStaIn
# To make the tutorial run faster I've set
# N_startpoints = 10 and N_iterations_MCMC = int(1e4)
# I recommend using N_startpoints = 25 and
# N_iterations_MCMC = int(1e5) or int(1e6) in general though
N_startpoints = 10
N_S_max = N_S_gt+1
N_iterations_MCMC = int(1e4)
output_folder = os.path.join(os.getcwd(), 'sim')
dataset_name = 'sim'
sustain_input = pySuStaIn.ZscoreSustain(data,
Z_vals,
Z_max,
SuStaInLabels,
N_startpoints,
N_S_max,
N_iterations_MCMC,
output_folder,
dataset_name,
False)
```
## Deleting previous SuStaIn results if necessary
This code snippet deletes any previous SuStaIn results. By default the SuStaIn code checks for previous results to avoid running the algorithm again unnecessarily, so you'll need to run this section each time you generate a new simulated dataset that you want to fit the SuStaIn model to. If you don't want to overwrite your previous results you can choose a new dataset_name and/or output_folder.
```
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
# output_folder = os.path.join(os.getcwd(), 'sim2')
# dataset_name = 'sim2'
# sustain_input = ZscoreSustain(data,
# Z_vals,
# Z_max,
# SuStaInLabels,
# N_startpoints,
# N_S_max,
# N_iterations_MCMC,
# output_folder,
# dataset_name,
# False)
```
## Create folder for results if it doesn't exist already
```
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
```
## Running the SuStaIn algorithm
We're finally ready to run the SuStaIn algorithm! The main outputs are samples_sequence and samples_f. samples_sequence gives MCMC samples of the ordering of the biomarker z-scores for each n subtype model. samples_f gives MCMC samples of the proportion of individuals that belong to each subtype for each n subtype model. These can be found in the outputted files.
```
# runs the sustain algorithm with the inputs set in sustain_input above
samples_sequence, \
samples_f, \
ml_subtype, \
prob_ml_subtype, \
ml_stage, \
prob_ml_stage, \
prob_subtype_stage = sustain_input.run_sustain_algorithm()
# Just added this to demonstrate what happens if you re-run the SuStaIn algorithm
# with the same dataset name and folder as previously
# The code recognises the files are there already rather than re-running SuStaIn
# This is useful if you want to increase the number of subtypes without
# starting right from the beginning again
samples_sequence, \
samples_f, \
ml_subtype, \
prob_ml_subtype, \
ml_stage, \
prob_ml_stage, \
prob_subtype_stage = sustain_input.run_sustain_algorithm()
```
## Comparison with ground truth
Figure 3 shows the expected progression patterns for the simulated data in the form of positional variance diagrams so it's easier to compare the output of SuStaIn with the ground truth from the simulated data. In a positional variance diagram each entry tells you the probability each biomarker has reached each z-score at each SuStaIn stage. Here, z-scores of 1 are shown in red, 2 in magenta and 3 in blue. I've plotted the positional variance diagrams in Figure 3 without any uncertainty. You'd expect the results from the simulated dataset to have a similar progression pattern on average to those in Figure 3, but with uncertainty due to the simulated noise. Figure 4 shows the output of SuStaIn for the ground truth number of subtypes.
```
# Output a figure showing the ground truth
temp_gt_sequence = gt_sequence.reshape((gt_sequence.shape[0],gt_sequence.shape[1],1))
temp_gt_f = np.asarray(gt_f).reshape(len(gt_f),1)
pySuStaIn.ZscoreSustain._plot_sustain_model(sustain_input,temp_gt_sequence,temp_gt_f,M)
_ = plt.suptitle('Figure 3: Ground truth progression pattern')
# The code below opens the results for the ground truth number of subtypes
# and plots the output
s = N_S_gt-1
pickle_filename_s = output_folder + '/pickle_files/' + dataset_name + '_subtype' + str(s) + '.pickle'
pickle_filepath = Path(pickle_filename_s)
pickle_file = open(pickle_filename_s, 'rb')
loaded_variables = pickle.load(pickle_file)
samples_sequence = loaded_variables["samples_sequence"]
samples_f = loaded_variables["samples_f"]
pickle_file.close()
pySuStaIn.ZscoreSustain._plot_sustain_model(sustain_input,samples_sequence,samples_f,M)
_ = plt.suptitle('Figure 4: SuStaIn output')
```
## Assessing the output
Now we've run the SuStaIn algorithm, we need to assess the output and decide whether to change any of the settings.
### MCMC trace
The first thing to look at is the MCMC trace (Figure 5 below). It should be periodic, i.e. with a structure that regularly repeats itself, rather than having long flat sections where it gets stuck at a particular likelihood. If this isn't the case SuStaIn is not working well on your data. There's some troubleshooting you can try:
- Check that your data is z-scored correctly
- Check that the choice of z-scores (Z_vals and Z_max) are sensible
- Check that your data looks normally distributed in your control population
-- if not, try an alternative version of SuStaIn that might be more suitable for your data, e.g. SuStaIn using a KDE event-based model for non normally distributed data
- Increase the number of startpoints (N_startpoints)
- Increase the number of MCMC samples (N_iterations_MCMC)
### Histograms of model likelihood
The next thing to look at are histograms of the model likelihood (Figure 6 below). Whilst the number of subtypes should be determined through cross-validation, these histograms can give a reasonable indication of the number of subtypes in your dataset, which will enable you to decide whether to fit more subtypes and what to set as the maximum number of subtypes for the cross-validation. When there's a large overlap between the histograms of the model likelihood as you increase the subtypes it means that the likelihood isn't improving very much when you increase the number of subtypes, which means you've probably gone past the optimal number of subtypes. You want to fit enough subtypes so that at least one model is too complex (has too many subtypes) for your data so that you can be sure you've chosen the optimal number of subtypes. If you're not seeing overlapping histograms you need to increase N_S_max and run SuStaIn again from your previous setting of N_S_max.
### Positional variance diagrams
If the end stages of the positional variance diagrams look very blurry with no clear predominant progression pattern, it usually means that there aren't many individuals that fit well with the end stages of the progression. If this is the case you might want to consider removing some biomarker z-scores and re-running SuStaIn.
```
# go through each subtypes model and plot MCMC samples of the likelihood
for s in range(N_S_max):
pickle_filename_s = output_folder + '/pickle_files/' + dataset_name + '_subtype' + str(s) + '.pickle'
pickle_filepath = Path(pickle_filename_s)
pickle_file = open(pickle_filename_s, 'rb')
loaded_variables = pickle.load(pickle_file)
samples_likelihood = loaded_variables["samples_likelihood"]
pickle_file.close()
_ = plt.figure(0)
_ = plt.plot(range(N_iterations_MCMC), samples_likelihood, label="subtype" + str(s))
_ = plt.figure(1)
_ = plt.hist(samples_likelihood, label="subtype" + str(s))
_ = plt.figure(0)
_ = plt.legend(loc='upper right')
_ = plt.xlabel('MCMC samples')
_ = plt.ylabel('Log likelihood')
_ = plt.title('Figure 5: MCMC trace')
_ = plt.figure(1)
_ = plt.legend(loc='upper right')
_ = plt.xlabel('Log likelihood')
_ = plt.ylabel('Number of samples')
_ = plt.title('Figure 6: Histograms of model likelihood')
```
## Cross-validation
To determine the optimal number of subtypes it's necessary to perform cross-validation and compute the cross-validation information criterion (CVIC).
## Stratified cross-validation
It's a good idea to use stratified training and test sets so you have similar numbers of cases and controls in each fold, as shown below.
```
# identify a control population
index_control = np.reshape(gt_stages,(M))==0
# label cases and controls to perform stratified cross-validation
labels = 1 * np.ones(data.shape[0], dtype=int)
labels[index_control] = 0
# choose the number of folds - here i've used three for speed but i recommend 10 typically
N_folds = 3
# generate stratified cross-validation training and test set splits
cv = sklearn.model_selection.StratifiedKFold(n_splits=N_folds,
shuffle=True)
cv_it = cv.split(data, labels)
test_idxs = []
for train, test in cv_it:
test_idxs.append(test)
test_idxs = np.array(test_idxs)
```
## Performing cross-validation
Next you need to run the cross-validation on your training folds and validate on the test folds. The code below does this sequentially for all folds. It's also possible to specify a specific fold if you wanted to run each fold of the cross-validation separately in parallel (e.g. on the cluster).
```
# perform cross-validation and output the cross-validation information criterion and
# log-likelihood on the test set for each subtypes model and fold combination
CVIC, loglike_matrix = sustain_input.cross_validate_sustain_model(test_idxs)
# Just added this to demonstrate what happens if you re-run the cross-validation
# with the same dataset name and folder as previously
# The code recognises the files are there already rather than re-running SuStaIn
# This is useful if you want to increase the number of subtypes without
# starting right from the beginning again
CVIC, loglike_matrix = sustain_input.cross_validate_sustain_model(test_idxs)
```
## Choosing the optimal number of subtypes
The optimal number of subtypes is chosen using the CVIC, shown in Figure 7 below. The CVIC is an information criterion (like the AIC/BIC/WAIC) that balances model complexity with model accuracy, with a lower CVIC indicating a better balance between the two. Generally speaking, the model with the lowest CVIC is the best. However, you do sometimes get a very small improvement (less than ~6) in the CVIC with a more complex model, in which case I would tend to favour the less complex (i.e. fewer subtypes) model.
Another useful metric to look at is the log-likelihood of each subtypes model on the test set, shown in Figure 8. A better model should show a consistent improvement in the test set log-likelihood across folds.
```
# go through each subtypes model and plot the log-likelihood on the test set and the CVIC
print("CVIC for each subtype model: " + str(CVIC))
print("Average test set log-likelihood for each subtype model: " + str(np.mean(loglike_matrix, 0)))
_ = plt.figure(1)
_ = plt.plot(np.arange(N_S_max,dtype=int),CVIC)
_ = plt.xticks(np.arange(N_S_max,dtype=int))
_ = plt.ylabel('CVIC')
_ = plt.xlabel('Subtypes model')
_ = plt.title('Figure 7: CVIC')
_ = plt.figure(0)
df_loglike = pd.DataFrame(data = loglike_matrix, columns = ["s_" + str(i) for i in range(sustain_input.N_S_max)])
df_loglike.boxplot(grid=False)
for i in range(sustain_input.N_S_max):
y = df_loglike[["s_" + str(i)]]
x = np.random.normal(1+i, 0.04, size=len(y)) # Add some random "jitter" to the x-axis
pylab.plot(x, y, 'r.', alpha=0.2)
_ = plt.ylabel('Log likelihood')
_ = plt.xlabel('Subtypes model')
_ = plt.title('Figure 8: Test set log-likelihood across folds')
```
## Cross-validated positional variance diagrams
Another useful output of the cross-validation that you can look at are positional variance diagrams averaged across cross-validation folds. These give you an idea of the variability in the progression patterns across different training datasets.
```
#this part estimates cross-validated positional variance diagrams
for i in range(N_S_max):
sustain_input.combine_cross_validated_sequences(i+1, N_folds)
# Output a figure showing the ground truth
temp_gt_sequence = gt_sequence.reshape((gt_sequence.shape[0],gt_sequence.shape[1],1))
temp_gt_f = np.asarray(gt_f).reshape(len(gt_f),1)
pySuStaIn.ZscoreSustain._plot_sustain_model(sustain_input,temp_gt_sequence,temp_gt_f,M)
_ = plt.suptitle('Figure 9: Ground truth progression pattern')
# The code below opens the results for the ground truth number of subtypes
# and plots the output
s = N_S_gt-1
pickle_filename_s = output_folder + '/pickle_files/' + dataset_name + '_subtype' + str(s) + '.pickle'
pickle_filepath = Path(pickle_filename_s)
pickle_file = open(pickle_filename_s, 'rb')
loaded_variables = pickle.load(pickle_file)
samples_sequence = loaded_variables["samples_sequence"]
samples_f = loaded_variables["samples_f"]
pickle_file.close()
pySuStaIn.ZscoreSustain._plot_sustain_model(sustain_input,samples_sequence,samples_f,M)
_ = plt.suptitle('Figure 10: SuStaIn output')
sustain_input.combine_cross_validated_sequences(N_S_gt, N_folds)
_ = plt.suptitle('Figure 11: Cross-validated SuStaIn output')
```
## Subtyping and staging
Once you've used the CVIC to choose the optimal number of subtypes, you can use the SuStaIn output from that subtypes model to subtype and stage individuals in your dataset. This will already have been output by the SuStaIn algorithm; you can extract the outputs using the code below. The outputs are
### ml_subtype and prob_ml_subtype
The maximum likelihood subtype and the probability of that subtype for each individual.
### ml_stage and prob_ml_stage
The maximum likelihood stage and the probability of that stage for each individual.
### prob_subtype, prob_stage and prob_subtype_stage
The probability each individual belongs to each subtype, to each stage, and to each subtype and stage combination.
```
s = N_S_gt-1
pickle_filename_s = output_folder + '/pickle_files/' + dataset_name + '_subtype' + str(s) + '.pickle'
pickle_filepath = Path(pickle_filename_s)
pickle_file = open(pickle_filename_s, 'rb')
loaded_variables = pickle.load(pickle_file)
ml_subtype = loaded_variables["ml_subtype"]
prob_ml_subtype = loaded_variables["prob_ml_subtype"]
ml_stage = loaded_variables["ml_stage"]
prob_ml_stage = loaded_variables["prob_ml_stage"]
prob_subtype = loaded_variables["prob_subtype"]
prob_stage = loaded_variables["prob_stage"]
prob_subtype_stage = loaded_variables["prob_subtype_stage"]
pickle_file.close()
# You can also subtype and stage new data using
# N_samples = 1000
# ml_subtype, \
# prob_ml_subtype, \
# ml_stage, \
# prob_ml_stage, \
# prob_subtype, \
# prob_stage, \
# prob_subtype_stage = sustain_input.subtype_and_stage_individuals_newData(new_data,
# samples_sequence,
# samples_f,
# N_samples)
```
## Subtyping and staging using cross-validated models
The code below subtypes and stages individuals using the cross-validated positional variance diagrams in Figure 11.
```
s = N_S_gt-1
Nfolds = len(test_idxs)
for fold in range(Nfolds):
pickle_filename_fold_s = sustain_input.output_folder + '/pickle_files/' + sustain_input.dataset_name + '_fold' + str(fold) + '_subtype' + str(s) + '.pickle'
pickle_filepath = Path(pickle_filename_fold_s)
pickle_file = open(pickle_filename_fold_s, 'rb')
loaded_variables = pickle.load(pickle_file)
samples_sequence = loaded_variables["samples_sequence"]
samples_f = loaded_variables["samples_f"]
pickle_file.close()
if fold == 0:
samples_sequence_cval = samples_sequence
samples_f_cval = samples_f
else:
samples_sequence_cval = np.concatenate((samples_sequence_cval, samples_sequence), axis=2)
samples_f_cval = np.concatenate((samples_f_cval, samples_f), axis=1)
N_samples = 1000
ml_subtype_cval, \
prob_ml_subtype_cval, \
ml_stage_cval, \
prob_ml_stage_cval, \
prob_subtype_cval, \
prob_stage_cval, \
prob_subtype_stage_cval = sustain_input.subtype_and_stage_individuals_newData(data,
samples_sequence_cval,
samples_f_cval,
N_samples)
```
|
github_jupyter
|
# Math and Statistics Review for ML
Using the smallpox data set, review relevant mathematical and statistical methods commonly used in machine learning. An example will be shown using the Utah data. Choose another state and perform the same operations on the data for that state.
```
import pandas as pd
import numpy as np
data = pd.read_csv('http://apmonitor.com/pds/uploads/Main/smallpox.txt')
data.head()
```
### Summary Statistics with Pandas describe() Function
Pandas imports data, generates summary statistics, and manipulates data tables. There are many functions that allow efficient manipulation for the preliminary steps of data analysis problems. The data.describe() command gives a table of summary statistics.
```
data.describe()
```
### Summary Statistics with Pandas Profiling
Pandas Profiling generates a more detailed analysis than the pandas describe function, producing an overview of the data with only a couple of lines of code. Use `minimal=True` to skip the analysis sections (such as correlation plots) that take a long time with large data sets. Run the code to generate the profile of the data and view it either as a Notebook widget or as an html file. Explore the data to determine the relationships between the variables.
```
from pandas_profiling import ProfileReport
profile = ProfileReport(data, explorative=True, minimal=True)
try:
profile.to_widgets() # view as widget in Notebook
except:
profile.to_file('smallpox.html') # save as html file
```
# Mathematics
### Scalars, Vectors, and Matrices
**Scalars** are simple numerical values, and are typically denoted by an italicized letter, like _x_ or _a_.
**Vectors** are ordered lists of scalar values, and are denoted with bold letters, such as **x** or **w**. The specific attribute of a vector is denoted by a superscript, like so: _w_$^{(j)}$. For example, the vector **w** = [3,2] has the attribute _w_$^{(1)}$ = 3.
**Matrices** are rectangular arrays of numbers arranged in rows or columns, and are denoted with bold capital letters, such as **A** or **W**.
In Python, matrices and vectors are called "arrays."
```
UT = data[data['state'] == 'UT']
# Create a matrix containing data from the 'cases' and 'indicence_per_capita' columns
matrix = UT[['cases','incidence_per_capita']].values
print('Matrix:',matrix)
print('Dimensions:',matrix.shape)
# The vector is composed of the values in column 0
# (the first column) of the matrix
# Use the : to select every row, and
# the 0 to select the first column
vector = matrix[:,0]
print('Vector:',vector)
print('Dimension:',vector.shape)
# Create a scalar out of the 6th attribute of the vector
# Python starts counting at 0, so the [5]
# indicates the 6th value in the array
scalar = vector[5]
print('Scalar:',scalar)
print('Dimension:',scalar.shape)
```
**Activity:**
1. Create a matrix **X** out of the 'cases' and 'incidence_per_capita' columns of a state of your choice
2. Create a vector **x** containing all the values in the 'incidence_per_capita' column of **X**
3. Create a scalar _x_$^{(12)}$ from the 12th value in **x** (remember to use x[11])
4. Find the dot product between the 2 distinct vectors in matrix **X**. It may be helpful to import numpy and use np.dot()
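For item 4, a possible sketch using the Utah `matrix` built above (swap in the matrix for your own state):
```python
import numpy as np

# dot product between the 'cases' and 'incidence_per_capita' columns
dot_product = np.dot(matrix[:, 0], matrix[:, 1])
print('Dot product:', dot_product)
```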
### Summations and Products
It's sometimes useful to sum or multiply all attributes of a vector. The notation for summations over a vector **x** = [x$^{(1)}$,x$^{(2)}$,...x$^{(n-1)}$,x$^{(n)}$] looks like this:
$$\sum_{i=1}^nx^{(i)} = x^{(1)} + x^{(2)} + ...+x^{(n-1)} + x^{(n)}$$
Similarly, the product is summarized using the capital pi notation:
$$\prod_{i=1}^nx^{(i)} = x^{(1)} \cdot x^{(2)} \cdot ...\cdot x^{(n-1)} \cdot x^{(n)}$$
```
# Find the total number of smallpox cases over all time in Utah
UT_sum = np.sum(UT['cases'])
print('Sum:',UT_sum)
```
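The product notation has a numpy counterpart as well. Here is a small illustration on a toy vector (the weekly case counts contain zeros, so their product is not very informative):
```python
import numpy as np

x = np.array([1, 2, 3, 4])
print('Sum:', np.sum(x))       # 1 + 2 + 3 + 4 = 10
print('Product:', np.prod(x))  # 1 * 2 * 3 * 4 = 24
```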
**Activity**: Find the total number of cases in another state.
### Derivatives and Gradients
You should be familiar with analytical derivatives and gradients from calculus courses. While these are most commonly used behind the scenes to solve problems, it's useful to know about them for two reasons. First, you should understand how a numerical derivative works. Second, you may find it beneficial to use the numerical derivative as an additional feature for your machine learning model. You can calculate the numerical derivative as follows:
```
# Be sure the data is sorted according to week
UT = UT.sort_values(by=['week'])
# Find the difference in cases for each consecutive week
UT['dcases'] = UT['cases'].diff()
# Find the difference in weeks for each consecutive week
# should be 1, except for when there are missing weeks
UT['dweeks'] = UT['week'].diff()
# Find the numerical derivative, dcases/dweeks
UT['dcases/dweeks'] = UT['dcases'] / UT['dweeks']
# Plot the numerical derivative
UT['dcases/dweeks'].plot()
```
**Activity:**
1. Find the weekly difference in smallpox cases for your state.
2. Calculate the numerical derivative (finite difference) for weekly smallpox cases.
3. Plot the numerical derivative for weekly smallpox cases. Interpret the graph.
Repeat the activity by looking at the difference in cases over a time period of 10 weeks. You can use the argument `periods = 10` inside of the `.diff()` function.
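A possible sketch of the 10-week variant for the Utah data (the new column names are illustrative; adapt for your state):
```python
# difference in cases and weeks over a 10-week window
UT['dcases10'] = UT['cases'].diff(periods=10)
UT['dweeks10'] = UT['week'].diff(periods=10)
UT['dcases10/dweeks10'] = UT['dcases10'] / UT['dweeks10']
UT['dcases10/dweeks10'].plot()
```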
# Statistics
### Distributions
The distribution describes the makeup of the data in each column. Distributions can be either discrete (restricted to a fixed set of values) or continuous (all numbers in a range are possible). Plotting a distribution allows you not only to see what the data look like, but also to determine if there are suspicious values or if you need to do additional preprocessing.
```
# Plot the distribution of 'incidence_per_capita'; use only non-zero values
UT[UT['incidence_per_capita'] != 0]['incidence_per_capita'].hist()
```
**Activity:**
1. Plot the distribution of `'incidence_per_capita'` for your state of choice.
2. Sometimes the log scaled values are more useful or give better results. Log scale the `'incidence_per_capita'` and observe the difference.
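One possible way to log scale the non-zero values, sketched here for the Utah data (use your own state):
```python
import numpy as np

# log scale the non-zero incidence values and plot the distribution
log_incidence = np.log(UT[UT['incidence_per_capita'] != 0]['incidence_per_capita'])
log_incidence.hist()
```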
### Mean and standard deviation
These values are useful in describing a normal distribution. There are also other potentially useful statistical measures (such as kurtosis and skew, which describe deviations from a normal distribution). These statistical measures are not only useful for describing data, but you can potentially use them as features in your machine learning model.
```
print('All Utah cases')
UT['cases'].describe()
UT_non0 = UT[UT['cases'] != 0]
print('Non-zero Utah cases')
UT_non0['cases'].describe()
```
**Activity:**
1. Display the mean, standard deviation, quartiles, max, and min for the data in another state besides Utah.
2. Save the statistics as variables for later use. Use `UT['cases'].mean()`, `.std()`, `.quantile()`, `.max()`, and `.min()`.
3. Find the kurtosis and skew of the data. Use `.kurtosis()` and `.skew()`
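A sketch of items 2 and 3 for the Utah data (the variable names are illustrative; replace `UT` with your state's DataFrame):
```python
# save summary statistics as variables
cases_mean = UT['cases'].mean()
cases_std = UT['cases'].std()
cases_median = UT['cases'].quantile(0.5)
cases_max = UT['cases'].max()
cases_min = UT['cases'].min()

# higher-order shape statistics
cases_kurtosis = UT['cases'].kurtosis()
cases_skew = UT['cases'].skew()
print(cases_mean, cases_std, cases_median, cases_max, cases_min, cases_kurtosis, cases_skew)
```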
|
github_jupyter
|
# Gas Mixtures: Perfect and Semiperfect Models
This Notebook is an example of how to declare and use *Gas Mixtures* with **pyTurb**. A Gas Mixture in **pyTurb** is treated as a combination of different **pyTurb** gas objects:
- *PerfectIdealGas*: Ideal Equation of State ($pv=R_gT$) and constant $c_p$, $c_v$, $\gamma_g$
- *SemiperfectIdealGas*: Ideal Equation of State and $c_p\left(T\right)$, $c_v\left(T\right)$, $\gamma_g\left(T\right)$ as a function of temperature
The *Gas Mixture* class and the rest of the gas models can be found at the following folder:
- pyturb
- gas_models
- thermo_prop
- PerfectIdealGas
- SemiperfectIdealGas
- GasMixture
```python
from pyturb.gas_models import GasMixture
from pyturb.gas_models import PerfectIdealGas
from pyturb.gas_models import SemiperfectIdealGas
```
When a `GasMixture` object is created, the gas model must be selected: the mixture can be treated as a *Perfect Gas* or a *Semiperfect Gas*. Note that both options are *ideal* gases (the *ideal equation of state* $pv=R_gT$ applies). Thus:
- If the gas is Perfect: $c_v, c_p, \gamma_g \equiv constant$
- If the gas is Semiperfect: $c_v(T), c_p(T), \gamma_g(T) \equiv f(T)$
To choose one of the gas models simply specify it when creating the Gas Mixture object:
```python
gas_mix_perfect = GasMixture(gas_model='Perfect')
gas_mix_semiperfect = GasMixture(gas_model='Semiperfect')
```
Note that 'gas_model' options are not case sensitive e.g. `Semi-perfect`, `semiperfect` or `Semiperfect` yield the same result.
A *gas mixture* can be defined by adding the gas species that make up the mixture. For that purpose, the method `add_gas` can be used:
```python
gas_mix = GasMixture()
gas_mix.add_gas(species, moles=quantity)
gas_mix.add_gas(species, mass=quantity)
```
Note that the gas species (pure substance) specified in `species` must be available as a `PerfectIdealGas` or `SemiperfectIdealGas`. The gas availability can be checked using the `is_available` function in `ThermoProperties`.
When using `add_gas`, the quantity of the gas to be added must be specified. This can be done by introducing the moles or the mass of the gas. For example, if a mixture of $1.5mol$ of $Ar$ and $3.5mol$ of $He$ is intended:
```python
gas_mix = GasMixture(gas_model='Perfect')
gas_mix.add_gas('Ar', moles=1.5)
gas_mix.add_gas('He', moles=3.5)
```
Whilst a mix of $500g$ of $O_2$ and $500g$ of $H_2$ would be:
```python
gas_mix = GasMixture(gas_model='Perfect')
gas_mix.add_gas('O2', mass=0.5)
gas_mix.add_gas('H2', mass=0.5)
```
Finally, the gas mixture provides the same outputs as a `PerfectIdealGas` or `SemiperfectIdealGas`, plus the molar and mass fractions:
- **Gas properties:** Ru, Rg, Mg, cp, cp_molar, cv, cv_molar, gamma
- **Gas enthalpies, moles and mass:** h0, h0_molar, mg, Ng
- **Mixture condition:** Molar fraction, mass fraction
---
### Gas Mixture example:
Let's create a mixture of Perfect Gases, with $500g$ of $O_2$ and $500g$ of $H_2$
```
from pyturb.gas_models import GasMixture
gas_mix = GasMixture(gas_model='Perfect')
gas_mix.add_gas('O2', mass=0.5)
gas_mix.add_gas('H2', mass=0.5)
```
To inspect the gas mixture conditions, we can use the *pandas DataFrame* stored in the `mixture_gases` attribute:
```
gas_mix.mixture_gases
```
Note that the `mixture_gases` dataframe contains the information of the mixture: amount of moles, amount of mass, molar and mass fractions, and the objects containing the pure substance information.
---
It is also possible to create a gas mixture by specifying moles:
```
gas_mix2 = GasMixture(gas_model='Perfect')
gas_mix2.add_gas('O2', moles=0.5)
gas_mix2.add_gas('H2', moles=0.5)
gas_mix2.mixture_gases
```
One can also define the mixture by specifying some pure substances in moles and others in mass:
```
gas_mix3 = GasMixture(gas_model='Perfect')
gas_mix3.add_gas('O2', mass=0.5)
gas_mix3.add_gas('H2', moles=0.121227)
gas_mix3.mixture_gases
```
Note that `gas_mix` and `gas_mix3` are equivalent.
---
### Perfect Air as a mixture
In this example we will create a gas mixture following the air composition (as a perfect mix of oxygen, nitrogen, argon and carbon dioxide) and we will compare it to the 'Air' substance from `PerfectIdealGas`.
>Note that **Air** is an available gas in the *NASA Glenn* coefficients and is therefore available as a `PerfectIdealGas` and as a `SemiperfectIdealGas`.
>Thus there is no need to declare Air as a gas mixture from pyTurb. However, for the sake of clarity, we will compare both mixtures.
From the `PerfectIdealGas` class:
```
from pyturb.gas_models import PerfectIdealGas
air_perfgas = PerfectIdealGas('Air')
print(air_perfgas.thermo_prop)
```
And now, applying a mixture of molar quantities (per unit mole):
- Diatomic oxygen: $O_2$ 20.9476%
- Diatomic nitrogen: $N_2$ 78.0840%
- Argon: $Ar$ 0.9365%
- Carbon dioxide: $CO_2$ 0.0319%
```
pyturb_mix = GasMixture('Perfect')
pyturb_mix.add_gas('O2', 0.209476)
pyturb_mix.add_gas('N2', 0.78084)
pyturb_mix.add_gas('Ar', 0.009365)
pyturb_mix.add_gas('CO2', 0.000319)
```
Therefore, the mixture is composed of:
```
pyturb_mix.mixture_gases
```
Where the gas constant, heat capacity at constant pressure, heat capacity at constant volume and the heat capacity ratio are:
```
print('pyTurb air mixture: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(pyturb_mix.Rg, pyturb_mix.cp(), pyturb_mix.cv(), pyturb_mix.gamma()))
print('Perfect air: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(air_perfgas.Rg, air_perfgas.cp(), air_perfgas.cv(), air_perfgas.gamma()))
```
---
### Semiperfect Gas Mixture
Following the last example, a semiperfect model can be used by just changing the `gas_model` option:
```
# Objective temperature:
T = 1500 #K
# Gas mixture:
pyturb_mix_sp = GasMixture('Semiperfect')
pyturb_mix_sp.add_gas('O2', 0.209476)
pyturb_mix_sp.add_gas('N2', 0.78084)
pyturb_mix_sp.add_gas('Ar', 0.009365)
pyturb_mix_sp.add_gas('CO2', 0.000319)
print('pyTurb air mixture: Rair={0:6.1f}J/kg/K; cp={1:6.1f} J/kg/K; cv={2:6.1f} J/kg/K; gamma={3:4.1f}'.format(pyturb_mix_sp.Rg, pyturb_mix_sp.cp(T), pyturb_mix_sp.cv(T), pyturb_mix_sp.gamma(T)))
```
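Because the semiperfect properties are functions of temperature, one could also evaluate the mixture created above at a few temperatures to see the variation (a small illustrative addition; it assumes the previous cell has been run):
```python
# cp and gamma of the semiperfect air mixture at several temperatures
for T in (300, 800, 1500):
    print('T={0:5.0f} K: cp={1:7.1f} J/kg/K, gamma={2:5.3f}'.format(
        T, pyturb_mix_sp.cp(T), pyturb_mix_sp.gamma(T)))
```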
|
github_jupyter
|
```
%reload_ext autoreload
%autoreload 2
from fastai.gen_doc.gen_notebooks import *
from pathlib import Path
```
### To update this notebook
Run `tools/sgen_notebooks.py`, or run the cell below. You need to make sure to refresh right after.
```
import glob
for f in Path().glob('*.ipynb'):
generate_missing_metadata(f)
```
# Metadata generated below
```
update_nb_metadata('tutorial.itemlist.ipynb',
summary='Advanced tutorial, explains how to create your custom `ItemBase` or `ItemList`',
title='Custom ItemList')
update_nb_metadata('tutorial.inference.ipynb',
summary='Intermediate tutorial, explains how to create a Learner for inference',
title='Inference Learner')
update_nb_metadata('tutorial.data.ipynb',
summary="Beginner's tutorial, explains how to quickly look at your data or model predictions",
title='Look at data')
update_nb_metadata('callbacks.csv_logger.ipynb',
summary='Callbacks that saves the tracked metrics during training',
title='callbacks.csv_logger')
update_nb_metadata('callbacks.tracker.ipynb',
summary='Callbacks that take decisions depending on the evolution of metrics during training',
title='callbacks.tracker')
update_nb_metadata('torch_core.ipynb',
summary='Basic functions using pytorch',
title='torch_core')
update_nb_metadata('gen_doc.convert2html.ipynb',
summary='Converting the documentation notebooks to HTML pages',
title='gen_doc.convert2html')
update_nb_metadata('metrics.ipynb',
summary='Useful metrics for training',
title='metrics')
update_nb_metadata('callbacks.fp16.ipynb',
summary='Training in mixed precision implementation',
title='callbacks.fp16')
update_nb_metadata('callbacks.general_sched.ipynb',
summary='Implementation of a flexible training API',
title='callbacks.general_sched')
update_nb_metadata('text.ipynb',
keywords='fastai',
summary='Application to NLP, including ULMFiT fine-tuning',
title='text')
update_nb_metadata('callback.ipynb',
summary='Implementation of the callback system',
title='callback')
update_nb_metadata('tabular.models.ipynb',
keywords='fastai',
summary='Model for training tabular/structured data',
title='tabular.models')
update_nb_metadata('callbacks.mixup.ipynb',
summary='Implementation of mixup',
title='callbacks.mixup')
update_nb_metadata('applications.ipynb',
summary='Types of problems you can apply the fastai library to',
title='applications')
update_nb_metadata('vision.data.ipynb',
summary='Basic dataset for computer vision and helper function to get a DataBunch',
title='vision.data')
update_nb_metadata('overview.ipynb',
summary='Overview of the core modules',
title='overview')
update_nb_metadata('training.ipynb',
keywords='fastai',
summary='Overview of fastai training modules, including Learner, metrics, and callbacks',
title='training')
update_nb_metadata('text.transform.ipynb',
summary='NLP data processing; tokenizes text and creates vocab indexes',
title='text.transform')
# do not overwrite this notebook, or changes may get lost!
# update_nb_metadata('jekyll_metadata.ipynb')
update_nb_metadata('collab.ipynb',
summary='Application to collaborative filtering',
title='collab')
update_nb_metadata('text.learner.ipynb',
summary='Easy access of language models and ULMFiT',
title='text.learner')
update_nb_metadata('gen_doc.nbdoc.ipynb',
summary='Helper function to build the documentation',
title='gen_doc.nbdoc')
update_nb_metadata('vision.learner.ipynb',
summary='`Learner` support for computer vision',
title='vision.learner')
update_nb_metadata('core.ipynb',
summary='Basic helper functions for the fastai library',
title='core')
update_nb_metadata('fastai_typing.ipynb',
keywords='fastai',
summary='Type annotations names',
title='fastai_typing')
update_nb_metadata('gen_doc.gen_notebooks.ipynb',
summary='Generation of documentation notebook skeletons from python module',
title='gen_doc.gen_notebooks')
update_nb_metadata('basic_train.ipynb',
summary='Learner class and training loop',
title='basic_train')
update_nb_metadata('gen_doc.ipynb',
keywords='fastai',
summary='Documentation modules overview',
title='gen_doc')
update_nb_metadata('callbacks.rnn.ipynb',
summary='Implementation of a callback for RNN training',
title='callbacks.rnn')
update_nb_metadata('callbacks.one_cycle.ipynb',
summary='Implementation of the 1cycle policy',
title='callbacks.one_cycle')
update_nb_metadata('vision.ipynb',
summary='Application to Computer Vision',
title='vision')
update_nb_metadata('vision.transform.ipynb',
summary='List of transforms for data augmentation in CV',
title='vision.transform')
update_nb_metadata('callbacks.lr_finder.ipynb',
summary='Implementation of the LR Range test from Leslie Smith',
title='callbacks.lr_finder')
update_nb_metadata('text.data.ipynb',
summary='Basic dataset for NLP tasks and helper functions to create a DataBunch',
title='text.data')
update_nb_metadata('text.models.ipynb',
summary='Implementation of the AWD-LSTM and the RNN models',
title='text.models')
update_nb_metadata('tabular.data.ipynb',
summary='Base class to deal with tabular data and get a DataBunch',
title='tabular.data')
update_nb_metadata('callbacks.ipynb',
keywords='fastai',
summary='Callbacks implemented in the fastai library',
title='callbacks')
update_nb_metadata('train.ipynb',
summary='Extensions to Learner that easily implement Callback',
title='train')
update_nb_metadata('callbacks.hooks.ipynb',
summary='Implement callbacks using hooks',
title='callbacks.hooks')
update_nb_metadata('vision.image.ipynb',
summary='Image class, variants and internal data augmentation pipeline',
title='vision.image')
update_nb_metadata('vision.models.unet.ipynb',
summary='Dynamic Unet that can use any pretrained model as a backbone.',
title='vision.models.unet')
update_nb_metadata('vision.models.ipynb',
keywords='fastai',
summary='Overview of the models used for CV in fastai',
title='vision.models')
update_nb_metadata('tabular.transform.ipynb',
summary='Transforms to clean and preprocess tabular data',
title='tabular.transform')
update_nb_metadata('index.ipynb',
keywords='fastai',
toc='false',
title='Welcome to fastai')
update_nb_metadata('layers.ipynb',
summary='Provides essential functions to building and modifying `Model` architectures.',
title='layers')
update_nb_metadata('tabular.ipynb',
keywords='fastai',
summary='Application to tabular/structured data',
title='tabular')
update_nb_metadata('basic_data.ipynb',
summary='Basic classes to contain the data for model training.',
title='basic_data')
update_nb_metadata('datasets.ipynb')
update_nb_metadata('tmp.ipynb',
keywords='fastai')
update_nb_metadata('callbacks.tracking.ipynb')
update_nb_metadata('data_block.ipynb',
keywords='fastai',
summary='The data block API',
title='data_block')
update_nb_metadata('callbacks.tracker.ipynb',
keywords='fastai',
summary='Callbacks that take decisions depending on the evolution of metrics during training',
title='callbacks.tracking')
update_nb_metadata('widgets.ipynb')
update_nb_metadata('text_tmp.ipynb')
update_nb_metadata('tabular_tmp.ipynb')
update_nb_metadata('tutorial.data.ipynb')
update_nb_metadata('tutorial.itemlist.ipynb')
update_nb_metadata('tutorial.inference.ipynb')
```
|
github_jupyter
|
# Usage Guide for the maysics.calculus Module
The calculus module contains seven functions
|Name|Purpose|
|---|---|
|lim|limit|
|ha|Hamiltonian operator|
|grad|gradient|
|nebla_dot|dot product with the nabla operator (divergence)|
|nebla_cross|cross product with the nabla operator (curl)|
|laplace|Laplace operator|
|inte|integration|
<br></br>
## Limits: lim
lim(f, x0, acc=0.01, method='both')
<br>Computes the value of the function ```f``` as $x\rightarrow x_{0}$, within an error tolerance of ```acc```
<br>```method``` can be 'both', '+' or '-', denoting the two-sided limit, the right-hand limit and the left-hand limit respectively
### DEMO 1-1: Compute the value of $y=\frac{\sin(x)}{x}$ as $x\rightarrow0$
```
from maysics.calculus import lim
import numpy as np
def f(x):
return np.sin(x) / x
lim(f, 0)
```
<br></br>
## Hamiltonian operator: ha
The Hamiltonian operator: $\hat{H}=-\frac{\hbar^{2}\nabla^{2}}{2m}+U$
<br>ha(f, m, U, acc=0.1)
<br>Returns the new function obtained by applying the Hamiltonian operator to the function ```f```, for a particle of mass ```m``` and potential energy ```U```, within an error tolerance of ```acc```
<br>```f``` must take an array as input (not a scalar)
<br>```U``` is a constant or a function
### DEMO 2-1: Find the new function generated by applying the Hamiltonian operator to $y=x$
```
from maysics.calculus import ha
def f(x):
return x
# m=1, U=2
f_new = ha(f, 1, 2)
# Evaluate the new function at x = (1, 2, 3)
f_new([1, 2, 3])
```
<br></br>
## Gradient: grad
grad(f, x, acc=0.1)
<br>Computes the gradient of the function f at x within an error tolerance of acc
### DEMO 3-1: Compute the gradient of $f(x,y)=x^{2}+y^{2}$ at the point $(3, 3)$
```
from maysics.calculus import grad
def f(x):
return x[0]**2 + x[1]**2
grad(f, [3, 3])
```
<br></br>
## Nabla operator: nebla_dot and nebla_cross
nebla_dot takes the dot product of the nabla operator with a vector function: $\nabla\cdot\vec{f}$
<br>nebla_dot(f, x, acc=0.1)
<br>nebla_cross takes the cross product of the nabla operator with a vector function: $\nabla\times\vec{f}$ (in this case the output of f must be three-dimensional)
<br>nebla_cross(f, x, acc=0.1)
<br>Usage is similar to the grad function
### DEMO 4-1: Value of $\nabla\cdot\vec{f}$ at the point $(1,1,1)$, where $\vec{f}=x^{2}\vec{i}+y^{2}\vec{j}+z^{2}\vec{k}$
```
from maysics.calculus import nebla_dot
def f(x):
return x[0]**2, x[1]**2, x[2]**2
nebla_dot(f, [1, 1, 1])
```
### DEMO 4-2: Value of $\nabla\times\vec{f}$ at the point $(1,1,1)$, where $\vec{f}=x^{2}\vec{i}+y^{2}\vec{j}+z^{2}\vec{k}$
```
from maysics.calculus import nebla_cross
def f(x):
return x[0]**2, x[1]**2, x[2]**2
nebla_cross(f, [1, 1, 1])
```
<br></br>
## Laplace operator: laplace
$\Delta=\nabla^{2}$
<br>laplace(f, x, acc=0.1)
<br>The function ```f``` takes a one-dimensional array as input; whether batches of points are supported depends on how ```f``` is written (see the two demos below)
### DEMO 5-1: Function without batch support: value of $\Delta f$ for $f(x,y,z)=x^{2}+y^{2}+z^{2}$ at the point $(1,1,1)$
```
from maysics.calculus import laplace
def f(x):
return sum(x**2)
laplace(f, [1,1,1])
```
### DEMO 5-2: Function with batch support: values of $\Delta f$ for $f(x,y,z)=x^{2}+y^{2}+z^{2}$ at the points $(1,1,1)$ and $(2,2,2)$
```
from maysics.calculus import laplace
def f(x):
return (x**2).sum(axis=1)
laplace(f, [[1,1,1],[2,2,2]])
```
<br></br>
## Definite integration: inte
inte(func, area, method='rect', dim=1, args={}, condition=None, param={}, acc=0.1, loop=10000, height=1, random_state=None)
<br>```func``` is the integrand
<br>```area``` is a two-dimensional array giving the integration range in each dimension
<br>```method``` can be 'rect' or 'mc', selecting the rectangle method or the Monte Carlo method respectively; the ```acc``` parameter only affects the rectangle method, while ```loop```, ```height``` and ```random_state``` only affect the Monte Carlo method
<br>```dim``` is the dimension of the input function; the default is a one-dimensional function
<br>```args``` holds the parameters of the input function f other than its independent variable
<br>```condition``` is a condition function; when ```condition``` is not None, only the points that satisfy ```condition``` (i.e. for which it returns True) are included in the integration region
<br>```param``` holds the parameters of the function ```condition``` other than its independent variable
<br>```acc``` can be either a number or a one-dimensional array; a number applies the same precision to every dimension, while an array allows a different precision per dimension
### Definite integration with the rectangle method
The size of each hyper-rectangle is $f(x)\times acc^{dim}$
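As a plain illustration of this idea (not the library's actual implementation), a one-dimensional rectangle-rule estimate could be sketched with numpy as follows:
```python
import numpy as np

def rect_integral_1d(f, a, b, acc=0.1):
    # sample the integrand on a grid with spacing acc and add up f(x) * acc
    x = np.arange(a, b, acc)
    return np.sum(f(x) * acc)

print(rect_integral_1d(np.sin, 0, np.pi))  # close to 2
```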
### DEMO 6-1: Integrate $f(x)=\sin(x)$ from 0 to π
```
from maysics.calculus import inte
import numpy as np
inte(np.sin, [[0, np.pi]])
```
### DEMO 6-2: Integrate $f(x)=A\sin(x)$ from 0 to π
```
from maysics.calculus import inte
import numpy as np
def f(x, A):
return A * np.sin(x)
# use A = 2
inte(f, [[0, np.pi]], args={'A':2})
```
### DEMO 6-3: Integrate $f(x)=2\sin(x)$ from 0 to π over the region where the function value is at most 1
```
from maysics.calculus import inte
import numpy as np
def f(x):
    return 2 * np.sin(x)
def c(x):
    # include only the points where the function value 2*sin(x) is at most 1
    if 2 * np.sin(x) <= 1:
        return True
    else:
        return False
inte(f, [[0, np.pi]], condition=c)
```
### DEMO 6-4: Integrate $f(x,y)=x^{2}+y^{2}$ over $x\in[-2,2]$, $y\in[-1,1]$
```
from maysics.calculus import inte
def f(x):
return x[0]**2 + x[1]**2
inte(f, [[-2, 2], [-1, 1]])
```
### Definite integration with the Monte Carlo method
Randomly scatter loop points inside the hyper-rectangle $area\times height$ (note that $height\geq\max f(x)$ must hold everywhere in area)
<br>Let n be the number of scattered points with $y\leq f(x)$; the integral is then $\approx\frac{n}{loop}\times area \times height$
<br>random_state is the random seed
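As an illustration of the estimate described above (again, not the library's internal code), a one-dimensional Monte Carlo version might be sketched as follows:
```python
import numpy as np

def mc_integral_1d(f, a, b, height=1.0, loop=10000, random_state=None):
    rng = np.random.default_rng(random_state)
    x = rng.uniform(a, b, loop)          # random abscissas inside the area
    y = rng.uniform(0, height, loop)     # random ordinates inside [0, height]
    n = np.sum(y <= f(x))                # points that fall under the curve
    return n / loop * (b - a) * height   # fraction of the box times its size

print(mc_integral_1d(np.sin, 0, np.pi, height=1.0, random_state=0))  # close to 2
```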
### DEMO 6-5: Integrate $f(x)=2\sin(x)$ from 0 to π
```
from maysics.calculus import inte
import numpy as np
def f(x):
return 2 * np.sin(x)
inte(f, [[0, np.pi]], method='mc', height=2)
```
### DEMO 6-6: Integrate $f(x,y)=x^{2}+y^{2}$ over $x\in[-2,2]$, $y\in[-1,1]$ using the Monte Carlo method
```
from maysics.calculus import inte
def f(x):
return x[0]**2 + x[1]**2
inte(f, [[-2, 2], [-1, 1]], method='mc', height=5)
```
|
github_jupyter
|
# Safely refactoring ACLs and firewall rules
Changing ACLs or firewall rules (or *filters*) is one of the riskiest updates to a network. Even a small error can block connectivity for a large set of critical services or open up sensitive resources to the world at large. Earlier notebooks showed how to [analyze filters for what they do and do not allow](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Analyzing%20ACLs%20and%20Firewall%20Rules.ipynb) and how to [make specific changes in a provably safe manner](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Provably%20Safe%20ACL%20and%20Firewall%20Changes.ipynb).
This notebook shows how to refactor complex filters in a way that the full impact of refactoring can be understood and analyzed for correctness *before* refactored filters are pushed to the network.
## Original ACL
We will use the following ACL as a running example in this notebook. The ACL can be read as a few separate sections:
* Line 10: Deny ICMP redirects
* Lines 20, 30: Permit BFD traffic on certain address blocks
* Lines 40-80: Permit BGP traffic
* Lines 90-100: Permit DNS traffic from a /24 subnet while denying it from a /32 within that subnet
* Lines 110-500: Permit or deny IP traffic from certain subnets
* Line 510: Permit ICMP echo reply
* Lines 520-840: Deny IP traffic to certain subnets
* Lines 850-880: Deny all other types of traffic
(The IP address space in the ACL appears all over the place because it has been anonymized via [Netconan](https://github.com/intentionet/netconan). Netconan preserves the super- and sub-prefix relationships when anonymizing IP addresses and prefixes.)
```
# The ACL before refactoring
original_acl = """
ip access-list acl
10 deny icmp any any redirect
20 permit udp 117.186.185.0/24 range 49152 65535 117.186.185.0/24 eq 3784
30 permit udp 117.186.185.0/24 range 49152 65535 117.186.185.0/24 eq 3785
40 permit tcp 11.36.216.170/32 11.36.216.169/32 eq bgp
50 permit tcp 11.36.216.176/32 11.36.216.179/32 eq bgp
60 permit tcp 204.150.33.175/32 204.150.33.83/32 eq bgp
70 permit tcp 205.248.59.64/32 205.248.59.67/32 eq bgp
80 permit tcp 205.248.58.190/32 205.248.58.188/32 eq bgp
90 deny udp 10.10.10.42/32 218.8.104.58/32 eq domain
100 permit udp 10.10.10.0/24 218.8.104.58/32 eq domain
110 deny ip 54.0.0.0/8 any
120 deny ip 163.157.0.0/16 any
130 deny ip 166.144.0.0/12 any
140 deny ip 198.170.50.0/24 any
150 deny ip 198.120.0.0/16 any
160 deny ip 11.36.192.0/19 any
170 deny ip 11.125.64.0/19 any
180 permit ip 166.146.58.184/32 any
190 deny ip 218.66.57.0/24 any
200 deny ip 218.66.56.0/24 any
210 deny ip 218.67.71.0/24 any
220 deny ip 218.67.72.0/24 any
230 deny ip 218.67.96.0/22 any
240 deny ip 8.89.120.0/22 any
250 deny ip 54.203.159.1/32 any
260 permit ip 218.8.104.0/25 any
270 permit ip 218.8.104.128/25 any
280 permit ip 218.8.103.0/24 any
290 deny ip 144.49.45.40/32 any
300 deny ip 163.255.18.63/32 any
310 deny ip 202.45.130.141/32 any
320 deny ip 212.26.132.18/32 any
330 deny ip 218.111.16.132/32 any
340 deny ip 218.246.165.90/32 any
350 deny ip 29.228.179.210/32 any
360 deny ip 194.181.135.214/32 any
370 deny ip 10.64.90.249/32 any
380 deny ip 207.70.46.217/32 any
390 deny ip 219.185.241.117/32 any
400 deny ip 2.80.3.219/32 any
410 deny ip 27.212.145.150/32 any
420 deny ip 131.159.53.215/32 any
430 deny ip 214.220.213.107/32 any
440 deny ip 196.64.84.239/32 any
450 deny ip 28.69.250.136/32 any
460 deny ip 200.45.87.238/32 any
470 deny ip any 11.125.89.32/30
480 deny ip any 11.125.89.36/30
490 deny ip any 11.125.89.40/30
500 deny ip any 11.125.89.44/30
510 permit icmp any any echo-reply
520 deny ip any 11.36.199.216/30
530 deny ip any 11.36.199.36/30
540 deny ip any 11.36.199.2/30
550 deny ip any 11.36.199.52/30
560 deny ip any 11.36.199.20/30
570 deny ip any 11.125.82.216/30
580 deny ip any 11.125.82.220/32
590 deny ip any 11.125.82.36/30
600 deny ip any 11.125.82.12/30
610 deny ip any 11.125.80.136/30
620 deny ip any 11.125.80.141/32
630 deny ip any 11.125.87.48/30
640 deny ip any 11.125.87.168/30
650 deny ip any 11.125.87.173/32
660 deny ip any 11.125.90.56/30
670 deny ip any 11.125.90.240/30
680 deny ip any 11.125.74.224/30
690 deny ip any 11.125.91.132/30
700 deny ip any 11.125.89.132/30
710 deny ip any 11.125.89.12/30
720 deny ip any 11.125.92.108/30
730 deny ip any 11.125.92.104/32
740 deny ip any 11.125.92.28/30
750 deny ip any 11.125.92.27/32
760 deny ip any 11.125.92.160/30
770 deny ip any 11.125.92.164/32
780 deny ip any 11.125.92.204/30
790 deny ip any 11.125.92.202/32
800 deny ip any 11.125.93.192/29
810 deny ip any 11.125.95.204/30
820 deny ip any 11.125.95.224/30
830 deny ip any 11.125.95.180/30
840 deny ip any 11.125.95.156/30
850 deny tcp any any
860 deny icmp any any
870 deny udp any any
880 deny ip any any
"""
```
## Compressed ACL
Now, assume that we want to compress this ACL to make it more manageable. We do the following operations:
* Merge the two BFD permit statements on lines 20-30 into one statement using the range directive.
* Remove the BGP session on line 80 because it has been decommissioned
* Remove lines 180 and 250 because they are shadowed by earlier lines and will never match a packet. Such lines can be found via the `filterLineReachability` question, as shown [here](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Analyzing%20ACLs%20and%20Firewall%20Rules.ipynb#filterLineReachability:-Analyzing-reachability-of-filter-lines).
* Merge pairs of lines (190, 200), (210, 220), and (260, 270) by combining their prefixes into a less specific prefix.
* Remove all deny statements on lines 520-870. They are not needed given the final deny on line 880.
The result of these actions, which halve the ACL size, is shown below. To enable easy observation of changes, we have preserved the line numbers.
```
compressed_acl = """
ip access-list acl
10 deny icmp any any redirect
20 permit udp 117.186.185.0/24 range 49152 65535 117.186.185.0/24 range 3784 3785
! 30 MERGED WITH LINE ABOVE
40 permit tcp 11.36.216.170/32 11.36.216.169/32 eq bgp
50 permit tcp 11.36.216.176/32 11.36.216.179/32 eq bgp
60 permit tcp 204.150.33.175/32 204.150.33.83/32 eq bgp
70 permit tcp 205.248.59.64/32 205.248.59.67/32 eq bgp
! 80 DECOMMISSIONED BGP SESSION
90 deny udp 10.10.10.42/32 218.8.104.58/32 eq domain
100 permit udp 10.10.10.0/24 218.8.104.58/32 eq domain
110 deny ip 54.0.0.0/8 any
120 deny ip 163.157.0.0/16 any
130 deny ip 166.144.0.0/12 any
140 deny ip 198.170.50.0/24 any
150 deny ip 198.120.0.0/16 any
160 deny ip 11.36.192.0/19 any
170 deny ip 11.125.64.0/19 any
! 180 REMOVED UNREACHABLE LINE
190 deny ip 218.66.56.0/23 any
! 200 MERGED WITH LINE ABOVE
210 deny ip 218.67.71.0/23 any
! 220 MERGED WITH LINE ABOVE
230 deny ip 218.67.96.0/22 any
240 deny ip 8.89.120.0/22 any
! 250 REMOVED UNREACHABLE LINE
260 permit ip 218.8.104.0/24 any
! 270 MERGED WITH LINE ABOVE
280 permit ip 218.8.103.0/24 any
290 deny ip 144.49.45.40/32 any
300 deny ip 163.255.18.63/32 any
310 deny ip 202.45.130.141/32 any
320 deny ip 212.26.132.18/32 any
330 deny ip 218.111.16.132/32 any
340 deny ip 218.246.165.90/32 any
350 deny ip 29.228.179.210/32 any
360 deny ip 194.181.135.214/32 any
370 deny ip 10.64.90.249/32 any
380 deny ip 207.70.46.217/32 any
390 deny ip 219.185.241.117/32 any
400 deny ip 2.80.3.219/32 any
410 deny ip 27.212.145.150/32 any
420 deny ip 131.159.53.215/32 any
430 deny ip 214.220.213.107/32 any
440 deny ip 196.64.84.239/32 any
450 deny ip 28.69.250.136/32 any
460 deny ip 200.45.87.238/32 any
470 deny ip any 11.125.89.32/28
510 permit icmp any any echo-reply
! 520-870 REMOVED UNNECESSARY DENIES
880 deny ip any any
"""
```
The challenge for us is to find out if and how this compressed ACL differs from the original. That is, is there traffic that is treated differently by the two ACLs, and if so, which lines are responsible for the difference.
This task is difficult to get right through manual reasoning alone, which is why we developed the `compareFilters` question in Batfish.
## Comparing filters
We can compare the two ACLs above as follows. To initialize snapshots, we will use Batfish's `init_snapshot_from_text` function which creates a snapshot with a single device whose configuration is the provided text. The analysis shown below can be done even when the filters are embedded within bigger device configurations.
```
# Import packages
%run startup.py
bf = Session(host="localhost")
# Initialize a snapshot with the original ACL
original_snapshot = bf.init_snapshot_from_text(
original_acl,
platform="cisco-nx",
snapshot_name="original",
overwrite=True)
# Initialize a snapshot with the compressed ACL
compressed_snapshot = bf.init_snapshot_from_text(
compressed_acl,
platform="cisco-nx",
snapshot_name="compressed",
overwrite=True)
# Now, compare the two ACLs in the two snapshots
answer = bf.q.compareFilters().answer(snapshot=compressed_snapshot, reference_snapshot=original_snapshot)
show(answer.frame())
```
The `compareFilters` question compares two filters and returns pairs of lines, one from each filter, that match the same flow(s) but treat them differently. If it reports no output, the filters are guaranteed to be identical. The analysis is exhaustive and considers *all possible* flows.
As we can see from the output above, our compressed ACL is not the same as the original one. In particular, line 210 of the compressed ACL will deny some flows that were being permitted by line 510 of the original; and line 510 of the compressed ACL will permit some flows that were being denied by line 220 of the original ACL. Because the permit statements correspond to ICMP traffic, we can tell that the traffic treated differently by the two filters is ICMP. To learn the specific source and destination IPs that are impacted, one may run the `searchFilters` question, as shown [here](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Provably%20Safe%20ACL%20and%20Firewall%20Changes.ipynb#Step-3:-Ensure-that-no-collateral-damage-has-occurred).
By looking at the output above, we can immediately understand the difference:
* The first two lines are showing that the compressed ACL is denying some traffic on line 210 (with index 16) that the original ACL was permitting via line 510, and permitting some traffic on line 510 that the original ACL was denying via line 220.
It turns out that the address space merger we did for lines 210 and 220 in the original ACL, where we combined 218.67.72.0/24 and 218.67.71.0/24 into 218.67.71.0/23, was not correct: 218.67.71.0/23 actually denotes the range 218.67.70.0 through 218.67.71.255, which drops 218.67.72.0/24 and newly covers 218.67.70.0/24. The other similar mergers of 218.66.57.0/24 and 218.66.56.0/24 into 218.66.56.0/23 and of 218.8.104.0/25 and 218.8.104.128/25 into 218.8.104.0/24 were correct.
* The third line is showing that the compressed ACL is denying some traffic at the end of the ACL that the original ACL was permitting via line 80. This is an expected consequence of decommissioning the BGP session on line 80.
It is not always the case that refactoring is semantics preserving. Where `compareFilters` helps is succinctly enumerating *all* differences. Engineers can look at the differences and decide if the refactored filter meets their intent.
## Splitting ACLs
Compressing large ACLs is one type of refactoring engineers do; another one is splitting a large ACL into multiple smaller ACLs and composing them on the same device or spreading them across multiple devices in the network. Smaller ACLs are easier to maintain and evolve. However, the split operation is risky. We may forget to include in the smaller ACLs some protections that exist in the original ACL. We show how such splits can be safely done using Batfish.
Suppose we want to split the compressed ACL above into multiple smaller ACLs that handle different concerns. So, we should have different ACLs for different types of traffic and different ACLs for different logical groups of nodes in the network. The result of such splitting is shown below. For ease of exposition, we have retained the line numbers from the original ACL and mimic a scenario in which all ACLs live on the same device.
```
smaller_acls = """
ip access-list deny-icmp-redirect
10 deny icmp any any redirect
ip access-list permit-bfd
20 permit udp 117.186.185.0/24 range 49152 65535 117.186.185.0/24 range 3784 3785
ip access-list permit-bgp-session
40 permit tcp 11.36.216.170/32 11.36.216.169/32 eq bgp
50 permit tcp 11.36.216.176/32 11.36.216.179/32 eq bgp
60 permit tcp 204.150.33.175/32 204.150.33.83/32 eq bgp
70 permit tcp 205.248.59.64/32 205.248.59.67/32 eq bgp
ip access-list acl-dns
90 deny udp 10.10.10.42/32 218.8.104.58/32 eq domain
100 permit udp 10.10.10.0/24 218.8.104.58/32 eq domain
ip access-list deny-untrusted-sources-group1
110 deny ip 54.0.0.0/8 any
120 deny ip 163.157.0.0/16 any
130 deny ip 166.144.0.0/12 any
140 deny ip 198.170.50.0/24 any
150 deny ip 198.120.0.0/16 any
160 deny ip 11.36.192.0/19 any
ip access-list deny-untrusted-sources-group2
160 deny ip 11.36.192.0/20 any
190 deny ip 218.66.56.0/23 any
210 deny ip 218.67.71.0/23 any
230 deny ip 218.67.96.0/22 any
240 deny ip 8.89.120.0/22 any
ip access-list permit-trusted-sources
260 permit ip 218.8.104.0/24 any
280 permit ip 218.8.103.0/24 any
ip access-list deny-untrusted-sources-group3
290 deny ip 144.49.45.40/32 any
300 deny ip 163.255.18.63/32 any
310 deny ip 202.45.130.141/32 any
320 deny ip 212.26.132.18/32 any
300 deny ip 218.111.16.132/32 any
340 deny ip 218.246.165.90/32 any
350 deny ip 29.228.179.210/32 any
360 deny ip 194.181.135.214/32 any
370 deny ip 10.64.90.249/32 any
380 deny ip 207.70.46.217/32 any
390 deny ip 219.185.241.117/32 any
ip access-list deny-untrusted-sources-group4
400 deny ip 2.80.3.219/32 any
410 deny ip 27.212.145.150/32 any
420 deny ip 131.159.53.215/32 any
430 deny ip 214.220.213.107/32 any
440 deny ip 196.64.84.239/32 any
450 deny ip 28.69.250.136/32 any
460 deny ip 200.45.87.238/32 any
ip access-list acl-tail
470 deny ip any 11.125.89.32/28
510 permit icmp any any echo-reply
880 deny ip any any
"""
```
Given the split ACLs above, one analysis may be to figure out if each untrusted source subnet was included in a smaller ACL. Otherwise, we have lost protection that was present in the original ACL. We can accomplish this analysis via the `findMatchingFilterLines` question, as shown below.
Once we are satisfied with the analysis of filters, for an end-to-end safety guarantee, we should also analyze if there are new flows that the network will allow (or disallow) after the change. Such an analysis can be done via the `differentialReachability` question, as shown [here](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Introduction%20to%20Forwarding%20Change%20Validation.ipynb#Change-Scenario-2:-Validating-the-end-to-end-impact-of-an-ACL-change).
```
# Initialize a snapshot with the smaller ACLs
smaller_snapshot = bf.init_snapshot_from_text(
smaller_acls,
platform="cisco-nx",
snapshot_name="smaller",
overwrite=True)
# All untrusted subnets
untrusted_source_subnets = ["54.0.0.0/8",
"163.157.0.0/16",
"166.144.0.0/12",
"198.170.50.0/24",
"198.120.0.0/16",
"11.36.192.0/19",
"11.125.64.0/19",
"218.66.56.0/24",
"218.66.57.0/24",
"218.67.71.0/23",
"218.67.96.0/22",
"8.89.120.0/22"
]
for subnet in untrusted_source_subnets:
# Find which ACLs match traffic from this source subnet
answer = bf.q.findMatchingFilterLines(
headers=HeaderConstraints(srcIps=subnet),
filters="/deny-untrusted/").answer(snapshot=smaller_snapshot)
# Each source subnet should match exactly one ACL
af = answer.frame()
if len(af) == 1:
print("{} .... OK".format(subnet))
elif len(af) == 0:
print("{} .... ABSENT".format(subnet))
else:
print("{} .... Multiply present".format(subnet))
show(af)
```
In the code above, we first enumerate all untrusted subnets in the network. The granularity of this specification need not be the same as that in the ACL. For instance, we enumerate 218.66.56.0/24 and 218.66.57.0/24 as untrusted subnets but the ACL has a less specific prefix 218.66.56.0/23. Batfish understands such relationships and provides an accurate analysis that is not possible with simple string matching.
The **for** loop above uses the `findMatchingFilterLines` question to find out which lines across all ACLs whose names contain "deny-untrusted" will match packets originating from the specified subnet. Our expectation is that each subnet should match exactly one line in exactly one ACL, and the output shows "OK" against such subnets. It shows "ABSENT" for subnets that do not match any line and shows the multiple matching lines for subnets where that happens.
We see that during the split above, we ended up matching the subnet 11.36.192.0/19 twice, once as a /19 in ACL deny-untrusted-sources-group1 and then as a /20 in ACL deny-untrusted-sources-group2. More dangerously, we completely forgot to match 11.125.64.0/19, which would open a security hole in the network if these smaller ACLs were applied.
## Summary
In this notebook, we showed how to use the `compareFilters` and `findMatchingFilterLines` questions of Batfish to safely refactor complex filters.
* `compareFilters` analyzes the original and revised filter to enumerate all cases that will treat *any* flow differently.
* `findMatchingFilterLines` enumerates all lines across all specified filters that match the given space of flows.
For additional ways to analyze filter using Batfish, see the ["Analyzing ACLs and Firewall Rules"](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Analyzing%20ACLs%20and%20Firewall%20Rules.ipynb) and the ["Provably Safe ACL and Firewall Changes"](https://github.com/batfish/pybatfish/blob/master/jupyter_notebooks/Provably%20Safe%20ACL%20and%20Firewall%20Changes.ipynb) notebooks.
***
### Get involved with the Batfish community
Join our community on [Slack](https://join.slack.com/t/batfish-org/shared_invite/enQtMzA0Nzg2OTAzNzQ1LTcyYzY3M2Q0NWUyYTRhYjdlM2IzYzRhZGU1NWFlNGU2MzlhNDY3OTJmMDIyMjQzYmRlNjhkMTRjNWIwNTUwNTQ) and [GitHub](https://github.com/batfish/batfish).
|
github_jupyter
|
# Logistic Regression with a Neural Network mindset
Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning.
**Instructions:**
- Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so.
**You will learn to:**
- Build the general architecture of a learning algorithm, including:
- Initializing parameters
- Calculating the cost function and its gradient
- Using an optimization algorithm (gradient descent)
- Gather all three functions above into a main model function, in the right order.
## <font color='darkblue'>Updates</font>
This notebook has been updated over the past few months. The prior version was named "v5", and the current version is now named '6a'
#### If you were working on a previous version:
* You can find your prior work by looking in the file directory for the older files (named by version name).
* To view the file directory, click on the "Coursera" icon in the top left corner of this notebook.
* Please copy your work from the older versions to the new version, in order to submit your work for grading.
#### List of Updates
* Forward propagation formula, indexing now starts at 1 instead of 0.
* Optimization function comment now says "print cost every 100 training iterations" instead of "examples".
* Fixed grammar in the comments.
* Y_prediction_test variable name is used consistently.
* Plot's axis label now says "iterations (hundred)" instead of "iterations".
* When testing the model, the test image is normalized by dividing by 255.
## 1 - Packages ##
First, let's run the cell below to import all the packages that you will need during this assignment.
- [numpy](www.numpy.org) is the fundamental package for scientific computing with Python.
- [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
- [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python.
- [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
```
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy
from PIL import Image
from scipy import ndimage
from lr_utils import load_dataset
%matplotlib inline
```
## 2 - Overview of the Problem set ##
**Problem Statement**: You are given a dataset ("data.h5") containing:
- a training set of m_train images labeled as cat (y=1) or non-cat (y=0)
- a test set of m_test images labeled as cat or non-cat
- each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px).
You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat.
Let's get more familiar with the dataset. Load the data by running the following code.
```
# Loading the data (cat/non-cat)
train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset()
```
We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing).
Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images.
```
# Example of a picture
index = 25
plt.imshow(train_set_x_orig[index])
print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.")
```
Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs.
**Exercise:** Find the values for:
- m_train (number of training examples)
- m_test (number of test examples)
- num_px (= height = width of a training image)
Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`.
```
### START CODE HERE ### (≈ 3 lines of code)
m_train = train_set_y.shape[1]
m_test = test_set_y.shape[1]
num_px = train_set_x_orig.shape[1]
### END CODE HERE ###
print ("Number of training examples: m_train = " + str(m_train))
print ("Number of testing examples: m_test = " + str(m_test))
print ("Height/Width of each image: num_px = " + str(num_px))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_set_x shape: " + str(train_set_x_orig.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x shape: " + str(test_set_x_orig.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
```
**Expected Output for m_train, m_test and num_px**:
<table style="width:15%">
<tr>
<td>**m_train**</td>
<td> 209 </td>
</tr>
<tr>
<td>**m_test**</td>
<td> 50 </td>
</tr>
<tr>
<td>**num_px**</td>
<td> 64 </td>
</tr>
</table>
For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns.
**Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1).
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use:
```python
X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X
```
```
train_set_x_orig.reshape(train_set_x_orig.shape[0],-1,1).shape
train_set_x_orig.reshape(-1, train_set_x_orig.shape[0]).shape
# Reshape the training and test examples
### START CODE HERE ### (≈ 2 lines of code)
train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0],-1).T
test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0],-1).T
### END CODE HERE ###
print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape))
print ("train_set_y shape: " + str(train_set_y.shape))
print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape))
print ("test_set_y shape: " + str(test_set_y.shape))
print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0]))
```
**Expected Output**:
<table style="width:35%">
<tr>
<td>**train_set_x_flatten shape**</td>
<td> (12288, 209)</td>
</tr>
<tr>
<td>**train_set_y shape**</td>
<td>(1, 209)</td>
</tr>
<tr>
<td>**test_set_x_flatten shape**</td>
<td>(12288, 50)</td>
</tr>
<tr>
<td>**test_set_y shape**</td>
<td>(1, 50)</td>
</tr>
<tr>
<td>**sanity check after reshaping**</td>
<td>[17 31 56 22 33]</td>
</tr>
</table>
To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255.
One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you subtract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel).
<!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. Then you backpropogate with the gradients to train the model. But, it is extremely important for each feature to have a similar range such that our gradients don't explode. You will see that more in detail later in the lectures. !-->
Let's standardize our dataset.
```
train_set_x = train_set_x_flatten/255.
test_set_x = test_set_x_flatten/255.
```
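For comparison, here is a minimal sketch of the general center-and-standardize approach described above; it is not needed for this image dataset, where dividing by 255 suffices, and the variable names below are illustrative:
```python
# General standardization (for comparison only; not used in this assignment)
train_mean = train_set_x_flatten.mean()   # mean of the whole training array
train_std = train_set_x_flatten.std()     # standard deviation of the whole training array
train_set_x_std = (train_set_x_flatten - train_mean) / train_std
# the test set is usually scaled with the training statistics
test_set_x_std = (test_set_x_flatten - train_mean) / train_std
```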
<font color='blue'>
**What you need to remember:**
Common steps for pre-processing a new dataset are:
- Figure out the dimensions and shapes of the problem (m_train, m_test, num_px, ...)
- Reshape the datasets such that each example is now a vector of size (num_px \* num_px \* 3, 1)
- "Standardize" the data
## 3 - General Architecture of the learning algorithm ##
It's time to design a simple algorithm to distinguish cat images from non-cat images.
You will build a Logistic Regression, using a Neural Network mindset. The following Figure explains why **Logistic Regression is actually a very simple Neural Network!**
<img src="images/LogReg_kiank.png" style="width:650px;height:400px;">
**Mathematical expression of the algorithm**:
For one example $x^{(i)}$:
$$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
$$\hat{y}^{(i)} = a^{(i)} = sigmoid(z^{(i)})\tag{2}$$
$$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
The cost is then computed by summing over all training examples:
$$ J = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{6}$$
**Key steps**:
In this exercise, you will carry out the following steps:
- Initialize the parameters of the model
- Learn the parameters for the model by minimizing the cost
- Use the learned parameters to make predictions (on the test set)
- Analyse the results and conclude
## 4 - Building the parts of our algorithm ##
The main steps for building a Neural Network are:
1. Define the model structure (such as number of input features)
2. Initialize the model's parameters
3. Loop:
- Calculate current loss (forward propagation)
- Calculate current gradient (backward propagation)
- Update parameters (gradient descent)
You often build 1-3 separately and integrate them into one function we call `model()`.
### 4.1 - Helper functions
**Exercise**: Using your code from "Python Basics", implement `sigmoid()`. As you've seen in the figure above, you need to compute $sigmoid( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$ to make predictions. Use np.exp().
```
# GRADED FUNCTION: sigmoid
def sigmoid(z):
"""
Compute the sigmoid of z
Arguments:
z -- A scalar or numpy array of any size.
Return:
s -- sigmoid(z)
"""
### START CODE HERE ### (≈ 1 line of code)
s = 1/(1+np.exp(-z))
### END CODE HERE ###
return s
print ("sigmoid([0, 2]) = " + str(sigmoid(np.array([0,2]))))
```
**Expected Output**:
<table>
<tr>
<td>**sigmoid([0, 2])**</td>
<td> [ 0.5 0.88079708]</td>
</tr>
</table>
### 4.2 - Initializing parameters
**Exercise:** Implement parameter initialization in the cell below. You have to initialize w as a vector of zeros. If you don't know what numpy function to use, look up np.zeros() in the Numpy library's documentation.
```
# GRADED FUNCTION: initialize_with_zeros
def initialize_with_zeros(dim):
"""
This function creates a vector of zeros of shape (dim, 1) for w and initializes b to 0.
Argument:
dim -- size of the w vector we want (or number of parameters in this case)
Returns:
w -- initialized vector of shape (dim, 1)
b -- initialized scalar (corresponds to the bias)
"""
### START CODE HERE ### (≈ 1 line of code)
w = np.zeros((dim,1))
b = 0
### END CODE HERE ###
assert(w.shape == (dim, 1))
assert(isinstance(b, float) or isinstance(b, int))
return w, b
dim = 2
w, b = initialize_with_zeros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
```
**Expected Output**:
<table style="width:15%">
<tr>
<td> ** w ** </td>
<td> [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td> ** b ** </td>
<td> 0 </td>
</tr>
</table>
For image inputs, w will be of shape (num_px $\times$ num_px $\times$ 3, 1).
### 4.3 - Forward and Backward propagation
Now that your parameters are initialized, you can do the "forward" and "backward" propagation steps for learning the parameters.
**Exercise:** Implement a function `propagate()` that computes the cost function and its gradient.
**Hints**:
Forward Propagation:
- You get X
- You compute $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
- You calculate the cost function: $J = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
Here are the two formulas you will be using:
$$ \frac{\partial J}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{7}$$
$$ \frac{\partial J}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{8}$$
```
# GRADED FUNCTION: propagate
def propagate(w, b, X, Y):
"""
Implement the cost function and its gradient for the propagation explained above
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)
Return:
cost -- negative log-likelihood cost for logistic regression
dw -- gradient of the loss with respect to w, thus same shape as w
db -- gradient of the loss with respect to b, thus same shape as b
Tips:
- Write your code step by step for the propagation. np.log(), np.dot()
"""
m = X.shape[1]
# FORWARD PROPAGATION (FROM X TO COST)
### START CODE HERE ### (≈ 2 lines of code)
A = sigmoid(np.dot(w.T,X) +b) # compute activation
cost = -(1/m)*np.sum((Y*np.log(A) + (1-Y)*np.log(1-A))) # compute cost
### END CODE HERE ###
# BACKWARD PROPAGATION (TO FIND GRAD)
### START CODE HERE ### (≈ 2 lines of code)
dw = (1/m)*(np.dot(X,(A-Y).T))
db = (1/m)*(np.sum(A-Y))
### END CODE HERE ###
assert(dw.shape == w.shape)
assert(db.dtype == float)
cost = np.squeeze(cost)
assert(cost.shape == ())
grads = {"dw": dw,
"db": db}
return grads, cost
w, b, X, Y = np.array([[1.],[2.]]), 2., np.array([[1.,2.,-1.],[3.,4.,-3.2]]), np.array([[1,0,1]])
grads, cost = propagate(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("cost = " + str(cost))
```
**Expected Output**:
<table style="width:50%">
<tr>
<td> ** dw ** </td>
<td> [[ 0.99845601]
[ 2.39507239]]</td>
</tr>
<tr>
<td> ** db ** </td>
<td> 0.00145557813678 </td>
</tr>
<tr>
<td> ** cost ** </td>
<td> 5.801545319394553 </td>
</tr>
</table>
### 4.4 - Optimization
- You have initialized your parameters.
- You are also able to compute a cost function and its gradient.
- Now, you want to update the parameters using gradient descent.
**Exercise:** Write down the optimization function. The goal is to learn $w$ and $b$ by minimizing the cost function $J$. For a parameter $\theta$, the update rule is $ \theta = \theta - \alpha \text{ } d\theta$, where $\alpha$ is the learning rate.
```
# GRADED FUNCTION: optimize
def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
"""
This function optimizes w and b by running a gradient descent algorithm
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of shape (num_px * num_px * 3, number of examples)
Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- True to print the loss every 100 steps
Returns:
params -- dictionary containing the weights w and bias b
grads -- dictionary containing the gradients of the weights and bias with respect to the cost function
costs -- list of all the costs computed during the optimization, this will be used to plot the learning curve.
Tips:
You basically need to write down two steps and iterate through them:
1) Calculate the cost and the gradient for the current parameters. Use propagate().
2) Update the parameters using gradient descent rule for w and b.
"""
costs = []
for i in range(num_iterations):
# Cost and gradient calculation (≈ 1-4 lines of code)
### START CODE HERE ###
grads, cost = propagate(w, b, X, Y)
### END CODE HERE ###
# Retrieve derivatives from grads
dw = grads["dw"]
db = grads["db"]
# update rule (≈ 2 lines of code)
### START CODE HERE ###
w = w - learning_rate*dw
b = b - learning_rate*db
### END CODE HERE ###
# Record the costs
if i % 100 == 0:
costs.append(cost)
# Print the cost every 100 training iterations
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
params = {"w": w,
"b": b}
grads = {"dw": dw,
"db": db}
return params, grads, costs
params, grads, costs = optimize(w, b, X, Y, num_iterations= 100, learning_rate = 0.009, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **w** </td>
<td>[[ 0.19033591]
[ 0.12259159]] </td>
</tr>
<tr>
<td> **b** </td>
<td> 1.92535983008 </td>
</tr>
<tr>
<td> **dw** </td>
<td> [[ 0.67752042]
[ 1.41625495]] </td>
</tr>
<tr>
<td> **db** </td>
<td> 0.219194504541 </td>
</tr>
</table>
**Exercise:** The previous function will output the learned w and b. We are able to use w and b to predict the labels for a dataset X. Implement the `predict()` function. There are two steps to computing predictions:
1. Calculate $\hat{Y} = A = \sigma(w^T X + b)$
2. Convert the entries of a into 0 (if activation <= 0.5) or 1 (if activation > 0.5) and store the predictions in a vector `Y_prediction`. If you wish, you can use an `if`/`else` statement in a `for` loop (though there is also a way to vectorize this, as sketched below).
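As an aside, here is a small self-contained sketch of the vectorized alternative mentioned above (the graded cell below keeps the loop form):
```python
import numpy as np

A = np.array([[0.9, 0.3, 0.6]])           # example activations
Y_prediction = (A > 0.5).astype(float)    # vectorized thresholding at 0.5
print(Y_prediction)                       # [[1. 0. 1.]]
```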
```
# GRADED FUNCTION: predict
def predict(w, b, X):
'''
Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b)
Arguments:
w -- weights, a numpy array of size (num_px * num_px * 3, 1)
b -- bias, a scalar
X -- data of size (num_px * num_px * 3, number of examples)
Returns:
Y_prediction -- a numpy array (vector) containing all predictions (0/1) for the examples in X
'''
m = X.shape[1]
Y_prediction = np.zeros((1,m))
w = w.reshape(X.shape[0], 1)
# Compute vector "A" predicting the probabilities of a cat being present in the picture
### START CODE HERE ### (≈ 1 line of code)
A = sigmoid(np.dot(w.T,X) +b)
### END CODE HERE ###
for i in range(A.shape[1]):
# Convert probabilities A[0,i] to actual predictions p[0,i]
### START CODE HERE ### (≈ 4 lines of code)
Y_prediction[0,i] = 0 if A[0,i] < .5 else 1
### END CODE HERE ###
assert(Y_prediction.shape == (1, m))
return Y_prediction
w = np.array([[0.1124579],[0.23106775]])
b = -0.3
X = np.array([[1.,-1.1,-3.2],[1.2,2.,0.1]])
print ("predictions = " + str(predict(w, b, X)))
```
**Expected Output**:
<table style="width:30%">
<tr>
<td>
**predictions**
</td>
<td>
[[ 1. 1. 0.]]
</td>
</tr>
</table>
<font color='blue'>
**What to remember:**
You've implemented several functions that:
- Initialize (w,b)
- Optimize the loss iteratively to learn parameters (w,b):
- computing the cost and its gradient
- updating the parameters using gradient descent
- Use the learned (w,b) to predict the labels for a given set of examples
## 5 - Merge all functions into a model ##
You will now see how the overall model is structured by putting together all the building blocks (functions implemented in the previous parts) together, in the right order.
**Exercise:** Implement the model function. Use the following notation:
- Y_prediction_test for your predictions on the test set
- Y_prediction_train for your predictions on the train set
- w, costs, grads for the outputs of optimize()
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
"""
Builds the logistic regression model by calling the function you've implemented previously
Arguments:
X_train -- training set represented by a numpy array of shape (num_px * num_px * 3, m_train)
Y_train -- training labels represented by a numpy array (vector) of shape (1, m_train)
X_test -- test set represented by a numpy array of shape (num_px * num_px * 3, m_test)
Y_test -- test labels represented by a numpy array (vector) of shape (1, m_test)
num_iterations -- hyperparameter representing the number of iterations to optimize the parameters
learning_rate -- hyperparameter representing the learning rate used in the update rule of optimize()
print_cost -- Set to true to print the cost every 100 iterations
Returns:
d -- dictionary containing information about the model.
"""
### START CODE HERE ###
# initialize parameters with zeros (≈ 1 line of code)
w, b = initialize_with_zeros(X_train.shape[0])
# Gradient descent (≈ 1 line of code)
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
# Retrieve parameters w and b from dictionary "parameters"
w = parameters["w"]
b = parameters["b"]
# Predict test/train set examples (≈ 2 lines of code)
Y_prediction_test = predict(w,b,X_test)
Y_prediction_train = predict(w,b,X_train)
### END CODE HERE ###
# Print train/test Errors
print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))
d = {"costs": costs,
"Y_prediction_test": Y_prediction_test,
"Y_prediction_train" : Y_prediction_train,
"w" : w,
"b" : b,
"learning_rate" : learning_rate,
"num_iterations": num_iterations}
return d
```
Run the following cell to train your model.
```
d = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 2000, learning_rate = 0.005, print_cost = True)
```
**Expected Output**:
<table style="width:40%">
<tr>
<td> **Cost after iteration 0 ** </td>
<td> 0.693147 </td>
</tr>
<tr>
<td> <center> $\vdots$ </center> </td>
<td> <center> $\vdots$ </center> </td>
</tr>
<tr>
<td> **Train Accuracy** </td>
<td> 99.04306220095694 % </td>
</tr>
<tr>
<td>**Test Accuracy** </td>
<td> 70.0 % </td>
</tr>
</table>
**Comment**: Training accuracy is close to 100%. This is a good sanity check: your model is working and has high enough capacity to fit the training data. Test accuracy is 70%. It is actually not bad for this simple model, given the small dataset we used and that logistic regression is a linear classifier. But no worries, you'll build an even better classifier next week!
Also, you see that the model is clearly overfitting the training data. Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set.
```
# Example of a picture that was wrongly classified.
index = 1
plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3)))
print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.")
```
Let's also plot the cost function and the gradients.
```
# Plot learning curve (with costs)
costs = np.squeeze(d['costs'])
plt.plot(costs)
plt.ylabel('cost')
plt.xlabel('iterations (per hundreds)')
plt.title("Learning rate =" + str(d["learning_rate"]))
plt.show()
```
**Interpretation**:
You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting.
## 6 - Further analysis (optional/ungraded exercise) ##
Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$.
#### Choice of learning rate ####
**Reminder**:
In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate.
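To see the "overshooting" effect in isolation, here is a toy sketch (independent of the assignment code) that runs gradient descent on $f(w) = w^2$ with a small and a large learning rate:
```
import numpy as np

def toy_gd(lr, steps=10, w=1.0):
    # minimize f(w) = w**2, whose gradient is 2*w
    for _ in range(steps):
        w = w - lr * 2 * w
    return w

print(toy_gd(lr=0.1))   # shrinks toward the minimum at 0
print(toy_gd(lr=1.5))   # each step overshoots: |w| doubles and the iterates diverge
```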
Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens.
```
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
```
**Interpretation**:
- Different learning rates give different costs and thus different prediction results.
- If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost).
- A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy.
- In deep learning, we usually recommend that you:
- Choose the learning rate that better minimizes the cost function.
- If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.)
## 7 - Test with your own image (optional/ungraded exercise) ##
Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
2. Add your image to this Jupyter Notebook's directory, in the "images" folder
3. Change your image's name in the following code
4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
```
## START CODE HERE ## (PUT YOUR IMAGE NAME)
my_image = "my_image.jpg" # change this to the name of your image file
## END CODE HERE ##
# We preprocess the image to fit your algorithm.
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
image = image/255.
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T
my_predicted_image = predict(d["w"], d["b"], my_image)
plt.imshow(image)
print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
```
<font color='blue'>
**What to remember from this assignment:**
1. Preprocessing the dataset is important.
2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model().
3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course!
Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include:
- Play with the learning rate and the number of iterations
- Try different initialization methods and compare the results
- Test other preprocessings (center the data, or divide each row by its standard deviation)
Bibliography:
- http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/
- https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
|
github_jupyter
|
# This notebook serves as an example of how to create AutoTST objects and how to create 3D geometries
```
#General imports
import os, sys
import logging
from copy import deepcopy
import numpy as np
import pandas as pd
from multiprocessing import Process
#RDKit imports
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.rdchem import Mol
from rdkit.Chem.Pharm3D import EmbedLib
from rdkit import DistanceGeometry
from rdkit.Chem import rdDistGeom
#ASE imports
import ase
from ase import Atom, Atoms
from ase.calculators.gaussian import Gaussian as ASEGaussian
from ase.visualize import view
#RMG-Py imports
from rmgpy.molecule import Molecule as RMGMolecule
from rmgpy.species import Species as RMGSpecies
from rmgpy.reaction import Reaction as RMGReaction
#AutoTST imports
from autotst.species import Species, Conformer
from autotst.reaction import Reaction, TS
from autotst.calculator.gaussian import Gaussian
from autotst.calculator.statmech import StatMech
from autotst.job.job import Job
```
# Creating `Species` and `Conformer` objects
Each stable reactant and product species is generated as a `Species` object. These objects can contain many `Conformer` objects that can represent multiple resonance structures.
`Species` and `Conformer` objects can be initialized as follows:
- `Species(["SMILES_STRING"])`
- `Conformer("SMILES_STRING")`
A `Species` object takes a list of SMILES strings because there may be multiple resonance structures for a given species. However, a `Conformer` object represents only a single isomer.
`Species` objects can therefore contain multiple `Conformer` objects, as demonstrated below.
```
smiles = ["[CH2]C=C(C)C"]
for smile in smiles:
molecule = RMGMolecule(SMILES=smile)
species = Species(["[CH2]C=C(C)C"])
print("For {} there are {} resonance structures".format(species, len(species.conformers)))
print("These structures are:")
for smiles in species.conformers.keys():
print ("\t- {}".format(smiles))
species.conformers
```
When looking at a specific `Conformer` object, you need to know that a `Species` contains a dictionary in `Species.conformers`. The keys are the possible SMILES strings associated with the species and the values are lists of `Conformer` objects for that specific SMILES. When initialized, these lists will be length 1 but will be extended if you generate more conformers. Below is how to view a single `Conformer` object
```
conformer = species.conformers[smiles][0]
conformer.view()
```
# Creating `Reaction` and `TS` objects
For this example we will be looking at a hydrogen abstraction reaction by peroxyl radical of 2-methylbut-2-ene
First, you need to initialize the AutoTST `Reaction` object as done below. This can be done by using a reaction string of the form `r1+r2_p1+p2`, where `r1`, `r2`, `p1`, and `p2` are SMILES strings for the molecules involved in the reaction. In addition, not all reactions need to be bimolecular. AutoTST currently supports reactions of the following reaction families:
- Hydrogen Abstraction (`H_Abstraction`: `r1+r2_p1+p2`)
- Intra hydrogen migration (`intra_H_migration`: `r1_p1`)
- R addition to multiple bond (`R_Addition_MultipleBond`: `r1+r2_p1`)
You can specify a `reaction_family` in a `Reaction` object, however, it is not needed as AutoTST will attempt to match the reaction provided to one of the three supported reaction families.
Alternatively, you can initialize a `Reaction` object using an `RMGReaction` object. This can be done as follows:
`rxn = Reaction(rmg_reaction=RMGReaction())`
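For illustration, a sketch of that alternative route is shown below. It assumes that `RMGReaction` accepts `reactants`/`products` lists of `RMGSpecies` objects built from SMILES; treat the exact keyword usage as an assumption rather than documented AutoTST behavior.
```
# Hedged sketch: build an RMGReaction and pass it to AutoTST's Reaction.
reactants = [RMGSpecies(molecule=[RMGMolecule(SMILES="CC=C(C)C")]),
             RMGSpecies(molecule=[RMGMolecule(SMILES="[O]O")])]
products = [RMGSpecies(molecule=[RMGMolecule(SMILES="[CH2]C=C(C)C")]),
            RMGSpecies(molecule=[RMGMolecule(SMILES="OO")])]
rmg_rxn = RMGReaction(reactants=reactants, products=products)
rxn_from_rmg = Reaction(rmg_reaction=rmg_rxn)
```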
The initialization of the reaction will also create a forward and a reverse transition state geometry. And these can be visualized using `py3dmol`.
`Reaction` objects are similar to `Species` objects in that they both contain a dictionary corresponding to their conformers. The `Reaction.ts` is the dictionary of transition states for a reaction just like the `Species.conformers` is the dictionary of conformers for a species. The only difference is the keys for the `Reaction.ts` are simply `"forward"` and `"reverse"` to denote the `TS` generated in either direction
```
rxn = Reaction(label="CC=C(C)C+[O]O_[CH2]C=C(C)C+OO")
transitionstates = rxn.ts["reverse"] #the rxn.ts is a dictionary with keys being "forward" and "reverse"
ts = transitionstates[0] #transitionstates is a list of TS objects, this list is currently length 1
rxn.ts
```
# Editing geometries of `Conformer` and `TS` objects
AutoTST allows you to edit the following features of a `Conformer` or a `TS` object:
- Bond length
- Angles
- Dihedrals
- CisTrans bond orientation
- Sterocenter orientation
The definitions for all of these objects can be found in `geometry.py`.
```
conformer = Conformer("ClC=C(O)C(N)Cl")
conformer.view()
print("{} has the following geometries:".format(conformer))
print("")
print("Bonds")
for bond in conformer.bonds:
print("\t- {}: {}".format(bond, bond.index))
print("")
print("Angles")
for angle in conformer.angles:
print("\t- {}: {}".format(angle, angle.index))
print("")
print("Dihedrals")
for torsion in conformer.torsions:
print("\t- {}: {}".format(torsion, torsion.index))
print("")
print("CisTrans Bonds")
for cistran in conformer.cistrans:
print("\t- {}: ".format(cistran, cistran.index))
print("")
print("Chiral Centers")
for chiral_center in conformer.chiral_centers:
print("\t- {}: {}".format(chiral_center, chiral_center.index))
```
In addition, you can set variables for each of these objects. This next section of the notebook will walk through how to do all of this.
```
conformer.set_bond_length(bond_index=3, length=3) #Length is specified in angstroms
conformer.view()
conformer.set_angle(angle_index=7, angle=45.) #Angles are specified in degrees
conformer.view()
conformer.set_torsion(torsion_index=0, dihedral=180)
conformer.view()
conformer.set_cistrans(cistrans_index=0, stero="z") #options for stero are E and Z
conformer.view()
conformer.set_chirality(chiral_center_index=0, stero="r")
conformer.set_chirality??
conformer.view()
```
## AND all of these features can be extended to `TS` objects as well
# Now, let's look at how to generate ensembles of conformers for `Species` and `Reactions` using AutoTST
Both `Species` and `Reaction` objects have a built-in method called `generate_conformers`, which takes one argument: an ASE calculator object. This method generates all possible combinations of dihedrals, CisTrans bonds, and chiral centers to identify every possible conformer. All of these conformers are then optimized using ASE's BFGS optimizer and the calculator that the user provides. From this, a list of conformers within 1 kcal/mol is generated and added to the `Species.conformers` or the `Reaction.ts` dict. Below are a few examples.
```
from hotbit import Hotbit
species = Species(["CCCC"])
species.generate_conformers(calculator=Hotbit())
species.conformers
reaction = Reaction("C+[O]O_[CH3]+OO")
reaction.generate_conformers(calculator=Hotbit())
reaction.ts
```
For right now, we're going to be looking at the first conformers for the `Species` and `Reaction` listed above.
```
ts = reaction.ts["forward"][0]
conformer = species.conformers["CCCC"][0]
```
# Now, to look at writing input files for `Conformer` and `TS` objects
This is relatively easy: you just need to use the `Gaussian` calculator object. Start by initializing the object, then call its methods on different objects to get ASE calculator objects that can write your geometries.
The main methods of the `Gaussian` calculator are:
- For species conformers
- `get_conformer_calc`
- For transition states
- `get_shell_calc`
- `get_center_calc`
- `get_overall_calc`
- `get_irc_calc`
- For both
- `get_rotor_calc`
```
gaussian = Gaussian()
```
For `Conformer` objects
```
calc = gaussian.get_conformer_calc(conformer=conformer)
calc.write_input(conformer.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
```
For `TS` objects
```
calc = gaussian.get_shell_calc(ts=ts)
calc.write_input(ts.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
calc = gaussian.get_center_calc(ts=ts)
calc.write_input(ts.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
calc = gaussian.get_overall_calc(ts=ts)
calc.write_input(ts.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
calc = gaussian.get_irc_calc(ts=ts)
calc.write_input(ts.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
```
For hindered rotor calculations
```
torsion = conformer.torsions[0]
calc = gaussian.get_rotor_calc(conformer=conformer, torsion=torsion)
calc.write_input(conformer.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
torsion = ts.torsions[0]
calc = gaussian.get_rotor_calc(conformer=ts, torsion=torsion)
calc.write_input(ts.ase_molecule)
f = open(calc.label + ".com", "r")
print(calc.label)
print("")
for line in f.readlines():
print(line[:-1])
```
At this point, you have a way to automatically write input files for quantum chemistry optimizations, and you can have all of these jobs run automatically using the AutoTST `Job` class.
|
github_jupyter
|
## Tutorial 2: Mixture Models and Expectation Maximization
### Exercise 1: Categorical Mixture Model (CMM)
```
# Import libraries
import numpy as np
import pandas as pd
from ast import literal_eval
import matplotlib.pyplot as plt
import gensim
from wordcloud import WordCloud, STOPWORDS
from categorical_em import CategoricalEM
```
#### 1.4) Play around with the dataset
##### Load and pre-process the data
Load the data from the tweets_cleaned csv file as a `pandas` dataframe. It contains the documents already pre-processed and cleaned after applying the following steps (a minimal sketch of these steps is shown after the list):
1. Tokenization
2. Homogenization, which includes:
1. Removing capitalization.
    2. Removing non-alphanumeric tokens (e.g. punctuation signs)
3. Stemming/Lemmatisation.
3. Cleaning
4. Vectorization
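For reference, a minimal sketch of what such a cleaning pipeline can look like using `gensim` utilities (illustrative only; the csv already contains the result of these steps, and `PorterStemmer` is used here as a crude stand-in for lemmatisation):
```
import gensim
from gensim.parsing.porter import PorterStemmer

raw_tweet = "Loving the new #ML course!!! http://example.com"
tokens = gensim.utils.simple_preprocess(raw_tweet)   # tokenize, lowercase, drop punctuation
stemmer = PorterStemmer()
tokens = [stemmer.stem(t) for t in tokens]           # stemming step
print(tokens)
```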
```
df = pd.read_csv('tweets_cleaned.csv')
df.drop_duplicates(subset="tweet", inplace=True) # drop duplicates tweets
df['tokens'] = df['tokens'].apply(literal_eval) # transform the string into a list of tokens
X_tokens = list(df['tokens'].values)
print('Columns: {}\n'.format(' | '.join(df.columns.values)))
print('Tweet:\n{}'.format(df.loc[1, 'tweet']))
print('Tweet cleaned:\n{}'.format(df.loc[1, 'tweets_clean']))
print('Tweet tokens:\n{}'.format(X_tokens[1]))
```
##### Create the dictionary
We have transformed the raw text collection into a list of documents stored in `X_tokens`, where each document is a collection of the words that are most relevant according to the semantic analysis.
We now convert these data (a list of token lists) into a numerical representation (a list of vectors, or a matrix). For this purpose we use the `gensim` library.
```
I = 120 # hyperparameter: number of different words to keep
dictionary = gensim.corpora.Dictionary(X_tokens)
print(dictionary)
dictionary.filter_extremes(no_below=15, no_above=0.5, keep_n=I)
print(dictionary)
```
##### Create Bag of Words (BoW)
Let's create the numerical version of our corpus using the `doc2bow` method. In general,
`D.doc2bow(token_list)` transforms any list of tokens into a list of tuples `(token_id, n)`, one per each token in
`token_list`, where `token_id` is the token identifier (according to dictionary `D`) and `n` is the number of occurrences
of such token in `token_list`.
```
X_bow = list()
keep_tweet = list()
for tweet in X_tokens:
tweet_bow = dictionary.doc2bow(tweet)
if len(tweet_bow) > 1:
X_bow.append(tweet_bow)
keep_tweet.append(True)
else:
keep_tweet.append(False)
df_data = df[keep_tweet]
N = len(df_data)
```
##### Create the matrix
Finally, we transform the BoW representation `X_bow` into a matrix, namely `X_matrix`, in which the n-th row and j-th column represents the
number of occurrences of the j-th word of the dictionary in the n-th document. This will be the matrix used in the algorithm.
```
X_matrix = np.zeros([N, I])
for i, doc_bow in enumerate(X_bow):
word_list = list()
for word in doc_bow:
X_matrix[i, word[0]] = word[1]
X_matrix.shape
```
#### 1.5) Implement the EM algorithm
```
K = 6 # hyperparameter: number of topics
i_theta = 1
i_pi = 1
model = CategoricalEM(K, I, N, delta=0.01, epochs=200, init_params={'theta': i_theta, 'pi': i_pi})
model.fit(X_matrix)
```
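For reference, the core of one EM iteration for this model can be sketched in a few lines of `numpy`. The actual `CategoricalEM` class lives in `categorical_em.py` and is not reproduced here, so the names below are illustrative:
```
import numpy as np

def em_step(X, pi, theta):
    # X: (N, I) word counts, pi: (K,) mixing weights, theta: (K, I) word probabilities per topic
    # E-step: responsibilities r[n, k] proportional to pi_k * prod_i theta_ki ** x_ni (in log space)
    log_r = np.log(pi)[None, :] + X @ np.log(theta).T
    log_r -= log_r.max(axis=1, keepdims=True)   # numerical stability
    r = np.exp(log_r)
    r /= r.sum(axis=1, keepdims=True)
    # M-step: re-estimate mixing weights and per-topic word probabilities
    pi = r.mean(axis=0)
    theta = r.T @ X + 1e-10                     # small constant avoids log(0) on the next iteration
    theta /= theta.sum(axis=1, keepdims=True)
    return pi, theta, r
```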
#### 1.6) Show the ten most representative words for each topic using a wordcloud, and the ten most relevant documents for each topic
Words per topic
```
argsort = np.argsort(model.theta_matrix, axis=1)
argsort = [x[::-1] for x in argsort]
fig, axs = plt.subplots(2, 3, figsize=(30, 10))
for k in range(K):
words = [(dictionary[m], model.theta_matrix[k][m]) for m in argsort[k][:10]]
axs[k//3,k%3].imshow(WordCloud().fit_words(dict(words)))
axs[k//3,k%3].set_title(f'Topic {k+1}', fontsize=25)
axs[k//3,k%3].axis('off')
```
Documents per topic
```
rnk = model.r_matrix.T
rnk.shape
argsort = np.argsort(rnk, axis=1)
argsort = [x[::-1][:10] for x in argsort]
# topic 1
df_data.iloc[argsort[0]]
```
|
github_jupyter
|
# Dimensionality reduction using `scikit-learn`
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import preprocessing, model_selection as ms, \
manifold, decomposition as dec, cross_decomposition as cross_dec
from sklearn.pipeline import Pipeline
%matplotlib inline
BOROUGHS_URL = 'https://files.datapress.com/london/dataset/london-borough-profiles/2017-01-26T18:50:00/london-borough-profiles.csv'
```
Read in the London Borough Profiles datasets.
```
boroughs = pd.read_csv(BOROUGHS_URL, encoding='iso-8859-1')
```
Filter the DataFrame so that only boroughs are included.
```
boroughs = boroughs[boroughs.Code.str.startswith('E09', na=False)]
```
Replace underscores with spaces in column names.
```
boroughs.columns = boroughs.columns.str.replace('_', ' ')
```
Select columns of interest.
```
boroughs = boroughs[[
'Area name',
'Population density (per hectare) 2017',
'Proportion of population aged 0-15, 2015',
'Proportion of population of working-age, 2015',
'Proportion of population aged 65 and over, 2015',
'% of resident population born abroad (2015)',
'Unemployment rate (2015)',
'Gross Annual Pay, (2016)',
'Modelled Household median income estimates 2012/13',
'Number of active businesses, 2015',
'Two-year business survival rates (started in 2013)',
'Crime rates per thousand population 2014/15',
'Fires per thousand population (2014)',
'Ambulance incidents per hundred population (2014)',
'Median House Price, 2015',
'% of area that is Greenspace, 2005',
'Total carbon emissions (2014)',
'Household Waste Recycling Rate, 2014/15',
'Number of cars, (2011 Census)',
'Number of cars per household, (2011 Census)',
'% of adults who cycle at least once per month, 2014/15',
'Average Public Transport Accessibility score, 2014',
'Male life expectancy, (2012-14)',
'Female life expectancy, (2012-14)',
'Teenage conception rate (2014)',
'Life satisfaction score 2011-14 (out of 10)',
'Worthwhileness score 2011-14 (out of 10)',
'Happiness score 2011-14 (out of 10)',
'Anxiety score 2011-14 (out of 10)',
'Childhood Obesity Prevalance (%) 2015/16',
'People aged 17+ with diabetes (%)',
'Mortality rate from causes considered preventable 2012/14'
]]
```
Set index.
```
boroughs.set_index('Area name', inplace=True)
```
Fix a couple of issues with data types.
```
boroughs[boroughs['Gross Annual Pay, (2016)'] == '.'] = None
boroughs['Modelled Household median income estimates 2012/13'] = \
boroughs['Modelled Household median income estimates 2012/13'].str.replace("[^0-9]", "")
boroughs = boroughs.apply(pd.to_numeric)
```
Remove boroughs with missing values.
```
boroughs.dropna(inplace=True)
```
Extract information on 'feelings'.
```
col_idx = [
'Life satisfaction score 2011-14 (out of 10)',
'Worthwhileness score 2011-14 (out of 10)',
'Happiness score 2011-14 (out of 10)',
'Anxiety score 2011-14 (out of 10)'
]
feelings = boroughs[col_idx]
boroughs.drop(col_idx, axis=1, inplace=True)
```
## Multidimensional scaling (MDS)
Create a pipeline that scales the data and performs MDS.
```
smds = Pipeline([
('scale', preprocessing.StandardScaler()),
('mds', manifold.MDS())
])
```
Two-dimensional projection ('embedding') of 'boroughs'
```
boroughs_mds = smds.fit_transform(boroughs)
fig, ax = plt.subplots()
ax.scatter(boroughs_mds[:,0], boroughs_mds[:,1])
for i, name in enumerate(boroughs.index):
ax.annotate(name, boroughs_mds[i,:])
```
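One quick way to judge the embedding is the stress of the fitted MDS step (lower means the 2-D distances better preserve the original distances). A small check, assuming the default metric MDS:
```
print(smds.named_steps['mds'].stress_)
```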
## Principal component analysis (PCA)
Create a pipeline that scales the data and performs PCA.
```
spca = Pipeline([
('scale', preprocessing.StandardScaler()),
('pca', dec.PCA())
])
```
Scores (projection of 'boroughs' on the PCs):
```
scores = spca.fit_transform(boroughs)
```
Scores plot:
```
fig, ax = plt.subplots()
ax.scatter(scores[:,0], scores[:,1])
for i, name in enumerate(boroughs.index):
ax.annotate(name, scores[i,0:2])
```
Loadings (coefficients defining the PCs):
```
spca.named_steps['pca'].components_
```
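As a sanity check (a sketch, not part of the original analysis), the scores are simply the standardized data projected onto these loadings; this should print True up to numerical precision:
```
X_scaled = spca.named_steps['scale'].transform(boroughs)
print(np.allclose(X_scaled @ spca.named_steps['pca'].components_.T, scores))
```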
Explained variance:
```
spca.named_steps['pca'].explained_variance_
np.cumsum(spca.named_steps['pca'].explained_variance_)
```
Explained variance ratio:
```
spca.named_steps['pca'].explained_variance_ratio_
np.cumsum(spca.named_steps['pca'].explained_variance_ratio_)
```
Scree plot:
```
plt.bar(np.arange(1, spca.named_steps['pca'].n_components_ + 1) - 0.4,
spca.named_steps['pca'].explained_variance_ratio_)
cum_evr = np.cumsum(spca.named_steps['pca'].explained_variance_ratio_)
plt.plot(np.arange(1, spca.named_steps['pca'].n_components_ + 1), cum_evr, color='black')
```
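A common follow-up (sketch) is to read off how many components are needed to reach, say, 90% of the variance:
```
n_components_90 = np.argmax(cum_evr >= 0.9) + 1
print(n_components_90)
```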
## Partial least squares (PLS) regression
Create a pipeline that scales the data and performs PLS regression.
```
spls = Pipeline([
('scale', preprocessing.StandardScaler()),
('pls', cross_dec.PLSRegression(scale=False))
])
```
Train a PLS regression model with three components.
```
spls.set_params(
pls__n_components=3
)
spls.fit(boroughs, feelings)
```
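To inspect the fitted model (a sketch): the pipeline's `transform` returns the projection of the predictors onto the three PLS components, and `coef_` holds the regression coefficients relating the predictors to the four 'feelings' targets.
```
x_scores = spls.transform(boroughs)          # projection onto the 3 PLS components
print(x_scores.shape)
print(spls.named_steps['pls'].coef_.shape)   # coefficients linking predictors and targets
```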
Define folds for cross-validation.
```
three_fold_cv = ms.KFold(n_splits=3, shuffle=True)
```
Compute average MSE across folds.
```
mses = ms.cross_val_score(spls, boroughs, feelings, scoring='neg_mean_squared_error', cv=three_fold_cv)
np.mean(-mses)
```
Determine 'optimal' number of components.
```
gs = ms.GridSearchCV(
estimator=spls,
param_grid={
'pls__n_components': np.arange(1, 10)
},
scoring='neg_mean_squared_error',
cv=three_fold_cv
)
gs.fit(boroughs, feelings)
-gs.best_score_
gs.best_estimator_
```
Plot number of components against MSE.
```
plt.plot(np.arange(1, 10), -gs.cv_results_['mean_test_score'])
```
|
github_jupyter
|
# 1. Import libraries
```
#----------------------------Reproducible----------------------------------------------------------------------------------------
import numpy as np
import tensorflow as tf
import random as rn
import os
seed=0
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
rn.seed(seed)
#session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
session_conf =tf.compat.v1.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
from keras import backend as K
#tf.set_random_seed(seed)
tf.compat.v1.set_random_seed(seed)
#sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
K.set_session(sess)
#----------------------------Reproducible----------------------------------------------------------------------------------------
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#--------------------------------------------------------------------------------------------------------------------------------
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input, Flatten, Activation, Dropout, Layer
from keras.layers.normalization import BatchNormalization
from keras.utils import to_categorical
from keras import optimizers,initializers,constraints,regularizers
from keras import backend as K
from keras.callbacks import LambdaCallback,ModelCheckpoint
from keras.utils import plot_model
from sklearn.model_selection import StratifiedKFold
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import svm
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
import h5py
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cm as cm
%matplotlib inline
matplotlib.style.use('ggplot')
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
import scipy.sparse as sparse
#--------------------------------------------------------------------------------------------------------------------------------
#Import ourslef defined methods
import sys
sys.path.append(r"./Defined")
import Functions as F
# The following code should be added before the keras model
#np.random.seed(seed)
```
# 2. Loading data
```
train_data_frame=np.array(pd.read_csv('./Dataset/isolet1+2+3+4.data',header=None))
test_data_frame=np.array(pd.read_csv('./Dataset/isolet5.data',header=None))
train_data_arr=(train_data_frame[:,0:617]).copy()
train_label_arr=((train_data_frame[:,617]).copy()-1)
test_data_arr=(test_data_frame[:,0:617]).copy()
test_label_arr=((test_data_frame[:,617]).copy()-1)
train_data_arr.shape
test_data_arr.shape
np.r_[train_data_arr,test_data_arr].shape
Data=MinMaxScaler(feature_range=(0,1)).fit_transform(np.r_[train_data_arr,test_data_arr])
Data.shape
C_train_x=Data[:len(train_data_arr)]
C_test_x=Data[len(train_data_arr):]
C_train_y=train_label_arr#to_categorical(train_label_arr)
C_test_y=test_label_arr#to_categorical(test_label_arr)
x_train,x_validate,y_train_onehot,y_validate_onehot= train_test_split(C_train_x,C_train_y,test_size=0.1,random_state=seed)
x_test=C_test_x
y_test_onehot=C_test_y
print('Shape of x_train: ' + str(x_train.shape))
print('Shape of x_validate: ' + str(x_validate.shape))
print('Shape of x_test: ' + str(x_test.shape))
print('Shape of y_train: ' + str(y_train_onehot.shape))
print('Shape of y_validate: ' + str(y_validate_onehot.shape))
print('Shape of y_test: ' + str(y_test_onehot.shape))
print('Shape of C_train_x: ' + str(C_train_x.shape))
print('Shape of C_train_y: ' + str(C_train_y.shape))
print('Shape of C_test_x: ' + str(C_test_x.shape))
print('Shape of C_test_y: ' + str(C_test_y.shape))
key_feture_number=50
```
# 3.Model
```
np.random.seed(seed)
#--------------------------------------------------------------------------------------------------------------------------------
class Feature_Select_Layer(Layer):
def __init__(self, output_dim, **kwargs):
super(Feature_Select_Layer, self).__init__(**kwargs)
self.output_dim = output_dim
def build(self, input_shape):
self.kernel = self.add_weight(name='kernel',
shape=(input_shape[1],),
initializer=initializers.RandomUniform(minval=0.999999, maxval=0.9999999, seed=seed),
trainable=True)
super(Feature_Select_Layer, self).build(input_shape)
    def call(self, x, selection=False, k=key_feture_number):
        # Square the kernel so every feature weight is non-negative.
        kernel = K.pow(self.kernel, 2)
        if selection:
            # Keep only the k largest weights and zero out the rest,
            # so the layer passes through exactly k selected features.
            kernel_ = K.transpose(kernel)
            kth_largest = tf.math.top_k(kernel_, k=k)[0][-1]
            kernel = tf.where(condition=K.less(kernel, kth_largest), x=K.zeros_like(kernel), y=kernel)
        # Scale each input feature by its (possibly thresholded) weight.
        return K.dot(x, tf.linalg.tensor_diag(kernel))
def compute_output_shape(self, input_shape):
return (input_shape[0], self.output_dim)
#--------------------------------------------------------------------------------------------------------------------------------
def Autoencoder(p_data_feature=x_train.shape[1],\
p_encoding_dim=key_feture_number,\
p_learning_rate= 1E-3):
input_img = Input(shape=(p_data_feature,), name='input_img')
encoded = Dense(p_encoding_dim, activation='linear',kernel_initializer=initializers.glorot_uniform(seed))(input_img)
bottleneck=encoded
decoded = Dense(p_data_feature, activation='linear',kernel_initializer=initializers.glorot_uniform(seed))(encoded)
latent_encoder = Model(input_img, bottleneck)
autoencoder = Model(input_img, decoded)
autoencoder.compile(loss='mean_squared_error', optimizer=optimizers.Adam(lr=p_learning_rate))
print('Autoencoder Structure-------------------------------------')
autoencoder.summary()
#print('Latent Encoder Structure-------------------------------------')
#latent_encoder.summary()
return autoencoder,latent_encoder
#--------------------------------------------------------------------------------------------------------------------------------
def Identity_Autoencoder(p_data_feature=x_train.shape[1],\
p_encoding_dim=key_feture_number,\
p_learning_rate= 1E-3):
input_img = Input(shape=(p_data_feature,), name='autoencoder_input')
feature_selection = Feature_Select_Layer(output_dim=p_data_feature,\
input_shape=(p_data_feature,),\
name='feature_selection')
feature_selection_score=feature_selection(input_img)
encoded = Dense(p_encoding_dim,\
activation='linear',\
kernel_initializer=initializers.glorot_uniform(seed),\
name='autoencoder_hidden_layer')
encoded_score=encoded(feature_selection_score)
bottleneck_score=encoded_score
decoded = Dense(p_data_feature,\
activation='linear',\
kernel_initializer=initializers.glorot_uniform(seed),\
name='autoencoder_output')
decoded_score =decoded(bottleneck_score)
latent_encoder_score = Model(input_img, bottleneck_score)
autoencoder = Model(input_img, decoded_score)
autoencoder.compile(loss='mean_squared_error',\
optimizer=optimizers.Adam(lr=p_learning_rate))
print('Autoencoder Structure-------------------------------------')
autoencoder.summary()
return autoencoder,latent_encoder_score
#--------------------------------------------------------------------------------------------------------------------------------
def Fractal_Autoencoder(p_data_feature=x_train.shape[1],\
p_feture_number=key_feture_number,\
p_encoding_dim=key_feture_number,\
p_learning_rate=1E-3,\
p_loss_weight_1=1,\
p_loss_weight_2=2):
input_img = Input(shape=(p_data_feature,), name='autoencoder_input')
feature_selection = Feature_Select_Layer(output_dim=p_data_feature,\
input_shape=(p_data_feature,),\
name='feature_selection')
feature_selection_score=feature_selection(input_img)
feature_selection_choose=feature_selection(input_img,selection=True,k=p_feture_number)
encoded = Dense(p_encoding_dim,\
activation='linear',\
kernel_initializer=initializers.glorot_uniform(seed),\
name='autoencoder_hidden_layer')
encoded_score=encoded(feature_selection_score)
encoded_choose=encoded(feature_selection_choose)
bottleneck_score=encoded_score
bottleneck_choose=encoded_choose
decoded = Dense(p_data_feature,\
activation='linear',\
kernel_initializer=initializers.glorot_uniform(seed),\
name='autoencoder_output')
decoded_score =decoded(bottleneck_score)
decoded_choose =decoded(bottleneck_choose)
latent_encoder_score = Model(input_img, bottleneck_score)
latent_encoder_choose = Model(input_img, bottleneck_choose)
feature_selection_output=Model(input_img,feature_selection_choose)
autoencoder = Model(input_img, [decoded_score,decoded_choose])
autoencoder.compile(loss=['mean_squared_error','mean_squared_error'],\
loss_weights=[p_loss_weight_1, p_loss_weight_2],\
optimizer=optimizers.Adam(lr=p_learning_rate))
print('Autoencoder Structure-------------------------------------')
autoencoder.summary()
return autoencoder,feature_selection_output,latent_encoder_score,latent_encoder_choose
```
## 3.1 Structure and parameter testing
```
epochs_number=200
batch_size_value=64
```
---
### 3.1.1 Fractal Autoencoder
---
```
loss_weight_1=0.0078125
F_AE,\
feature_selection_output,\
latent_encoder_score_F_AE,\
latent_encoder_choose_F_AE=Fractal_Autoencoder(p_data_feature=x_train.shape[1],\
p_feture_number=key_feture_number,\
p_encoding_dim=key_feture_number,\
p_learning_rate= 1E-3,\
p_loss_weight_1=loss_weight_1,\
p_loss_weight_2=1)
#file_name="./log/F_AE_"+str(key_feture_number)+".png"
#plot_model(F_AE, to_file=file_name,show_shapes=True)
model_checkpoint=ModelCheckpoint('./log_weights/F_AE_'+str(key_feture_number)+'_weights_'+str(loss_weight_1)+'.{epoch:04d}.hdf5',period=100,save_weights_only=True,verbose=1)
#print_weights = LambdaCallback(on_epoch_end=lambda batch, logs: print(F_AE.layers[1].get_weights()))
F_AE_history = F_AE.fit(x_train, [x_train,x_train],\
epochs=epochs_number,\
batch_size=batch_size_value,\
shuffle=True,\
validation_data=(x_validate, [x_validate,x_validate]),\
callbacks=[model_checkpoint])
loss = F_AE_history.history['loss']
val_loss = F_AE_history.history['val_loss']
epochs = range(epochs_number)
plt.plot(epochs, loss, 'bo', label='Training Loss')
plt.plot(epochs, val_loss, 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
plt.plot(epochs[100:], loss[100:], 'bo', label='Training Loss')  # zoom in on the later epochs (epochs_number is 200)
plt.plot(epochs[100:], val_loss[100:], 'r', label='Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
p_data=F_AE.predict(x_test)
numbers=x_test.shape[0]*x_test.shape[1]
print("MSE for one-to-one map layer",np.sum(np.power(np.array(p_data)[0]-x_test,2))/numbers)
print("MSE for feature selection layer",np.sum(np.power(np.array(p_data)[1]-x_test,2))/numbers)
```
---
### 3.1.2 Feature selection layer output
---
```
FS_layer_output=feature_selection_output.predict(x_test)
print(np.sum(FS_layer_output[0]>0))
```
---
### 3.1.3 Key features show
---
```
key_features=F.top_k_keepWeights_1(F_AE.get_layer(index=1).get_weights()[0],key_feture_number)
print(np.sum(F_AE.get_layer(index=1).get_weights()[0]>0))
```
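For intuition, the "keep the k largest weights, zero out the rest" operation used by the selection layer and by `F.top_k_keepWeights_1` (defined in `./Defined/Functions.py`, not shown here) can be sketched in plain `numpy`:
```
import numpy as np

def keep_top_k(weights, k):
    # illustrative sketch: zero every entry except the k largest ones
    mask = np.zeros_like(weights)
    mask[np.argsort(weights)[-k:]] = 1.0
    return weights * mask

w = np.array([0.1, 0.9, 0.3, 0.7, 0.2])
print(keep_top_k(w, 2))   # only 0.9 and 0.7 survive
```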
# 4 Classifying
### 4.1 Extra Trees
```
train_feature=C_train_x
train_label=C_train_y
test_feature=C_test_x
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
selected_position_list=np.where(key_features>0)[0]
```
---
#### 4.1.1. On Identity Selection layer
---
a) with zeros
```
train_feature=feature_selection_output.predict(C_train_x)
print("train_feature>0: ",np.sum(train_feature[0]>0))
print(train_feature.shape)
train_label=C_train_y
test_feature=feature_selection_output.predict(C_test_x)
print("test_feature>0: ",np.sum(test_feature[0]>0))
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
b) Sparse matrix
```
train_feature=feature_selection_output.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=feature_selection_output.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
train_feature_sparse=sparse.coo_matrix(train_feature)
test_feature_sparse=sparse.coo_matrix(test_feature)
p_seed=seed
F.ETree(train_feature_sparse,train_label,test_feature_sparse,test_label,p_seed)
```
---
c) Compression
```
train_feature_=feature_selection_output.predict(C_train_x)
train_feature=F.compress_zero(train_feature_,key_feture_number)
print(train_feature.shape)
train_label=C_train_y
test_feature_=feature_selection_output.predict(C_test_x)
test_feature=F.compress_zero(test_feature_,key_feture_number)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
d) Compression with structure
```
train_feature_=feature_selection_output.predict(C_train_x)
train_feature=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(train_feature.shape)
train_label=C_train_y
test_feature_=feature_selection_output.predict(C_test_x)
test_feature=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
#### 4.1.2. On Original Selection
---
a) with zeros
```
train_feature=np.multiply(C_train_x, key_features)
print("train_feature>0: ",np.sum(train_feature[0]>0))
print(train_feature.shape)
train_label=C_train_y
test_feature=np.multiply(C_test_x, key_features)
print("test_feature>0: ",np.sum(test_feature[0]>0))
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
b) Sparse matrix
```
train_feature=np.multiply(C_train_x, key_features)
print(train_feature.shape)
train_label=C_train_y
test_feature=np.multiply(C_test_x, key_features)
print(test_feature.shape)
test_label=C_test_y
train_feature_sparse=sparse.coo_matrix(train_feature)
test_feature_sparse=sparse.coo_matrix(test_feature)
p_seed=seed
F.ETree(train_feature_sparse,train_label,test_feature_sparse,test_label,p_seed)
```
---
c) Compression
```
train_feature_=np.multiply(C_train_x, key_features)
train_feature=F.compress_zero(train_feature_,key_feture_number)
print(train_feature.shape)
train_label=C_train_y
test_feature_=np.multiply(C_test_x, key_features)
test_feature=F.compress_zero(test_feature_,key_feture_number)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
d) Compression with structure
```
train_feature_=np.multiply(C_train_x, key_features)
train_feature=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(train_feature.shape)
train_label=C_train_y
test_feature_=np.multiply(C_test_x, key_features)
test_feature=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
#### 4.1.3. Latent space
---
```
train_feature=latent_encoder_score_F_AE.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=latent_encoder_score_F_AE.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
train_feature=latent_encoder_choose_F_AE.predict(C_train_x)
print(train_feature.shape)
train_label=C_train_y
test_feature=latent_encoder_choose_F_AE.predict(C_test_x)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature,train_label,test_feature,test_label,p_seed)
```
---
# 6 Feature group compare
---
```
Selected_Weights=F.top_k_keep(F_AE.get_layer(index=1).get_weights()[0],key_feture_number)
selected_position_group=F.k_index_argsort_1d(Selected_Weights,key_feture_number)
train_feature_=np.multiply(C_train_x, key_features)
train_feature=F.compress_zero_withkeystructure(train_feature_,selected_position_group)
print(train_feature.shape)
train_label=C_train_y
test_feature_=np.multiply(C_test_x, key_features)
test_feature=F.compress_zero_withkeystructure(test_feature_,selected_position_group)
print(test_feature.shape)
test_label=C_test_y
p_seed=seed
F.ETree(train_feature[:,0:25],train_label,test_feature[:,0:25],test_label,p_seed)
p_seed=seed
F.ETree(train_feature[:,25:],train_label,test_feature[:,25:],test_label,p_seed)
p_seed=seed
F.ETree(train_feature[:,0:30],train_label,test_feature[:,0:30],test_label,p_seed)
p_seed=seed
F.ETree(train_feature[:,30:],train_label,test_feature[:,30:],test_label,p_seed)
```
# 7. Reconstruction loss
```
from sklearn.linear_model import LinearRegression
def mse_check(train, test):
LR = LinearRegression(n_jobs = -1)
LR.fit(train[0], train[1])
MSELR = ((LR.predict(test[0]) - test[1]) ** 2).mean()
return MSELR
train_feature_=np.multiply(C_train_x, key_features)
C_train_selected_x=F.compress_zero_withkeystructure(train_feature_,selected_position_list)
print(C_train_selected_x.shape)
test_feature_=np.multiply(C_test_x, key_features)
C_test_selected_x=F.compress_zero_withkeystructure(test_feature_,selected_position_list)
print(C_test_selected_x.shape)
train_feature_tuple=(C_train_selected_x,C_train_x)
test_feature_tuple=(C_test_selected_x,C_test_x)
reconstruction_loss=mse_check(train_feature_tuple, test_feature_tuple)
print(reconstruction_loss)
```
|
github_jupyter
|
## Part I: On-policy learning and SARSA
(3 points)
_This notebook builds upon `qlearning.ipynb`, or to be exact, generating qlearning.py._
The policy we're gonna use is epsilon-greedy policy, where agent takes optimal action with probability $(1-\epsilon)$, otherwise samples action at random. Note that agent __can__ occasionally sample optimal action during random sampling by pure chance.
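A minimal sketch of such an epsilon-greedy choice (illustrative only; `QLearningAgent.get_action` in `qlearning.py` is the actual implementation used below):
```
import random
import numpy as np

def epsilon_greedy(q_values, epsilon):
    # explore with probability epsilon, otherwise exploit the greedy action
    if random.random() < epsilon:
        return random.randrange(len(q_values))   # may still pick the best action by chance
    return int(np.argmax(q_values))

print(epsilon_greedy(np.array([0.1, 0.5, 0.3]), epsilon=0.2))
```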
```
# XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
    %env DISPLAY=:1
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%load_ext autoreload
%autoreload 2
from qlearning import QLearningAgent
class EVSarsaAgent(QLearningAgent):
"""
An agent that changes some of q-learning functions to implement Expected Value SARSA.
Note: this demo assumes that your implementation of QLearningAgent.update uses get_value(next_state).
If it doesn't, please add
def update(self, state, action, reward, next_state):
and implement it for Expected Value SARSA's V(s')
"""
def get_value(self, state):
"""
Returns Vpi for current state under epsilon-greedy policy:
V_{pi}(s) = sum _{over a_i} {pi(a_i | s) * Q(s, a_i)}
Hint: all other methods from QLearningAgent are still accessible.
"""
epsilon = self.epsilon
possible_actions = self.get_legal_actions(state)
# If there are no legal actions, return 0.0
if len(possible_actions) == 0:
return 0.0
        best_action = self.get_best_action(state)
        n_actions = len(possible_actions)
        # Epsilon-greedy policy: every action gets probability epsilon / n_actions,
        # and the greedy action gets an extra (1 - epsilon) of probability mass.
        state_value = sum(
            (epsilon / n_actions + (1 - epsilon) * (action == best_action)) * self.get_qvalue(state, action)
            for action in possible_actions)
        return state_value
```
### Cliff World
Let's now see how our algorithm compares against q-learning in case where we force agent to explore all the time.
<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/cliffworld.png width=600>
<center><i>image by cs188</i></center>
```
import gym
import gym.envs.toy_text
env = gym.envs.toy_text.CliffWalkingEnv()
n_actions = env.action_space.n
print(env.__doc__)
# Our cliffworld has one difference from what's on the image: there is no wall.
# Agent can choose to go as close to the cliff as it wishes. x:start, T:exit, C:cliff, o: flat ground
env.render()
def play_and_train(env, agent, t_max=10**4):
"""This function should
- run a full game, actions given by agent.getAction(s)
- train agent using agent.update(...) whenever possible
- return total reward"""
# global game
total_reward = 0.0
s = env.reset()
# game += 1
for t in range(t_max):
a = agent.get_action(s)
next_s, r, done, _ = env.step(a)
agent.update(s, a, r, next_s)
s = next_s
total_reward += r
# if t % 500 == 0:
# clear_output(True)
# env.render()
# print("game: {}".format(game), "step: {}".format(t), "total reward: {}".format(total_reward))
if done:
break
return total_reward
from qlearning import QLearningAgent
agent_sarsa = EVSarsaAgent(alpha=0.25, epsilon=0.2, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
agent_ql = QLearningAgent(alpha=0.25, epsilon=0.2, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
# from IPython.display import clear_output
# game = 0
# for i in range(100):
# reward = play_and_train(env, agent_sarsa)
# Note: agent.epsilon stays constant
from IPython.display import clear_output
from pandas import DataFrame
def moving_average(x, span=100):
    return DataFrame({'x': np.asarray(x)}).x.ewm(span=span).mean().values
rewards_sarsa, rewards_ql = [], []
for i in range(1000):
rewards_sarsa.append(play_and_train(env, agent_sarsa))
rewards_ql.append(play_and_train(env, agent_ql))
# Note: agent.epsilon stays constant
if i % 100 == 0:
clear_output(True)
print('EVSARSA mean reward =', np.mean(rewards_sarsa[-100:]))
print('QLEARNING mean reward =', np.mean(rewards_ql[-100:]))
plt.title("epsilon = %s" % agent_ql.epsilon)
plt.plot(moving_average(rewards_sarsa), label='ev_sarsa')
plt.plot(moving_average(rewards_ql), label='qlearning')
plt.grid()
plt.legend()
plt.ylim(-50000, 0)
plt.show()
```
Let's now see what did the algorithms learn by visualizing their actions at every state.
```
def draw_policy(env, agent):
""" Prints CliffWalkingEnv policy with arrows. Hard-coded. """
n_rows, n_cols = env._cliff.shape
actions = '^>v<'
for yi in range(n_rows):
for xi in range(n_cols):
if env._cliff[yi, xi]:
print(" C ", end='')
elif (yi * n_cols + xi) == env.start_state_index:
print(" X ", end='')
elif (yi * n_cols + xi) == n_rows * n_cols - 1:
print(" T ", end='')
else:
print(" %s " %
actions[agent.get_best_action(yi * n_cols + xi)], end='')
print()
print("Q-Learning")
draw_policy(env, agent_ql)
print("SARSA")
draw_policy(env, agent_sarsa)
```
### More on SARSA
Here are some of the things you can do if you feel like it:
* Play with epsilon. See how the learned policies change if you set epsilon to higher/lower values (e.g. 0.75).
* Expected Value SARSA for softmax policy __(2pts)__:
$$ \pi(a_i|s) = softmax({Q(s,a_i) \over \tau}) = {e ^ {Q(s,a_i)/ \tau} \over {\sum_{a_j} e ^{Q(s,a_j) / \tau }}} $$
* Implement N-step algorithms and TD($\lambda$): see [Sutton's book](http://incompleteideas.net/book/bookdraft2018jan1.pdf) chapter 7 and chapter 12.
* Use those algorithms to train on CartPole in previous / next assignment for this week.
### Homework part II: experience replay
(4 points)
There's a powerful technique that you can use to improve sample efficiency for off-policy algorithms: [spoiler] Experience replay :)
The catch is that you can train Q-learning and EV-SARSA on `<s,a,r,s'>` tuples even if they aren't sampled under current agent's policy. So here's what we're gonna do:
<img src=https://github.com/yandexdataschool/Practical_RL/raw/master/yet_another_week/_resource/exp_replay.png width=480>
#### Training with experience replay
1. Play game, sample `<s,a,r,s'>`.
2. Update q-values based on `<s,a,r,s'>`.
3. Store `<s,a,r,s'>` transition in a buffer.
3. If buffer is full, delete earliest data.
4. Sample K such transitions from that buffer and update q-values based on them.
To enable such training, first we must implement a memory structure that would act like such a buffer.
```
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
from IPython.display import clear_output
# XVFB will be launched if you run on a server
import os
if type(os.environ.get("DISPLAY")) is not str or len(os.environ.get("DISPLAY")) == 0:
!bash ../xvfb start
    %env DISPLAY=:1
import random
class ReplayBuffer(object):
def __init__(self, size):
"""
Create Replay buffer.
Parameters
----------
size: int
Max number of transitions to store in the buffer. When the buffer
overflows the old memories are dropped.
Note: for this assignment you can pick any data structure you want.
If you want to keep it simple, you can store a list of tuples of (s, a, r, s') in self._storage
However you may find out there are faster and/or more memory-efficient ways to do so.
"""
self._storage = []
self._maxsize = size
# OPTIONAL: YOUR CODE
def __len__(self):
return len(self._storage)
def add(self, obs_t, action, reward, obs_tp1, done):
'''
Make sure, _storage will not exceed _maxsize.
Make sure, FIFO rule is being followed: the oldest examples has to be removed earlier
'''
data = (obs_t, action, reward, obs_tp1, done)
        # add data to storage, dropping the oldest transition first if the buffer is full (FIFO)
        if len(self._storage) >= self._maxsize:
            self._storage.pop(0)
        self._storage.append(data)
def sample(self, batch_size):
"""Sample a batch of experiences.
Parameters
----------
batch_size: int
How many transitions to sample.
Returns
-------
obs_batch: np.array
batch of observations
act_batch: np.array
batch of actions executed given obs_batch
rew_batch: np.array
rewards received as results of executing act_batch
next_obs_batch: np.array
next set of observations seen after executing act_batch
done_mask: np.array
done_mask[i] = 1 if executing act_batch[i] resulted in
the end of an episode and 0 otherwise.
"""
        # randomly generate batch_size integers to be used as indexes of samples
        idxes = [random.randint(0, len(self._storage) - 1) for _ in range(batch_size)]

        # collect <s,a,r,s',done> for each index
        states, actions, rewards, next_states, is_done = zip(*[self._storage[i] for i in idxes])

        return (np.array(states), np.array(actions), np.array(rewards),
                np.array(next_states), np.array(is_done))
```
Some tests to make sure your buffer works right
```
replay = ReplayBuffer(2)
obj1 = tuple(range(5))
obj2 = tuple(range(5, 10))
replay.add(*obj1)
assert replay.sample(
1) == obj1, "If there's just one object in buffer, it must be retrieved by buf.sample(1)"
replay.add(*obj2)
assert len(
replay._storage) == 2, "Please make sure __len__ methods works as intended."
replay.add(*obj2)
assert len(replay._storage) == 2, "When buffer is at max capacity, replace objects instead of adding new ones."
assert tuple(np.unique(a) for a in replay.sample(100)) == obj2
replay.add(*obj1)
assert max(len(np.unique(a)) for a in replay.sample(100)) == 2
replay.add(*obj1)
assert tuple(np.unique(a) for a in replay.sample(100)) == obj1
print("Success!")
```
Now let's use this buffer to improve training:
```
import gym
from qlearning import QLearningAgent
env = gym.make("Taxi-v2")
n_actions = env.action_space.n
def play_and_train_with_replay(env, agent, replay=None,
t_max=10**4, replay_batch_size=32):
"""
This function should
- run a full game, actions given by agent.getAction(s)
- train agent using agent.update(...) whenever possible
- return total reward
:param replay: ReplayBuffer where agent can store and sample (s,a,r,s',done) tuples.
If None, do not use experience replay
"""
total_reward = 0.0
s = env.reset()
for t in range(t_max):
        # get agent to pick action given state s
        a = agent.get_action(s)

        next_s, r, done, _ = env.step(a)

        # update agent on current transition. Use agent.update
        agent.update(s, a, r, next_s)

        if replay is not None:
            # store current <s,a,r,s'> transition in buffer
            replay.add(s, a, r, next_s, done)

            # sample replay_batch_size random transitions from replay,
            # then update agent on each of them in a loop
            s_, a_, r_, next_s_, done_ = replay.sample(replay_batch_size)
            for i in range(replay_batch_size):
                agent.update(s_[i], a_[i], r_[i], next_s_[i])
s = next_s
total_reward += r
if done:
break
return total_reward
# Create two agents: first will use experience replay, second will not.
agent_baseline = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
agent_replay = QLearningAgent(alpha=0.5, epsilon=0.25, discount=0.99,
get_legal_actions=lambda s: range(n_actions))
replay = ReplayBuffer(1000)
from IPython.display import clear_output
rewards_replay, rewards_baseline = [], []
for i in range(1000):
rewards_replay.append(
play_and_train_with_replay(env, agent_replay, replay))
rewards_baseline.append(play_and_train_with_replay(
env, agent_baseline, replay=None))
agent_replay.epsilon *= 0.99
agent_baseline.epsilon *= 0.99
if i % 100 == 0:
clear_output(True)
print('Baseline : eps =', agent_baseline.epsilon,
'mean reward =', np.mean(rewards_baseline[-10:]))
print('ExpReplay: eps =', agent_replay.epsilon,
'mean reward =', np.mean(rewards_replay[-10:]))
plt.plot(moving_average(rewards_replay), label='exp. replay')
plt.plot(moving_average(rewards_baseline), label='baseline')
plt.grid()
plt.legend()
plt.show()
```
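For reference, a possible solution sketch for the function above (the method names `get_action` and `update` are assumptions based on the `QLearningAgent` used earlier; adjust them to whatever your agent actually exposes):
```
# Illustrative solution sketch, not the only valid implementation.
def play_and_train_with_replay_sketch(env, agent, replay=None,
                                      t_max=10**4, replay_batch_size=32):
    total_reward = 0.0
    s = env.reset()
    for t in range(t_max):
        a = agent.get_action(s)                  # pick action epsilon-greedily
        next_s, r, done, _ = env.step(a)
        agent.update(s, a, r, next_s)            # learn from the current transition
        if replay is not None:
            replay.add(s, a, r, next_s, done)    # store the transition
            s_, a_, r_, next_s_, done_ = replay.sample(replay_batch_size)
            for i in range(replay_batch_size):   # replay a batch of past transitions
                agent.update(s_[i], a_[i], r_[i], next_s_[i])
        s = next_s
        total_reward += r
        if done:
            break
    return total_reward
```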
#### What to expect:
Experience replay, if implemented correctly, will improve the algorithm's initial convergence a lot, but it shouldn't affect the final performance.
### Outro
We will use the code you just wrote extensively in the next week of our course. If you're feeling that you need more examples to understand how experience replay works, try using it for binarized state spaces (CartPole or other __[classic control envs](https://gym.openai.com/envs/#classic_control)__).
__Next week__ we're gonna explore how q-learning and similar algorithms can be applied for large state spaces, with deep learning models to approximate the Q function.
However, __the code you've written__ for this week is already capable of solving many RL problems, and as an added benefit it is very easy to detach. You can use Q-learning, SARSA and Experience Replay for any RL problems you want to solve - just throw 'em into a file and import the stuff you need.
### Bonus I: TD($ \lambda $) (5+ points)
There's a number of advanced algorithms you can find in week 3 materials (Silver lecture II and/or reading about eligibility traces). One such algorithm is TD(lambda), which is based on the idea of eligibility traces. You can also view it as a combination of N-step updates for all N.
* N-step temporal difference from Sutton's book - [url](http://incompleteideas.net/sutton/book/ebook/node73.html)
* Eligibility traces from Sutton's book - [url](http://incompleteideas.net/sutton/book/ebook/node72.html)
* Blog post on eligibility traces - [url](http://pierrelucbacon.com/traces/)
Here's a practical algorithm you can start with: [url](https://stackoverflow.com/questions/40862578/how-to-understand-watkinss-q%CE%BB-learning-algorithm-in-suttonbartos-rl-book/40892302)
Implementing this algorithm will prove more challenging than q-learning or sarsa, but doing so will earn you a deeper understanding of how value-based methods work [in addition to some bonus points].
More kudos for comparing and analyzing TD($\lambda$) against Q-learning and EV-SARSA in different setups (taxi vs cartpole, constant epsilon vs decreasing epsilon).
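As a starting point, here is a rough tabular sketch of the eligibility-trace bookkeeping for a single step (accumulating traces, SARSA($\lambda$)-style rather than the full Watkins's Q($\lambda$); all names are illustrative):
```
# One step of tabular SARSA(lambda) with accumulating eligibility traces (illustrative).
def td_lambda_step(Q, E, s, a, r, s_next, a_next, alpha, gamma, lam):
    """Q and E are dicts mapping (state, action) -> value / trace."""
    delta = r + gamma * Q.get((s_next, a_next), 0.0) - Q.get((s, a), 0.0)
    E[(s, a)] = E.get((s, a), 0.0) + 1.0      # bump the trace of the visited pair
    for key in list(E.keys()):                # every traced pair shares the TD error
        Q[key] = Q.get(key, 0.0) + alpha * delta * E[key]
        E[key] *= gamma * lam                 # decay all traces towards zero
    return Q, E
```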
### Bonus II: More pacman (5+ points)
__see README.md for software requirements of seminar_py2__
Remember seminar_py2, where your vanilla q-learning had a hard time solving Pacman even on a small grid? Now's the time to fix that issue.
We'll focus on those grids for pacman setup.
* python pacman.py -p PacmanQAgent -x N_TRAIN_GAMES -n N_TOTAL_GAMES -l __mediumGrid__
* python pacman.py -p PacmanQAgent -x N_TRAIN_GAMES -n N_TOTAL_GAMES -l __mediumClassic__
Even if you adjust N_TRAIN_GAMES to 10^5 and N_TOTAL_GAMES to 10^5+100 (the last 100 games are for testing), pacman won't solve those environments.
The problem with those environments is that they have a large amount of unique states. However, you can devise a smaller environment state by choosing different observation parameters, e.g.:
* distance and direction to nearest ghost
* where is nearest food
* 'center of mass' of all food points (and variance, and whatever)
* is there a wall in each direction
* and anything else you see fit
Here's how to get this information from [state](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L49),
* Get pacman position: [state.getPacmanPosition()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L128)
* Is there a wall at (x,y)?: [state.hasWall(x,y)](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L189)
* Get ghost positions: [state.getGhostPositions()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2t/pacman.py#L144)
* Get all food positions: [state.getCapsules()](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/pacman.py#L153)
You can call those methods anywhere you see state.
* e.g. in [agent.getValue(state)](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L52)
* Defining a function that extracts all features and calling it in [getQValue](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L38) and [setQValue](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L44) is probably enough.
* You can also change agent parameters. The simplest way is to hard-code them in [PacmanQAgent](https://github.com/yandexdataschool/Practical_RL/blob/master/week3_model_free/seminar_py2/qlearningAgents.py#L140)
Also, don't forget to optimize ```learning_rate```, ```discount``` and ```epsilon``` params of model, this may also help to solve this env.
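For example, a rough sketch of a state-abstraction function built from the accessors listed above (the function name and the exact feature set are illustrative, not part of the assignment code):
```
# Illustrative feature extractor for a Pacman state (seminar_py2 API).
def extract_features(state):
    px, py = [int(v) for v in state.getPacmanPosition()]
    ghosts = state.getGhostPositions()
    # Manhattan distance to the nearest ghost (-1 if there are none)
    nearest_ghost = min([abs(gx - px) + abs(gy - py) for gx, gy in ghosts]) if ghosts else -1
    # Which of the four neighbouring cells are walls
    walls = tuple(state.hasWall(px + dx, py + dy)
                  for dx, dy in ((1, 0), (-1, 0), (0, 1), (0, -1)))
    return (nearest_ghost, walls)
```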
|
github_jupyter
|
# Facies classification using Machine Learning #
## LA Team Submission 5 ##
### _[Lukas Mosser](https://at.linkedin.com/in/lukas-mosser-9948b32b/en), [Alfredo De la Fuente](https://pe.linkedin.com/in/alfredodelafuenteb)_ ####
In this approach for solving the facies classification problem (https://github.com/seg/2016-ml-contest), we will explore the following strategies:
- Features Exploration: based on [Paolo Bestagini's work](https://github.com/seg/2016-ml-contest/blob/master/ispl/facies_classification_try02.ipynb), we will consider imputation, normalization and augmentation routines for the initial features.
- Model tuning:
## Libraries
We will need to install the following libraries and packages.
```
# %%sh
# pip install pandas
# pip install scikit-learn
# pip install tpot
from __future__ import print_function
import numpy as np
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold , StratifiedKFold
from classification_utilities import display_cm, display_adj_cm
from sklearn.metrics import confusion_matrix, f1_score
from sklearn import preprocessing
from sklearn.model_selection import LeavePGroupsOut
from sklearn.multiclass import OneVsOneClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.signal import medfilt
```
## Data Preprocessing
```
#Load Data
data = pd.read_csv('../facies_vectors.csv')
# Parameters
feature_names = ['GR', 'ILD_log10', 'DeltaPHI', 'PHIND', 'PE', 'NM_M', 'RELPOS']
facies_names = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00', '#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
# Store features and labels
X = data[feature_names].values
y = data['Facies'].values
# Store well labels and depths
well = data['Well Name'].values
depth = data['Depth'].values
# Fill 'PE' missing values with mean
imp = preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0)
imp.fit(X)
X = imp.transform(X)
```
We proceed to run [Paolo Bestagini's routine](https://github.com/seg/2016-ml-contest/blob/master/ispl/facies_classification_try02.ipynb) to include a small window of values to account for the spatial component in the log analysis, as well as the gradient information with respect to depth. This will be our prepared training dataset.
```
# Feature windows concatenation function
def augment_features_window(X, N_neig):
# Parameters
N_row = X.shape[0]
N_feat = X.shape[1]
# Zero padding
X = np.vstack((np.zeros((N_neig, N_feat)), X, (np.zeros((N_neig, N_feat)))))
# Loop over windows
X_aug = np.zeros((N_row, N_feat*(2*N_neig+1)))
for r in np.arange(N_row)+N_neig:
this_row = []
for c in np.arange(-N_neig,N_neig+1):
this_row = np.hstack((this_row, X[r+c]))
X_aug[r-N_neig] = this_row
return X_aug
# Feature gradient computation function
def augment_features_gradient(X, depth):
# Compute features gradient
d_diff = np.diff(depth).reshape((-1, 1))
d_diff[d_diff==0] = 0.001
X_diff = np.diff(X, axis=0)
X_grad = X_diff / d_diff
# Compensate for last missing value
X_grad = np.concatenate((X_grad, np.zeros((1, X_grad.shape[1]))))
return X_grad
# Feature augmentation function
def augment_features(X, well, depth, N_neig=1):
# Augment features
X_aug = np.zeros((X.shape[0], X.shape[1]*(N_neig*2+2)))
for w in np.unique(well):
w_idx = np.where(well == w)[0]
X_aug_win = augment_features_window(X[w_idx, :], N_neig)
X_aug_grad = augment_features_gradient(X[w_idx, :], depth[w_idx])
X_aug[w_idx, :] = np.concatenate((X_aug_win, X_aug_grad), axis=1)
# Find padded rows
padded_rows = np.unique(np.where(X_aug[:, 0:7] == np.zeros((1, 7)))[0])
return X_aug, padded_rows
X_aug, padded_rows = augment_features(X, well, depth)
# # Initialize model selection methods
# lpgo = LeavePGroupsOut(2)
# # Generate splits
# split_list = []
# for train, val in lpgo.split(X, y, groups=data['Well Name']):
# hist_tr = np.histogram(y[train], bins=np.arange(len(facies_names)+1)+.5)
# hist_val = np.histogram(y[val], bins=np.arange(len(facies_names)+1)+.5)
# if np.all(hist_tr[0] != 0) & np.all(hist_val[0] != 0):
# split_list.append({'train':train, 'val':val})
def preprocess():
# Preprocess data to use in model
X_train_aux = []
X_test_aux = []
y_train_aux = []
y_test_aux = []
# For each data split
split = split_list[5]
# Remove padded rows
split_train_no_pad = np.setdiff1d(split['train'], padded_rows)
# Select training and validation data from current split
X_tr = X_aug[split_train_no_pad, :]
X_v = X_aug[split['val'], :]
y_tr = y[split_train_no_pad]
y_v = y[split['val']]
# Select well labels for validation data
well_v = well[split['val']]
# Feature normalization
scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
X_tr = scaler.transform(X_tr)
X_v = scaler.transform(X_v)
X_train_aux.append( X_tr )
X_test_aux.append( X_v )
y_train_aux.append( y_tr )
y_test_aux.append ( y_v )
X_train = np.concatenate( X_train_aux )
X_test = np.concatenate ( X_test_aux )
y_train = np.concatenate ( y_train_aux )
y_test = np.concatenate ( y_test_aux )
return X_train , X_test , y_train , y_test
```
## Data Analysis
In this section, we will run a cross-validation routine.
```
# from tpot import TPOTClassifier
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = preprocess()
# tpot = TPOTClassifier(generations=5, population_size=20,
# verbosity=2,max_eval_time_mins=20,
# max_time_mins=100,scoring='f1_micro',
# random_state = 17)
# tpot.fit(X_train, y_train)
# print(tpot.score(X_test, y_test))
# tpot.export('FinalPipeline.py')
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import BernoulliNB
from sklearn.pipeline import make_pipeline, make_union
from sklearn.preprocessing import FunctionTransformer
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
# Train and test a classifier
# Pass in the classifier so we can iterate over many seed later.
def train_and_test(X_tr, y_tr, X_v, well_v, clf):
# Feature normalization
scaler = preprocessing.RobustScaler(quantile_range=(25.0, 75.0)).fit(X_tr)
X_tr = scaler.transform(X_tr)
X_v = scaler.transform(X_v)
clf.fit(X_tr, y_tr)
# Test classifier
y_v_hat = clf.predict(X_v)
# Clean isolated facies for each well
for w in np.unique(well_v):
y_v_hat[well_v==w] = medfilt(y_v_hat[well_v==w], kernel_size=5)
return y_v_hat
```
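As a quick sanity check before the final prediction, `train_and_test` can be scored on a single held-out well using the metrics imported above (a sketch; the well name and classifier settings here are illustrative assumptions):
```
# Illustrative hold-out check: leave one well out and score it with train_and_test
hold_well = 'SHANKLE'   # assumed to be one of the wells in facies_vectors.csv
tr_idx = np.setdiff1d(np.where(well != hold_well)[0], padded_rows)
v_idx = np.where(well == hold_well)[0]
clf_check = RandomForestClassifier(n_estimators=100, random_state=0)
y_v_hat = train_and_test(X_aug[tr_idx], y[tr_idx], X_aug[v_idx], well[v_idx], clf_check)
print('Micro-averaged F1 on %s: %.3f' % (hold_well, f1_score(y[v_idx], y_v_hat.astype(int), average='micro')))
```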
## Prediction
```
#Load testing data
test_data = pd.read_csv('../validation_data_nofacies.csv')
# Train classifier
#clf = make_pipeline(make_union(VotingClassifier([("est", ExtraTreesClassifier(criterion="gini", max_features=1.0, n_estimators=500))]), FunctionTransformer(lambda X: X)), XGBClassifier(learning_rate=0.73, max_depth=10, min_child_weight=10, n_estimators=500, subsample=0.27))
#clf = make_pipeline( KNeighborsClassifier(n_neighbors=5, weights="distance") )
#clf = make_pipeline(MaxAbsScaler(),make_union(VotingClassifier([("est", RandomForestClassifier(n_estimators=500))]), FunctionTransformer(lambda X: X)),ExtraTreesClassifier(criterion="entropy", max_features=0.0001, n_estimators=500))
# * clf = make_pipeline( make_union(VotingClassifier([("est", BernoulliNB(alpha=60.0, binarize=0.26, fit_prior=True))]), FunctionTransformer(lambda X: X)),RandomForestClassifier(n_estimators=500))
# # Prepare training data
# X_tr = X
# y_tr = y
# # Augment features
# X_tr, padded_rows = augment_features(X_tr, well, depth)
# # Removed padded rows
# X_tr = np.delete(X_tr, padded_rows, axis=0)
# y_tr = np.delete(y_tr, padded_rows, axis=0)
# Prepare test data
well_ts = test_data['Well Name'].values
depth_ts = test_data['Depth'].values
X_ts = test_data[feature_names].values
y_pred = []
print('.' * 100)
for seed in range(100):
np.random.seed(seed)
# Make training data.
X_train, padded_rows = augment_features(X, well, depth)
y_train = y
X_train = np.delete(X_train, padded_rows, axis=0)
y_train = np.delete(y_train, padded_rows, axis=0)
# Train classifier
clf = make_pipeline(XGBClassifier(learning_rate=0.12,
max_depth=3,
min_child_weight=10,
n_estimators=150,
seed=seed,
colsample_bytree=0.9))
# Make blind data.
X_test, _ = augment_features(X_ts, well_ts, depth_ts)
# Train and test.
y_ts_hat = train_and_test(X_train, y_train, X_test, well_ts, clf)
# Collect result.
y_pred.append(y_ts_hat)
print('|', end='')
np.save('LA_Team_100_realizations.npy', y_pred)
# # Augment features
# X_ts, padded_rows = augment_features(X_ts, well_ts, depth_ts)
# # Predict test labels
# y_ts_hat = train_and_test(X_tr, y_tr, X_ts, well_ts)
# # Save predicted labels
# test_data['Facies'] = y_ts_hat
# test_data.to_csv('Prediction_XX_Final.csv')
```
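The 100 realizations saved above are not combined here; a minimal sketch of one way to reduce them to a single label per sample is a per-sample majority vote (the output file name below is illustrative):
```
# Illustrative aggregation of the saved realizations into one prediction per sample
y_pred_all = np.load('LA_Team_100_realizations.npy')   # shape (100, n_test_samples)
y_pred_final = np.array([np.bincount(col.astype(int)).argmax() for col in y_pred_all.T])
test_data['Facies'] = y_pred_final
test_data.to_csv('Prediction_majority_vote.csv')
```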
|
github_jupyter
|
TSG086 - Run `top` in all containers
====================================
Steps
-----
### Instantiate Kubernetes client
```
# Instantiate the Python Kubernetes client into 'api' variable
import os
from IPython.display import Markdown
try:
from kubernetes import client, config
from kubernetes.stream import stream
except ImportError:
# Install the Kubernetes module
import sys
!{sys.executable} -m pip install kubernetes
try:
from kubernetes import client, config
from kubernetes.stream import stream
except ImportError:
display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.'))
raise
if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ:
config.load_incluster_config()
else:
try:
config.load_kube_config()
except:
display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.'))
raise
api = client.CoreV1Api()
print('Kubernetes client instantiated')
```
### Get the namespace for the big data cluster
Get the namespace of the Big Data Cluster from the Kubernetes API.
**NOTE:**
If there is more than one Big Data Cluster in the target Kubernetes
cluster, then either:
- set \[0\] to the correct value for the big data cluster.
- set the environment variable AZDATA\_NAMESPACE, before starting
Azure Data Studio.
```
# Place Kubernetes namespace name for BDC into 'namespace' variable
if "AZDATA_NAMESPACE" in os.environ:
namespace = os.environ["AZDATA_NAMESPACE"]
else:
try:
namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name
except IndexError:
from IPython.display import Markdown
display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.'))
display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.'))
raise
print('The kubernetes namespace for your big data cluster is: ' + namespace)
```
### Run top in each container
```
cmd = "top -b -n 1"
pod_list = api.list_namespaced_pod(namespace)
pod_names = [pod.metadata.name for pod in pod_list.items]
for pod in pod_list.items:
container_names = [container.name for container in pod.spec.containers]
for container in container_names:
print (f"CONTAINER: {container} / POD: {pod.metadata.name}")
try:
print(stream(api.connect_get_namespaced_pod_exec, pod.metadata.name, namespace, command=['/bin/sh', '-c', cmd], container=container, stderr=True, stdout=True))
except Exception as err:
print (f"Failed to get run 'top' for container: {container} in pod: {pod.metadata.name}. Error: {err}")
print("Notebook execution is complete.")
```
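If the raw output needs to be kept for later inspection rather than printed, the same loop can store each container's text in a dictionary keyed by pod and container name (a sketch reusing the `stream` call above):
```
# Sketch: capture the `top` output per (pod, container) instead of printing it
top_output = {}
for pod in pod_list.items:
    for container in [c.name for c in pod.spec.containers]:
        try:
            top_output[(pod.metadata.name, container)] = stream(api.connect_get_namespaced_pod_exec, pod.metadata.name, namespace, command=['/bin/sh', '-c', cmd], container=container, stderr=True, stdout=True)
        except Exception as err:
            top_output[(pod.metadata.name, container)] = f"ERROR: {err}"
print(f"Captured 'top' output from {len(top_output)} containers")
```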
|
github_jupyter
|
# TASK #1: DEFINE SINGLE AND MULTI-DIMENSIONAL NUMPY ARRAYS
```
# NumPy is a Linear Algebra Library used for multidimensional arrays
# NumPy brings the best of two worlds: (1) C/Fortran computational efficiency, (2) Python language easy syntax
# Let's define a one-dimensional array
import numpy as np
list_1 = [6,8,40,80,200,679,800,1289]
list_1
# Let's create a numpy array from the list "my_list"
my_numpy_array = np.array(list_1)
my_numpy_array
type(my_numpy_array)
# Multi-dimensional (Matrix definition)
my_matrix = np.array([[2,5,8],[3,7,9]])
print(my_matrix)
type(my_matrix)
```
MINI CHALLENGE #1:
- Write a code that creates the following 2x4 numpy array
```
[[3 7 9 3]
[4 3 2 2]]
```
```
array_matrix = np.array([[3,7,9,3],
[4,3,2,2]])
print(array_matrix)
```
# TASK #2: LEVERAGE NUMPY BUILT-IN METHODS AND FUNCTIONS
```
# "rand()" uniform distribution between 0 and 1
x = np.random.rand(20)
x
# "randint" is used to generate random integers between upper and lower bounds
y = np.random.rand(3,3)
y
# "randint" can be used to generate a certain number of random itegers as follows
z = np.random.randint(1,50)
print(z)
a = np.random.randint(1,40,9)
print(a)
# np.arange creates evenly spaced values within a given interval
b = np.arange(1,50,5)
print(b)
c = np.arange(1,20)
c
# np.eye creates an identity matrix: ones on the diagonal and zeros everywhere else
d = np.eye(4)
d
# Matrix of ones
e = np.ones((7,3))
e
# Array of zeros
f = np.zeros((8,2))
f
```
MINI CHALLENGE #2:
- Write a code that takes in a positive integer "x" from the user and creates a 1x10 array with random numbers ranging from 0 to "x"
```
x = int(input("Enter the number for the range: "))
g = np.random.randint(0,x,10)
print(g)
```
# TASK #3: PERFORM MATHEMATICAL OPERATIONS IN NUMPY
```
# np.arange() returns evenly spaced values within a given interval
h = np.arange(1,9,2)
print(h)
i = np.arange(1,9,2)
print(i)
# Add 2 numpy arrays together
j = h+i
print(j)
k = h**i
print(k)
l = h**2
print(l)
m = np.sqrt(l)
print(m)
n = np.exp(i)
print(n)
```
MINI CHALLENGE #3:
- Given the X and Y values below, obtain the distance between them
```
X = [5, 7, 20]
Y = [9, 15, 4]
```
```
x = np.array([5, 7, 20])
print(x)
y = np.array([9, 15, 4])
print(y)
# Element-wise difference between the two vectors
diff = x - y
print(diff)
# Euclidean distance between X and Y
distance = np.sqrt(np.sum(diff**2))
print(distance)
```
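The same result can be obtained with NumPy's built-in Euclidean norm:
```
# Equivalent one-liner using NumPy's built-in Euclidean norm
distance = np.linalg.norm(x - y)
print(distance)
```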
# TASK #4: PERFORM ARRAYS SLICING AND INDEXING
```
num_py_array = np.array([5,7,8,2,6,10,16,25,67,89])
print(num_py_array)
# Access specific index from the numpy array
num_py_array[4]
num_py_array[-1]
# Starting from the first index 0 up until and NOT including the last element
num_py_array[0:-1]
# Broadcasting, altering several values in a numpy array at once
my_numpy_array[0:4]= 7
print(my_numpy_array)
# Let's define a two dimensional numpy array
matrix_numpy = np.random.randint(1,10,(4,4))
print(matrix_numpy)
# Get a row from a matrix
print(matrix_numpy[0])
print(matrix_numpy[-1])
# Get one element
print(matrix_numpy[0][3])
print(matrix_numpy[2][1])
```
MINI CHALLENGE #4:
- In the following matrix, replace the last row with 0
```
X = [2 30 20 -2 -4]
[3 4 40 -3 -2]
[-3 4 -6 90 10]
[25 45 34 22 12]
[13 24 22 32 37]
```
```
x = np.array([[2, 30, 20, -2, -4],[3, 4, 40, -3, -2],[-3, 4, -6, 90 ,10],[25, 45, 34, 22, 12],
[13 ,24, 22, 32, 37]])
print(x)
print(x[-1])
x[-1]=0
print(x)
```
# TASK #5: PERFORM ELEMENTS SELECTION (CONDITIONAL)
```
A = np.random.randint(1,100,(5,5))
print(A)
B = A[A > 4]
print(B)
# Obtain odd elements only
C = A[A % 2 ==1]
print(C)
D = A[A % 2 != 0]
print(D)
```
MINI CHALLENGE #5:
- In the following matrix, replace negative elements by 0 and replace odd elements with -2
```
X = [2 30 20 -2 -4]
[3 4 40 -3 -2]
[-3 4 -6 90 10]
[25 45 34 22 12]
[13 24 22 32 37]
```
```
x = np.array([[2, 30, 20, -2, -4],[3, 4, 40, -3, -2],[-3, 4, -6, 90 ,10],[25, 45, 34, 22, 12],
[13 ,24, 22, 32, 37]])
print(x)
x[x < 0]=0
print(x)
x[x%2!=0] = -2
print(x)
```
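The same conditional replacements can also be written with `np.where`, which builds a new array instead of modifying in place:
```
# Alternative using np.where (returns a new array rather than assigning in place)
x2 = np.array([[2, 30, 20, -2, -4],[3, 4, 40, -3, -2],[-3, 4, -6, 90, 10],
               [25, 45, 34, 22, 12],[13, 24, 22, 32, 37]])
x2 = np.where(x2 < 0, 0, x2)
x2 = np.where(x2 % 2 != 0, -2, x2)
print(x2)
```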
|
github_jupyter
|
```
import pandas as pd
import scipy.io
import os
import matplotlib.pyplot as plt
path = os.getcwd()
matlab_exe_path = '''matlab'''
julia_path = '''C:\\Users\\mwaugh\\AppData\\Local\\Programs\\Julia\\Julia-1.4.0\\bin\\julia.exe'''
path = "src\\calibration"
#fig_path = "C:\\users\mwaugh\\github\\perla_tonetti_waugh\\Figures"
```
---
### [Equilibrium Technology Diffusion, Trade, and Growth](https://christophertonetti.com/files/papers/PerlaTonettiWaugh_DiffusionTradeAndGrowth.pdf) by Perla, Tonetti, and Waugh (AER 2020)
---
## 7.5. The Role of Firm Dynamics and Adoption Costs
#### Table of Contents
- [GBM](#gbm)
- [Delta Shock](#detla)
- [Connection to Welfare Decomposition (Section 7.3)](#dcomp)
The underlying MATLAB code is described (with links to the relevant ``.m`` files) in the [readme file in the calibration folder](/src/calibration/README.md).
---
### <a name="gbm"></a> Importance of Firm Productivity Shocks (GBM)
We uniformly scale up and down the GBM variance and mean for different values of the adoption cost parameter chi. The large value of chi is ten percent larger than the baseline calibrated value. The small value of chi is ten percent smaller than the baseline calibrated value. All other parameter values are fixed, i.e., we do not re-calibrate the model when changing these parameter values.
##### Step 1. Compute outcomes for different GBM parameter values
First, we compute a key input for the figures, saved to [``/output/robust/gbm/closest_chi_params.csv``](/output/robust/gbm/closest_chi_params.csv). Each row in this file contains the parameter values that generate a BGP equilibrium growth rate that matches the baseline aggregate growth when externally fixing a set value for chi.
```
matlab_cmd = '''"cd('src\calibration');robust_no_recalibrate_gbm;"'''
!{matlab_exe_path} -batch {matlab_cmd}
```
##### Step 2. Create Figure 6 in PTW 2020
The code below reads in the output from matlab and then plots the results to generate Figure 6 of PTW.
```
cnames = ['gold', 'gnew', 'gdiff', "welfare", 'upsilon']
mat = scipy.io.loadmat(path + "\\output\\robust\\gbm\\norecalibrate_values_gbm_1.mat")
nocaldf = pd.DataFrame(mat["record_values"])
nocaldf.columns = cnames
nocaldf["gdiff"] = -nocaldf["gdiff"]
nocaldf.sort_values(["upsilon"], inplace = True)
base_chi = str(round(mat["chi_value"][0][0],3))
mat = scipy.io.loadmat(path + "\\output\\robust\\gbm\\norecalibrate_values_gbm_0.9.mat")
nocaldf_lowchi = pd.DataFrame(mat["record_values"])
nocaldf_lowchi.columns = cnames
nocaldf_lowchi["gdiff"] = -nocaldf_lowchi["gdiff"]
nocaldf_lowchi.sort_values(["upsilon"], inplace = True)
low_chi = str(round(mat["chi_value"][0][0],3))
mat = scipy.io.loadmat(path + "\\output\\robust\\gbm\\norecalibrate_values_gbm_1.1.mat")
nocaldf_higchi = pd.DataFrame(mat["record_values"])
nocaldf_higchi.columns = cnames
nocaldf_higchi["gdiff"] = -nocaldf_higchi["gdiff"]
nocaldf_higchi.sort_values(["upsilon"], inplace = True)
hig_chi = str(round(mat["chi_value"][0][0],3))
fig, ax = plt.subplots(2,2,figsize = (12,10))
fig.tight_layout(pad = 6)
position = (0,0)
ax[position].plot(nocaldf.upsilon, 100*nocaldf["gdiff"], lw = 4)
ax[position].plot(nocaldf_lowchi.upsilon, 100*nocaldf_lowchi["gdiff"], lw = 4, color = 'r')
ax[position].plot(nocaldf_higchi.upsilon, 100*nocaldf_higchi["gdiff"], lw = 4, color = 'k')
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.004,0.07)
ax[position].set_ylim(0.10,0.35)
ax[position].set_ylabel("\n Change in Productivity Growth \n Percentage Points", fontsize = 12)
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].vlines(0.048, 0.00, 0.234,
color='k',
linestyle='--',
lw = 3)
ax[position].hlines(0.234, -0.001, 0.048 ,
color='k',
label="Calibrated Values",
linestyle='--',
lw = 3)
###################################################################################
position = (0,1)
ax[position].plot(nocaldf.upsilon, 100*nocaldf["gold"], lw = 4, label = "Calibrated χ = " + base_chi)
ax[position].plot(nocaldf_lowchi.upsilon, 100*nocaldf_lowchi["gold"], lw = 4, color = 'red', label = "Large χ = " + low_chi)
ax[position].plot(nocaldf_higchi.upsilon, 100*nocaldf_higchi["gold"], lw = 4, color = 'k', label = "Small χ = " + hig_chi)
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.004,0.07)
ax[position].set_ylim(0.0,3.10)
#ax.set_ylim(0,0.40)
ax[position].set_xlabel("\n GBM Variance Parameter", fontsize = 12)
ax[position].set_ylabel("\n Initial SS Productivity Growth", fontsize = 12)
ax[position].hlines(0.79, -0.001, 0.048,
color='k',
label="Calibrated Values",
linestyle='--',
lw = 3)
ax[position].vlines(0.048, 0, 0.79,
color='k',
linestyle='--',
lw = 3)
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].legend(bbox_to_anchor=(0., -1.25, 1., .102),frameon = False, fontsize = 14, loc = 4)
#########################################################################################
position = (1,0)
series = "welfare"
ax[position].plot(nocaldf.upsilon, 100*nocaldf[series], lw = 4, label = "Calibrated χ")
ax[position].plot(nocaldf_lowchi.upsilon, 100*nocaldf_lowchi[series], lw = 4, color = 'red', label = "Large χ")
ax[position].plot(nocaldf_higchi.upsilon, 100*nocaldf_higchi[series], lw = 4, color = 'k', label = "Small χ")
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.004,0.07)
ax[position].set_ylim(5,15)
#ax.set_ylim(0,0.40)
ax[position].set_xlabel("\n GBM Variance Parameter", fontsize = 12)
ax[position].set_ylabel("\n Welfare Gain, Percent", fontsize = 12)
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].vlines(0.048, 0, 11.18,
color='k',
linestyle='--',
lw = 3) # thickness of the line
ax[position].hlines(11.18, -0.001, 0.048 ,
color='k',
label="Calibrated Values",
linestyle='--',
lw = 3)
#############################################################################################
position = (1,1)
ax[position].axis('off')
#plt.savefig(fig_path + "\\gbm_chi.pdf", bbox_inches = "tight", dip = 3600)
plt.show()
```
##### Discussion of these results from PTW text
The first thing to observe from Figure 6 is that the percentage point change in productivity is nearly constant across different values of the variance parameter. In other words, the variance does not much affect the response of growth to a change in trade costs.
The parameter which does influence the change in growth is the adoption cost parameter. The three different lines on the left panel in Figure 6 illustrate this point. A small value of chi (top black line) corresponds to small costs of adoption. When adoption costs are small, growth is more responsive to changes in trade costs. In contrast, a large value of chi (bottom red line) corresponds to large adoption costs and a smaller response of growth to trade costs. The closed form equations available in the non-GBM version of the model deliver some insight. Equation 35 shows that the change in the growth rate for a given change in trade costs is larger when adoption costs are smaller.
Even though the elasticity of growth to trade costs is not sensitive to the value of the GBM variance parameter (holding adoption costs constant), the value of the GBM variance parameter—and, thus, the firm dynamics data—strongly influences the calibrated value of the adoption cost. The right panel in Figure 6 illustrates this point by tracing out how the growth rate in the initial steady state varies with the GBM variance parameter. For a given chi value, there is a near linear decrease in the steady state growth rate as the variance increases. Across chi values, the slope is essentially the same, but the intercept shifts, with smaller chi values leading to higher growth rates. This is intuitive—lower adoption costs lead to more adoption and faster economic growth.
The implication of these observations is that data on firm dynamics influences the inferred adoption cost and, thus, the elasticity of growth to trade costs. For example, holding fixed our target of an aggregate growth rate of 0.79 percent, if the transition matrix of relative size (Table 3) had pushed us to find a smaller value for the GBM variance parameter, then the right panel of Figure 6 shows this would have led us to calibrate a larger value for chi. Combining this observation with the left panel of Figure 6, our calibration strategy would have then led to a smaller increase in the growth rate for the same decrease in trade costs.
The lower panel of Figure 6 shows that the welfare gains from trade (comparing BGPs) are nearly constant across values of the GBM variance parameter, but sensitive to the value of chi, just like the elasticity of growth to trade costs. Thus, the value of chi is crucial for determining both the change in growth and the welfare gains from trade. Even though the GBM variance parameter does not much affect the welfare gains from trade when holding all other parameters constant, different values of the GBM variance parameter (which are associated with different firm dynamics moments) affect the calibration of chi. It is in this sense that not just firm heterogeneity, but firm dynamics, matter for the welfare gains from trade in our model.
Our discussion above, which compares our gains from trade to those in Sampson (2016), strongly suggests this point as well. When the GBM process is shut down and the model is re-calibrated, the gains from trade are still larger than what the ACR formula would imply, but they are far more modest and in line with what Sampson finds. Recall from Section 7.3 that much of the welfare gains arise because the equilibrium has an inefficiently low growth rate and that changes in trade costs change the growth rate. Using the decomposition from Section 7.3, we find that the different values of chi associated with different values of the GBM variance parameter affect the welfare gains from trade almost completely because of a change in the sensitivity of growth to the trade cost and not because of different levels of inefficiency.
This point is illustrated in the Connection to the Welfare Decomposition section below.
---
### <a name="delta"></a> Importance of the Exit Shock
We uniformly scale up and down the exit shock for different values of the adoption cost parameter chi. The large value of chi is ten percent larger than the baseline calibrated value. The small value of chi is ten percent smaller than the baseline calibrated value. All other parameter values are fixed, i.e., we do not re-calibrate the model when changing these parameter values.
##### Step 1. Compute outcomes for different Delta parameter values
This calls the matlab code to perform this operation. The code appendix below describes each of the different components. The line below executes matlab from the command line/terminal
```
matlab_cmd = '''"cd('src\calibration');robust_no_recalibrate_delta;"'''
!{matlab_exe_path} -batch {matlab_cmd}
```
##### Step 2. Create Figure 7 in PTW 2020
The code below reads in the output from matlab and then plots the results to create Figure 7 of PTW.
```
cnames = ['gold', 'gnew', 'gdiff', "welfare", 'delta']
mat = scipy.io.loadmat(path + "\\output\\robust\\delta\\norecalibrate_values_delta_1.mat")
nocaldf = pd.DataFrame(mat["record_values"])
nocaldf.columns = cnames
nocaldf["gdiff"] = -nocaldf["gdiff"]
nocaldf.sort_values(["delta"], inplace = True)
base_chi = str(round(mat["chi_value"][0][0],3))
mat = scipy.io.loadmat(path + "\\output\\robust\\delta\\norecalibrate_values_delta_0.9.mat")
nocaldf_lowchi = pd.DataFrame(mat["record_values"])
nocaldf_lowchi.columns = cnames
nocaldf_lowchi["gdiff"] = -nocaldf_lowchi["gdiff"]
nocaldf_lowchi.sort_values(["delta"], inplace = True)
low_chi = str(round(mat["chi_value"][0][0],3))
mat = scipy.io.loadmat(path + "\\output\\robust\\delta\\norecalibrate_values_delta_1.1.mat")
nocaldf_higchi = pd.DataFrame(mat["record_values"])
nocaldf_higchi.columns = cnames
nocaldf_higchi["gdiff"] = -nocaldf_higchi["gdiff"]
nocaldf_higchi.sort_values(["delta"], inplace = True)
hig_chi = str(round(mat["chi_value"][0][0],3))
fig, ax = plt.subplots(2,2,figsize = (12,10))
fig.tight_layout(pad = 6)
position = (0,0)
ax[position].plot(nocaldf.delta, 100*nocaldf["gdiff"], lw = 4)
ax[position].plot(nocaldf_lowchi.delta, 100*nocaldf_lowchi["gdiff"], lw = 4, color = 'r')
ax[position].plot(nocaldf_higchi.delta, 100*nocaldf_higchi["gdiff"], lw = 4, color = 'k')
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.01,0.04)
ax[position].set_ylim(0.15,0.4)
#ax[position].set_xlabel("\n GBM Variance Parameter", fontsize = 12)
ax[position].set_ylabel("\n Change in Productivity Growth \n Percentage Points", fontsize = 12)
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].vlines(0.020, 0.00, 0.234, # Set the value equall to the average
color='k', # make the color red
#label='Trade Shock', # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
ax[position].hlines(0.234, -0.001, 0.020 , # Set the value equall to the average
color='k', # make the color red
label="Calibrated Values", # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
##########################################################################################
position = (0,1)
ax[position].plot(nocaldf.delta, 100*nocaldf["gold"], lw = 4, label = "Calibrated χ = " + base_chi)
ax[position].plot(nocaldf_lowchi.delta, 100*nocaldf_lowchi["gold"],
lw = 4, color = 'red', label = "Large χ = " + low_chi)
ax[position].plot(nocaldf_higchi.delta, 100*nocaldf_higchi["gold"],
lw = 4, color = 'k', label = "Small χ = " + hig_chi)
#ax[1].plot(nocaldf_bigchi.upsilon, 100*nocaldf_bigchi["gold"], lw = 4, color = 'k', label = "Large 1/chi")
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.01,0.04)
ax[position].set_ylim(0.20,1.4)
#ax.set_ylim(0,0.40)
ax[position].set_xlabel("\n Exit Shock Parameter", fontsize = 12)
ax[position].set_ylabel("\n Initial SS Productivity Growth", fontsize = 12)
ax[position].vlines(0.02, 0.00, 0.79, # Set the value equall to the average
color='k', # make the color red
#label='Trade Shock', # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
ax[position].hlines(0.79, -0.001, 0.020 , # Set the value equall to the average
color='k', # make the color red
label="Calibrated Values", # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].legend(bbox_to_anchor=(0., -1.25, 1., .102),frameon = False, fontsize = 14, loc = 4)
#########################################################################################
position = (1,0)
series = "welfare"
ax[position].plot(nocaldf.delta, 100*nocaldf[series], lw = 4, label = "Calibrated χ")
ax[position].plot(nocaldf_lowchi.delta, 100*nocaldf_lowchi[series], lw = 4, color = 'red', label = "Large χ")
ax[position].plot(nocaldf_higchi.delta, 100*nocaldf_higchi[series], lw = 4, color = 'k', label = "Small χ")
#ax[1].plot(nocaldf_bigchi.upsilon, 100*nocaldf_bigchi["gold"], lw = 4, color = 'k', label = "Large 1/chi")
xticks = [0.01*float(item) for item in list(range(1,11,1)) ]
ax[position].set_xticks(xticks)
ax[position].set_xlim(0.01,0.04)
ax[position].set_ylim(6,20)
#ax.set_ylim(0,0.40)
ax[position].set_xlabel("\n Exit Shock Parameter", fontsize = 12)
ax[position].set_ylabel("\n Welfare Gain, Percent", fontsize = 12)
ax[position].spines["right"].set_visible(False)
ax[position].spines["top"].set_visible(False)
ax[position].vlines(0.02, 0, 11.18, # Set the value equall to the average
color='k', # make the color red
#label='Trade Shock', # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
ax[position].hlines(11.18, -0.001, 0.02 , # Set the value equall to the average
color='k', # make the color red
label="Calibrated Values", # this is the label (shows up in the legend)
linestyle='--',
lw = 3) # thickness of the line
###############################################################################################
position = (1,1)
ax[position].axis('off')
#plt.savefig(fig_path + "\\delta_chi.pdf", bbox_inches = "tight", dip = 3600)
plt.show()
```
##### Discussion of Figure from paper
Similarly to the GBM variance case, the delta parameter interacts with the adoption cost parameter to affect the calibrated value of chi. The right panel in Figure 7 illustrates this point by tracing out how the growth rate in the initial steady state varies with delta. For a given chi value, the steady state growth rate increases with delta; across chi values, smaller chi values (lower adoption costs) lead to higher growth rates. Figure 7 shows that larger delta values (i.e., more entry observed in the data) would induce the calibration to infer larger chi values. But because these two parameters have opposite effects on economic growth, the change in parameter values generates offsetting effects and leaves the model's elasticity of growth to trade costs unchanged.
The welfare gains from trade display a similar pattern. The bottom panel of Figure 7 shows that the welfare gains from trade increase with the value of delta, holding all else fixed. Again, however, larger values of delta generate larger calibrated values of chi, which offset to keep the welfare gains from trade largely unchanged. Re-calibrating the model holding fixed different values for delta verifies this observation—welfare only increases slightly as delta increases.
### <a name="dcomp"></a> Connection to the Welfare Decomposition
This calls Julia to perform the same welfare decomposition exercise as that done in ``section_7-3.ipynb``.
```
!jupyter nbconvert --to script ChiUpsilonDelta.ipynb
julia_command = '''ChiUpsilonDelta.jl'''
!{julia_path} {julia_command}
```
The decomposition from Section 7.3 shows how different values of $\chi$ affect the welfare gains from trade. We find that the different values of $\chi$ associated with different values of $\upsilon^{2}$ affect the welfare gains from trade almost completely because of a change in the sensitivity of growth to the trade cost $\left(\frac{\mathrm{d} f_{g}}{\mathrm{d} d}\right)$ (i.e., the semi-elasticity of growth changes substantially) and not because of different levels of inefficiency $\left(U_1 \frac{ \partial f_{c}}{ \partial g} + U_2\right)$ (which are relatively similar across different specifications).
|
github_jupyter
|
```
import numpy as np
import pandas as pd
from CSVUtils import *
import pickle
from os import path
import matplotlib.pyplot as plt
ROOT_DIR = "./from github/Stock-Trading-Environment/"
freq_list = [
{
"freq": 1,
"training": "10k",
"DIR": "./output/200",
"prefix": "BRZ+TW+NASDAQ-Training_detailed-ModelNo-10000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 1,
"training": "50k",
"DIR": "./output/201",
"prefix": "BRZ+TW+NASDAQ-Training_detailed-ModelNo-50000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 1,
"training": "100k",
"DIR": "./output/201",
"prefix": "BRZ+TW+NASDAQ-Training_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 1,
"training": "500k",
"DIR": "./output/202",
"prefix": "BRZ+TW+NASDAQ-Training_detailed-ModelNo-500000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
}
]
bnh_ratio={
"2015-2019": 3.0693999444726088,
"2001-2004": 1.0730432708521411,
"2007-2010": 1.8942480597911275,
}
ROOT_DIR = "./from github/Stock-Trading-Environment/"
freq_list = [
{
"freq": 7,
"training": "50k",
"DIR": "./output/204",
"prefix": "BRZ+TW+NASDAQ-Training-punish_detailed-ModelNo-50000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 7,
"training": "100k",
"DIR": "./output/205",
"prefix": "BRZ+TW+NASDAQ-Training-swap-nopunish-7d_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 7,
"training": "200k",
"DIR": "./output/204",
"prefix": "BRZ+TW+NASDAQ-Training-punish_detailed-ModelNo-200000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
]
freq_list = [
{
"freq": 20,
"training": "10k",
"DIR": "./output/306",
"prefix": "BRZ_TW_NASDAQ-Selected_Trans-withleakage+RSI-_detailed-ModelNo-10000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 20,
"training": "50k",
"DIR": "./output/306",
"prefix": "BRZ_TW_NASDAQ-Selected_Trans-withleakage+RSI-_detailed-ModelNo-50000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 20,
"training": "100k",
"DIR": "./output/306",
"prefix": "BRZ_TW_NASDAQ-Selected_Trans-withleakage+RSI-_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 20,
"training": "200k",
"DIR": "./output/306",
"prefix": "BRZ_TW_NASDAQ-Selected_Trans-withleakage+RSI-_detailed-ModelNo-200000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
]
freq_list = [
{
"freq": 20,
"training": "BRZ+TW+NASDAQ",
"DIR": "./output/205",
"prefix": "BRZ+TW+NASDAQ-Training-swap-nopunish-7d_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 20,
"training": "BRZ+TW+NASDAQ",
"DIR": "./output/205",
"prefix": "NASDA+QBRZ+TW-Training-swap-nopunish_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
{
"freq": 20,
"training": "BRZ+TW+NASDAQ",
"DIR": "./output/205",
"prefix": "TW+NASDAQ+BRZ-Training-swap-nopunish_detailed-ModelNo-100000-",
"start_date": pd.to_datetime("2015-01-01"),
"end_date": pd.to_datetime("2019-12-31"),
},
]
for experiment in freq_list:
nominal_rate_list = [] # Model/bnh
nominal_return_list = [] # profit/300k
actual_return_list = [] # actual profit/bnh
DIR = path.join("./from github/Stock-Trading-Environment",experiment["DIR"])
for i in range(10):
record = pickle.load(open(path.join(DIR,experiment["prefix"]+str(i)+".out"), "rb"))
df = pd.DataFrame(record)
final_nominal_profit = df['net_worth'].iloc[-1]-300000
bnh_profit = df['buyNhold_balance'].iloc[-1]-300000
nominal_profit_rate = (final_nominal_profit/bnh_profit) # How much better the model is compared to bnh
nominal_rate_list.append(nominal_profit_rate)
nominal_return_list.append(final_nominal_profit/300000)
actual_return_list.append(df['actual_profit'].iloc[-1]/df['buyNhold_balance'].iloc[-1])
nominal_rate_list=np.array(nominal_rate_list)
print(experiment['freq'], experiment['training'],
len(nominal_rate_list[(nominal_rate_list>=1)]),
len(nominal_rate_list[(nominal_rate_list>=0.50) & (nominal_rate_list<1)]),
len(nominal_rate_list[(nominal_rate_list>=0) & (nominal_rate_list<0.50)]),
len(nominal_rate_list[nominal_rate_list<0]),
np.mean(nominal_return_list),
np.mean(actual_return_list),
)
df
```
|
github_jupyter
|
# Federated learning: pretrained model
In this notebook, we provide a simple example of how to perform an experiment in a federated environment with the help of the Sherpa.ai Federated Learning framework. We are going to use a popular dataset and a pretrained model.
## The data
The framework provides some functions for loading the [Emnist](https://www.nist.gov/itl/products-and-services/emnist-dataset) digits dataset.
```
import shfl
database = shfl.data_base.Emnist()
train_data, train_labels, test_data, test_labels = database.load_data()
```
Let's inspect some properties of the loaded data.
```
print(len(train_data))
print(len(test_data))
print(type(train_data[0]))
train_data[0].shape
```
So, as we have seen, our dataset is composed of a set of matrices that are 28 by 28. Before starting with the federated scenario, we can take a look at a sample in the training data.
```
import matplotlib.pyplot as plt
plt.imshow(train_data[0])
```
We are going to simulate a federated learning scenario with a set of client nodes containing private data, and a central server that will be responsible for coordinating the different clients. But, first of all, we have to simulate the data contained in every client. In order to do that, we are going to use the previously loaded dataset. The assumption in this example is that the data is distributed as a set of independent and identically distributed random variables, with every node having approximately the same amount of data. There are a set of different possibilities for distributing the data. The distribution of the data is one of the factors that can have the most impact on a federated algorithm. Therefore, the framework has some of the most common distributions implemented, which allows you to easily experiment with different situations. In [Federated Sampling](./federated_learning_sampling.ipynb), you can dig into the options that the framework provides, at the moment.
```
iid_distribution = shfl.data_distribution.IidDataDistribution(database)
federated_data, test_data, test_label = iid_distribution.get_federated_data(num_nodes=20, percent=10)
```
That's it! We have created federated data from the Emnist dataset using 20 nodes and 10 percent of the available data. This data is distributed to a set of data nodes in the form of private data. Let's learn a little more about the federated data.
```
print(type(federated_data))
print(federated_data.num_nodes())
federated_data[0].private_data
```
As we can see, private data in a node is not directly accessible but the framework provides mechanisms to use this data in a machine learning model.
## The model
A federated learning algorithm is defined by a machine learning model, locally deployed in each node, that learns from the respective node's private data and an aggregating mechanism to aggregate the different model parameters uploaded by the client nodes to a central node. In this example, we will use a deep learning model built with Keras. The framework provides classes for using Tensorflow (see notebook [Federated learning Tensorflow Model](./federated_learning_basic_concepts_tensorflow.ipynb)) and Keras (see notebook [Federated Learning basic concepts](./federated_learning_basic_concepts.ipynb)) models in a federated learning scenario; your only job is to create a function acting as a model builder. Moreover, the framework provides classes that allow using pretrained Tensorflow and Keras models. In this example, we will use a pretrained Keras learning model.
```
import tensorflow as tf
# If you want to execute on a GPU, you must uncomment these two lines.
# physical_devices = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(physical_devices[0], True)
train_data = train_data.reshape(-1,28,28,1)
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1, input_shape=(28, 28, 1)))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'))
model.add(tf.keras.layers.Dropout(0.4))
model.add(tf.keras.layers.Conv2D(32, kernel_size=(3, 3), padding='same', activation='relu', strides=1))
model.add(tf.keras.layers.MaxPooling2D(pool_size=2, strides=2, padding='valid'))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.1))
model.add(tf.keras.layers.Dense(64, activation='relu'))
model.add(tf.keras.layers.Dense(10, activation='softmax'))
model.compile(optimizer="rmsprop", loss="categorical_crossentropy", metrics=["accuracy"])
model.fit(x=train_data, y=train_labels, batch_size=128, epochs=3, validation_split=0.2,
verbose=1, shuffle=False)
def model_builder():
pretrained_model = model
criterion = tf.keras.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.RMSprop()
metrics = [tf.keras.metrics.categorical_accuracy]
return shfl.model.DeepLearningModel(model=pretrained_model, criterion=criterion, optimizer=optimizer, metrics=metrics)
```
Now, the only piece missing is the aggregation operator. Nevertheless, the framework provides some aggregation operators that we can use. In the following piece of code, we define the federated aggregation mechanism. Moreover, we define the federated government based on the Keras learning model, the federated data, and the aggregation mechanism.
```
aggregator = shfl.federated_aggregator.FedAvgAggregator()
federated_government = shfl.federated_government.FederatedGovernment(model_builder, federated_data, aggregator)
```
If you want to see all the aggregation operators, you can check out the [Aggregation Operators](./federated_learning_basic_concepts_aggregation_operators.ipynb) notebook. Before running the algorithm, we want to apply a transformation to the data. A good practice is to define a federated operation that will ensure that the transformation is applied to the federated data in all the client nodes. We want to reshape the data, so we define the following FederatedTransformation.
```
import numpy as np
class Reshape(shfl.private.FederatedTransformation):
def apply(self, labeled_data):
labeled_data.data = np.reshape(labeled_data.data, (labeled_data.data.shape[0], labeled_data.data.shape[1], labeled_data.data.shape[2],1))
class CastFloat(shfl.private.FederatedTransformation):
def apply(self, labeled_data):
labeled_data.data = labeled_data.data.astype(np.float32)
shfl.private.federated_operation.apply_federated_transformation(federated_data, Reshape())
shfl.private.federated_operation.apply_federated_transformation(federated_data, CastFloat())
```
## Run the federated learning experiment
We are now ready to execute our federated learning algorithm.
```
test_data = np.reshape(test_data, (test_data.shape[0], test_data.shape[1], test_data.shape[2],1))
test_data = test_data.astype(np.float32)
federated_government.run_rounds(2, test_data, test_label)
```
|
github_jupyter
|
## Using Isolation Forest to Detect Criminally-Linked Properties
The goal of this notebook is to apply the Isolation Forest anomaly detection algorithm to the property data. The algorithm is particularly good at detecting anomalous data points in cases of extreme class imbalance. After normalizing the data and splitting into a training set and test set, I trained the first model.
Next, I manually selected a few features that, based on my experience investigating money-laundering and asset tracing, I thought would be most important and trained a model on just those.
```
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn import preprocessing
from sklearn.metrics import classification_report, confusion_matrix, recall_score, roc_auc_score
from sklearn.metrics import make_scorer, precision_score, accuracy_score
from sklearn.ensemble import IsolationForest
from sklearn.decomposition import PCA
import seaborn as sns
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
sns.set_style('dark')
```
#### Load Data and Remove Columns
```
# Read in the data
df = pd.read_hdf('../data/processed/bexar_true_labels.h5')
print("Number of properties:", len(df))
# Get criminal property rate
crim_prop_rate = 1 - (len(df[df['crim_prop']==0]) / len(df))
print("Rate is: {:.5%}".format(crim_prop_rate))
# Re-label the normal properties with 1 and the criminal ones with -1
df['binary_y'] = [1 if x==0 else -1 for x in df.crim_prop]
print(df.binary_y.value_counts())
# Normalize the data
X = df.iloc[:,1:-2]
X_norm = preprocessing.normalize(X)
y = df.binary_y
# Split the data into training and test
X_train_norm, X_test_norm, y_train_norm, y_test_norm = train_test_split(
X_norm, y, test_size=0.33, random_state=42
)
```
#### UDFs
```
# Define function to plot resulting confusion matrix
def plot_confusion_matrix(conf_matrix, title, classes=['criminally-linked', 'normal'],
cmap=plt.cm.Oranges):
"""Plot confusion matrix with heatmap and classification statistics."""
conf_matrix = conf_matrix.astype('float') / conf_matrix.sum(axis=1)[:, np.newaxis]
plt.figure(figsize=(8,8))
plt.imshow(conf_matrix, interpolation='nearest', cmap=cmap)
plt.title(title,fontsize=18)
plt.colorbar(pad=.12)
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45,fontsize=11)
plt.yticks(tick_marks, classes, rotation=45, fontsize=11)
fmt = '.4%'
thresh = conf_matrix.max() / 2.
for i, j in itertools.product(range(conf_matrix.shape[0]), range(conf_matrix.shape[1])):
plt.text(j, i, format(conf_matrix[i, j], fmt),
horizontalalignment="center",
verticalalignment="top",
fontsize=16,
color="white" if conf_matrix[i, j] > thresh else "black")
plt.ylabel('True label',fontsize=14, rotation=0)
plt.xlabel('Predicted label',fontsize=14)
# Function for returning the model metrics
def metrics_iforest(y_true,y_pred):
"""Return model metrics."""
print('Model recall is',recall_score(
y_true,
y_pred,
zero_division=0,
pos_label=-1
))
print('Model precision is',precision_score(
y_true,
y_pred,
zero_division=0,
pos_label=-1
))
print("Model AUC is", roc_auc_score(y_true, y_pred))
# Function for histograms of anomaly scores
def anomaly_plot(anomaly_scores,anomaly_scores_list,title):
"""Plot histograms of anomaly scores."""
plt.figure(figsize=[15,9])
plt.subplot(211)
plt.hist(anomaly_scores,bins=100,log=False,color='royalblue')
for xc in anomaly_scores_list:
plt.axvline(x=xc,color='red',linestyle='--',linewidth=0.5,label='criminally-linked property')
plt.title(title,fontsize=16)
handles, labels = plt.gca().get_legend_handles_labels()
by_label = dict(zip(labels, handles))
plt.legend(by_label.values(), by_label.keys(),fontsize=14)
plt.ylabel('Number of properties',fontsize=13)
plt.subplot(212)
plt.hist(anomaly_scores,bins=100,log=True,color='royalblue')
for xc in anomaly_scores_list:
plt.axvline(x=xc,color='red',linestyle='--',linewidth=0.5,label='criminally-linked property')
plt.xlabel('Anomaly score',fontsize=13)
plt.ylabel('Number of properties',fontsize=13)
plt.title('{} (Log Scale)'.format(title),fontsize=16)
plt.show()
```
#### Gridsearch
Isolation Forest is fairly robust to parameter changes, but changes in the contamination rate affect performance. I will gridsearch over a range of contamination values from 0.01 to 0.25 in steps of 0.05.
```
# Set what metrics to evaluate predictions
scoring = {
'AUC': 'roc_auc',
'Recall': make_scorer(recall_score,pos_label=-1),
'Precision': make_scorer(precision_score,pos_label=-1)
}
gs = GridSearchCV(
IsolationForest(max_samples=0.25, random_state=42,n_estimators=100),
param_grid={'contamination': np.arange(0.01, 0.25, 0.05)},
scoring=scoring,
refit='Recall',
verbose=0,
cv=3
)
# Fit to training data
gs.fit(X_train_norm,y_train_norm)
print(gs.best_params_)
```
##### Model Performance on Training Data
```
y_pred_train_gs = gs.predict(X_train_norm)
metrics_iforest(y_train_norm,y_pred_train_gs)
conf_matrix = confusion_matrix(y_train_norm, y_pred_train_gs)
print(conf_matrix)
plot_confusion_matrix(conf_matrix, title='Isolation Forest Confusion Matrix on Training Data')
```
Model recall is decent, but the precision is quite poor; the model is labeling >20% of innocent properties as criminal.
##### Model Performance on Test Data
```
y_pred_test_gs = gs.predict(X_test_norm)
metrics_iforest(y_test_norm,y_pred_test_gs)
conf_matrix = confusion_matrix(y_test_norm, y_pred_test_gs)
print(conf_matrix)
plot_confusion_matrix(conf_matrix, title='Isolation Forest Confusion Matrix on Test Data')
```
Similar to its performance on the training data, the model produces a tremendous number of false positives. While better than false negatives, were this model to be implemented to screen properties, it would waste a lot of manual labor on checking falsely-labeled properties.
Given the context of detecting money-laundering and ill-gotten funds, more false positives are acceptable to reduce false negatives, but the model produces far too many.
#### Visualize Distribution of Anomaly Scores
Sklearn's Isolation Forest provides anomaly scores for each property where the lower the score, the more anomalous the datapoint is.
##### Training Data
```
# Grab anomaly scores for criminally-linked properties
train_df = pd.DataFrame(X_train_norm)
y_train_series = y_train_norm.reset_index()
train_df['y_value'] = y_train_series.binary_y
train_df['anomaly_scores'] = gs.decision_function(X_train_norm)
anomaly_scores_list = train_df[train_df.y_value==-1]['anomaly_scores']
print("Mean score for outlier properties:",np.mean(anomaly_scores_list))
print("Mean score for normal properties:",np.mean(train_df[train_df.y_value==1]['anomaly_scores']))
anomaly_plot(train_df['anomaly_scores'],
anomaly_scores_list,
title='Distribution of Anomaly Scores across Training Data')
```
##### Test Data
```
test_df = pd.DataFrame(X_test_norm)
y_test_series = y_test_norm.reset_index()
test_df['y_value'] = y_test_series.binary_y
test_df['anomaly_scores'] = gs.decision_function(X_test_norm)
anomaly_scores_list_test = test_df[test_df.y_value==-1]['anomaly_scores']
print("Mean score for outlier properties:",np.mean(anomaly_scores_list_test))
print("Mean score for normal properties:",np.mean(test_df[test_df.y_value==1]['anomaly_scores']))
anomaly_plot(test_df['anomaly_scores'],
anomaly_scores_list_test,
title='Distribution of Anomaly Scores across Test Data'
)
```
The top plots give a sense of how skewed the distribution is and how much lower the anomaly scores for the criminally-linked properties are compared to the broader population. The log-scale histogram highlights just how many properties have quite low anomaly scores, which are returned as false positives.
#### Model with Select Features
Since Isolation Forest does not expose `feature_importances_`, I wanted to see if I could use my background in investigating money laundering to select a few features that would be the best indicators of "abnormal" properties.
```
# Grab specific columns
X_trim = X[['partial_owner','just_established_owner',
'foreign_based_owner','out_of_state_owner',
'owner_legal_person','owner_likely_company',
'owner_owns_multiple','two_gto_reqs']]
# Normalize
X_trim_norm = preprocessing.normalize(X_trim)
# Split the data into train and test
X_train_trim, X_test_trim, y_train_trim, y_test_trim = train_test_split(
X_trim_norm, y, test_size=0.33, random_state=42
)
scoring = {
'AUC': 'roc_auc',
'Recall': make_scorer(recall_score, pos_label=-1),
'Precision': make_scorer(precision_score, pos_label=-1)
}
gs_trim = GridSearchCV(
IsolationForest(max_samples=0.25, random_state=42,n_estimators=100),
param_grid={'contamination': np.arange(0.01, 0.25, 0.05)},
scoring=scoring,
refit='Recall',
verbose=0,
cv=3
)
# Fit to training data
gs_trim.fit(X_train_trim,y_train_trim)
print(gs_trim.best_params_)
```
##### Training Data
```
y_pred_train_gs_trim = gs_trim.predict(X_train_trim)
metrics_iforest(y_train_trim,y_pred_train_gs_trim)
conf_matrix = confusion_matrix(y_train_trim, y_pred_train_gs_trim)
print(conf_matrix)
plot_confusion_matrix(conf_matrix, title='Conf Matrix on Training Data with Select Features')
```
Reducing the data to select features lowers the model's true positives by two properties, but massively improves the false positive count (753 down to 269). Overall, model precision is still poor.
##### Test Data
```
y_pred_test_trim = gs_trim.predict(X_test_trim)
metrics_iforest(y_test_trim,y_pred_test_trim)
conf_matrix = confusion_matrix(y_test_trim, y_pred_test_trim)
print(conf_matrix)
plot_confusion_matrix(conf_matrix, title='Conf Matrix on Test Data with Select Features')
```
The model trained on select features performs better than the first on the test data both in terms of correct labels and reducing false positives.
#### Final Notes
- For both models, recall is strong, indicating the model is able to detect something anomalous about the criminal properties. However, model precision is awful, meaning it does so at the expense of many false positives.
- Selecting features based on my experience in the field improves model precision.
- There are many properties that the models find more "anomalous" than the true positives. This could indicate the criminals have done a good job of making their properties appear relatively "innocent" in the broad spectrum of residential property ownership in Bexar County.
|
github_jupyter
|
<img src="../images/aeropython_logo.png" alt="AeroPython" style="width: 300px;"/>
# Slicing arrays
_So far we know how to create arrays and perform some operations with them; however, we have not yet learned how to access specific elements of an array_
## One-dimensional arrays
```
# Assumed example array (the original cell was left blank to fill in); NumPy imported earlier as np
arr = np.arange(10)
# Accessing the first element
arr[0]
# Accessing the last one
arr[-1]
```
##### __Warning!__
NumPy returns __views__ of the slice we request, not __copies__. This means we must pay close attention to this behavior:
The same thing happens the other way around:
`a` points to the memory addresses where the selected elements of the array `arr` are stored; it does not copy their values, unless we explicitly do:
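(The original demonstration cells are not present in this excerpt; below is a minimal sketch of the behavior described above, assuming the `arr` array defined earlier.)
```
# `a` is a view: it shares memory with `arr`
a = arr[0:2]
a[0] = 99
print(arr)            # arr[0] has changed as well
# An explicit copy breaks the link
b = arr[0:2].copy()
b[0] = -1
print(arr)            # arr is unchanged this time
```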
## Two-dimensional arrays
## Array slices
So far we have seen how to access individual elements of the array, but the power of NumPy lies in being able to access whole slices. For this, the syntax `start:stop:step` is used: if any of these values is omitted, it takes a default value. Let's look at some examples:
```
# Assumed example matrix (the original cell was left blank to fill in)
M = np.arange(36).reshape(6, 6)
# From the second to the third row, inclusive
M[1:3]
# Up to (not including) the third row and from the second to the fifth column, stepping by two
M[1:2, 1:5:2]
#M[1:2:1, 1:5:2] # Equivalent
```
##### Exercise
Draw a chessboard using the `plt.matshow` function.
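One possible solution sketch (not part of the original notebook, which leaves the exercise blank):
```
import numpy as np
import matplotlib.pyplot as plt

board = np.zeros((8, 8))
board[1::2, ::2] = 1   # odd rows, even columns
board[::2, 1::2] = 1   # even rows, odd columns
plt.matshow(board, cmap=plt.cm.gray_r)
plt.show()
```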
---
___We have learned:___
* How to access elements of an array
* That slices do not return copies, but views
__I want more!__ Some links:
Some links on Pybonacci:
* [Cómo crear matrices en Python con NumPy](http://pybonacci.wordpress.com/2012/06/11/como-crear-matrices-en-python-con-numpy/) (How to create matrices in Python with NumPy).
* [Números aleatorios en Python con NumPy y SciPy](http://pybonacci.wordpress.com/2013/01/11/numeros-aleatorios-en-python-con-numpy-y-scipy/) (Random numbers in Python with NumPy and SciPy).
Some links on other sites:
* [100 numpy exercises](http://www.labri.fr/perso/nrougier/teaching/numpy.100/index.html). You may only be able to do the first few for now, but don't worry, you will soon know more...
* [NumPy and IPython SciPy 2013 Tutorial](http://conference.scipy.org/scipy2013/tutorial_detail.php?id=100).
* [NumPy and SciPy documentation](http://docs.scipy.org/doc/).
---
<br/>
#### <h4 align="right">Follow us on Twitter!
<br/>
###### <a href="https://twitter.com/AeroPython" class="twitter-follow-button" data-show-count="false">Follow @AeroPython</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script>
<br/>
###### This notebook was created by: Juan Luis Cano and Álex Sáez
<br/>
##### <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es"><img alt="Creative Commons License" style="border-width:0" src="http://i.creativecommons.org/l/by/4.0/88x31.png" /></a><br /><span xmlns:dct="http://purl.org/dc/terms/" property="dct:title">AeroPython Course</span> by <span xmlns:cc="http://creativecommons.org/ns#" property="cc:attributionName">Juan Luis Cano Rodriguez and Alejandro Sáez Mollejo</span> is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by/4.0/deed.es">Creative Commons Attribution 4.0 International License</a>.
---
_The following cells contain the Notebook's configuration_
_To display and use the Twitter links, the notebook must be run as [trusted](http://ipython.org/ipython-doc/dev/notebook/security.html)_
File > Trusted Notebook
```
# This cell applies the notebook style
from IPython.core.display import HTML
css_file = '../styles/aeropython.css'
HTML(open(css_file, "r").read())
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D1_ModelTypes/W1D1_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 1, Day 1, Tutorial 2
# Model Types: "How" models
__Content creators:__ Matt Laporte, Byron Galbraith, Konrad Kording
__Content reviewers:__ Dalin Guo, Aishwarya Balwani, Madineh Sarvestani, Maryam Vaziri-Pashkam, Michael Waskom
___
# Tutorial Objectives
This is tutorial 2 of a 3-part series on different flavors of models used to understand neural data. In this tutorial we will explore models that can potentially explain *how* the spiking data we have observed is produced.
To understand the mechanisms that give rise to the neural data we saw in Tutorial 1, we will build simple neuronal models and compare their spiking response to real data. We will:
- Write code to simulate a simple "leaky integrate-and-fire" neuron model
- Make the model more complicated — but also more realistic — by adding more physiologically-inspired details
```
#@title Video 1: "How" models
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='PpnagITsb3E', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
# Setup
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
#@title Figure Settings
import ipywidgets as widgets #interactive display
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
#@title Helper Functions
def histogram(counts, bins, vlines=(), ax=None, ax_args=None, **kwargs):
"""Plot a step histogram given counts over bins."""
if ax is None:
_, ax = plt.subplots()
# duplicate the first element of `counts` to match bin edges
counts = np.insert(counts, 0, counts[0])
ax.fill_between(bins, counts, step="pre", alpha=0.4, **kwargs) # area shading
ax.plot(bins, counts, drawstyle="steps", **kwargs) # lines
for x in vlines:
ax.axvline(x, color='r', linestyle='dotted') # vertical line
if ax_args is None:
ax_args = {}
# heuristically set max y to leave a bit of room
ymin, ymax = ax_args.get('ylim', [None, None])
if ymax is None:
ymax = np.max(counts)
if ax_args.get('yscale', 'linear') == 'log':
ymax *= 1.5
else:
ymax *= 1.1
if ymin is None:
ymin = 0
if ymax == ymin:
ymax = None
ax_args['ylim'] = [ymin, ymax]
ax.set(**ax_args)
ax.autoscale(enable=False, axis='x', tight=True)
def plot_neuron_stats(v, spike_times):
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 5))
# membrane voltage trace
ax1.plot(v[0:100])
ax1.set(xlabel='Time', ylabel='Voltage')
# plot spike events
for x in spike_times:
if x >= 100:
break
ax1.axvline(x, color='red')
# ISI distribution
if len(spike_times)>1:
isi = np.diff(spike_times)
n_bins = np.arange(isi.min(), isi.max() + 2) - .5
counts, bins = np.histogram(isi, n_bins)
vlines = []
if len(isi) > 0:
vlines = [np.mean(isi)]
xmax = max(20, int(bins[-1])+5)
histogram(counts, bins, vlines=vlines, ax=ax2, ax_args={
'xlabel': 'Inter-spike interval',
'ylabel': 'Number of intervals',
'xlim': [0, xmax]
})
else:
ax2.set(xlabel='Inter-spike interval',
ylabel='Number of intervals')
plt.show()
```
# Section 1: The Linear Integrate-and-Fire Neuron
How does a neuron spike?
A neuron charges and discharges an electric field across its cell membrane. The state of this electric field can be described by the _membrane potential_. The membrane potential rises due to excitation of the neuron, and when it reaches a threshold a spike occurs. The potential resets, and must rise to a threshold again before the next spike occurs.
One of the simplest models of spiking neuron behavior is the linear integrate-and-fire model neuron. In this model, the neuron increases its membrane potential $V_m$ over time in response to excitatory input currents $I$ scaled by some factor $\alpha$:
\begin{align}
dV_m = {\alpha}I
\end{align}
Once $V_m$ reaches a threshold value a spike is produced, $V_m$ is reset to a starting value, and the process continues.
Here, we will take the starting and threshold potentials as $0$ and $1$, respectively. So, for example, if $\alpha I=0.1$ is constant---that is, the input current is constant---then $dV_m=0.1$, and at each timestep the membrane potential $V_m$ increases by $0.1$ until after $(1-0)/0.1 = 10$ timesteps it reaches the threshold and resets to $V_m=0$, and so on.
Note that we define the membrane potential $V_m$ as a scalar: a single real (or floating point) number. However, a biological neuron's membrane potential will not be exactly constant at all points on its cell membrane at a given time. We could capture this variation with a more complex model (e.g. with more numbers). Do we need to?
The proposed model is a 1D simplification. There are many details we could add to it, to preserve different parts of the complex structure and dynamics of a real neuron. If we were interested in small or local changes in the membrane potential, our 1D simplification could be a problem. However, we'll assume an idealized "point" neuron model for our current purpose.
#### Spiking Inputs
Given our simplified model for the neuron dynamics, we still need to consider what form the input $I$ will take. How should we specify the firing behavior of the presynaptic neuron(s) providing the inputs to our model neuron?
Unlike in the simple example above, where $\alpha I=0.1$, the input current is generally not constant. Physical inputs tend to vary with time. We can describe this variation with a distribution.
We'll assume the input current $I$ over a timestep is due to equal contributions from a non-negative ($\ge 0$) integer number of input spikes arriving in that timestep. Our model neuron might integrate currents from 3 input spikes in one timestep, and 7 spikes in the next timestep. We should see similar behavior when sampling from our distribution.
Given no other information about the input neurons, we will also assume that the distribution has a mean (i.e. mean rate, or number of spikes received per timestep), and that the spiking events of the input neuron(s) are independent in time. Are these reasonable assumptions in the context of real neurons?
A suitable distribution given these assumptions is the Poisson distribution, which we'll use to model $I$:
\begin{align}
I \sim \mathrm{Poisson}(\lambda)
\end{align}
where $\lambda$ is the mean of the distribution: the average rate of spikes received per timestep.
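As a quick, optional sanity check (not part of the original tutorial), we can draw samples from `scipy.stats.poisson` and confirm they are non-negative integer counts with a mean close to $\lambda$; the rate of 10 below is an arbitrary choice.
```
from scipy import stats
import numpy as np

samples = stats.poisson(10).rvs(10000)   # 10,000 simulated timesteps at rate lambda = 10
print(samples[:10])                      # non-negative integer spike counts per timestep
print(np.mean(samples))                  # should be close to 10
```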
### Exercise 1: Compute $dV_m$
For your first exercise, you will write the code to compute the change in voltage $dV_m$ (per timestep) of the linear integrate-and-fire model neuron. The rest of the code to handle numerical integration is provided for you, so you just need to fill in a definition for `dv` in the `lif_neuron` function below. The value of $\lambda$ for the Poisson random variable is given by the function argument `rate`.
The [`scipy.stats`](https://docs.scipy.org/doc/scipy/reference/stats.html) package is a great resource for working with and sampling from various probability distributions. We will use the `scipy.stats.poisson` class and its method `rvs` to produce Poisson-distributed random samples. In this tutorial, we have imported this package with the alias `stats`, so you should refer to it in your code as `stats.poisson`.
```
def lif_neuron(n_steps=1000, alpha=0.01, rate=10):
""" Simulate a linear integrate-and-fire neuron.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
rate (int): The mean rate of incoming spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
################################################################################
# Students: compute dv, then comment out or remove the next line
raise NotImplementedError("Excercise: compute the change in membrane potential")
################################################################################
for i in range(1, n_steps):
dv = ...
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
# Set random seed (for reproducibility)
np.random.seed(12)
# Uncomment these lines after completing the lif_neuron function
# v, spike_times = lif_neuron()
# plot_neuron_stats(v, spike_times)
# to_remove solution
def lif_neuron(n_steps=1000, alpha=0.01, rate=10):
""" Simulate a linear integrate-and-fire neuron.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
rate (int): The mean rate of incoming spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = alpha * exc[i]
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
# Set random seed (for reproducibility)
np.random.seed(12)
v, spike_times = lif_neuron()
with plt.xkcd():
plot_neuron_stats(v, spike_times)
```
## Interactive Demo: Linear-IF neuron
Like last time, you can now explore how various parameters of the LIF model influence the ISI distribution.
```
#@title
#@markdown You don't need to worry about how the code works – but you do need to **run the cell** to enable the sliders.
def _lif_neuron(n_steps=1000, alpha=0.01, rate=10):
exc = stats.poisson(rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = alpha * exc[i]
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
@widgets.interact(
n_steps=widgets.FloatLogSlider(1000.0, min=2, max=4),
alpha=widgets.FloatLogSlider(0.01, min=-2, max=-1),
rate=widgets.IntSlider(10, min=5, max=20)
)
def plot_lif_neuron(n_steps=1000, alpha=0.01, rate=10):
v, spike_times = _lif_neuron(int(n_steps), alpha, rate)
plot_neuron_stats(v, spike_times)
#@title Video 2: Linear-IF models
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='QBD7kulhg4U', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
# Section 2: Inhibitory signals
Our linear integrate-and-fire neuron from the previous section was indeed able to produce spikes. However, our ISI histogram doesn't look much like empirical ISI histograms seen in Tutorial 1, which had an exponential-like shape. What is our model neuron missing, given that it doesn't behave like a real neuron?
In the previous model we only considered excitatory behavior -- the only way the membrane potential could decrease was upon a spike event. We know, however, that there are other factors that can drive $V_m$ down. First is the natural tendency of the neuron to return to some steady state or resting potential. We can update our previous model as follows:
\begin{align}
dV_m = -{\beta}V_m + {\alpha}I
\end{align}
where $V_m$ is the current membrane potential and $\beta$ is some leakage factor. This is a basic form of the popular Leaky Integrate-and-Fire model neuron (for a more detailed discussion of the LIF Neuron, see the Appendix).
We also know that in addition to excitatory presynaptic neurons, we can have inhibitory presynaptic neurons as well. We can model these inhibitory neurons with another Poisson random variable:
\begin{align}
I = I_{exc} - I_{inh} \\
I_{exc} \sim \mathrm{Poisson}(\lambda_{exc}) \\
I_{inh} \sim \mathrm{Poisson}(\lambda_{inh})
\end{align}
where $\lambda_{exc}$ and $\lambda_{inh}$ are the average spike rates (per timestep) of the excitatory and inhibitory presynaptic neurons, respectively.
### Exercise 2: Compute $dV_m$ with inhibitory signals
For your second exercise, you will again write the code to compute the change in voltage $dV_m$, though now of the LIF model neuron described above. Like last time, the rest of the code needed to handle the neuron dynamics are provided for you, so you just need to fill in a definition for `dv` below.
```
def lif_neuron_inh(n_steps=1000, alpha=0.5, beta=0.1, exc_rate=10, inh_rate=10):
""" Simulate a simplified leaky integrate-and-fire neuron with both excitatory
and inhibitory inputs.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
beta (float): The membrane potential leakage factor
exc_rate (int): The mean rate of the incoming excitatory spikes
inh_rate (int): The mean rate of the incoming inhibitory spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(exc_rate).rvs(n_steps)
inh = stats.poisson(inh_rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
###############################################################################
# Students: compute dv, then comment out or remove the next line
raise NotImplementedError("Excercise: compute the change in membrane potential")
################################################################################
for i in range(1, n_steps):
dv = ...
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
# Set random seed (for reproducibility)
np.random.seed(12)
# Uncomment these lines to make the plot once you've completed the function
#v, spike_times = lif_neuron_inh()
#plot_neuron_stats(v, spike_times)
# to_remove solution
def lif_neuron_inh(n_steps=1000, alpha=0.5, beta=0.1, exc_rate=10, inh_rate=10):
""" Simulate a simplified leaky integrate-and-fire neuron with both excitatory
and inhibitory inputs.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
beta (float): The membrane potential leakage factor
exc_rate (int): The mean rate of the incoming excitatory spikes
inh_rate (int): The mean rate of the incoming inhibitory spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(exc_rate).rvs(n_steps)
inh = stats.poisson(inh_rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = -beta * v[i-1] + alpha * (exc[i] - inh[i])
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
# Set random seed (for reproducibility)
np.random.seed(12)
v, spike_times = lif_neuron_inh()
with plt.xkcd():
plot_neuron_stats(v, spike_times)
```
## Interactive Demo: LIF + inhibition neuron
```
#@title
#@markdown **Run the cell** to enable the sliders.
def _lif_neuron_inh(n_steps=1000, alpha=0.5, beta=0.1, exc_rate=10, inh_rate=10):
""" Simulate a simplified leaky integrate-and-fire neuron with both excitatory
and inhibitory inputs.
Args:
n_steps (int): The number of time steps to simulate the neuron's activity.
alpha (float): The input scaling factor
beta (float): The membrane potential leakage factor
exc_rate (int): The mean rate of the incoming excitatory spikes
inh_rate (int): The mean rate of the incoming inhibitory spikes
"""
# precompute Poisson samples for speed
exc = stats.poisson(exc_rate).rvs(n_steps)
inh = stats.poisson(inh_rate).rvs(n_steps)
v = np.zeros(n_steps)
spike_times = []
for i in range(1, n_steps):
dv = -beta * v[i-1] + alpha * (exc[i] - inh[i])
v[i] = v[i-1] + dv
if v[i] > 1:
spike_times.append(i)
v[i] = 0
return v, spike_times
@widgets.interact(n_steps=widgets.FloatLogSlider(1000.0, min=2.5, max=4),
alpha=widgets.FloatLogSlider(0.5, min=-1, max=1),
beta=widgets.FloatLogSlider(0.1, min=-1, max=0),
exc_rate=widgets.IntSlider(12, min=10, max=20),
inh_rate=widgets.IntSlider(12, min=10, max=20))
def plot_lif_neuron(n_steps=1000, alpha=0.5, beta=0.1, exc_rate=10, inh_rate=10):
v, spike_times = _lif_neuron_inh(int(n_steps), alpha, beta, exc_rate, inh_rate)
plot_neuron_stats(v, spike_times)
#@title Video 3: LIF + inhibition
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='Aq7JrxRkn2w', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
# Summary
In this tutorial we gained some intuition for the mechanisms that produce the observed behavior in our real neural data. First, we built a simple neuron model with excitatory input and saw that its behavior, measured using the ISI distribution, did not match our real neurons. We then improved our model by adding leakiness and inhibitory input. The behavior of this balanced model was much closer to the real neural data.
# Bonus
### Why do neurons spike?
A neuron stores energy in an electric field across its cell membrane, by controlling the distribution of charges (ions) on either side of the membrane. This energy is rapidly discharged to generate a spike when the field potential (or membrane potential) crosses a threshold. The membrane potential may be driven toward or away from this threshold, depending on inputs from other neurons: excitatory or inhibitory, respectively. The membrane potential tends to revert to a resting potential, for example due to the leakage of ions across the membrane, so that reaching the spiking threshold depends not only on the amount of input ever received following the last spike, but also the timing of the inputs.
The storage of energy by maintaining a field potential across an insulating membrane can be modeled by a capacitor. The leakage of charge across the membrane can be modeled by a resistor. This is the basis for the leaky integrate-and-fire neuron model.
### The LIF Model Neuron
The full equation for the LIF neuron is
\begin{align}
C_{m}\frac{dV_m}{dt} = -(V_m - V_{rest})/R_{m} + I
\end{align}
where $C_m$ is the membrane capacitance, $R_m$ is the membrane resistance, $V_{rest}$ is the resting potential, and $I$ is some input current (from other neurons, an electrode, ...).
In our above examples we set many of these parameters to convenient values ($C_m = R_m = dt = 1$, $V_{rest} = 0$) to focus more on the general behavior of the model. However, these too can be manipulated to achieve different dynamics, or to ensure the dimensions of the problem are preserved between simulation units and experimental units (e.g. with $V_m$ given in millivolts, $R_m$ in megaohms, $t$ in milliseconds).
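As a rough, optional sketch (not from the original tutorial), the full equation can be integrated with a forward-Euler step; all parameter values below are illustrative assumptions chosen only so that the neuron spikes.
```
import numpy as np

def lif_physical(T=0.1, dt=1e-4, C_m=2e-10, R_m=1e8,
                 V_rest=-0.07, V_thresh=-0.05, I=3e-10):
    """Forward-Euler integration of C_m dV/dt = -(V - V_rest)/R_m + I (SI units)."""
    n_steps = int(T / dt)
    v = np.full(n_steps, V_rest)
    spike_times = []
    for i in range(1, n_steps):
        dv = dt / C_m * (-(v[i - 1] - V_rest) / R_m + I)
        v[i] = v[i - 1] + dv
        if v[i] > V_thresh:           # threshold crossing -> spike
            spike_times.append(i * dt)
            v[i] = V_rest             # reset to the resting potential
    return v, spike_times

v, spikes = lif_physical()
print(f"{len(spikes)} spikes in 100 ms of simulated time")
```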
|
github_jupyter
|
## A quick Gender Recognition model
Grabbed from the [nlpforhackers](https://nlpforhackers.io/introduction-machine-learning/) webpage.
1. First, convert the dataset into a numpy array, keeping only the name and gender columns
2. Define a `features` function that extracts character-level features (first/last letters) from each name
3. Vectorize the `features` function so it can be applied to arrays of names
4. Shuffle the data, make the train/test split, and sanity-check it by printing the sizes of the splits
5. Transform lists of feature-value mappings to vectors. (When feature values are strings, this transformer will do a binary one-hot (aka one-of-K) coding: one boolean-valued feature is constructed for each of the possible string values that the feature can take on)
6. Train a decision tree classifier on this and save the model as a pickle file
```
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.feature_extraction import DictVectorizer
from sklearn.tree import DecisionTreeClassifier
names = pd.read_csv('names_dataset.csv')
print(names.head(10))
print("%d names in dataset" % len(names))
# Get the data out of the dataframe into a numpy matrix and keep only the name and gender columns
names = names.to_numpy()[:, 1:]  # as_matrix() was removed in newer pandas
print(names)
# We're using 90% of the data for training
TRAIN_SPLIT = 0.90
def features(name):
name = name.lower()
return {
'first-letter': name[0], # First letter
'first2-letters': name[0:2], # First 2 letters
'first3-letters': name[0:3], # First 3 letters
'last-letter': name[-1], # Last letter
'last2-letters': name[-2:], # Last 2 letters
'last3-letters': name[-3:], # Last 3 letters
}
# Feature Extraction
print(features("Alex"))
# Vectorize the features function
features = np.vectorize(features)
print(features(["Anna", "Hannah", "Paul"]))
# [ array({'first2-letters': 'an', 'last-letter': 'a', 'first-letter': 'a', 'last2-letters': 'na', 'last3-letters': 'nna', 'first3-letters': 'ann'}, dtype=object)
# array({'first2-letters': 'ha', 'last-letter': 'h', 'first-letter': 'h', 'last2-letters': 'ah', 'last3-letters': 'nah', 'first3-letters': 'han'}, dtype=object)
# array({'first2-letters': 'pa', 'last-letter': 'l', 'first-letter': 'p', 'last2-letters': 'ul', 'last3-letters': 'aul', 'first3-letters': 'pau'}, dtype=object)]
# Extract the features for the whole dataset
X = features(names[:, 0]) # X contains the features
# Get the gender column
y = names[:, 1] # y contains the targets
# Test if we built the dataset correctly
print("\n\nName: %s, features=%s, gender=%s" % (names[0][0], X[0], y[0]))
X, y = shuffle(X, y)
X_train, X_test = X[:int(TRAIN_SPLIT * len(X))], X[int(TRAIN_SPLIT * len(X)):]
y_train, y_test = y[:int(TRAIN_SPLIT * len(y))], y[int(TRAIN_SPLIT * len(y)):]
# Check to see if the datasets add up
print(len(X_train), len(X_test), len(y_train), len(y_test))
# Transforms lists of feature-value mappings to vectors.
vectorizer = DictVectorizer()
vectorizer.fit(X_train)
transformed = vectorizer.transform(features(["Mary", "John"]))
print(transformed)
print(type(transformed)) # <class 'scipy.sparse.csr.csr_matrix'>
print(transformed.toarray()[0][12]) # 1.0
print(vectorizer.feature_names_[12]) # first-letter=m
clf = DecisionTreeClassifier(criterion = 'gini')
clf.fit(vectorizer.transform(X_train), y_train)
# Accuracy on training set
print(clf.score(vectorizer.transform(X_train), y_train))
# Accuracy on test set
print(clf.score(vectorizer.transform(X_test), y_test))
# Therefore, we are getting a decent result from the names
print(clf.predict(vectorizer.transform(features(["SMYSLOV", "CHASTITY", "MISS PERKY", "SHARON", "ALONSO", "SECONDARY OFFICER"]))))
# Save the model using pickle
import pickle
pickle_out = open("gender_recog.pickle", "wb")
pickle.dump(clf, pickle_out)
pickle_out.close()
```
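A short usage sketch (not part of the original post): reloading the pickled classifier in a later session. It assumes `gender_recog.pickle` is on disk and that the same `features` function and fitted `vectorizer` from above are available; the example names are hypothetical.
```
import pickle

with open("gender_recog.pickle", "rb") as f:
    clf_loaded = pickle.load(f)

# Predict genders for a couple of new names
print(clf_loaded.predict(vectorizer.transform(features(["Maria", "Peter"]))))
```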
|
github_jupyter
|
# Get started
<a href="https://mybinder.org/v2/gh/tinkoff-ai/etna/master?filepath=examples/get_started.ipynb">
<img src="https://mybinder.org/badge_logo.svg" align='left'>
</a>
This notebook contains the simple examples of time series forecasting pipeline
using ETNA library.
**Table of Contents**
* [Creating TSDataset](#chapter1)
* [Plotting](#chapter2)
* [Forecast single time series](#chapter3)
* [Simple forecast](#section_3_1)
* [Prophet](#section_3_2)
* [Catboost](#section_3_3)
* [Forecast multiple time series](#chapter4)
* [Pipeline](#chapter5)
## 1. Creating TSDataset <a class="anchor" id="chapter1"></a>
Let's load and look at the dataset
```
import pandas as pd
original_df = pd.read_csv("data/monthly-australian-wine-sales.csv")
original_df.head()
```
ETNA is strict about the data format:
* the column we want to predict should be called `target`
* the column with datetime data should be called `timestamp`
* because etna is always ready to work with multiple time series, a `segment` column is also compulsory
Our library works with the special data structure TSDataset. So, before starting anything, we need to convert the classical DataFrame to TSDataset.
Let's rename first
```
original_df["timestamp"] = pd.to_datetime(original_df["month"])
original_df["target"] = original_df["sales"]
original_df.drop(columns=["month", "sales"], inplace=True)
original_df["segment"] = "main"
original_df.head()
```
Time to convert to TSDataset!
To do this, we initially need to convert the classical DataFrame to the special format.
```
from etna.datasets.tsdataset import TSDataset
df = TSDataset.to_dataset(original_df)
df.head()
```
Now we can construct the TSDataset.
Additionally to passing dataframe we should specify frequency of our data.
In this case it is monthly data.
```
ts = TSDataset(df, freq="1M")
```
Oops. The timestamps fall on the first day of each month, so the frequency should be `"MS"` (month start) rather than `"1M"` (month end). Let's fix that
```
ts = TSDataset(df, freq="MS")
```
We can look at the basic information about the dataset
```
ts.info()
```
Or in DataFrame format
```
ts.describe()
```
## 2. Plotting <a class="anchor" id="chapter2"></a>
Let's take a look at the time series in the dataset
```
ts.plot()
```
## 3. Forecasting single time series <a class="anchor" id="chapter3"></a>
Our library contains a wide range of different models for time series forecasting. Let's look at some of them.
### 3.1 Simple forecast<a class="anchor" id="section_3_1"></a>
Let's predict the monthly values in 1994 in our dataset using the ```NaiveModel```
```
train_ts, test_ts = ts.train_test_split(train_start="1980-01-01",
train_end="1993-12-01",
test_start="1994-01-01",
test_end="1994-08-01")
HORIZON = 8
from etna.models import NaiveModel
#Fit the model
model = NaiveModel(lag=12)
model.fit(train_ts)
#Make the forecast
future_ts = train_ts.make_future(HORIZON)
forecast_ts = model.forecast(future_ts)
```
Now let's look at a metric and plot the prediction.
All the methods already built-in in etna.
```
from etna.metrics import SMAPE
smape = SMAPE()
smape(y_true=test_ts, y_pred=forecast_ts)
from etna.analysis import plot_forecast
plot_forecast(forecast_ts, test_ts, train_ts, n_train_samples=10)
```
### 3.2 Prophet<a class="anchor" id="section_3_2"></a>
Now try to improve the forecast and predict the values with the Facebook Prophet.
```
from etna.models import ProphetModel
model = ProphetModel()
model.fit(train_ts)
#Make the forecast
future_ts = train_ts.make_future(HORIZON)
forecast_ts = model.forecast(future_ts)
smape(y_true=test_ts, y_pred=forecast_ts)
plot_forecast(forecast_ts, test_ts, train_ts, n_train_samples=10)
```
### 3.3 Catboost<a class="anchor" id="section_3_3"></a>
And finally let's try the Catboost model.
ETNA also has a wide range of transforms you can apply to your data.
Here is how it is done:
```
from etna.transforms import LagTransform
lags = LagTransform(in_column="target", lags=list(range(8, 24, 1)))
train_ts.fit_transform([lags])
from etna.models import CatBoostModelMultiSegment
model = CatBoostModelMultiSegment()
model.fit(train_ts)
future_ts = train_ts.make_future(HORIZON)
forecast_ts = model.forecast(future_ts)
from etna.metrics import SMAPE
smape = SMAPE()
smape(y_true=test_ts, y_pred=forecast_ts)
from etna.analysis import plot_forecast
train_ts.inverse_transform()
plot_forecast(forecast_ts, test_ts, train_ts, n_train_samples=10)
```
## 4. Forecasting multiple time series <a class="anchor" id="chapter4"></a>
In this section you will see an example of how easily ETNA works with multiple time series and get acquainted with other transforms the library contains.
```
original_df = pd.read_csv("data/example_dataset.csv")
original_df.head()
df = TSDataset.to_dataset(original_df)
ts = TSDataset(df, freq="D")
ts.plot()
ts.info()
import warnings
from etna.transforms import MeanTransform, LagTransform, LogTransform, \
SegmentEncoderTransform, DateFlagsTransform, LinearTrendTransform
warnings.filterwarnings("ignore")
log = LogTransform(in_column="target")
trend = LinearTrendTransform(in_column="target")
seg = SegmentEncoderTransform()
lags = LagTransform(in_column="target", lags=list(range(30, 96, 1)))
d_flags = DateFlagsTransform(day_number_in_week=True,
day_number_in_month=True,
week_number_in_month=True,
week_number_in_year=True,
month_number_in_year=True,
year_number=True,
special_days_in_week=[5, 6])
mean30 = MeanTransform(in_column="target", window=30)
HORIZON = 31
train_ts, test_ts = ts.train_test_split(train_start="2019-01-01",
train_end="2019-11-30",
test_start="2019-12-01",
test_end="2019-12-31")
train_ts.fit_transform([log, trend, lags, d_flags, seg, mean30])
from etna.models import CatBoostModelMultiSegment
model = CatBoostModelMultiSegment()
model.fit(train_ts)
future_ts = train_ts.make_future(HORIZON)
forecast_ts = model.forecast(future_ts)
smape = SMAPE()
smape(y_true=test_ts, y_pred=forecast_ts)
train_ts.inverse_transform()
plot_forecast(forecast_ts, test_ts, train_ts, n_train_samples=20)
```
## 5. Pipeline <a class="anchor" id="chapter5"></a>
Let's wrap everything into pipeline to create the end-to-end model from previous section.
```
from etna.pipeline import Pipeline
train_ts, test_ts = ts.train_test_split(train_start="2019-01-01",
train_end="2019-11-30",
test_start="2019-12-01",
test_end="2019-12-31")
```
We put the **model**, **transforms**, and **horizon** into a single object, which has an interface similar to the model's (fit/forecast)
```
model = Pipeline(model=CatBoostModelMultiSegment(),
transforms=[log, trend, lags, d_flags, seg, mean30],
horizon=HORIZON)
model.fit(train_ts)
forecast_ts = model.forecast()
```
As in the previous section, let's calculate the metrics and plot the forecast
```
smape = SMAPE()
smape(y_true=test_ts, y_pred=forecast_ts)
plot_forecast(forecast_ts, test_ts, train_ts, n_train_samples=20)
```
|
github_jupyter
|
# <div align="center">What is a Tensor</div>
---------------------------------------------------------------------
You can find me on GitHub:
> ###### [ GitHub](https://github.com/lev1khachatryan)
***Tensors are not generalizations of vectors***. It’s very slightly more understandable to say that tensors are generalizations of matrices, in the same way that it is slightly more accurate to say “vanilla ice cream is a generalization of chocolate ice cream” than it is to say that “vanilla ice cream is a generalization of dessert”, closer, but still false. Vanilla and Chocolate are both ice cream, but chocolate ice cream is not a type of vanilla ice cream, and “dessert” certainly isn’t a type of vanilla ice cream. In fact, technically, ***vectors are generalizations of tensors.*** What we generally think of as vectors are geometrical points in space, and we normally represent them as an array of numbers. That array of numbers is what people are referring to when they say "Tensors are generalizations of vectors", but really, even this adjusted claim is fundamentally false and extremely misleading.
At first let's define what is a vector.
## Definition of a vector space
The set ***V*** is a vector space with respect to the operations + (which is any operation that maps two elements of the space to another element of the space, not necessarily addition) and * (which is any operation that maps an element in the space and a scalar to another element in the space, not necessarily multiplication) if and only if, for every $x,y,z ∈ V$ and $a,b ∈ R$
* \+ is commutative, that is x+y=y+x
* \+ is associative, that is (x+y)+z=x+(y+z)
* There exists an identity element in the space, that is there exists an element 0 such that x+0=x
* Every element has an inverse, that is for every element x there exists an element −x such that x+−x=0
* \* is associative, that is a(b∗x)=(ab)∗x
* There is scalar distributivity over +, that is a∗(x+y)=a∗x+a∗y
* There is vector distributivity over scalar addition, that is (a+b)∗x=a∗x+b∗x
* And finally, 1∗x=x (note that, despite appearances, this does not follow from the previous seven points)
***A vector is defined as a member of such a space***. Notice how nothing here is explicitly stated to be numerical. We could be talking about colors, or elephants, or glasses of milk; as long as we meaningfully define these two operations, anything can be a vector. The special case of vectors that we usually think about in physics and geometry satisfy this definition ( i.e. points in space or “arrows”). Thus, “arrows” are special cases of vectors. More formally, every “arrow” v represents the line segment from 0, "the origin", which is the identity element of the vector space, to some other point in space. In this view, you can construct a vector space of “arrows” by first picking a point in space, and taking the set of all line segments from that point. (From now on, I will use the term “arrows” to formally distinguish between formal vectors and the type of vectors that have “magnitude and direction”.)
Okay, so anyone trying to understand tensors probably already knows this stuff.
But here is something you may not have heard about before if you are learning about tensors. When we define a vector space like this, we generally find that it is natural to define an operation that gives us lengths and angles. ***A vector space with lengths and angles is called an inner product space.***
## Definition of a Inner product space
An inner product space is a vector space V with an additional operation ***⋅*** such that, for all x,y,z ∈ V
* x⋅y ∈ R
* x⋅x ≥ 0
* x⋅x=0 ⟺ x=0
* x⋅(ay)=a(x⋅y)
* x⋅y=y⋅x
* x⋅(y+z)=x⋅y+x⋅z
We define the length of a vector x in an inner product space to be $||x|| = \sqrt[2]{x⋅x}$ , and the angle between two vectors x,y to be $arccos(\frac{x⋅y}{||x||||y||}).$
This is the equivalent of the dot product, which is defined to be $||x||||y||cos(θ)$, but note that this is not defined in terms of any sort of "components" of the vector, there are no arrays of numbers mentioned. I.e. the dot product is a geometrical operation.
So I have secretly given you your first glimpse at a tensor. Where was it? Was it x? Was it y? Was it V? Was it the glass of milk???
It was none of these things; ***it was the operation itself . The dot product itself is an example of a tensor.***
Well, again, ***tensors aren’t generalizations of vectors at all. Vectors, as we defined them above, are generalizations of tensors. And tensors aren’t technically generalizations of matrices. But tensors can certainly be thought of as kind of the same sort of object as a matrix.***
There are two things that tensors and matrices have in common. The first, and most important thing, is that they are both n-linear maps. This is why tensors are almost generalizations of matrices. The second, and more misleading, thing is that they can be represented as a 2d array of numbers. This second thing is a huge, and I mean HUGE red herring, and has undoubtedly caused an innumerable number of people to be confused.
*Let’s tackle the concept of bilinear maps, and then we can use that knowledge of bilinear maps to help us tackle the concept of representing rank 2 tensors as 2d arrays.*
## Bilinear maps
The dot product is what the cool kids like to call a bilinear map. This just means that the dot product has the following properties:
* x⋅(y+z)=x⋅y+x⋅z
* (y+z)⋅x=y⋅x+z⋅x
* x⋅(ay)=a(x⋅y)
Why is this important? Well if we represent the vector x as $x=x_{1}i+x_{2}j$, and we represent the vector $y=y_{1}i+y_{2}j$, then because ⋅ is linear, the following is true: $x⋅y=y_{1} x_{1} i⋅i + y_{2} x_{2} j⋅j + (x_{1} y_{2} + x_{2} y_{1})i⋅j$
This means if we know the values of i⋅i, j⋅j, and i⋅j, then we have completely defined the operation ⋅. In other words, knowing just these 3 values allows us to calculate the value of x⋅y for any x and y.
Now we can describe how ⋅ might be represented as a 2d array. If ⋅ is the standard cartesian dot product that you learned about on the first day of your linear algebra or physics class, and i and j are both the standard cartesian unit vectors, then i⋅i=1, j⋅j=1, and j⋅i=i⋅j=0.
To represent this tensor ⋅ as a 2d array, we would create a table holding these values, i.e.
\begin{bmatrix}
⋅ & i & j \\[0.3em]
i & 1 & 0 \\[0.3em]
j & 0 & 1
\end{bmatrix}
Or, more compactly
\begin{bmatrix}
1 & 0 \\[0.3em]
0 & 1
\end{bmatrix}
DO NOT LET THE SIMILARITY TO MATRIX NOTATION FOOL YOU. Multiplying this by a vector will clearly give the wrong answer for many reasons, the most important of which is that the dot product produces a scalar quantity, while a matrix produces a vector quantity. This notation is simply a way of neatly writing what the dot product represents; it is not a way of making the dot product into a matrix.
If we become more general, then we can take arbitrary values for these dot products i⋅i=a, j⋅j=b, and j⋅i=i⋅j=c.
Which would be represented as
\begin{bmatrix}
a & c \\[0.3em]
c & b
\end{bmatrix}
***A tensor defined in this way is called the metric tensor. The reason it is called that, and the reason it is so important in general relativity, is that just by changing the values we can change the definition of lengths and angles*** (remember that inner product spaces define length and angles in terms of ⋅), and we can enumerate over all possible definitions of lengths and angles. We call this a rank 2 tensor because it is a 2d array (i.e. it looks like a square); if we had a 3x3 tensor, such as a metric tensor for 3-dimensional space, it would still be an example of a rank 2 tensor.
\begin{bmatrix}
a & s & d \\[0.3em]
f & g & h \\[0.3em]
z & x & b
\end{bmatrix}
(Note: the table is symmetric along the diagonal only because the metric tensor is commutative. A general tensor does not have to be commutative and thus its representation does not have to be symmetric.)
To get a rank 3 tensor, we would create a cube-like table of values as opposed to a square-like one (I can’t do this in latex so you’ll have to imagine it). A rank 3 tensor would be a trilinear map. A trilinear map m takes 3 vectors from a vector space V, and can be defined in terms of the values it takes when its arguments are the basis vectors of V. E.g. if V has two basis vectors i and j, then m can be defined by defining the values of m(i,i,i), m(i,i,j), m(i,j,i), m(i,j,j), m(j,i,i), m(j,i,j), m(j,j,i), and m(j,j,j) in a 3d array.
A rank 4 tensor would be a 4-linear, a.k.a. quadrilinear, map that would take 4 arguments, and thus be represented as a 4-dimensional array, etc.
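As a small illustration (not part of the original essay), the "table of basis values" picture translates directly into code: an n-dimensional array plus multilinearity defines the whole map. The concrete numbers below are arbitrary assumptions.
```
import numpy as np

# Rank-2 tensor: a bilinear map defined by its values on the basis,
# g[p, q] = e_p . e_q (a metric). The entries are made up.
g = np.array([[1.0, 0.2],
              [0.2, 2.0]])

x = np.array([1.0, 3.0])    # x = 1*i + 3*j
y = np.array([2.0, -1.0])   # y = 2*i - 1*j

dot_xy = np.einsum('p,pq,q->', x, g, y)             # expand x.y in the basis
length_x = np.sqrt(np.einsum('p,pq,q->', x, g, x))  # ||x|| under this metric

# Rank-3 tensor: a trilinear map defined by a 3-d array of basis values.
m = np.random.default_rng(0).normal(size=(2, 2, 2))
z = np.array([0.5, 0.5])
m_xyz = np.einsum('pqr,p,q,r->', m, x, y, z)

print(dot_xy, length_x, m_xyz)
```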
## Why do people think tensors are generalizations of vectors?
So now we come to why people think tensors are generalizations of vectors. It's because, if we take a function $f(y)=x⋅y$, then f, being the linear scallawag it is, can be defined with only 2 values. $f(y)=y_{1}f(i)+y_{2}f(j)$, so knowing the values of f(i) and f(j) completely defines f. And therefore, f is a rank 1 tensor, i.e. a multilinear map with one argument. This would be represented as a 1d array, very much like the common notion of a vector. Furthermore, these values completely define x as well. If ⋅ is specifically the cartesian metric tensor, then the values of the representation of x and the values of the representation of f are exactly the same. This is why people think tensors are generalizations of vectors.
But if ⋅ is given different values, then the representation of x and the representation of f will have different values. ***Vectors by themselves are not linear maps, they can just be thought of as linear maps***. In order for them to actually be linear maps, they need to be combined with some sort of linear operator such as ⋅.
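Continuing the sketch above with assumed numbers: when ⋅ is the Cartesian metric, the array of values (f(i), f(j)) that defines f coincides with the array representing x, but for a different metric the two arrays differ.
```
import numpy as np

g_cartesian = np.eye(2)
g_other = np.array([[1.0, 0.2],
                    [0.2, 2.0]])

x = np.array([1.0, 3.0])

# The rank-1 tensor f(y) = x . y is defined by the values (f(i), f(j)) = g @ x
print(g_cartesian @ x)   # [1. 3.]   -> same numbers as x
print(g_other @ x)       # [1.6 6.2] -> different numbers than x
```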
So here is the definition: ***A tensor is any multilinear map from a vector space to a scalar field***. (Note: A multilinear map is just a generalization of linear and bilinear maps to maps that have more than 2 arguments. I.e. any map which is distributive over addition and scalar multiplication. Linear maps are considered a type of multilinear map)
This definition as a multilinear maps is another reason people think tensors are generalization of matrices, because matrices are linear maps just like tensors. But the distinction is that matrices take a vector space to itself, while tensors take a vector space to a scalar field. So a matrix is not strictly speaking a tensor.
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
import numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
import copy as cp
import sys, json, pickle
PROJECT_PATHS = ['/home/nbuckman/Dropbox (MIT)/DRL/2020_01_cooperative_mpc/mpc-multiple-vehicles/', '/Users/noambuckman/mpc-multiple-vehicles/']
for p in PROJECT_PATHS:
sys.path.append(p)
import src.traffic_world as tw
import src.multiagent_mpc as mpc
import src.car_plotting_multiple as cmplot
import src.solver_helper as helper
import src.vehicle as vehicle
i_mpc_start = 1
i_mpc = i_mpc_start
log_directory = '/home/nbuckman/mpc_results/f509-425f-20200907-153800/'
folder = log_directory  # later cells refer to the log directory as `folder`
with open(log_directory + "params.json",'rb') as fp:
params = json.load(fp)
n_rounds_mpc = params['n_rounds_mpc']
number_ctrl_pts_executed = params['number_ctrl_pts_executed']
xamb_actual, uamb_actual = np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)), np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed))
xothers_actual = [np.zeros((6, n_rounds_mpc*number_ctrl_pts_executed + 1)) for i in range(params['n_other'])]
uothers_actual = [np.zeros((2, n_rounds_mpc*number_ctrl_pts_executed)) for i in range(params['n_other'])]
actual_t = 0
last_mpc_i = 104
for i_mpc_start in range(1,last_mpc_i+2):
previous_mpc_file = folder + 'data/mpc_%02d'%(i_mpc_start - 1)
xamb_executed, uamb_executed, _, all_other_x_executed, all_other_u_executed, _, = mpc.load_state(previous_mpc_file, params['n_other'])
all_other_u_mpc = all_other_u_executed
uamb_mpc = uamb_executed
previous_all_file = folder + 'data/all_%02d'%(i_mpc_start -1)
# xamb_actual_prev, uamb_actual_prev, _, xothers_actual_prev, uothers_actual_prev, _ = mpc.load_state(previous_all_file, params['n_other'], ignore_des = True)
t_end = actual_t+number_ctrl_pts_executed+1
xamb_actual[:, actual_t:t_end] = xamb_executed[:,:number_ctrl_pts_executed+1]
uamb_actual[:, actual_t:t_end] = uamb_executed[:,:number_ctrl_pts_executed+1]
    for i in range(len(all_other_x_executed)):  # iterate over the executed trajectories (xothers_actual_prev is never loaded above)
xothers_actual[i][:, actual_t:t_end] = all_other_x_executed[i][:,:number_ctrl_pts_executed+1]
uothers_actual[i][:, actual_t:t_end] = all_other_u_executed[i][:,:number_ctrl_pts_executed+1]
# print(xamb_actual[0,:t_end])
# print(" ")
file_name = folder + "data/"+'all_%02d'%(i_mpc_start-1)
mpc.save_state(file_name, xamb_actual, uamb_actual, None, xothers_actual, uothers_actual, None, end_t = actual_t+number_ctrl_pts_executed+1)
actual_t += number_ctrl_pts_executed
print("Loaded initial positions from %s"%(previous_mpc_file))
print(xothers_actual[0][0,:t_end])
```
|
github_jupyter
|
# Classification example 2 using Health Data with PyCaret
```
#Code from https://github.com/pycaret/pycaret/
# check version
from pycaret.utils import version
version()
```
# 1. Data Repository
```
import pandas as pd
url = 'https://raw.githubusercontent.com/davidrkearney/colab-notebooks/main/datasets/strokes_training.csv'
df = pd.read_csv(url, error_bad_lines=False)
df
data=df
```
# 2. Initialize Setup
```
from pycaret.classification import *
clf1 = setup(df, target = 'stroke', session_id=123, log_experiment=True, experiment_name='health2')
```
# 3. Compare Baseline
```
best_model = compare_models()
```
# 4. Create Model
```
lr = create_model('lr')
dt = create_model('dt')
rf = create_model('rf', fold = 5)
models()
models(type='ensemble').index.tolist()
#ensembled_models = compare_models(whitelist = models(type='ensemble').index.tolist(), fold = 3)
```
# 5. Tune Hyperparameters
```
tuned_lr = tune_model(lr)
tuned_rf = tune_model(rf)
```
# 6. Ensemble Model
```
bagged_dt = ensemble_model(dt)
boosted_dt = ensemble_model(dt, method = 'Boosting')
```
# 7. Blend Models
```
blender = blend_models(estimator_list = [boosted_dt, bagged_dt, tuned_rf], method = 'soft')
```
# 8. Stack Models
```
stacker = stack_models(estimator_list = [boosted_dt,bagged_dt,tuned_rf], meta_model=rf)
```
# 9. Analyze Model
```
plot_model(rf)
plot_model(rf, plot = 'confusion_matrix')
plot_model(rf, plot = 'boundary')
plot_model(rf, plot = 'feature')
plot_model(rf, plot = 'pr')
plot_model(rf, plot = 'class_report')
evaluate_model(rf)
```
# 10. Interpret Model
```
catboost = create_model('rf', cross_validation=False)
interpret_model(catboost)
interpret_model(catboost, plot = 'correlation')
interpret_model(catboost, plot = 'reason', observation = 12)
```
# 11. AutoML()
```
best = automl(optimize = 'Recall')
best
```
# 12. Predict Model
```
pred_holdouts = predict_model(lr)
pred_holdouts.head()
new_data = data.copy()
new_data.drop(['stroke'], axis=1, inplace=True)  # drop the target column ('stroke' in this dataset)
predict_new = predict_model(best, data=new_data)
predict_new.head()
```
# 13. Save / Load Model
```
save_model(best, model_name='best-model')
loaded_bestmodel = load_model('best-model')
print(loaded_bestmodel)
from sklearn import set_config
set_config(display='diagram')
loaded_bestmodel[0]
from sklearn import set_config
set_config(display='text')
```
# 14. Deploy Model
```
deploy_model(best, model_name = 'best-aws', authentication = {'bucket' : 'pycaret-test'})
```
# 15. Get Config / Set Config
```
X_train = get_config('X_train')
X_train.head()
get_config('seed')
from pycaret.classification import set_config
set_config('seed', 999)
get_config('seed')
```
# 16. MLFlow UI
```
# !mlflow ui
```
|
github_jupyter
|
# Logistic regression example
### Dr. Tirthajyoti Sarkar, Fremont, CA 94536
---
This notebook demonstrates solving a logistic regression problem of predicting Hypothyroidism with **Scikit-learn** and **Statsmodels** libraries.
The dataset is taken from UCI ML repository.
<br>Here is the link: https://archive.ics.uci.edu/ml/datasets/Thyroid+Disease
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
```
### Read the dataset
```
names = 'response age sex on_thyroxine query_on_thyroxine antithyroid_medication thyroid_surgery query_hypothyroid query_hyperthyroid pregnant \
sick tumor lithium goitre TSH_measured TSH T3_measured \
T3 TT4_measured TT4 T4U_measured T4U FTI_measured FTI TBG_measured TBG'
names = names.split(' ')
#!wget https://raw.githubusercontent.com/tirthajyoti/Machine-Learning-with-Python/master/Datasets/hypothyroid.csv
#!mkdir Data
#!mv hypothyroid.csv Data/
df = pd.read_csv('Data/hypothyroid.csv',index_col=False,names=names,na_values=['?'])
df.head()
to_drop=[]
for c in df.columns:
if 'measured' in c or 'query' in c:
to_drop.append(c)
to_drop
to_drop.append('TBG')
df.drop(to_drop,axis=1,inplace=True)
df.head()
```
### Let us see the basic statistics on the dataset
```
df.describe().T
```
### Are any data points are missing? We can check it using `df.isna()` method
The `df.isna()` method gives back a full DataFrame with Boolean values - True for missing data, False for data that is present. We can use `sum()` on that DataFrame to see and calculate the number of missing values per column.
```
df.isna().sum()
```
### We can use `df.dropna()` method to drop those missing rows
```
df.dropna(inplace=True)
df.shape
```
### Creating a transformation function to convert the `hypothyroid`/`negative` responses to 1 and 0
```
def class_convert(response):
if response=='hypothyroid':
return 1
else:
return 0
df['response']=df['response'].apply(class_convert)
df.head()
df.columns
```
### Exploratory data analysis
```
for var in ['age','TSH','T3','TT4','T4U','FTI']:
sns.boxplot(x='response',y=var,data=df)
plt.show()
sns.pairplot(data=df[df.columns[1:]],diag_kws={'edgecolor':'k','bins':25},plot_kws={'edgecolor':'k'})
plt.show()
```
### Create dummy variables for the categorical variables
```
df_dummies = pd.get_dummies(data=df)
df_dummies.shape
df_dummies.sample(10)
```
### Test/train split
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(df_dummies.drop('response',axis=1),
df_dummies['response'], test_size=0.30,
random_state=42)
print("Training set shape",X_train.shape)
print("Test set shape",X_test.shape)
```
### Using `LogisticRegression` estimator from Scikit-learn
We are using the L2 regularization by default
```
from sklearn.linear_model import LogisticRegression
clf1 = LogisticRegression(penalty='l2',solver='newton-cg')
clf1.fit(X_train,y_train)
```
### Intercept, coefficients, and score
```
clf1.intercept_
clf1.coef_
clf1.score(X_test,y_test)
```
### For `LogisticRegression` estimator, there is a special `predict_proba` method which computes the raw probability values
```
prob_threshold = 0.5
prob_df=pd.DataFrame(clf1.predict_proba(X_test[:10]),columns=['Prob of NO','Prob of YES'])
prob_df['Decision']=(prob_df['Prob of YES']>prob_threshold).apply(int)
prob_df
y_test[:10]
```
### Classification report, and confusion matrix
```
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(y_test, clf1.predict(X_test)))
pd.DataFrame(confusion_matrix(y_test, clf1.predict(X_test)),columns=['Predict-NO','Predict-YES'],index=['NO','YES'])  # rows/columns follow sklearn's sorted label order: 0 (NO), 1 (YES)
```
### Using `statsmodels` library
```
import statsmodels.formula.api as smf
import statsmodels.api as sm
df_dummies.columns
```
### Create a 'formula' in the same style as in R language
```
formula = 'response ~ ' + '+'.join(df_dummies.columns[1:])
formula
```
### Fit a GLM (Generalized Linear model) with this formula and choosing `Binomial` as the family of function
```
model = smf.glm(formula = formula, data=df_dummies, family=sm.families.Binomial())
result=model.fit()
```
### `summary` method shows a R-style table with all kind of statistical information
```
print(result.summary())
```
### The `predict` method computes probability for the test dataset
```
result.predict(X_test[:10])
```
### To create binary predictions, you have to apply a threshold probability and convert the booleans into integers
```
y_pred=(result.predict(X_test)>prob_threshold).apply(int)
print(classification_report(y_test,y_pred))
pd.DataFrame(confusion_matrix(y_test, y_pred),columns=['Predict-NO','Predict-YES'],index=['NO','YES'])
```
### A smaller model with only the first few variables
We saw that the majority of variables in the logistic regression model have very high p-values and are therefore not statistically significant. We create another, smaller model that removes those variables.
```
formula = 'response ~ ' + '+'.join(df_dummies.columns[1:7])
formula
model = smf.glm(formula = formula, data=df_dummies, family=sm.families.Binomial())
result=model.fit()
print(result.summary())
y_pred=(result.predict(X_test)>prob_threshold).apply(int)
print(classification_report(y_test,y_pred))
pd.DataFrame(confusion_matrix(y_test, y_pred),columns=['Predict-NO','Predict-YES'],index=['NO','YES'])
```
### How do the probabilities compare between `Scikit-learn` and `Statsmodels` predictions?
```
sklearn_prob = clf1.predict_proba(X_test)[...,1][:10]
statsmodels_prob = result.predict(X_test[:10])
prob_comp_df=pd.DataFrame(data={'Scikit-learn Prob':list(sklearn_prob),'Statsmodels Prob':list(statsmodels_prob)})
prob_comp_df
```
### Coefficient interpretation
What is the interpretation of the coefficient values for `age` and `FTI`?
- With every one-year increase in age, the log odds of hypothyroidism **increase** by 0.0248, i.e. the odds of hypothyroidism increase by a factor of exp(0.0248) ≈ 1.025, or roughly 2.5%.
- With every one-unit increase in FTI, the log odds of hypothyroidism **decrease** by 0.1307, i.e. the odds of hypothyroidism change by a factor of exp(-0.1307) ≈ 0.878, a decrease of roughly 12.25%.
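As a quick check, the odds ratios can be computed directly from the fitted `statsmodels` result (a minimal sketch, assuming `result` is the GLM fitted above and that `age` and `FTI` are among its terms):
```
import numpy as np
# Exponentiating the coefficients turns log-odds changes into odds ratios
odds_ratios = np.exp(result.params)
print(odds_ratios[['age', 'FTI']])
```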
|
github_jupyter
|
# Keras Functional API
```
# sudo pip3 install --ignore-installed --upgrade tensorflow
import keras
import tensorflow as tf
print(keras.__version__)
print(tf.__version__)
# To ignore keep_dims warning
tf.logging.set_verbosity(tf.logging.ERROR)
```
Let’s start with a minimal example that shows side by side a simple Sequential model and its equivalent in the functional API:
```
from keras.models import Sequential, Model
from keras import layers
from keras import Input
seq_model = Sequential()
seq_model.add(layers.Dense(32, activation='relu', input_shape=(64,)))
seq_model.add(layers.Dense(32, activation='relu'))
seq_model.add(layers.Dense(10, activation='softmax'))
input_tensor = Input(shape=(64,))
x = layers.Dense(32, activation='relu')(input_tensor)
x = layers.Dense(32, activation='relu')(x)
output_tensor = layers.Dense(10, activation='softmax')(x)
model = Model(input_tensor, output_tensor)
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
The only part that may seem a bit magical at this point is instantiating a Model object using only an input tensor and an output tensor. Behind the scenes, Keras retrieves every layer involved in going from input_tensor to output_tensor, bringing them together into a graph-like data structure—a Model. Of course, the reason it works is that output_tensor was obtained by repeatedly transforming input_tensor.
If you tried to build a model from **inputs and outputs that weren’t related**, you’d get a RuntimeError:
```
unrelated_input = Input(shape=(32,))
bad_model = Model(unrelated_input, output_tensor)
```
This error tells you, in essence, that Keras couldn’t reach input_2 from the provided output tensor.
When it comes to compiling, training, or evaluating such an instance of Model, the API is *the same as that of Sequential*:
```
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
import numpy as np
x_train = np.random.random((1000, 64))
y_train = np.random.random((1000, 10))
model.fit(x_train, y_train, epochs=10, batch_size=128)
score = model.evaluate(x_train, y_train)
```
## Multi-input models
#### A question-answering model example
Following is an example of how you can build such a model with the functional API. You set up two independent branches, encoding the text input and the question input as representation vectors; then, concatenate these vectors; and finally, add a softmax classifier on top of the concatenated representations.
```
from keras.models import Model
from keras import layers
from keras import Input
text_vocabulary_size = 10000
question_vocabulary_size = 10000
answer_vocabulary_size = 500
# The text input is a variable-length sequence of integers.
# Note that you can optionally name the inputs.
text_input = Input(shape=(None,), dtype='int32', name='text')
# Embeds the inputs into a sequence of vectors of size 64
# embedded_text = layers.Embedding(64, text_vocabulary_size)(text_input)
# embedded_text = layers.Embedding(output_dim=64, input_dim=text_vocabulary_size)(text_input)
embedded_text = layers.Embedding(text_vocabulary_size,64)(text_input)
# Encodes the vectors in a single vector via an LSTM
encoded_text = layers.LSTM(32)(embedded_text)
# Same process (with different layer instances) for the question
question_input = Input(shape=(None,),dtype='int32',name='question')
# embedded_question = layers.Embedding(32, question_vocabulary_size)(question_input)
# embedded_question = layers.Embedding(output_dim=32, input_dim=question_vocabulary_size)(question_input)
embedded_question = layers.Embedding(question_vocabulary_size,32)(question_input)
encoded_question = layers.LSTM(16)(embedded_question)
# Concatenates the encoded question and encoded text
concatenated = layers.concatenate([encoded_text, encoded_question],axis=-1)
# Adds a softmax classifier on top
answer = layers.Dense(answer_vocabulary_size, activation='softmax')(concatenated)
# At model instantiation, you specify the two inputs and the output.
model = Model([text_input, question_input], answer)
model.compile(optimizer='rmsprop',loss='categorical_crossentropy',metrics=['acc'])
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
Now, how do you **train** this two-input model?
There are two possible APIs:
* you can feed the model a list of Numpy arrays as inputs
* you can feed it a dictionary that maps input names to Numpy arrays.
Naturally, the latter option is available only if you give names to your inputs.
#### Training the multi-input model
```
import numpy as np
num_samples = 1000
max_length = 100
# Generates dummy Numpy data
text = np.random.randint(1, text_vocabulary_size,size=(num_samples, max_length))
question = np.random.randint(1, question_vocabulary_size,size=(num_samples, max_length))
# Answers are one-hot encoded, not integers
# answers = np.random.randint(0, 1,size=(num_samples, answer_vocabulary_size))
answers = np.random.randint(answer_vocabulary_size, size=(num_samples))
answers = keras.utils.to_categorical(answers, answer_vocabulary_size)
# Fitting using a list of inputs
print('-'*10,"First training run with list of NumPy arrays",'-'*60)
model.fit([text, question], answers, epochs=10, batch_size=128)
print()
# Fitting using a dictionary of inputs (only if inputs are named)
print('-'*10,"Second training run with dictionary and named inputs",'-'*60)
model.fit({'text': text, 'question': question}, answers,epochs=10, batch_size=128)
```
## Multi-output models
You can also use the functional API to build models with multiple outputs (or multiple *heads*).
#### Example - prediction of Age, Gender and Income from social media posts
A simple example is a network that attempts to simultaneously predict different properties of the data, such as a network that takes as input a series of social media posts from a single anonymous person and tries to predict attributes of that person, such as age, gender, and income level.
```
from keras import layers
from keras import Input
from keras.models import Model
vocabulary_size = 50000
num_income_groups = 10
posts_input = Input(shape=(None,), dtype='int32', name='posts')
#embedded_posts = layers.Embedding(256, vocabulary_size)(posts_input)
embedded_posts = layers.Embedding(vocabulary_size,256)(posts_input)
x = layers.Conv1D(128, 5, activation='relu', padding='same')(embedded_posts)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256, 5, activation='relu', padding='same')(x)
x = layers.Conv1D(256, 5, activation='relu', padding='same')(x)
x = layers.MaxPooling1D(5)(x)
x = layers.Conv1D(256, 5, activation='relu', padding='same')(x)
x = layers.Conv1D(256, 5, activation='relu', padding='same')(x)
x = layers.GlobalMaxPooling1D()(x)
x = layers.Dense(128, activation='relu')(x)
# Note that the output layers are given names.
age_prediction = layers.Dense(1, name='age')(x)
income_prediction = layers.Dense(num_income_groups, activation='softmax',name='income')(x)
gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)
model = Model(posts_input,[age_prediction, income_prediction, gender_prediction])
print("Model is ready!")
```
#### Compilation options of a multi-output model: multiple losses
```
model.compile(optimizer='rmsprop', loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'])
# Equivalent (possible only if you give names to the output layers)
model.compile(optimizer='rmsprop',loss={'age': 'mse',
'income': 'categorical_crossentropy',
'gender': 'binary_crossentropy'})
model.compile(optimizer='rmsprop',
loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'],
loss_weights=[0.25, 1., 10.])
# Equivalent (possible only if you give names to the output layers)
model.compile(optimizer='rmsprop',
loss={'age': 'mse','income': 'categorical_crossentropy','gender': 'binary_crossentropy'},
loss_weights={'age': 0.25,
'income': 1.,
'gender': 10.})
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
#### Feeding data to a multi-output model
Much as in the case of multi-input models, you can pass Numpy data to the model for training either via a list of arrays or via a dictionary of arrays.
#### Training a multi-output model
```
import numpy as np
TRACE = False
num_samples = 1000
max_length = 100
posts = np.random.randint(1, vocabulary_size, size=(num_samples, max_length))
if TRACE:
print("*** POSTS ***")
print(posts.shape)
print(posts[:10])
print()
age_targets = np.random.randint(0, 100, size=(num_samples,1))
if TRACE:
print("*** AGE ***")
print(age_targets.shape)
print(age_targets[:10])
print()
income_targets = np.random.randint(1, num_income_groups, size=(num_samples,1))
income_targets = keras.utils.to_categorical(income_targets,num_income_groups)
if TRACE:
print("*** INCOME ***")
print(income_targets.shape)
print(income_targets[:10])
print()
gender_targets = np.random.randint(0, 2, size=(num_samples,1))
if TRACE:
print("*** GENDER ***")
print(gender_targets.shape)
print(gender_targets[:10])
print()
print('-'*10, "First training run with NumPy arrays", '-'*60)
# age_targets, income_targets, and gender_targets are assumed to be Numpy arrays.
model.fit(posts, [age_targets, income_targets, gender_targets], epochs=10, batch_size=64)
print('-'*10,"Second training run with dictionary and named outputs",'-'*60)
# Equivalent (possible only if you give names to the output layers)
model.fit(posts, {'age': age_targets,
'income': income_targets,
'gender': gender_targets},
epochs=10, batch_size=64)
```
### 7.1.4 Directed acyclic graphs of layers
With the functional API, not only can you build models with multiple inputs and multiple outputs, but you can also implement networks with a complex internal topology.
Neural networks in Keras are allowed to be arbitrary directed acyclic graphs of layers (the only processing loops that are allowed are those internal to recurrent layers).
Several common neural-network components are implemented as graphs. Two notable ones are <i>Inception modules</i> and <i>residual connections</i>. To better understand how the functional API can be used to build graphs of layers, let’s take a look at how you can implement both of them in Keras.
#### Inception modules
Inception [3] is a popular type of network architecture for convolutional neural networks. It consists of a stack of modules that themselves look like small independent networks, split into several parallel branches.
##### The purpose of 1 × 1 convolutions
1 × 1 convolutions (also called pointwise convolutions) are featured in Inception modules, where they contribute to factoring out channel-wise feature learning and space-wise feature learning.
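As a quick illustration (a sketch added here, not part of the original listing), a 1 × 1 convolution recombines the channels at every spatial position while leaving the spatial dimensions untouched:
```
from keras import layers, Input
from keras.models import Model

# Hypothetical toy input: a 28 x 28 feature map with 16 channels
inp = Input(shape=(28, 28, 16))
# The 1 x 1 kernel never looks at neighbouring pixels, so height and width
# stay 28 x 28; only the 16 channels are mixed down to 8 per position.
out = layers.Conv2D(8, 1, activation='relu')(inp)
print(Model(inp, out).output_shape)  # (None, 28, 28, 8)
```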
```
from keras import layers
from keras.layers import Input
# This example assumes the existence of a 4D input tensor x:
# This returns a typical image tensor like those of MNIST dataset
x = Input(shape=(28, 28, 1), dtype='float32', name='images')
print("x.shape:",x.shape)
# Every branch has the same stride value (2), which is necessary to
# keep all branch outputs the same size so you can concatenate them
branch_a = layers.Conv2D(128, 1, padding='same', activation='relu', strides=2)(x)
# In this branch, the striding occurs in the spatial convolution layer.
branch_b = layers.Conv2D(128, 1, padding='same', activation='relu')(x)
branch_b = layers.Conv2D(128, 3, padding='same', activation='relu', strides=2)(branch_b)
# In this branch, the striding occurs in the average pooling layer.
branch_c = layers.AveragePooling2D(3, padding='same', strides=2)(x)
branch_c = layers.Conv2D(128, 3, padding='same', activation='relu')(branch_c)
branch_d = layers.Conv2D(128, 1, padding='same', activation='relu')(x)
branch_d = layers.Conv2D(128, 3, padding='same', activation='relu')(branch_d)
branch_d = layers.Conv2D(128, 3, padding='same', activation='relu', strides=2)(branch_d)
# Concatenates the branch outputs to obtain the module output
output = layers.concatenate([branch_a, branch_b, branch_c, branch_d], axis=-1)
# Adding a classifier on top of the convnet
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
predictions = layers.Dense(10, activation='softmax')(output)
model = keras.models.Model(inputs=x, outputs=predictions)
```
#### Train the Inception model using the Dataset API and the MNIST data
Inspired by: https://github.com/keras-team/keras/blob/master/examples/mnist_dataset_api.py
```
import numpy as np
import os
import tempfile
import keras
from keras import backend as K
from keras import layers
from keras.datasets import mnist
import tensorflow as tf
if K.backend() != 'tensorflow':
raise RuntimeError('This example can only run with the TensorFlow backend,'
' because it requires the Dataset API, which is not'
' supported on other platforms.')
batch_size = 128
buffer_size = 10000
steps_per_epoch = int(np.ceil(60000 / float(batch_size))) # = 469
epochs = 5
num_classes = 10
def cnn_layers(x):
# This example assumes the existence of a 4D input tensor x:
# This returns a typical image tensor like those of MNIST dataset
print("x.shape:",x.shape)
# Every branch has the same stride value (2), which is necessary to
# keep all branch outputs the same size so you can concatenate them
branch_a = layers.Conv2D(128, 1, padding='same', activation='relu', strides=2)(x)
# In this branch, the striding occurs in the spatial convolution layer.
branch_b = layers.Conv2D(128, 1, padding='same', activation='relu')(x)
branch_b = layers.Conv2D(128, 3, padding='same', activation='relu', strides=2)(branch_b)
# In this branch, the striding occurs in the average pooling layer.
branch_c = layers.AveragePooling2D(3, padding='same', strides=2)(x)
branch_c = layers.Conv2D(128, 3, padding='same', activation='relu')(branch_c)
branch_d = layers.Conv2D(128, 1, padding='same', activation='relu')(x)
branch_d = layers.Conv2D(128, 3, padding='same', activation='relu')(branch_d)
branch_d = layers.Conv2D(128, 3, padding='same', activation='relu', strides=2)(branch_d)
# Concatenates the branch outputs to obtain the module output
output = layers.concatenate([branch_a, branch_b, branch_c, branch_d], axis=-1)
# Adding a classifier on top of the convnet
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
predictions = layers.Dense(num_classes, activation='softmax')(output)
return predictions
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = np.expand_dims(x_train, -1)
y_train = tf.one_hot(y_train, num_classes)
# Create the dataset and its associated one-shot iterator.
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
# Model creation using tensors from the get_next() graph node.
inputs, targets = iterator.get_next()
print("inputs.shape:",inputs.shape)
print("targets.shape:",targets.shape)
model_input = layers.Input(tensor=inputs)
model_output = cnn_layers(model_input)
model = keras.models.Model(inputs=model_input, outputs=model_output)
model.compile(optimizer=keras.optimizers.RMSprop(lr=2e-3, decay=1e-5),
loss='categorical_crossentropy',
metrics=['accuracy'],
target_tensors=[targets])
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
#### Train Inception model
```
model.fit(epochs=epochs,
steps_per_epoch=steps_per_epoch)
# Save the model weights.
weight_path = os.path.join(tempfile.gettempdir(), 'saved_Inception_wt.h5')
model.save_weights(weight_path)
```
#### Test the Inception model
Second session to test loading trained model without tensors.
```
# Clean up the TF session.
K.clear_session()
# Second session to test loading trained model without tensors.
x_test = x_test.astype(np.float32) / 255  # scale to [0, 1], matching the training data
x_test = np.expand_dims(x_test, -1)
x_test_inp = layers.Input(shape=x_test.shape[1:])
test_out = cnn_layers(x_test_inp)
test_model = keras.models.Model(inputs=x_test_inp, outputs=test_out)
weight_path = os.path.join(tempfile.gettempdir(), 'saved_Inception_wt.h5')
test_model.load_weights(weight_path)
test_model.compile(optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
test_model.summary()
SVG(model_to_dot(test_model).create(prog='dot', format='svg'))
loss, acc = test_model.evaluate(x_test, y_test, batch_size=batch_size)
print('\nTest accuracy: {0}'.format(acc))
```
#### Residual connections - ResNet
Residual connections (ResNet) are a common graph-like network component found in many post-2015 network architectures, including Xception. They were introduced by He et al. at Microsoft and address two common problems with large-scale deep-learning models: vanishing gradients and representational bottlenecks.
A residual connection consists of making the output of an earlier layer available as input to a later layer, effectively creating a shortcut in a sequential network. Rather than being concatenated to the later activation, the earlier output is summed with the later activation, which assumes that both activations are the same size. If they’re different sizes, you can use a linear transformation to reshape the earlier activation into the target shape (for example, a Dense layer without an activation or, for convolutional feature maps, a 1 × 1 convolution without an activation).
###### ResNet implementation when the feature-map sizes are the same
Here’s how to implement a residual connection in Keras when the feature-map sizes are the same, using identity residual connections. This example assumes the existence of a 4D input tensor x:
```
from keras import layers
from keras.layers import Input
# This example assumes the existence of a 4D input tensor x:
# This returns a typical image tensor like those of MNIST dataset
x = Input(shape=(28, 28, 1), dtype='float32', name='images')
print("x.shape:",x.shape)
# Applies a transformation to x
y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
# Adds the original x back to the output features
output = layers.add([y, x])
# Adding a classifier on top of the convnet
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
predictions = layers.Dense(10, activation='softmax')(output)
model = keras.models.Model(inputs=x, outputs=predictions)
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
###### ResNet implementation when the feature-map sizes differ
And the following implements a residual connection when the feature-map sizes differ, using a linear residual connection (again, assuming the existence of a 4D input tensor x):
```
from keras import layers
from keras.layers import Input
# This example assumes the existence of a 4D input tensor x:
# This returns a typical image tensor like those of MNIST dataset
x = Input(shape=(28, 28, 1), dtype='float32', name='images')
print("x.shape:",x.shape)
# Applies a transformation to x
y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.MaxPooling2D(2, strides=2)(y)
# Uses a 1 × 1 convolution to linearly downsample the original x tensor to the same shape as y
residual = layers.Conv2D(128, 1, strides=2, padding='same')(x)
# Adds the residual tensor back to the output features
output = layers.add([y, residual])
# Adding a classifier on top of the convnet
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
predictions = layers.Dense(10, activation='softmax')(output)
model = keras.models.Model(inputs=x, outputs=predictions)
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
#### Train the ResNet model using the Dataset API and the MNIST data
(when the feature-map sizes are the same)
```
import numpy as np
import os
import tempfile
import keras
from keras import backend as K
from keras import layers
from keras.datasets import mnist
import tensorflow as tf
if K.backend() != 'tensorflow':
raise RuntimeError('This example can only run with the TensorFlow backend,'
' because it requires the Dataset API, which is not'
' supported on other platforms.')
batch_size = 128
buffer_size = 10000
steps_per_epoch = int(np.ceil(60000 / float(batch_size))) # = 469
epochs = 5
num_classes = 10
def cnn_layers(x):
# This example assumes the existence of a 4D input tensor x:
# This returns a typical image tensor like those of MNIST dataset
print("x.shape:",x.shape)
# Applies a transformation to x
y = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
y = layers.Conv2D(128, 3, activation='relu', padding='same')(y)
# Adds the original x back to the output features
output = layers.add([y, x])
# Adding a classifier on top of the convnet
output = layers.Flatten()(output)
output = layers.Dense(512, activation='relu')(output)
predictions = layers.Dense(10, activation='softmax')(output)
return predictions
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype(np.float32) / 255
x_train = np.expand_dims(x_train, -1)
y_train = tf.one_hot(y_train, num_classes)
# Create the dataset and its associated one-shot iterator.
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size)
dataset = dataset.batch(batch_size)
iterator = dataset.make_one_shot_iterator()
# Model creation using tensors from the get_next() graph node.
inputs, targets = iterator.get_next()
print("inputs.shape:",inputs.shape)
print("targets.shape:",targets.shape)
model_input = layers.Input(tensor=inputs)
model_output = cnn_layers(model_input)
model = keras.models.Model(inputs=model_input, outputs=model_output)
model.compile(optimizer=keras.optimizers.RMSprop(lr=2e-3, decay=1e-5),
loss='categorical_crossentropy',
metrics=['accuracy'],
target_tensors=[targets])
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
#### Train and Save the ResNet model
```
model.fit(epochs=epochs,
steps_per_epoch=steps_per_epoch)
# Save the model weights.
weight_path = os.path.join(tempfile.gettempdir(), 'saved_ResNet_wt.h5')
model.save_weights(weight_path)
```
#### Second session to test loading trained model without tensors.
```
# Clean up the TF session.
K.clear_session()
# Second session to test loading trained model without tensors.
x_test = x_test.astype(np.float32) / 255  # scale to [0, 1], matching the training data
x_test = np.expand_dims(x_test, -1)
x_test_inp = layers.Input(shape=x_test.shape[1:])
test_out = cnn_layers(x_test_inp)
test_model = keras.models.Model(inputs=x_test_inp, outputs=test_out)
weight_path = os.path.join(tempfile.gettempdir(), 'saved_ResNet_wt.h5')
test_model.load_weights(weight_path)
test_model.compile(optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
test_model.summary()
loss, acc = test_model.evaluate(x_test, y_test, batch_size=batch_size)
print('\nTest accuracy: {0}'.format(acc))
```
Not very good... probably expected, since residual connections shine in very deep networks, whereas here we only have a few hidden layers.
### 7.1.5. Layer weights sharing
One more important feature of the functional API is the ability to reuse a layer instance several times: instead of instantiating a new layer for each call, you reuse the same weights with every call. This allows you to build models that have shared branches—several branches that all share the same knowledge and perform the same operations.
#### Example - semantic similarity between two sentences
For example, consider a model that attempts to assess the semantic similarity between two sentences. The model has two inputs (the two sentences to compare) and outputs a score between 0 and 1, where 0 means unrelated sentences and 1 means sentences that are either identical or reformulations of each other. Such a model could be useful in many applications, including deduplicating natural-language queries in a dialog system.
In this setup, the two input sentences are interchangeable, because semantic similarity is a symmetrical relationship: the similarity of A to B is identical to the similarity of B to A. For this reason, it wouldn’t make sense to learn two independent models for processing each input sentence. Rather, you want to process both with a single LSTM layer. The representations of this LSTM layer (its weights) are learned based on both inputs simultaneously. This is what we call a Siamese LSTM model or a shared LSTM.
Note: A Siamese network is a special type of neural network architecture. Instead of learning to classify its inputs, a Siamese neural network learns to differentiate between two inputs - it learns their similarity.
Here’s how to implement such a model using layer sharing (layer reuse) in the Keras functional API:
```
from keras import layers
from keras import Input
from keras.models import Model
# Instantiates a single LSTM layer, once
lstm = layers.LSTM(32)
# Building the left branch of the model:
# inputs are variable-length sequences of vectors of size 128.
left_input = Input(shape=(None, 128))
left_output = lstm(left_input)
# Building the right branch of the model:
# when you call an existing layer instance, you reuse its weights.
right_input = Input(shape=(None, 128))
right_output = lstm(right_input)
# Builds the classifier on top
merged = layers.concatenate([left_output, right_output], axis=-1)
predictions = layers.Dense(1, activation='sigmoid')(merged)
# Instantiating the model
model = Model([left_input, right_input], predictions)
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
import numpy as np
num_samples = 100
num_symbols = 2
TRACE = False
left_data = np.random.randint(0,num_symbols, size=(num_samples,1,128))
if TRACE:
print(type(left_data))
print(left_data.shape)
print(left_data)
print('-'*50)
right_data = np.random.randint(0,num_symbols, size=(num_samples,1,128))
if TRACE:
print(type(right_data))
print(right_data.shape)
print(right_data)
print('-'*50)
matching_list = [np.random.randint(0,num_symbols) for _ in range(num_samples)]
targets = np.array(matching_list)
if TRACE:
print(type(targets))
print(targets.shape)
print(targets)
print('-'*50)
# We must compile a model before training/testing.
model.compile(optimizer='rmsprop',loss='binary_crossentropy',metrics=['acc'])
# Training the model: when you train such a model,
# the weights of the LSTM layer are updated based on both inputs.
model.fit([left_data, right_data],targets)
```
### 7.1.6. Models as layers
Importantly, in the functional API, models can be used as you’d use layers—effectively, you can think of a model as a “bigger layer.” This is true of both the Sequential and Model classes. This means you can call a model on an input tensor and retrieve an output tensor:
y = model(x)
If the model has multiple input tensors and multiple output tensors, it should be called with a list of tensors:
y1, y2 = model([x1, x2])
When you call a model instance, you’re reusing the weights of the model—exactly like what happens when you call a layer instance. Calling an instance, whether it’s a layer instance or a model instance, will always reuse the existing learned representations of the instance—which is intuitive.
```
from keras import layers
from keras import applications
from keras import Input
nbr_classes = 10
# The base image-processing model is the Xception network (convolutional base only).
xception_base = applications.Xception(weights=None,include_top=False)
# The inputs are 250 × 250 RGB images.
left_input = Input(shape=(250, 250, 3))
right_input = Input(shape=(250, 250, 3))
left_features = xception_base(left_input)
# right_input = xception_base(right_input)
right_features = xception_base(right_input)
merged_features = layers.concatenate([left_features, right_features], axis=-1)
predictions = layers.Dense(nbr_classes, activation='softmax')(merged_features)
# Instantiating the model
model = Model([left_input, right_input], predictions)
model.summary()
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
SVG(model_to_dot(model,show_shapes=True).create(prog='dot', format='svg'))
```
|
github_jupyter
|
```
# Libraries for R^2 visualization
from ipywidgets import interactive, IntSlider, FloatSlider
from math import floor, ceil
from sklearn.base import BaseEstimator, RegressorMixin
# Libraries for model building
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Library for working locally or Colab
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Applied-Modeling/master/data/'
# If you're working locally:
else:
DATA_PATH = '../data/'
```
# I. Wrangle Data
```
df = wrangle(DATA_PATH + 'elections/bread_peace_voting.csv')
```
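Note that `wrangle` is not defined in this notebook; a minimal sketch of what such a helper might do (the body below is an assumption, only the file path comes from the cell above):
```
def wrangle(filepath):
    # Hypothetical helper: read the CSV into a DataFrame.
    # Any extra cleaning (parsing dates, dropping columns) would live here.
    return pd.read_csv(filepath)
```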
# II. Split Data
**First** we need to split our **target vector** from our **feature matrix**.
```
```
**Second** we need to split our dataset into **training** and **test** sets.
Two strategies:
- Random train-test split using [`train_test_split`](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.train_test_split.html). Generally we use 80% of the data for training, and 20% of the data for testing.
- If you have **timeseries**, then you need to do a "cutoff" split.
```
```
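A sketch of both steps, assuming the target column is `incumbent_vote_share` (the name used in the plotting code further down):
```
# Target vector and feature matrix
target = 'incumbent_vote_share'
y = df[target]
X = df.drop(columns=target)

# Random 80/20 train-test split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42)
```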
# III. Establish Baseline
```
```
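A common regression baseline is to predict the mean of the training target for every observation; a minimal sketch, assuming the split above:
```
# Baseline: always predict the training mean
y_pred_baseline = [y_train.mean()] * len(y_train)
print('Baseline MAE:', mean_absolute_error(y_train, y_pred_baseline))
```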
# IV. Build Model
```
```
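A sketch using the `LinearRegression` estimator imported at the top of the notebook:
```
# Instantiate and fit an ordinary least-squares model on the training data
model = LinearRegression()
model.fit(X_train, y_train)
```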
# V. Check Metrics
## Mean Absolute Error
The unit of measurement is the same as that of your target (in this case, vote share [%]).
```
```
## Root Mean Squared Error
The unit of measurement is the same as that of your target (in this case, vote share [%]).
```
```
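A sketch of both metrics on the test set, assuming the `model` and split from the sketches above:
```
y_pred = model.predict(X_test)
print('Test MAE :', mean_absolute_error(y_test, y_pred))
print('Test RMSE:', np.sqrt(mean_squared_error(y_test, y_pred)))
```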
## $R^2$ Score
TL;DR: Usually ranges between 0 (bad) and 1 (good).
```
class BruteForceRegressor(BaseEstimator, RegressorMixin):
def __init__(self, m=0, b=0):
self.m = m
self.b = b
self.mean = 0
def fit(self, X, y):
self.mean = np.mean(y)
return self
def predict(self, X, return_mean=True):
if return_mean:
return [self.mean] * len(X)
else:
return X * self.m + self.b
def plot(slope, intercept):
# Assign data to variables
x = df['income']
y = df['incumbent_vote_share']
# Create figure
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,6))
# Set ax limits
mar = 0.2
x_lim = floor(x.min() - x.min()*mar), ceil(x.max() + x.min()*mar)
y_lim = floor(y.min() - y.min()*mar), ceil(y.max() + y.min()*mar)
# Instantiate and train model
bfr = BruteForceRegressor(slope, intercept)
bfr.fit(x, y)
# ax1
## Plot data
ax1.set_xlim(x_lim)
ax1.set_ylim(y_lim)
ax1.scatter(x, y)
## Plot base model
ax1.axhline(bfr.mean, color='orange', label='baseline model')
## Plot residual lines
y_base_pred = bfr.predict(x)
ss_base = mean_squared_error(y, y_base_pred) * len(y)
for x_i, y_i, yp_i in zip(x, y, y_base_pred):
ax1.plot([x_i, x_i], [y_i, yp_i],
color='gray', linestyle='--', alpha=0.75)
## Formatting
ax1.legend()
ax1.set_title(f'Sum of Squares: {np.round(ss_base, 2)}')
ax1.set_xlabel('Growth in Personal Incomes')
ax1.set_ylabel('Incumbent Party Vote Share [%]')
# ax2
ax2.set_xlim(x_lim)
ax2.set_ylim(y_lim)
## Plot data
ax2.scatter(x, y)
## Plot model
x_model = np.linspace(*ax2.get_xlim(), 10)
y_model = bfr.predict(x_model, return_mean=False)
ax2.plot(x_model, y_model, color='green', label='our model')
for x_coord, y_coord in zip(x, y):
ax2.plot([x_coord, x_coord], [y_coord, x_coord * slope + intercept],
color='gray', linestyle='--', alpha=0.75)
ss_ours = mean_squared_error(y, bfr.predict(x, return_mean=False)) * len(y)
## Formatting
ax2.legend()
ax2.set_title(f'Sum of Squares: {np.round(ss_ours, 2)}')
ax2.set_xlabel('Growth in Personal Incomes')
ax2.set_ylabel('Incumbent Party Vote Share [%]')
y = df['incumbent_vote_share']
slope_slider = FloatSlider(min=-5, max=5, step=0.5, value=0)
intercept_slider = FloatSlider(min=int(y.min()), max=y.max(), step=2, value=y.mean())
interactive(plot, slope=slope_slider, intercept=intercept_slider)
```
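For reference, the same quantity visualized above can be computed directly with `r2_score` (a sketch, assuming the fitted `model` and split from the earlier sketches):
```
print('Test R^2:', r2_score(y_test, model.predict(X_test)))
```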
# VI. Communicate Results
**Challenge:** How can we find the coefficients and intercept for our `model`?
```
```
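With scikit-learn, the fitted parameters live on the estimator itself (a sketch, assuming the `model` fitted above):
```
# Coefficients line up with the columns of the feature matrix
for name, coef in zip(X_train.columns, model.coef_):
    print(f'{name}: {coef:.3f}')
print('Intercept:', model.intercept_)
```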
|
github_jupyter
|
<div>
<img src="https://drive.google.com/uc?export=view&id=1vK33e_EqaHgBHcbRV_m38hx6IkG0blK_" width="350"/>
</div>
#**Artificial Intelligence - MSc**
##ET5003 - MACHINE LEARNING APPLICATIONS
###Instructor: Enrique Naredo
###ET5003_NLP_SpamClasiffier-2
### Spam Classification
[Spamming](https://en.wikipedia.org/wiki/Spamming) is the use of messaging systems to send multiple unsolicited messages (spam) to large numbers of recipients for the purpose of commercial advertising, for the purpose of non-commercial proselytizing, for any prohibited purpose (especially the fraudulent purpose of phishing), or simply sending the same message over and over to the same user.
Spam Classification: Deciding whether an email is spam or not.
## Imports
```
# standard libraries
import pandas as pd
import numpy as np
# Scikit-learn is an open source machine learning library
# that supports supervised and unsupervised learning
# https://scikit-learn.org/stable/
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
# Regular expression operations
#https://docs.python.org/3/library/re.html
import re
# Natural Language Toolkit
# https://www.nltk.org/install.html
import nltk
# Stemming maps different forms of the same word to a common “stem”
# https://pypi.org/project/snowballstemmer/
from nltk.stem import SnowballStemmer
# https://www.nltk.org/book/ch02.html
from nltk.corpus import stopwords
```
## Step 1: Load dataset
```
# Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
# path to your (local/cloud) drive
path = '/content/drive/MyDrive/Colab Notebooks/Enrique/Data/spam/'
# load dataset
df = pd.read_csv(path+'spam.csv', encoding='latin-1')
df.rename(columns = {'v1':'class_label', 'v2':'message'}, inplace = True)
df.drop(['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1, inplace = True)
# original dataset
df.head()
```
The dataset has 4825 ham messages and 747 spam messages.
```
# histogram
import seaborn as sns
sns.countplot(df['class_label'])
# explore dataset
vc = df['class_label'].value_counts()
print(vc)
```
This is an imbalanced dataset
* The number of ham messages is much higher than those of spam.
* This can potentially cause our model to be biased.
* To fix this, we could resample our data to get an equal number of spam/ham messages.
```
# convert class label to numeric
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(df.class_label)
df2 = df
df2['class_label'] = le.transform(df.class_label)
df2.head()
# another histogram
df2.hist()
```
## Step 2: Pre-processing
Next, we'll convert the `message` column of our DataFrame to a list, where every element is one message, and build a new DataFrame from it. We then clean each message: keep only alphabetic characters, drop very short words, lowercase the text, tokenize it, remove English stop-words, and de-tokenize it back into a string.
```
spam_list = df['message'].tolist()
spam_list
new_df = pd.DataFrame({'message':spam_list})
# removing everything except alphabets
new_df['clean_message'] = new_df['message'].str.replace("[^a-zA-Z#]", " ")
# removing short words
short_word = 4
new_df['clean_message'] = new_df['clean_message'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>short_word]))
# make all text lowercase
new_df['clean_message'] = new_df['clean_message'].apply(lambda x: x.lower())
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
swords = stopwords.words('english')
# tokenization
tokenized_doc = new_df['clean_message'].apply(lambda x: x.split())
# remove stop-words
tokenized_doc = tokenized_doc.apply(lambda x: [item for item in x if item not in swords])
# de-tokenization
detokenized_doc = []
for i in range(len(new_df)):
t = ' '.join(tokenized_doc[i])
detokenized_doc.append(t)
new_df['clean_message'] = detokenized_doc
new_df.head()
```
## Step 3: TfidfVectorizer
**[TfidfVectorizer](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html)**
Convert a collection of raw documents to a matrix of TF-IDF features.
```
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(stop_words='english', max_features= 300, max_df=0.5, smooth_idf=True)
print(vectorizer)
X = vectorizer.fit_transform(new_df['clean_message'])
X.shape
y = df['class_label']
y.shape
```
Handle the imbalanced data with SMOTE (`SMOTETomek`)
```
from imblearn.combine import SMOTETomek
smk = SMOTETomek()
X_bal, y_bal = smk.fit_resample(X, y)  # older imblearn releases call this fit_sample
# histogram
import seaborn as sns
sns.countplot(y_bal)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X_bal, y_bal, test_size = 0.20, random_state = 0)
X_train.todense()
```
## Step 4: Learning
Training the classifier and making predictions on the test set
```
# create a model
MNB = MultinomialNB()
# fit to data
MNB.fit(X_train, y_train)
# testing the model
prediction_train = MNB.predict(X_train)
print('training prediction\t', prediction_train)
prediction_test = MNB.predict(X_test)
print('test prediction\t\t', prediction_test)
np.set_printoptions(suppress=True)
# Ham and Spam probabilities in test
class_prob = MNB.predict_proba(X_test)
print(class_prob)
# show emails classified as 'spam'
threshold = 0.5
spam_ind = np.where(class_prob[:,1]>threshold)[0]
```
## Step 5: Accuracy
```
# accuracy in training set
y_pred_train = prediction_train
print("Train Accuracy: "+str(accuracy_score(y_train, y_pred_train)))
# accuracy in test set (unseen data)
y_true = y_test
y_pred_test = prediction_test
print("Test Accuracy: "+str(accuracy_score(y_true, y_pred_test)))
# confusion matrix
conf_mat = confusion_matrix(y_true, y_pred_test)
print("Confusion Matrix\n", conf_mat)
import matplotlib.pyplot as plt
from sklearn.metrics import ConfusionMatrixDisplay
labels = ['Ham','Spam']
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(conf_mat)
plt.title('Confusion matrix of the classifier\n')
fig.colorbar(cax)
ax.set_xticklabels([''] + labels)
ax.set_yticklabels([''] + labels)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/amathsow/wolof_speech_recognition/blob/master/Speech_recognition_project.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
!pip3 install torch
!pip3 install torchvision
!pip3 install torchaudio
!pip install comet_ml
import os
from comet_ml import Experiment
import torch
import torch.nn as nn
import torch.utils.data as data
import torch.optim as optim
import torch.nn.functional as F
import torchaudio
import numpy as np
import pandas as pd
import librosa
```
## ETL process
```
from google.colab import drive
drive.mount('/content/drive')
path_audio= 'drive/My Drive/Speech Recognition project/recordings/'
path_text = 'drive/My Drive/Speech Recognition project/wolof_text/'
wav_text = 'drive/My Drive/Speech Recognition project/Wavtext_dataset2.csv'
```
## Data preparation for creating the chars file from my dataset
```
datapath = 'drive/My Drive/Speech Recognition project/data/records'
trainpath = '../drive/My Drive/Speech Recognition project/data/records/train/'
valpath = '../drive/My Drive/Speech Recognition project/data/records/val/'
testpath = '../drive/My Drive/Speech Recognition project/data/records/test/'
```
## Let's create the dataset
```
! git clone https://github.com/facebookresearch/CPC_audio.git
!pip install soundfile
!pip install torchaudio
!mkdir checkpoint_data
!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_30.pt -P checkpoint_data
!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_logs.json -P checkpoint_data
!wget https://dl.fbaipublicfiles.com/librilight/CPC_checkpoints/not_hub/2levels_6k_top_ctc/checkpoint_args.json -P checkpoint_data
!ls checkpoint_data
import torch
import torchaudio
%cd CPC_audio/
from cpc.model import CPCEncoder, CPCAR
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
DIM_ENCODER=256
DIM_CONTEXT=256
KEEP_HIDDEN_VECTOR=False
N_LEVELS_CONTEXT=1
CONTEXT_RNN="LSTM"
N_PREDICTIONS=12
LEARNING_RATE=2e-4
N_NEGATIVE_SAMPLE =128
encoder = CPCEncoder(DIM_ENCODER).to(device)
context = CPCAR(DIM_ENCODER, DIM_CONTEXT, KEEP_HIDDEN_VECTOR, 1, mode=CONTEXT_RNN).to(device)
# Several functions that will be necessary to load the data later
from cpc.dataset import findAllSeqs, AudioBatchData, parseSeqLabels
SIZE_WINDOW = 20480
BATCH_SIZE=8
def load_dataset(path_dataset, file_extension='.flac', phone_label_dict=None):
data_list, speakers = findAllSeqs(path_dataset, extension=file_extension)
dataset = AudioBatchData(path_dataset, SIZE_WINDOW, data_list, phone_label_dict, len(speakers))
return dataset
class CPCModel(torch.nn.Module):
def __init__(self,
encoder,
AR):
super(CPCModel, self).__init__()
self.gEncoder = encoder
self.gAR = AR
def forward(self, batch_data):
encoder_output = self.gEncoder(batch_data)
#print(encoder_output.shape)
        # The encoder output is not yet in the right format:
        # it is Batch_size x Hidden_size x temp_size,
        # while the context network expects Batch_size x temp_size x Hidden_size,
        # so we need to permute the axes
context_input = encoder_output.permute(0, 2, 1)
context_output = self.gAR(context_input)
#print(context_output.shape)
return context_output, encoder_output
datapath ='../drive/My Drive/Speech Recognition project/data/records/'
datapath2 ='../drive/My Drive/Speech Recognition project/data/'
!ls ../checkpoint_data/checkpoint_30.pt
%cd CPC_audio/
from cpc.dataset import parseSeqLabels
from cpc.feature_loader import loadModel
checkpoint_path = '../checkpoint_data/checkpoint_30.pt'
cpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])
cpc_model = cpc_model.cuda()
label_dict, N_PHONES = parseSeqLabels(datapath2+'chars2.txt')
dataset_train = load_dataset(datapath+'train', file_extension='.wav', phone_label_dict=label_dict)
dataset_val = load_dataset(datapath+'val', file_extension='.wav', phone_label_dict=label_dict)
dataset_test = load_dataset(datapath+'test', file_extension='.wav', phone_label_dict=label_dict)
data_loader_train = dataset_train.getDataLoader(BATCH_SIZE, "speaker", True)
data_loader_val = dataset_val.getDataLoader(BATCH_SIZE, "sequence", False)
data_loader_test = dataset_test.getDataLoader(BATCH_SIZE, "sequence", False)
```
## Create Model
```
class PhoneClassifier(torch.nn.Module):
def __init__(self,
input_dim : int,
n_phones : int):
super(PhoneClassifier, self).__init__()
self.linear = torch.nn.Linear(input_dim, n_phones)
def forward(self, x):
return self.linear(x)
phone_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_PHONES).to(device)
loss_criterion = torch.nn.CrossEntropyLoss()
parameters = list(phone_classifier.parameters()) + list(cpc_model.parameters())
LEARNING_RATE = 2e-4
optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)
optimizer_frozen = torch.optim.Adam(list(phone_classifier.parameters()), lr=LEARNING_RATE)
def train_one_epoch(cpc_model,
phone_classifier,
loss_criterion,
data_loader,
optimizer):
cpc_model.train()
loss_criterion.train()
avg_loss = 0
avg_accuracy = 0
n_items = 0
for step, full_data in enumerate(data_loader):
# Each batch is represented by a Tuple of vectors:
# sequence of size : N x 1 x T
# label of size : N x T
#
# With :
# - N number of sequence in the batch
# - T size of each sequence
sequence, label = full_data
bs = len(sequence)
seq_len = label.size(1)
optimizer.zero_grad()
context_out, enc_out, _ = cpc_model(sequence.to(device),label.to(device))
scores = phone_classifier(context_out)
scores = scores.permute(0,2,1)
loss = loss_criterion(scores,label.to(device))
loss.backward()
optimizer.step()
avg_loss+=loss.item()*bs
n_items+=bs
correct_labels = scores.argmax(1)
avg_accuracy += ((label==correct_labels.cpu()).float()).mean(1).sum().item()
avg_loss/=n_items
avg_accuracy/=n_items
return avg_loss, avg_accuracy
avg_loss, avg_accuracy = train_one_epoch(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer_frozen)
avg_loss, avg_accuracy
def validation_step(cpc_model,
phone_classifier,
loss_criterion,
data_loader):
cpc_model.eval()
phone_classifier.eval()
avg_loss = 0
avg_accuracy = 0
n_items = 0
with torch.no_grad():
for step, full_data in enumerate(data_loader):
# Each batch is represented by a Tuple of vectors:
# sequence of size : N x 1 x T
# label of size : N x T
#
# With :
# - N number of sequence in the batch
# - T size of each sequence
sequence, label = full_data
bs = len(sequence)
seq_len = label.size(1)
context_out, enc_out, _ = cpc_model(sequence.to(device),label.to(device))
scores = phone_classifier(context_out)
scores = scores.permute(0,2,1)
loss = loss_criterion(scores,label.to(device))
avg_loss+=loss.item()*bs
n_items+=bs
correct_labels = scores.argmax(1)
avg_accuracy += ((label==correct_labels.cpu()).float()).mean(1).sum().item()
avg_loss/=n_items
avg_accuracy/=n_items
return avg_loss, avg_accuracy
import matplotlib.pyplot as plt
from google.colab import files
def run(cpc_model,
phone_classifier,
loss_criterion,
data_loader_train,
data_loader_val,
optimizer,
n_epoch):
epoches = []
train_losses = []
train_accuracies = []
val_losses = []
val_accuracies = []
for epoch in range(n_epoch):
epoches.append(epoch)
print(f"Running epoch {epoch + 1} / {n_epoch}")
loss_train, acc_train = train_one_epoch(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer)
print("-------------------")
print(f"Training dataset :")
print(f"Average loss : {loss_train}. Average accuracy {acc_train}")
train_losses.append(loss_train)
train_accuracies.append(acc_train)
print("-------------------")
print("Validation dataset")
loss_val, acc_val = validation_step(cpc_model, phone_classifier, loss_criterion, data_loader_val)
print(f"Average loss : {loss_val}. Average accuracy {acc_val}")
print("-------------------")
print()
val_losses.append(loss_val)
val_accuracies.append(acc_val)
plt.plot(epoches, train_losses, label = "train loss")
plt.plot(epoches, val_losses, label = "val loss")
plt.xlabel('epoches')
plt.ylabel('loss')
plt.title('train and validation loss')
plt.legend()
# Display a figure.
plt.savefig("loss1.png")
files.download("loss1.png")
plt.show()
plt.plot(epoches, train_accuracies, label = "train accuracy")
    plt.plot(epoches, val_accuracies, label = "val accuracy")
plt.xlabel('epoches')
plt.ylabel('accuracy')
plt.title('train and validation accuracy')
plt.legend()
plt.savefig("val1.png")
files.download("val1.png")
# Display a figure.
plt.show()
```
## The Training and Evaluating Script
```
run(cpc_model,phone_classifier,loss_criterion,data_loader_train,data_loader_val,optimizer_frozen,n_epoch=10)
loss_ctc = torch.nn.CTCLoss(zero_infinity=True)
%cd CPC_audio/
from cpc.eval.common_voices_eval import SingleSequenceDataset, parseSeqLabels, findAllSeqs
path_train_data_per = datapath+'train'
path_val_data_per = datapath+'val'
path_phone_data_per = datapath2+'chars2.txt'
BATCH_SIZE=8
phone_labels, N_PHONES = parseSeqLabels(path_phone_data_per)
data_train_per, _ = findAllSeqs(path_train_data_per, extension='.wav')
dataset_train_non_aligned = SingleSequenceDataset(path_train_data_per, data_train_per, phone_labels)
data_loader_train = torch.utils.data.DataLoader(dataset_train_non_aligned, batch_size=BATCH_SIZE,
shuffle=True)
data_val_per, _ = findAllSeqs(path_val_data_per, extension='.wav')
dataset_val_non_aligned = SingleSequenceDataset(path_val_data_per, data_val_per, phone_labels)
data_loader_val = torch.utils.data.DataLoader(dataset_val_non_aligned, batch_size=BATCH_SIZE,
shuffle=True)
from cpc.feature_loader import loadModel
checkpoint_path = '../checkpoint_data/checkpoint_30.pt'
cpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])
cpc_model = cpc_model.cuda()
phone_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_PHONES).to(device)
parameters = list(phone_classifier.parameters()) + list(cpc_model.parameters())
LEARNING_RATE = 2e-4
optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)
optimizer_frozen = torch.optim.Adam(list(phone_classifier.parameters()), lr=LEARNING_RATE)
import torch.nn.functional as F
def train_one_epoch_ctc(cpc_model,
phone_classifier,
loss_criterion,
data_loader,
optimizer):
cpc_model.train()
loss_criterion.train()
avg_loss = 0
avg_accuracy = 0
n_items = 0
for step, full_data in enumerate(data_loader):
x, x_len, y, y_len = full_data
x_batch_len = x.shape[-1]
x, y = x.to(device), y.to(device)
bs=x.size(0)
optimizer.zero_grad()
context_out, enc_out, _ = cpc_model(x.to(device),y.to(device))
scores = phone_classifier(context_out)
scores = scores.permute(1,0,2)
scores = F.log_softmax(scores,2)
yhat_len = torch.tensor([int(scores.shape[0]*x_len[i]/x_batch_len) for i in range(scores.shape[1])]) # this is an approximation, should be good enough
loss = loss_criterion(scores.float(),y.float().to(device),yhat_len,y_len)
loss.backward()
optimizer.step()
avg_loss+=loss.item()*bs
n_items+=bs
avg_loss/=n_items
return avg_loss
def validation_step(cpc_model,
phone_classifier,
loss_criterion,
data_loader):
cpc_model.eval()
phone_classifier.eval()
avg_loss = 0
avg_accuracy = 0
n_items = 0
with torch.no_grad():
for step, full_data in enumerate(data_loader):
x, x_len, y, y_len = full_data
x_batch_len = x.shape[-1]
x, y = x.to(device), y.to(device)
bs=x.size(0)
context_out, enc_out, _ = cpc_model(x.to(device),y.to(device))
scores = phone_classifier(context_out)
scores = scores.permute(1,0,2)
scores = F.log_softmax(scores,2)
yhat_len = torch.tensor([int(scores.shape[0]*x_len[i]/x_batch_len) for i in range(scores.shape[1])]) # this is an approximation, should be good enough
loss = loss_criterion(scores,y.to(device),yhat_len,y_len)
avg_loss+=loss.item()*bs
n_items+=bs
avg_loss/=n_items
#print(loss)
return avg_loss
def run_ctc(cpc_model,
phone_classifier,
loss_criterion,
data_loader_train,
data_loader_val,
optimizer,
n_epoch):
epoches = []
train_losses = []
val_losses = []
for epoch in range(n_epoch):
print(f"Running epoch {epoch + 1} / {n_epoch}")
loss_train = train_one_epoch_ctc(cpc_model, phone_classifier, loss_criterion, data_loader_train, optimizer)
print("-------------------")
print(f"Training dataset :")
print(f"Average loss : {loss_train}.")
print("-------------------")
print("Validation dataset")
loss_val = validation_step(cpc_model, phone_classifier, loss_criterion, data_loader_val)
print(f"Average loss : {loss_val}")
print("-------------------")
print()
epoches.append(epoch)
train_losses.append(loss_train)
val_losses.append(loss_val)
plt.plot(epoches, train_losses, label = "ctc_train loss")
plt.plot(epoches, val_losses, label = "ctc_val loss")
plt.xlabel('epoches')
plt.ylabel('loss')
plt.title('train and validation ctc loss')
plt.legend()
# Display and save a figure.
plt.savefig("ctc_loss.png")
files.download("ctc_loss.png")
plt.show()
run_ctc(cpc_model,phone_classifier,loss_ctc,data_loader_train,data_loader_val,optimizer_frozen,n_epoch=10)
import numpy as np
def get_PER_sequence(ref_seq, target_seq):
# re = g.split()
# h = h.split()
n = len(ref_seq)
m = len(target_seq)
D = np.zeros((n+1,m+1))
for i in range(1,n+1):
D[i,0] = D[i-1,0]+1
for j in range(1,m+1):
D[0,j] = D[0,j-1]+1
    # Compute the alignment (edit-distance dynamic programming)
for i in range(1,n+1):
for j in range(1,m+1):
D[i,j] = min(
D[i-1,j]+1,
D[i-1,j-1]+1,
D[i,j-1]+1,
D[i-1,j-1]+ 0 if ref_seq[i-1]==target_seq[j-1] else float("inf")
)
return D[n,m]/len(ref_seq)
#return PER
ref_seq = [0, 1, 1, 2, 0, 2, 2]
pred_seq = [1, 1, 2, 2, 0, 0]
expected_PER = 4. / 7.
print(get_PER_sequence(ref_seq, pred_seq) == expected_PER)
import progressbar
from multiprocessing import Pool
def cut_data(seq, sizeSeq):
maxSeq = sizeSeq.max()
return seq[:, :maxSeq]
def prepare_data(data):
seq, sizeSeq, phone, sizePhone = data
seq = seq.cuda()
phone = phone.cuda()
sizeSeq = sizeSeq.cuda().view(-1)
sizePhone = sizePhone.cuda().view(-1)
seq = cut_data(seq.permute(0, 2, 1), sizeSeq).permute(0, 2, 1)
return seq, sizeSeq, phone, sizePhone
def get_per(test_dataloader,
cpc_model,
phone_classifier):
downsampling_factor = 160
cpc_model.eval()
phone_classifier.eval()
avgPER = 0
nItems = 0
per = []
Item = []
print("Starting the PER computation through beam search")
bar = progressbar.ProgressBar(maxval=len(test_dataloader))
bar.start()
for index, data in enumerate(test_dataloader):
bar.update(index)
with torch.no_grad():
seq, sizeSeq, phone, sizePhone = prepare_data(data)
c_feature, _, _ = cpc_model(seq.to(device),phone.to(device))
sizeSeq = sizeSeq / downsampling_factor
predictions = torch.nn.functional.softmax(
phone_classifier(c_feature), dim=2).cpu()
phone = phone.cpu()
sizeSeq = sizeSeq.cpu()
sizePhone = sizePhone.cpu()
bs = c_feature.size(0)
data_per = [(predictions[b].argmax(1), phone[b]) for b in range(bs)]
# data_per = [(predictions[b], sizeSeq[b], phone[b], sizePhone[b],
# "criterion.module.BLANK_LABEL") for b in range(bs)]
with Pool(bs) as p:
poolData = p.starmap(get_PER_sequence, data_per)
avgPER += sum([x for x in poolData])
nItems += len(poolData)
per.append(sum([x for x in poolData]))
Item.append(index)
bar.finish()
avgPER /= nItems
print(f"Average CER {avgPER}")
plt.plot(Item, per, label = "Per by item")
plt.xlabel('Items')
plt.ylabel('PER')
plt.title('trends of the PER')
plt.legend()
# Display and save a figure.
plt.savefig("Per.png")
files.download("Per.png")
plt.show()
return avgPER
get_per(data_loader_val,cpc_model,phone_classifier)
# Load a dataset labelled with the letters of each sequence.
%cd /content/CPC_audio
from cpc.eval.common_voices_eval import SingleSequenceDataset, parseSeqLabels, findAllSeqs
path_train_data_cer = datapath+'train'
path_val_data_cer = datapath+'val'
path_letter_data_cer = datapath2+'chars2.txt'
BATCH_SIZE=8
letters_labels, N_LETTERS = parseSeqLabels(path_letter_data_cer)
data_train_cer, _ = findAllSeqs(path_train_data_cer, extension='.wav')
dataset_train_non_aligned = SingleSequenceDataset(path_train_data_cer, data_train_cer, letters_labels)
data_val_cer, _ = findAllSeqs(path_val_data_cer, extension='.wav')
dataset_val_non_aligned = SingleSequenceDataset(path_val_data_cer, data_val_cer, letters_labels)
# The data loader will generate a tuple of tensors data, labels for each batch
# data : size N x T1 x 1 : the audio sequence
# label : size N x T2 the sequence of letters corresponding to the audio data
# IMPORTANT NOTE: just like the PER, the CER is computed with non-aligned sequences (here letters rather than phones).
data_loader_train_letters = torch.utils.data.DataLoader(dataset_train_non_aligned, batch_size=BATCH_SIZE,
shuffle=True)
data_loader_val_letters = torch.utils.data.DataLoader(dataset_val_non_aligned, batch_size=BATCH_SIZE,
shuffle=True)
from cpc.feature_loader import loadModel
checkpoint_path = '../checkpoint_data/checkpoint_30.pt'
cpc_model, HIDDEN_CONTEXT_MODEL, HIDDEN_ENCODER_MODEL = loadModel([checkpoint_path])
cpc_model = cpc_model.cuda()
character_classifier = PhoneClassifier(HIDDEN_CONTEXT_MODEL, N_LETTERS).to(device)
parameters = list(character_classifier.parameters()) + list(cpc_model.parameters())
LEARNING_RATE = 2e-4
optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE)
optimizer_frozen = torch.optim.Adam(list(character_classifier.parameters()), lr=LEARNING_RATE)
loss_ctc = torch.nn.CTCLoss(zero_infinity=True)
run_ctc(cpc_model,character_classifier,loss_ctc,data_loader_train_letters,data_loader_val_letters,optimizer_frozen,n_epoch=10)
get_per(data_loader_val_letters,cpc_model,character_classifier)
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/lmcanavals/algorithmic_complexity/blob/main/05_01_UCS_dijkstra.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Uniform Cost Search (Dijkstra)
UCS, to its friends
```
import graphviz as gv
import numpy as np
import pandas as pd
import heapq as hq
import math
def readAdjl(fn, haslabels=False, weighted=False, sep="|"):
with open(fn) as f:
labels = None
if haslabels:
labels = f.readline().strip().split()
L = []
for line in f:
if weighted:
L.append([tuple(map(int, p.split(sep))) for p in line.strip().split()])
# line => "1|3 2|5 4|4" ==> [(1, 3), (2, 5), (4, 4)]
else:
L.append(list(map(int, line.strip().split()))) # "1 3 5" => [1, 3, 5]
# L.append([int(x) for x in line.strip().split()])
return L, labels
def adjlShow(L, labels=None, directed=False, weighted=False, path=[],
layout="sfdp"):
g = gv.Digraph("G") if directed else gv.Graph("G")
g.graph_attr["layout"] = layout
g.edge_attr["color"] = "gray"
g.node_attr["color"] = "orangered"
g.node_attr["width"] = "0.1"
g.node_attr["height"] = "0.1"
g.node_attr["fontsize"] = "8"
g.node_attr["fontcolor"] = "mediumslateblue"
g.node_attr["fontname"] = "monospace"
g.edge_attr["fontsize"] = "8"
g.edge_attr["fontname"] = "monospace"
n = len(L)
for u in range(n):
g.node(str(u), labels[u] if labels else str(u))
added = set()
for v, u in enumerate(path):
if u != None:
if weighted:
                for vi, w in L[u]:   # look up the weight of edge (u, v) in the adjacency list L
if vi == v:
break
g.edge(str(u), str(v), str(w), dir="forward", penwidth="2", color="orange")
else:
g.edge(str(u), str(v), dir="forward", penwidth="2", color="orange")
added.add(f"{u},{v}")
added.add(f"{v},{u}")
if weighted:
for u in range(n):
for v, w in L[u]:
if not directed and not f"{u},{v}" in added:
added.add(f"{u},{v}")
added.add(f"{v},{u}")
g.edge(str(u), str(v), str(w))
elif directed:
g.edge(str(u), str(v), str(w))
else:
for u in range(n):
for v in L[u]:
if not directed and not f"{u},{v}" in added:
added.add(f"{u},{v}")
added.add(f"{v},{u}")
g.edge(str(u), str(v))
elif directed:
g.edge(str(u), str(v))
return g
```
## Dijkstra
```
def dijkstra(G, s):
n = len(G)
visited = [False]*n
path = [None]*n
cost = [math.inf]*n
cost[s] = 0
queue = [(0, s)]
while queue:
g_u, u = hq.heappop(queue)
        if not visited[u]:
visited[u] = True
for v, w in G[u]:
f = g_u + w
if f < cost[v]:
cost[v] = f
path[v] = u
hq.heappush(queue, (f, v))
return path, cost
%%file 1.in
2|4 7|8 14|3
2|7 5|7
0|4 1|7 3|5 6|1
2|5
7|7
1|7 6|1 8|5
2|1 5|1
0|8 4|7 8|8
5|5 7|8 9|8 11|9 12|6
8|8 10|8 12|9 13|7
9|8 13|3
8|9
8|6 9|9 13|2 15|5
9|7 10|13 12|2 16|9
0|3 15|9
12|5 14|9 17|7
13|9 17|8
15|7 16|8
G, _ = readAdjl("1.in", weighted=True)
for i, edges in enumerate(G):
print(f"{i:2}: {edges}")
adjlShow(G, weighted=True)
path, cost = dijkstra(G, 8)
print(path)
adjlShow(G, weighted=True, path=path)
```
|
github_jupyter
|
# [Introduction to Data Science: A Comp-Math-Stat Approach](https://lamastex.github.io/scalable-data-science/as/2019/)
## YOIYUI001, Summer 2019
©2019 Raazesh Sainudiin. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/)
# 08. Pseudo-Random Numbers, Simulating from Some Discrete and Continuous Random Variables
- The $Uniform(0,1)$ RV
- The $Bernoulli(\theta)$ RV
- Simulating from the $Bernoulli(\theta)$ RV
- The Equi-Probable $de\,Moivre(k)$ RV
- Simulating from the Equi-Probable $de\,Moivre(k)$ RV
- The $Uniform(\theta_1, \theta_2)$ RV
- Simulating from the $Uniform(\theta_1, \theta_2)$ RV
- The $Exponential(\lambda)$ RV
- Simulating from the $Exponential(\lambda)$ RV
- The standard $Cauchy$ RV
- Simulating from the standard $Cauchy$ RV
- Investigating running means
- Replicable samples
- A simple simulation
In the last notebook, we started to look at how we can produce realisations from the most elementary $Uniform(0,1)$ random variable.
i.e., how can we produce samples $(x_1, x_2, \ldots, x_n)$ from $X_1, X_2, \ldots, X_n$ $\overset{IID}{\thicksim}$ $Uniform(0,1)$?
What is SageMath doing when we ask for random()?
```
random()
```
We looked at how Modular arithmetic and number theory gives us pseudo-random number generators.
We used linear congruential generators (LCG) as simple pseudo-random number generators.
Remember that "pseudo-random" means that the numbers are not really random. We saw that some linear congruential generators (LCG) have much shorter, more predictable, patterns than others and we learned what makes a good LCG.
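As a reminder of the mechanics, here is a minimal sketch of an LCG (the modulus, multiplier, increment and seed below are illustrative assumptions, not the values from the earlier notebook): it just iterates the recurrence $x_{k+1} = (a x_k + c) \bmod m$ and rescales each $x_k$ to $[0,1)$.
```
def lcgSketch(n, m=2**32, a=1664525, c=1013904223, seed=1):
    '''A sketch of a linear congruential generator.
    Returns a list of n pseudo-random numbers in [0, 1) obtained by
    iterating x_{k+1} = (a*x_k + c) mod m from the given seed.
    The parameter values here are illustrative assumptions only.'''
    x = seed
    us = []
    for i in range(n):
        x = (a*x + c) % m       # the LCG recurrence
        us.append(x/(m*1.0))    # rescale to [0, 1)
    return us

lcgSketch(5)
```
Because the state is just the last $x_k$, the sequence must eventually cycle; a good LCG is one whose parameters make that cycle as long as possible.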
We introduced the pseudo-random number generator (PRNG) called the Mersenne Twister that we will use for simulation purposes in this course. It is based on more sophisticated theory than that of LCG but the basic principles of recurrence relations are the same.
# The $Uniform(0,1)$ Random Variable
Recall that the $Uniform(0,1)$ random variable is the fundamental model as we can transform it to any other random variable, random vector or random structure. The PDF $f$ and DF $F$ of $X \sim Uniform(0,1)$ are:
$f(x) = \begin{cases} 0 & \text{if} \ x \notin [0,1] \\ 1 & \text{if} \ x \in [0,1] \end{cases}$
$F(x) = \begin{cases} 0 & \text{if} \ x < 0 \\ 1 & \text{if} \ x > 1 \\ x & \text{if} \ x \in [0,1] \end{cases}$
We use the Mersenne twister pseudo-random number generator to mimic independent and identically distributed draws from the $uniform(0,1)$ RV.
In Sage, we use the python random module to generate pseudo-random numbers for us. (We have already used it: remember randint?)
random() will give us one simulation from the $Uniform(0,1)$ RV:
```
random()
```
If we want a whole simulated sample we can use a list comprehension. We will be using this technique frequently, so make sure you understand what is going on: "for i in range(3)" is acting like a counter to give us 3 simulated values in the list we are making.
```
[random() for i in range(3)]
listOfUniformSamples = [random() for i in range(3) ]
listOfUniformSamples
```
If we do this again, we will get a different sample:
```
listOfUniformSamples2 = [random() for i in range(3) ]
listOfUniformSamples2
```
Often is it useful to be able to replicate the same random sample. For example, if we were writing some code to do some simulations using samples from a PRNG, and we "improved" the way that we were doing it, how would we want to test our improvement? If we could replicate the same samples then we could show that our new code was equivalent to our old code, just more efficient.
Remember when we were using the LCGs, and we could set the seed $x_0$? More sophisticated PRNGs like the Mersenne Twister also have a seed. By setting this seed to a specified value we can make sure that we can replicate samples.
```
?set_random_seed
set_random_seed(256526)
listOfUniformSamples = [random() for i in range(3) ]
listOfUniformSamples
initial_seed()
```
Now we can replicate the same sample again by setting the seed to the same value:
```
set_random_seed(256526)
listOfUniformSamples2 = [random() for i in range(3) ]
listOfUniformSamples2
initial_seed()
set_random_seed(2676676766)
listOfUniformSamples2 = [random() for i in range(3) ]
listOfUniformSamples2
initial_seed()
```
We can compare some samples visually by plotting them:
```
set_random_seed(256526)
listOfUniformSamples = [(i,random()) for i in range(100)]
plotsSeed1 = points(listOfUniformSamples)
t1 = text('Seed 1 = 256526', (60,1.2), rgbcolor='blue',fontsize=10)
set_random_seed(2676676766)
plotsSeed2 = points([(i,random()) for i in range(100)],rgbcolor="red")
t2 = text('Seed 2 = 2676676766', (60,1.2), rgbcolor='red',fontsize=10)
bothSeeds = plotsSeed1 + plotsSeed2
t31 = text('Seed 1 and', (30,1.2), rgbcolor='blue',fontsize=10)
t32 = text('Seed 2', (65,1.2), rgbcolor='red',fontsize=10)
show(graphics_array( (plotsSeed1+t1,plotsSeed2+t2, bothSeeds+t31+t32)),figsize=[9,3])
```
### YouTry
Try looking at the more advanced documentation and play a bit.
```
#?sage.misc.randstate
```
(end of You Try)
---
---
### Question:
What can we do with samples from a $Uniform(0,1)$ RV? Why bother?
### Answer:
We can use them to sample or simulate from other, more complex, random variables.
# The $Bernoulli(\theta)$ Random Variable
The $Bernoulli(\theta)$ RV $X$ with PMF $f(x;\theta)$ and DF $F(x;\theta)$ parameterised by some real $\theta\in [0,1]$ is a discrete random variable with only two possible outcomes.
$f(x;\theta)= \theta^x (1-\theta)^{1-x} \mathbf{1}_{\{0,1\}}(x) =
\begin{cases}
\theta & \text{if} \ x=1,\\
1-\theta & \text{if} \ x=0,\\
0 & \text{otherwise}
\end{cases}$
$F(x;\theta) =
\begin{cases}
1 & \text{if} \ 1 \leq x,\\
1-\theta & \text{if} \ 0 \leq x < 1,\\
0 & \text{otherwise}
\end{cases}$
Here are some functions for the PMF and DF for a $Bernoulli$ RV along with various useful functions for us in the sequel. Let's take a quick look at them.
```
def bernoulliPMF(x, theta):
'''Probability mass function for Bernoulli(theta).
Param x is the value to find the Bernoulli probability mass of.
Param theta is the theta parameterising this Bernoulli RV.'''
retValue = 0
if x == 1:
retValue = theta
elif x == 0:
retValue = 1 - theta
return retValue
def bernoulliCDF(x, theta):
'''DF for Bernoulli(theta).
Param x is the value to find the Bernoulli cumulative density function of.
Param theta is the theta parameterising this Bernoulli RV.'''
retValue = 0
if x >= 1:
retValue = 1
elif x >= 0:
retValue = 1 - theta
# in the case where x < 0, retValue is the default of 0
return retValue
# PFM plot
def pmfPlot(outcomes, pmf_values):
'''Returns a pmf plot for a discrete distribution.'''
pmf = points(zip(outcomes,pmf_values), rgbcolor="blue", pointsize='20')
for i in range(len(outcomes)):
pmf += line([(outcomes[i], 0),(outcomes[i], pmf_values[i])], rgbcolor="blue", linestyle=":")
# padding
pmf += point((0,1), rgbcolor="black", pointsize="0")
return pmf
# CDF plot
def cdfPlot(outcomes, cdf_values):
'''Returns a DF plot for a discrete distribution.'''
cdf_pairs = zip(outcomes, cdf_values)
cdf = point(cdf_pairs, rgbcolor = "red", faceted = false, pointsize="20")
for k in range(len(cdf_pairs)):
x, kheight = cdf_pairs[k] # unpack tuple
previous_x = 0
previous_height = 0
if k > 0:
previous_x, previous_height = cdf_pairs[k-1] # unpack previous tuple
cdf += line([(previous_x, previous_height),(x, previous_height)], rgbcolor="grey")
cdf += points((x, previous_height),rgbcolor = "white", faceted = true, pointsize="20")
cdf += line([(x, previous_height),(x, kheight)], rgbcolor="blue", linestyle=":")
# padding
max_index = len(outcomes)-1
cdf += line([(outcomes[0]-0.2, 0),(outcomes[0], 0)], rgbcolor="grey")
cdf += line([(outcomes[max_index],cdf_values[max_index]),(outcomes[max_index]+0.2, cdf_values[max_index])], \
rgbcolor="grey")
return cdf
def makeFreqDictHidden(myDataList):
'''Make a frequency mapping out of a list of data.
Param myDataList, a list of data.
Return a dictionary mapping each data value from min to max in steps of 1 to its frequency count.'''
freqDict = {} # start with an empty dictionary
sortedMyDataList = sorted(myDataList)
for k in sortedMyDataList:
freqDict[k] = myDataList.count(k)
return freqDict # return the dictionary created
def makeEMFHidden(myDataList):
'''Make an empirical mass function from a data list.
Param myDataList, list of data to make emf from.
Return list of tuples comprising (data value, relative frequency) ordered by data value.'''
freqs = makeFreqDictHidden(myDataList) # make the frequency counts mapping
totalCounts = sum(freqs.values())
relFreqs = [fr/(1.0*totalCounts) for fr in freqs.values()] # use a list comprehension
numRelFreqPairs = zip(freqs.keys(), relFreqs) # zip the keys and relative frequencies together
numRelFreqPairs.sort() # sort the list of tuples
return numRelFreqPairs
from pylab import array
def makeEDFHidden(myDataList):
'''Make an empirical distribution function from a data list.
Param myDataList, list of data to make emf from.
Return list of tuples comprising (data value, cumulative relative frequency) ordered by data value.'''
freqs = makeFreqDictHidden(myDataList) # make the frequency counts mapping
totalCounts = sum(freqs.values())
relFreqs = [fr/(1.0*totalCounts) for fr in freqs.values()] # use a list comprehension
relFreqsArray = array(relFreqs)
cumFreqs = list(relFreqsArray.cumsum())
numCumFreqPairs = zip(freqs.keys(), cumFreqs) # zip the keys and culm relative frequencies together
numCumFreqPairs.sort() # sort the list of tuples
return numCumFreqPairs
# EPMF plot
def epmfPlot(samples):
'''Returns an empirical probability mass function plot from samples data.'''
epmf_pairs = makeEMFHidden(samples)
epmf = point(epmf_pairs, rgbcolor = "blue", pointsize="20")
for k in epmf_pairs: # for each tuple in the list
kkey, kheight = k # unpack tuple
epmf += line([(kkey, 0),(kkey, kheight)], rgbcolor="blue", linestyle=":")
# padding
epmf += point((0,1), rgbcolor="black", pointsize="0")
return epmf
# ECDF plot
def ecdfPlot(samples):
'''Returns an empirical probability mass function plot from samples data.'''
ecdf_pairs = makeEDFHidden(samples)
ecdf = point(ecdf_pairs, rgbcolor = "red", faceted = false, pointsize="20")
for k in range(len(ecdf_pairs)):
x, kheight = ecdf_pairs[k] # unpack tuple
previous_x = 0
previous_height = 0
if k > 0:
previous_x, previous_height = ecdf_pairs[k-1] # unpack previous tuple
ecdf += line([(previous_x, previous_height),(x, previous_height)], rgbcolor="grey")
ecdf += points((x, previous_height),rgbcolor = "white", faceted = true, pointsize="20")
ecdf += line([(x, previous_height),(x, kheight)], rgbcolor="blue", linestyle=":")
# padding
ecdf += line([(ecdf_pairs[0][0]-0.2, 0),(ecdf_pairs[0][0], 0)], rgbcolor="grey")
max_index = len(ecdf_pairs)-1
ecdf += line([(ecdf_pairs[max_index][0], ecdf_pairs[max_index][1]),(ecdf_pairs[max_index][0]+0.2, \
ecdf_pairs[max_index][1])],rgbcolor="grey")
return ecdf
```
We can see the effect of varying $\theta$ interactively:
```
@interact
def _(theta=(0.5)):
'''Interactive function to plot the bernoulli pmf and cdf.'''
if theta <=1 and theta >= 0:
outcomes = (0, 1) # define the bernoulli outcomes
print "Bernoulli (", RR(theta).n(digits=2), ") pmf and cdf"
# pmf plot
pmf_values = [bernoulliPMF(x, theta) for x in outcomes]
pmf = pmfPlot(outcomes, pmf_values) # this is one of our own, hidden, functions
# cdf plot
cdf_values = [bernoulliCDF(x, theta) for x in outcomes]
cdf = cdfPlot(outcomes, cdf_values) # this is one of our own, hidden, functions
show(graphics_array([pmf, cdf]),figsize=[8,3])
else:
print "0 <= theta <= 1"
```
Don't worry about how these plots are done: you are not expected to be able to understand all of these details now.
Just use them to see the effect of varying $\theta$.
## Simulating a sample from the $Bernoulli(\theta)$ RV
We can simulate a sample from a $Bernoulli$ distribution by transforming input from a $Uniform(0,1)$ distribution using the floor() function in Sage. In maths, $\lfloor x \rfloor$, the 'floor of $x$' is the largest integer that is smaller than or equal to $x$. For example, $\lfloor 3.8 \rfloor = 3$.
```
z=3.8
floor(z)
```
Using floor, we can do inversion sampling from the $Bernoulli(\theta)$ RV using the $Uniform(0,1)$ random variable that we said is the fundamental model.
We will introduce inversion sampling more formally later. In general, inversion sampling means using the inverse of the CDF $F$, $F^{[-1]}$, to transform input from a $Uniform(0,1)$ distribution.
To simulate from the $Bernoulli(\theta)$, we can use the following algorithm:
### Input:
- $u \thicksim Uniform(0,1)$ from a PRNG, $\qquad \qquad \text{where, } \sim$ means "sample from"
- $\theta$, the parameter
### Output:
$x \thicksim Bernoulli(\theta)$
### Steps:
- $u \leftarrow Uniform(0,1)$
- $x \leftarrow \lfloor u + \theta \rfloor$
- Return $x$
We can illustrate this with SageMath:
```
theta = 0.5 # theta must be such that 0 <= theta <= 1
u = random()
x = floor(u + theta)
x
```
To make a number of simulations, we can use list comprehensions again:
```
theta = 0.5
n = 20
randomUs = [random() for i in range(n)]
simulatedBs = [floor(u + theta) for u in randomUs]
simulatedBs
```
To make modular reusable code we can package up what we have done as functions.
The function `bernoulliFInverse(u, theta)` codes the inverse of the CDF of a Bernoulli distribution parameterised by `theta`. The function `bernoulliSample(n, theta)` uses `bernoulliFInverse(...)` in a list comprehension to simulate n samples from a Bernoulli distribution parameterised by theta, i.e., the distribution of our $Bernoulli(\theta)$ RV.
```
def bernoulliFInverse(u, theta):
'''A function to evaluate the inverse CDF of a bernoulli.
Param u is the value to evaluate the inverse CDF at.
Param theta is the distribution parameters.
Returns inverse CDF under theta evaluated at u'''
return floor(u + theta)
def bernoulliSample(n, theta):
'''A function to simulate samples from a bernoulli distribution.
Param n is the number of samples to simulate.
Param theta is the bernoulli distribution parameter.
Returns a simulated Bernoulli sample as a list'''
us = [random() for i in range(n)]
# use bernoulliFInverse in a list comprehension
return [bernoulliFInverse(u, theta) for u in us]
```
Note that we are using a list comprehension and the built-in SageMath `random()` function to make a list of pseudo-random simulations from the $Uniform(0,1)$. The length of the list is determined by the value of n. Inside the body of the function we assign this list to a variable named `us` (i.e., u plural). We then use another list comprehension to make our simulated sample. This list comprehension works by calling our function `bernoulliFInverse(...)` and passing in values for theta together with each u in us in turn.
Let's try a small number of samples:
```
theta = 0.2
n = 10
samples = bernoulliSample(n, theta)
samples
```
Now let's explore the effect of interactively varying n and $\theta$:
```
@interact
def _(theta=(0.5), n=(10,(0..1000))):
'''Interactive function to plot samples from bernoulli distribution.'''
if theta >= 0 and theta <= 1:
print "epmf and ecdf for ", n, " samples from Bernoulli (", theta, ")"
samples = bernoulliSample(n, theta)
# epmf plot
epmf = epmfPlot(samples) # this is one of our hidden functions
# ecdf plot
ecdf = ecdfPlot(samples) # this is one of our hidden functions
show(graphics_array([epmf, ecdf]),figsize=[8,3])
else:
print "0 <= theta <=1, n>0"
```
You can vary $\theta$ and $n$ on the interactive plot. You should be able to see that as $n$ increases, the empirical plots should get closer to the theoretical $f$ and $F$.
### YouTry
Check that you understand what `floor` is doing. We have put some extra print statements into our demonstration of floor so that you can see what is going on in each step. Try evaluating this cell several times so that you see what happens with different values of `u`.
```
theta = 0.5 # theta must be such that 0 <= theta <= 1
u = random()
print "u is", u
print "u + theta is", (u + theta)
print "floor(u + theta) is", floor(u + theta)
```
In the cell below we use floor to get 1's and 0's from the pseudo-random u's given by random(). It is effectively doing exactly the same thing as the functions above that we use to simulate a specified number of $Bernoulli(\theta)$ RVs, but the way that it is written may be easier to understand. If `floor` is doing what we want it to, then when `n` is sufficiently large, we'd expect our proportion of `1`s to be close to `theta` (remember Kolmogorov's axiomatic motivations for probability!). Try changing the value assigned to the variable `theta` and re-evaluating the cell to check this.
```
theta = 0.7 # theta must be such that 0 <= theta <= 1
listFloorResults = [] # an empty list to store results in
n = 100000 # how many iterations to do
for i in range(n): # a for loop to do something n times
u = random() # generate u
x = floor(u + theta) # use floor
listFloorResults.append(x) # add x to the list of results
listFloorResults.count(1)*1.0/len(listFloorResults) # proportion of 1s in the results
```
# The equi-probable $de~Moivre(\theta)$ Random Variable
The $de~Moivre(\theta_1,\theta_2,\ldots,\theta_k)$ RV is the natural generalisation of the $Bernoulli (\theta)$ RV to more than two outcomes. Take a die (i.e. one of a pair of dice): there are 6 possible outcomes from tossing a die if the die is a normal six-sided one (the outcome is which face is on the top). To start with we can allow the possibility that the different faces could be loaded so that they have different probabilities of being the face on the top if we throw the die. In this case, k=6 and the parameters $\theta_1$, $\theta_2$, ...$\theta_6$ specify how the die is loaded, and the number on the upper-most face if the die is tossed is a $de\,Moivre$ random variable parameterised by $\theta_1,\theta_2,\ldots,\theta_6$.
If $\theta_1=\theta_2=\ldots=\theta_6= \frac{1}{6}$ then we have a fair die.
Here are some functions for the equi-probable $de\, Moivre$ PMF and CDF where we code the possible outcomes as the numbers on the faces of a k-sided die, i.e, 1,2,...k.
```
def deMoivrePMF(x, k):
'''Probability mass function for equi-probable de Moivre(k).
    Param x is the value to evaluate the deMoivre pmf at.
Param k is the k parameter for an equi-probable deMoivre.
Returns the evaluation of the deMoivre(k) pmf at x.'''
if (int(x)==x) & (x > 0) & (x <= k):
return 1.0/k
else:
return 0
def deMoivreCDF(x, k):
'''DF for equi-probable de Moivre(k).
    Param x is the value to evaluate the deMoivre cdf at.
Param k is the k parameter for an equi-probable deMoivre.
Returns the evaluation of the deMoivre(k) cdf at x.'''
return 1.0*x/k
@interact
def _(k=(6)):
'''Interactive function to plot the de Moivre pmf and cdf.'''
if (int(k) == k) and (k >= 1):
outcomes = range(1,k+1,1) # define the outcomes
pmf_values = [deMoivrePMF(x, k) for x in outcomes]
print "equi-probable de Moivre (", k, ") pmf and cdf"
# pmf plot
pmf = pmfPlot(outcomes, pmf_values) # this is one of our hidden functions
# cdf plot
cdf_values = [deMoivreCDF(x, k) for x in outcomes]
cdf = cdfPlot(outcomes, cdf_values) # this is one of our hidden functions
show(graphics_array([pmf, cdf]),figsize=[8,3])
else:
print "k must be an integer, k>0"
```
### YouTry
Try changing the value of k in the above interact.
## Simulating a sample from the equi-probable $de\,Moivre(k)$ random variable
We use floor ($\lfloor \, \rfloor$) again for simulating from the equi-probable $de \, Moivre(k)$ RV, but because we are defining our outcomes as 1, 2, ... k, we just add 1 to the result.
```
k = 6
u = random()
x = floor(u*k)+1
x
```
To simulate from the equi-probable $de\,Moivre(k)$, we can use the following algorithm:
#### Input:
- $u \thicksim Uniform(0,1)$ from a PRNG
- $k$, the parameter
#### Output:
- $x \thicksim \text{equi-probable } de \, Moivre(k)$
#### Steps:
- $u \leftarrow Uniform(0,1)$
- $x \leftarrow \lfloor uk \rfloor + 1$
- return $x$
We can illustrate this with SageMath:
```
def deMoivreFInverse(u, k):
'''A function to evaluate the inverse CDF of an equi-probable de Moivre.
Param u is the value to evaluate the inverse CDF at.
Param k is the distribution parameter.
Returns the inverse CDF for a de Moivre(k) distribution evaluated at u.'''
return floor(k*u) + 1
def deMoivreSample(n, k):
'''A function to simulate samples from an equi-probable de Moivre.
Param n is the number of samples to simulate.
    Param k is the de Moivre distribution parameter.
Returns a simulated sample of size n from an equi-probable de Moivre(k) distribution as a list.'''
us = [random() for i in range(n)]
return [deMoivreFInverse(u, k) for u in us]
```
A small sample:
```
deMoivreSample(15,6)
```
You should understand the `deMoivreFInverse` and `deMoivreSample` functions and be able to write something like them if you were asked to.
You are not expected to be able to make the interactive plots below (but this is not too hard to do by syntactic mimicry and google searches!).
Now let's do some interactive sampling where you can vary $k$ and the sample size $n$:
```
@interact
def _(k=(6), n=(10,(0..500))):
'''Interactive function to plot samples from equi-probable de Moivre distribution.'''
if n > 0 and k >= 0 and int(k) == k:
print "epmf and ecdf for ", n, " samples from equi-probable de Moivre (", k, ")"
outcomes = range(1,k+1,1) # define the outcomes
samples = deMoivreSample(n, k) # get the samples
epmf = epmfPlot(samples) # this is one of our hidden functions
ecdf = ecdfPlot(samples) # this is one of our hidden functions
show(graphics_array([epmf, ecdf]),figsize=[10,3])
else:
print "k>0 must be an integer, n>0"
```
Try changing $n$ and/or $k$. With $k = 40$ for example, you could be simulating the number on the first ball for $n$ Lotto draws.
### YouTry
A useful counterpart to the floor of a number is the ceiling, denoted $\lceil \, \rceil$. In maths, $\lceil x \rceil$, the 'ceiling of $x$' is the smallest integer that is larger than or equal to $x$. For example, $\lceil 3.8 \rceil = 4$. We can use the ceil function to do this in Sage:
```
ceil(3.8)
```
Try using `ceil` to check that you understand what it is doing. What would `ceil(0)` be?
# Inversion Sampler for Continuous Random Variables
When we simulated from the discrete RVs above, the $Bernoulli(\theta)$ and the equi-probable $de\,Moivre(k)$, we transformed some $u \thicksim Uniform(0,1)$ into some value for the RV.
Now we will look at the formal idea of an inversion sampler for continuous random variables. Inversion sampling for continuous random variables is a way to simulate values for a continuous random variable $X$ using $u \thicksim Uniform(0,1)$.
The idea of the inversion sampler is to treat $u \thicksim Uniform(0,1)$ as some value taken by the CDF $F$ and find the value $x$ at which $F(x) = P(X \le x) = u$.
To find the $x$ where $F(x) = u$ we need to use the inverse of $F$, $F^{[-1]}$. This is why it is called an **inversion sampler**.
Formalising this,
### Proposition
Let $F(x) := \int_{- \infty}^{x} f(y) \,d y : \mathbb{R} \rightarrow [0,1]$ be a continuous DF with density $f$, and let its inverse $F^{[-1]} $ be:
$$ F^{[-1]}(u) := \inf \{ x : F(x) = u \} : [0,1] \rightarrow \mathbb{R} $$
Then, $F^{[-1]}(U)$ has the distribution function $F$, provided $U \thicksim Uniform(0,1)$ ($U$ is a $Uniform(0,1)$ RV).
Note:
The infimum of a set $A$ of real numbers, denoted by $\inf(A)$, is the greatest lower bound of $A$, i.e. the largest number that is less than or equal to every element of $A$.
### Proof
The "one-line proof" of the proposition is due to the following equalities:
$$P(F^{[-1]}(U) \leq x) = P(\inf \{ y : F(y) = U \} \leq x) = P(U \leq F(x)) = F(x), \quad \text{for all } x \in \mathbb{R} . $$
# Algorithm for Inversion Sampler
#### Input:
- A PRNG for $Uniform(0,1)$ samples
- A procedure to give us $F^{[-1]}(u)$, inverse of the DF of the target RV $X$ evaluated at $u$
#### Output:
- A sample $x$ from $X$ distributed according to $F$
#### Algorithm steps:
- Draw $u \sim Uniform(0,1)$
- Calculate $x = F^{[-1]}(u)$
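The two steps above can be wrapped into one generic helper. Here is a minimal sketch (an addition for illustration, not part of the original notes) that works for any RV for which we can code $F^{[-1]}$ as a procedure taking $u$ and the distribution parameters:
```
def inversionSample(n, FInverse, *params):
    '''A sketch of a generic inversion sampler.
    n is the number of samples to simulate.
    FInverse is a procedure FInverse(u, *params) for the inverse CDF of the target RV.
    params are any distribution parameters that FInverse needs.
    Returns a list of n simulated values.'''
    us = [random() for i in range(n)]             # step 1: draw u ~ Uniform(0,1)
    return [FInverse(u, *params) for u in us]     # step 2: x = FInverse(u)
```
For example, `inversionSample(10, bernoulliFInverse, 0.3)` should behave just like `bernoulliSample(10, 0.3)` from earlier, and the samplers below all follow this same two-step pattern.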
# The $Uniform(\theta_1, \theta_2)$ RV
We have already met the $Uniform(\theta_1, \theta_2)$ RV.
Given two real parameters $\theta_1,\theta_2 \in \mathbb{R}$, such that $\theta_1 < \theta_2$, the PDF of the $Uniform(\theta_1,\theta_2)$ RV $X$ is:
$$f(x;\theta_1,\theta_2) =
\begin{cases}
\frac{1}{\theta_2 - \theta_1} & \text{if }\theta_1 \leq x \leq \theta_2\text{,}\\
0 & \text{otherwise}
\end{cases}
$$
and its DF given by $F(x;\theta_1,\theta_2) = \int_{- \infty}^x f(y; \theta_1,\theta_2) \, dy$ is:
$$
F(x; \theta_1,\theta_2) =
\begin{cases}
0 & \text{if }x < \theta_1 \\
\frac{x-\theta_1}{\theta_2-\theta_1} & \text{if}~\theta_1 \leq x \leq \theta_2,\\
1 & \text{if } x > \theta_2
\end{cases}
$$
For example, here are the PDF, CDF and inverse CDF for the $Uniform(-1,1)$:
<img src="images/UniformMinus11ThreeCharts.png" width=800>
As usual, we can make some SageMath functions for the PDF and CDF:
```
# uniform pdf
def uniformPDF(x, theta1, theta2):
'''Uniform(theta1, theta2) pdf function f(x; theta1, theta2).
x is the value to evaluate the pdf at.
theta1, theta2 are the distribution parameters.'''
retvalue = 0 # default return value
if x >= theta1 and x <= theta2:
retvalue = 1.0/(theta2-theta1)
return retvalue
# uniform cdf
def uniformCDF(x, theta1, theta2):
'''Uniform(theta1, theta2) CDF or DF function F(x; theta1, theta2).
x is the value to evaluate the cdf at.
theta1, theta2 are the distribution parameters.'''
retvalue = 0 # default return value
if (x > theta2):
retvalue = 1
elif (x > theta1): # else-if
retvalue = (x - theta1) / (theta2-theta1)
# if (x < theta1), retvalue will be 0
return retvalue
```
Using these functions in an interactive plot, we can see the effect of changing the distribution parameters $\theta_1$ and $\theta_2$.
```
@interact
def InteractiveUniformPDFCDFPlots(theta1=0,theta2=1):
if theta2 > theta1:
print "Uniform(", + RR(theta1).n(digits=2), ",", RR(theta2).n(digits=2), ") pdf and cdf"
p1 = line([(theta1-1,0), (theta1,0)], rgbcolor='blue')
p1 += line([(theta1,1/(theta2-theta1)), (theta2,1/(theta2-theta1))], rgbcolor='blue')
p1 += line([(theta2,0), (theta2+1,0)], rgbcolor='blue')
p2 = line([(theta1-1,0), (theta1,0)], rgbcolor='red')
p2 += line([(theta1,0), (theta2,1)], rgbcolor='red')
p2 += line([(theta2,1), (theta2+1,1)], rgbcolor='red')
show(graphics_array([p1, p2]),figsize=[8,3])
else:
print "theta2 must be greater than theta1"
```
# Simulating from the $Uniform(\theta_1, \theta_2)$ RV
We can simulate from the $Uniform(\theta_1,\theta_2)$ using the inversion sampler, provided that we can get an expression for $F^{[-1]}$ that can be implemented as a procedure.
We can get this by solving for $x$ in terms of $u=F(x;\theta_1,\theta_2)$:
$$
u = \frac{x-\theta_1}{\theta_2-\theta_1} \quad \iff \quad x = (\theta_2-\theta_1)u+\theta_1 \quad \iff \quad F^{[-1]}(u;\theta_1,\theta_2) = \theta_1+(\theta_2-\theta_1)u
$$
<img src="images/Week7InverseUniformSampler.png" width=600>
## Algorithm for Inversion Sampler for the $Uniform(\theta_1, \theta_2)$ RV
#### Input:
- $u \thicksim Uniform(0,1)$
- $F^{[-1]}(u)$
- $\theta_1$, $\theta_2$
#### Output:
- A sample $x \thicksim Uniform(\theta_1, \theta_2)$
#### Algorithm steps:
- Draw $u \sim Uniform(0,1)$
- Calculate $x = F^{[-1]}(u) = (\theta_1 + u(\theta_2 - \theta_1))$
- Return $x$
We can illustrate this with SageMath by writing a function to calculate the inverse of the CDF of a uniform distribution parameterised by theta1 and theta2. Given a value between 0 and 1 for the parameter u, it returns the height of the inverse CDF at this point, i.e. the value in the range theta1 to theta2 where the CDF evaluates to u.
```
def uniformFInverse(u, theta1, theta2):
'''A function to evaluate the inverse CDF of a uniform(theta1, theta2) distribution.
u, u should be 0 <= u <= 1, is the value to evaluate the inverse CDF at.
theta1, theta2, theta2 > theta1, are the uniform distribution parameters.'''
return theta1 + (theta2 - theta1)*u
```
This function transforms a single $u$ into a single simulated value from the $Uniform(\theta_1, \theta_2)$, for example:
```
u = random()
theta1, theta2 = 3, 6
uniformFInverse(u, theta1, theta2)
```
Then we can use this function inside another function to generate a number of samples:
```
def uniformSample(n, theta1, theta2):
'''A function to simulate samples from a uniform distribution.
n > 0 is the number of samples to simulate.
theta1, theta2 (theta2 > theta1) are the uniform distribution parameters.'''
us = [random() for i in range(n)]
return [uniformFInverse(u, theta1, theta2) for u in us]
```
The basic strategy is the same as for simulating $Bernoulli$ and $de \, Moivre$ samples: we are using a list comprehension and the built-in SageMath random() function to make a list of pseudo-random simulations from the $Uniform(0,1)$. The length of the list is determined by the value of n. Inside the body of the function we assign this list to a variable named us (i.e., u plural). We then use another list comprehension to make our simulated sample. This list comprehension works by calling our function uniformFInverse(...) and passing in values for theta1 and theta2 together with each u in us in turn.
You should be able to write simple functions like uniformFInverse and uniformSample yourself.
Try this for a small sample:
```
param1 = -5
param2 = 5
nToGenerate = 30
myUniformSample = uniformSample(nToGenerate, param1, param2)
print(myUniformSample)
```
Much more fun, we can make an interactive plot which uses the uniformSample(...) function to generate and plot while you choose the parameters and number to generate (you are not expected to be able to make interactive plots like this):
```
@interact
def _(theta1=-1, theta2=1, n=(1..5000)):
'''Interactive function to plot samples from uniform distribution.'''
if theta2 > theta1:
if n == 1:
print n, "uniform(", + RR(theta1).n(digits=2), ",", RR(theta2).n(digits=2), ") sample"
else:
print n, "uniform(", + RR(theta1).n(digits=2), ",", RR(theta2).n(digits=2), ") samples"
sample = uniformSample(n, theta1, theta2)
pts = zip(range(1,n+1,1),sample) # plot so that first sample is at x=1
p=points(pts)
p+= text(str(theta1), (0, theta1), fontsize=10, color='black') # add labels manually
p+= text(str(theta2), (0, theta2), fontsize=10, color='black')
p.show(xmin=0, xmax = n+1, ymin=theta1, ymax = theta2, axes=false, gridlines=[[0,n+1],[theta1,theta2]], \
figsize=[7,3])
else:
print "Theta1 must be less than theta2"
```
We can get a better idea of the distribution of our sample using a histogram (the minimum sample size has been set to 50 here because the automatic histogram generation does not do a very good job with small samples).
```
import pylab
@interact
def _(theta1=0, theta2=1, n=(50..5000), Bins=5):
'''Interactive function to plot samples from uniform distribution as a histogram.'''
if theta2 > theta1:
sample = uniformSample(n, theta1, theta2)
pylab.clf() # clear current figure
n, bins, patches = pylab.hist(sample, Bins, density=true)
pylab.ylabel('normalised count')
pylab.title('Normalised histogram')
pylab.savefig('myHist') # to actually display the figure
pylab.show()
else:
print "Theta1 must be less than theta2"
```
# The $Exponential(\lambda)$ Random Variable
For a given $\lambda$ > 0, an $Exponential(\lambda)$ Random Variable has the following PDF $f$ and DF $F$:
$$
f(x;\lambda) =\begin{cases}\lambda e^{-\lambda x} & \text{if }x \ge 0\text{,}\\ 0 & \text{otherwise}\end{cases}
$$
$$
F(x;\lambda) =\begin{cases}1 - e^{-\lambda x} & \text{if }x \ge 0\text{,}\\ 0 & \text{otherwise}\end{cases}
$$
An exponential distribution is useful because it can often be used to model inter-arrival times or inter-event measurements (if you are familiar with the $Poisson$ distribution, a discrete distribution, you may have also met the $Exponential$ distribution as the time between $Poisson$ events). Here are some examples of random variables which are sometimes modelled with an exponential distribution:
- time between the arrival of buses at a bus-stop
- distance between roadkills on a stretch of highway
In SageMath, we can use `exp(x)` to calculate $e^x$, for example:
```
x = 3.0
exp(x)
```
We can code some functions for the PDF and DF of an $Exponential$ RV parameterised by $\lambda$.
**Note** that we cannot or should not use the name `lambda` for the parameter because in SageMath (and Python), the term `lambda` has a special meaning. Do you recall lambda expressions?
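As a small aside (an illustration added here), this is roughly what a lambda expression looks like, and it is why trying to write `lambda = 0.5` would be a syntax error; the functions below therefore use the name `lam` for the parameter:
```
# `lambda` builds a small anonymous function, so it cannot be reused as a variable name
square = lambda x: x*x
square(4)   # evaluates to 16
```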
```
def exponentialPDF(x, lam):
'''Exponential pdf function.
x is the value we want to evaluate the pdf at.
lam is the exponential distribution parameter.'''
return lam*exp(-lam*x)
def exponentialCDF(x, lam):
'''Exponential cdf or df function.
x is the value we want to evaluate the cdf at.
lam is the exponential distribution parameter.'''
return 1 - exp(-lam*x)
```
You should be able to write simple functions like `exponentialPDF` and `exponentialCDF` yourself, but you are not expected to be able to make the interactive plots.
You can see the shapes of the PDF and CDF for different values of $\lambda$ using the interactive plot below.
```
@interact
def _(lam=('lambda',0.5),Xmax=(5..100)):
'''Interactive function to plot the exponential pdf and cdf.'''
if lam > 0:
print "Exponential(", RR(lam).n(digits=2), ") pdf and cdf"
from pylab import arange
xvalues = list(arange(0.1, Xmax, 0.1))
p1 = line(zip(xvalues, [exponentialPDF(y, lam) for y in xvalues]), rgbcolor='blue')
p2 = line(zip(xvalues, [exponentialCDF(y, lam) for y in xvalues]), rgbcolor='red')
show(graphics_array([p1, p2]),figsize=[8,3])
else:
print "Lambda must be greater than 0"
```
We are going to write some functions to help us to do inversion sampling from the $Exponential(\lambda)$ RV.
As before, we need an expression for $F^{[-1]}$ that can be implemented as a procedure.
We can get this by solving for $x$ in terms of $u=F(x;\lambda)$
### YouTry later
Show that
$$
F^{[-1]}(u;\lambda) =\frac{-1}{\lambda} \ln(1-u)
$$
$\ln = \log_e$ is the natural logarithm.
(end of You try)
---
---
# Simulating from the $Exponential(\lambda)$ RV
## Algorithm for Inversion Sampler for the $Exponential(\lambda)$ RV
#### Input:
- $u \thicksim Uniform(0,1)$
- $F^{[-1]}(u)$
- $\lambda$
#### Output:
- A sample $x \thicksim Exponential(\lambda)$
#### Algorithm steps:
- Draw $u \sim Uniform(0,1)$
- Calculate $x = F^{[-1]}(u) = \frac{-1}{\lambda}\ln(1-u)$
- Return $x$
The function `exponentialFInverse(u, lam)` codes the inverse of the CDF of an exponential distribution parameterised by `lam`. Given a value between 0 and 1 for the parameter `u`, it returns the height of the inverse CDF of the exponential distribution at this point, i.e. the value where the CDF evaluates to `u`. The function `exponentialSample(n, lam)` uses `exponentialFInverse(...)` to simulate `n` samples from an exponential distribution parameterised by `lam`.
```
def exponentialFInverse(u, lam):
'''A function to evaluate the inverse CDF of a exponential distribution.
u is the value to evaluate the inverse CDF at.
lam is the exponential distribution parameter.'''
# log without a base is the natural logarithm
return (-1.0/lam)*log(1 - u)
def exponentialSample(n, lam):
'''A function to simulate samples from an exponential distribution.
n is the number of samples to simulate.
lam is the exponential distribution parameter.'''
us = [random() for i in range(n)]
return [exponentialFInverse(u, lam) for u in us]
```
We can have a look at a small sample:
```
lam = 0.5
nToGenerate = 30
sample = exponentialSample(nToGenerate, lam)
print(sorted(sample)) # recall that sorted makes a new sorted list
```
You should be able to write simple functions like `exponentialFinverse` and `exponentialSample` yourself by now.
The best way to visualise the results is to use a histogram. With this interactive plot you can explore the effect of varying lambda and n:
```
import pylab
@interact
def _(lam=('lambda',0.5), n=(50,(10..10000)), Bins=(5,(1,1000))):
'''Interactive function to plot samples from exponential distribution.'''
if lam > 0:
pylab.clf() # clear current figure
n, bins, patches = pylab.hist(exponentialSample(n, lam), Bins, density=true)
pylab.ylabel('normalised count')
pylab.title('Normalised histogram')
pylab.savefig('myHist') # to actually display the figure
pylab.show()
else:
print "Lambda must be greater than 0"
```
# The Standard $Cauchy$ Random Variable
A standard $Cauchy$ Random Variable has the following PDF $f$ and DF $F$:
$$
f(x) =\frac{1}{\pi(1+x^2)}\text{,}\,\, -\infty < x < \infty
$$
$$
F(x) = \frac{1}{\pi}\tan^{-1}(x) + 0.5
$$
The $Cauchy$ distribution is an interesting distribution because the expectation does not exist:
$$
\int \left|x\right|\,dF(x) = \frac{2}{\pi} \int_0^{\infty} \frac{x}{1+x^2}\,dx = \left(x \tan^{-1}(x) \right]_0^{\infty} - \int_0^{\infty} \tan^{-1}(x)\, dx = \infty \ .
$$
In SageMath, we can use the `arctan` function for $tan^{-1}$, and `pi` for $\pi$ and code some functions for the PDF and DF of the standard Cauchy as follows.
```
def cauchyPDF(x):
'''Standard Cauchy pdf function.
x is the value to evaluate the pdf at.'''
return 1.0/(pi.n()*(1+x^2))
def cauchyCDF(x):
'''Standard Cauchy cdf function.
x is the value to evaluate the cdf at.'''
return (1.0/pi.n())*arctan(x) + 0.5
```
You can see the shapes of the PDF and CDF using the plot below. Note from the PDF $f$ above is defined for $-\infty < x < \infty$. This means we should set some arbitrary limits on the minimum and maximum values to use for the x-axis on the plots. You can change these limits interactively.
```
@interact
def _(lower=(-4), upper=(4)):
'''Interactive function to plot the Cauchy pdf and cdf.'''
if lower < upper:
print "Standard Cauchy pdf and cdf"
p1 = plot(cauchyPDF, lower,upper, rgbcolor='blue')
p2 = plot(cauchyCDF, lower,upper, rgbcolor='red')
show(graphics_array([p1, p2]),figsize=[8,3])
else:
print "Upper must be greater than lower"
```
#### Constructing a standard $Cauchy$ RVs
- Place a double light sabre (i.e., one that can shoot its laser beam from both ends, like that of Darth Maul in Star Wars) on a Cartesian plane so that it is centred on $(1, 0)$.
- Randomly spin it (so that its spin angle to the x-axis is $\theta \thicksim Uniform (0, 2\pi)$).
- Let it come to rest.
- The y-coordinate of the point of intersection with the y-axis is a standard Cauchy RV.
You can see that we are equally likely to get positive and negative values (the density function of the standard $Cauchy$ RV is symmetrical about 0) and whenever the spin angle is close to $\frac{\pi}{2}$ ($90^{\circ}$) or $\frac{3\pi}{2}$ ($270^{\circ}$), the intersections will be a long way out up or down the y-axis, i.e. very negative or very positive values. If the light sabre is exactly parallel to the y-axis there will be no intersection: a $Cauchy$ RV $X$ can take values $-\infty < x < \infty$.
<img src="images/Week7CauchyLightSabre.png" width=300>
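We can check this geometric construction directly by simulation. Below is a small sketch (an illustration added here, not part of the original notes): we spin an angle $\theta \thicksim Uniform(0, 2\pi)$ and record the $y$-coordinate where the line through $(1, 0)$ at that angle meets the y-axis, which works out to be $-\tan(\theta)$.
```
def cauchySpinSketch(n):
    '''A sketch of the light-sabre construction of the standard Cauchy.
    n is the number of spins to simulate.
    Returns a list of n y-axis intersection points for lines through (1, 0)
    spun at angles drawn uniformly from (0, 2*pi).'''
    samples = []
    for i in range(n):
        theta = 2*pi.n()*random()     # spin angle ~ Uniform(0, 2*pi)
        samples.append(-tan(theta))   # y-intercept of the line through (1, 0) at angle theta
    return samples

cauchySpinSketch(5)
```
A histogram of many such spins should look just like a histogram of the inversion-sampled values we construct next.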
## Simulating from the standard $Cauchy$
We can perform inversion sampling on the $Cauchy$ RV by transforming a $Uniform(0,1)$ random variable into a $Cauchy$ random variable using the inverse CDF.
We can get this by replacing $F(x)$ by $u$ in the expression for $F(x)$:
$$
\frac{1}{\pi}tan^{-1}(x) + 0.5 = u
$$
and solving for $x$:
$$
\begin{array}{lcl} \frac{1}{\pi}tan^{-1}(x) + 0.5 = u & \iff & \frac{1}{\pi} tan^{-1}(x) = u - \frac{1}{2}\\ & \iff & tan^{-1}(x) = (u - \frac{1}{2})\pi\\ & \iff & tan(tan^{-1}(x)) = tan((u - \frac{1}{2})\pi)\\ & \iff & x = tan((u - \frac{1}{2})\pi) \end{array}
$$
## Inversion Sampler for the standard $Cauchy$ RV
#### Input:
- $u \thicksim Uniform(0,1)$
- $F^{[-1]}(u)$
#### Output:
- A sample $x \thicksim \text{standard } Cauchy$
#### Algorithm steps:
- Draw $u \sim Uniform(0,1)$
- Calculate $x = F^{[-1]}(u) = tan((u - \frac{1}{2})\pi)$
- Return $x$
The function `cauchyFInverse(u)` codes the inverse of the CDF of the standard Cauchy distribution. Given a value between 0 and 1 for the parameter u, it returns the height of the inverse CDF of the standard $Cauchy$ at this point, i.e. the value where the CDF evaluates to u. The function `cauchySample(n)` uses `cauchyFInverse(...)` to simulate `n` samples from a standard Cauchy distribution.
```
def cauchyFInverse(u):
'''A function to evaluate the inverse CDF of a standard Cauchy distribution.
u is the value to evaluate the inverse CDF at.'''
return RR(tan(pi*(u-0.5)))
def cauchySample(n):
'''A function to simulate samples from a standard Cauchy distribution.
n is the number of samples to simulate.'''
us = [random() for i in range(n)]
return [cauchyFInverse(u) for u in us]
```
And we can visualise these simulated samples with an interactive plot:
```
@interact
def _(n=(50,(0..5000))):
'''Interactive function to plot samples from standard Cauchy distribution.'''
if n == 1:
print n, "Standard Cauchy sample"
else:
print n, "Standard Cauchy samples"
sample = cauchySample(n)
pts = zip(range(1,n+1,1),sample)
p=points(pts)
p+= text(str(floor(min(sample))), (0, floor(min(sample))), \
fontsize=10, color='black') # add labels manually
p+= text(str(ceil(max(sample))), (0, ceil(max(sample))), \
fontsize=10, color='black')
p.show(xmin=0, xmax = n+1, ymin=floor(min(sample)), \
ymax = ceil(max(sample)), axes=false, \
gridlines=[[0,n+1],[floor(min(sample)),ceil(max(sample))]],\
figsize=[7,3])
```
Notice how we can get some very extreme values. This is because of the 'thick tails' of the density function of the $Cauchy$ RV. Think about this in relation to the double light sabre visualisation. We can see the effect of the extreme values with a histogram visualisation as well. The interactive plot below will only use values between lower and upper in the histogram. Try increasing the sample size to something like 1000 and then gradually widening the limits:
```
import pylab
@interact
def _(n=(50,(0..5000)), lower=(-4), upper=(4), Bins=(5,(1,100))):
'''Interactive function to plot samples from
standard Cauchy distribution.'''
if lower < upper:
if n == 1:
print n, "Standard Cauchy sample"
else:
print n, "Standard Cauchy samples"
sample = cauchySample(n) # the whole sample
sampleToShow=[c for c in sample if (c >= lower and c <= upper)]
pylab.clf() # clear current figure
n, bins, patches = pylab.hist(sampleToShow, Bins, density=true)
pylab.ylabel('normalised count')
pylab.title('Normalised histogram, values between ' \
+ str(floor(lower)) + ' and ' + str(ceil(upper)))
pylab.savefig('myHist') # to actually display the figure
pylab.show()
else:
print "lower must be less than upper"
```
# Running means
When we introduced the $Cauchy$ distribution, we noted that the expectation of the $Cauchy$ RV does not exist. This means that attempts to estimate the mean of a $Cauchy$ RV by looking at a sample mean will not be successful: as you take larger and larger samples, the effect of the extreme values will still cause the sample mean to swing around wildly (we will cover estimation properly soon). You are going to investigate the sample mean of simulated $Cauchy$ samples of steadily increasing size and show how unstable this is. A convenient way of doing this is to look at a running mean. We will start by working through the process of calculating some running means for the $Uniform(0,10)$, which do stabilise. You will then do the same thing for the $Cauchy$ and be able to see the instability.
We will be using the pylab.cumsum function, so we make sure that we have it available. We then generate a sample from the $Uniform(0,10)$:
```
from pylab import cumsum
nToGenerate = 10 # sample size to generate
theta1, theta2 = 0, 10 # uniform parameters
uSample = uniformSample(nToGenerate, theta1, theta2)
print(uSample)
```
We are going to treat this sample as though it is actually 10 samples of increasing size:
- sample 1 is the first element in uSample
- sample 2 contains the first 2 elements in uSample
- sample 3 contains the first 3 elements in uSample
- ...
- sample10 contains the first 10 elements in uSample
We know that a sample mean is the sum of the elements in the sample divided by the number of elements in the sample $n$:
$$
\bar{x} = \frac{1}{n} \sum_{i=1}^n x_i
$$
We can get the sum of the elements in each of our 10 samples with the cumulative sum of `uSample`.
We use `cumsum` to get the cumulative sum. This will be a `pylab.array` (or `numpy.array`) type, so we use the `list` function to turn it back into a list:
```
csUSample = list(cumsum(uSample))
print(csUSample)
```
What we have now is effectively a list
$$\left[\displaystyle\sum_{i=1}^1x_i, \sum_{i=1}^2x_i, \sum_{i=1}^3x_i, \ldots, \sum_{i=1}^{10}x_i\right]$$
So all we have to do is divide each element in `csUSample` by the number of elements that were summed to make it, and we have a list of running means
$$\left[\frac{1}{1}\displaystyle\sum_{i=1}^1x_i, \frac{1}{2}\sum_{i=1}^2x_i, \frac{1}{3}\sum_{i=1}^3x_i, \ldots, \frac{1}{10}\sum_{i=1}^{10}x_i\right]$$
We can get the running sample sizes using the `range` function:
```
samplesizes = range(1, len(uSample)+1,1)
samplesizes
```
And we can do the division with list comprehension:
```
uniformRunningMeans = [csUSample[i]/samplesizes[i] for i in range(nToGenerate)]
print(uniformRunningMeans)
```
We could pull all of this together into a function which produced a list of running means for sample sizes 1 to $n$.
```
def uniformRunningMeans(n, theta1, theta2):
'''Function to give a list of n running means from uniform(theta1, theta2).
n is the number of running means to generate.
theta1, theta2 are the uniform distribution parameters.
return a list of n running means.'''
sample = uniformSample(n, theta1, theta2)
from pylab import cumsum # we can import in the middle of code!
csSample = list(cumsum(sample))
samplesizes = range(1, n+1,1)
return [csSample[i]/samplesizes[i] for i in range(n)]
```
Have a look at the running means of 10 incrementally-sized samples:
```
nToGenerate = 10
theta1, theta2 = 0, 10
uRunningMeans = uniformRunningMeans(nToGenerate, theta1, theta2)
pts = zip(range(1, len(uRunningMeans)+1,1),uRunningMeans)
p = points(pts)
show(p, figsize=[5,3])
```
Recall that the expectation of $X \thicksim Uniform(\theta_1, \theta_2)$ is $E_{(\theta_1, \theta_2)}(X) = \frac{\theta_1 +\theta_2}{2}$.
In our simulations we are using $\theta_1 = 0$, $\theta_2 = 10$, so if $X \thicksim Uniform(0,10)$, $E(X) = 5$.
To show that the running means of different simulations from a $Uniform$ distribution settle down to be close to the expectation, we can plot say 5 different groups of running means for sample sizes $1, \ldots, 1000$. We will use a line plot rather than plotting individual points.
```
nToGenerate = 1000
theta1, theta2 = 0, 10
iterations = 5
xvalues = range(1, nToGenerate+1,1)
for i in range(iterations):
redshade = 0.5*(iterations - 1 - i)/iterations # to get different colours for the lines
uRunningMeans = uniformRunningMeans(nToGenerate, theta1, theta2)
pts = zip(xvalues,uRunningMeans)
if (i == 0):
p = line(pts, rgbcolor = (redshade,0,1))
else:
p += line(pts, rgbcolor = (redshade,0,1))
show(p, figsize=[5,3])
```
### YouTry!
Your task is to now do the same thing for some standard Cauchy running means.
To start with, do not put everything into a function, just put statements into the cell(s) below to:
- Make a variable for the number of running means to generate; assign it a small value like 10 at this stage
- Use the cauchySample function to generate the sample from the standard $Cauchy$; have a look at your sample
- Make a named list of cumulative sums of your $Cauchy$ sample using list and cumsum, as we did above; have a look at your cumulative sums
- Make a named list of sample sizes, as we did above
- Use a list comprehension to turn the cumulative sums and sample sizes into a list of running means, as we did above
- Have a look at your running means; do they make sense to you given the individual sample values?
Add more cells as you need them.
When you are happy that you are doing the right things, **write a function**, parameterised by the number of running means to do, that returns a list of running means. Try to make your own function rather than copying and changing the one we used for the $Uniform$: you will learn more by trying to do it yourself. Please call your function `cauchyRunningMeans`, so that (if you have done everything else right), you'll be able to use some code we will supply you with to plot the results.
Try checking your function by using it to create a small list of running means. Check that the function does not report an error and gives you the kind of list you expect.
When you think that your function is working correctly, try evaluating the cell below: this will put the plot of 5 groups of $Uniform(0,10)$ running means beside a plot of 5 groups of standard $Cauchy$ running means produced by your function.
```
nToGenerate = 10000
theta1, theta2 = 0, 10
iterations = 5
xvalues = range(1, nToGenerate+1,1)
for i in range(iterations):
shade = 0.5*(iterations - 1 - i)/iterations # to get different colours for the lines
uRunningMeans = uniformRunningMeans(nToGenerate, theta1, theta2)
problemStr="" # an empty string
# use try to catch problems with cauchyRunningMeans functions
try:
cRunningMeans = cauchyRunningMeans(nToGenerate)
##cRunningMeans = hiddenCauchyRunningMeans(nToGenerate)
cPts = zip(xvalues, cRunningMeans)
except NameError, e:
# cauchyRunningMeans is not defined
cRunningMeans = [1 for c in range(nToGenerate)] # default value
problemStr = "No "
except Exception, e:
# some other problem with cauchyRunningMeans
cRunningMeans = [1 for c in range(nToGenerate)]
problemStr = "Problem with "
uPts = zip(xvalues, uRunningMeans)
cPts = zip(xvalues, cRunningMeans)
if (i < 1):
p1 = line(uPts, rgbcolor = (shade, 0, 1))
p2 = line(cPts, rgbcolor = (1-shade, 0, shade))
cauchyTitleMax = max(cRunningMeans) # for placement of cauchy title
else:
p1 += line(uPts, rgbcolor = (shade, 0, 1))
p2 += line(cPts, rgbcolor = (1-shade, 0, shade))
if max(cRunningMeans) > cauchyTitleMax:
cauchyTitleMax = max(cRunningMeans)
titleText1 = "Uniform(" + str(theta1) + "," + str(theta2) + ") running means" # make title text
t1 = text(titleText1, (nToGenerate/2,theta2), rgbcolor='blue',fontsize=10)
titleText2 = problemStr + "standard Cauchy running means" # make title text
t2 = text(titleText2, (nToGenerate/2,ceil(cauchyTitleMax)+1), rgbcolor='red',fontsize=10)
show(graphics_array((p1+t1,p2+t2)),figsize=[10,5])
```
# Replicable samples
Remember that we know how to set the seed of the PRNG used by `random()` with `set_random_seed`? If we wanted our sampling functions to give repeatable samples, we could also pass the functions the seed to use. Try making a new version of `uniformSample` which has a parameter for a value to use as the random number generator seed. Call your new version `uniformSampleSeeded` to distinguish it from the original one.
Try out your new `uniformSampleSeeded` function: if you generate two samples using the same seed they should be exactly the same. You could try using a large sample and checking on sample statistics such as the mean, min, max, variance etc, rather than comparing small samples by eye.
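If you want to check your attempt against something, here is one possible sketch (just one way of doing it, reusing the same inversion approach as `uniformSample`):
```
def uniformSampleSeeded(n, theta1, theta2, seed):
    '''A sketch of a seeded uniform sampler.
    n > 0 is the number of samples to simulate.
    theta1, theta2 (theta2 > theta1) are the uniform distribution parameters.
    seed is the value passed to set_random_seed before any samples are drawn.'''
    set_random_seed(seed)     # fix the PRNG state so the sample is replicable
    us = [random() for i in range(n)]
    return [uniformFInverse(u, theta1, theta2) for u in us]
```
Calling this twice with the same arguments, for example `uniformSampleSeeded(5, 0, 10, 1234)`, should return exactly the same list both times.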
Recall that you can also give parameters default values in SageMath. Using a default value means that if no value is passed to the function for that parameter, the default value is used. Here is an example with a very simple function:
```
# we already saw default parameters in use - here's a careful walkthrough of how it works
def simpleDefaultExample(x, y=0):
'''A simple function to demonstrate default parameter values.
x is the first parameter, with no default value.
y is the second parameter, defaulting to 0.'''
return x + y
```
Note that parameters with default values need to come after parameters without default values when we define the function.
Now you can try the function - evaluate the following cells to see what you get:
```
simpleDefaultExample (1,3) # specifying two arguments for the function
simpleDefaultExample (1) # specifying one argument for the function
# another way to specify one argument for the function
simpleDefaultExample (x=6)
# uncomment next line and evaluate - but this will give an error because x has no default value
#simpleDefaultExample()
# uncomment next line and evaluate - but this will also give an error because x has no default value
# simpleDefaultExample (y=9)
```
Try making yet another version of the uniform sampler which takes a value to be used as a random number generator seed, but defaults to `None` if no value is supplied for that parameter. `None` is a special Python type.
```
x = None
type(x)
```
Using `set_random_seed(None)` will mean that the random seed is actually reset to a new ('random') value. You can see this by testing what happens when you do this twice in succession and then check what seed is being used with `initial_seed`:
```
set_random_seed(None)
initial_seed()
set_random_seed(None)
initial_seed()
```
Do another version of the `uniformSampleSeeded` function with a default value for the seed of `None`.
Check your function again by testing with both when you supply a value for the seed and when you don't.
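One possible sketch (assuming the inverse-CDF construction $x = \theta_1 + (\theta_2 - \theta_1)u$ with $u \thicksim Uniform(0,1)$; your own `uniformSample` may be written differently, and the function below is only illustrative):
```
def uniformSampleSeeded(n, theta1, theta2, seed=None):
    '''Return a list of n simulated Uniform(theta1, theta2) samples.
    An integer seed makes the sample repeatable; seed=None re-seeds "randomly".'''
    set_random_seed(seed)
    # inverse CDF: x = theta1 + (theta2 - theta1)*u for u ~ Uniform(0,1)
    return [theta1 + (theta2 - theta1) * random() for i in range(n)]
```
Calling it twice with the same integer seed should give identical samples, while the default `seed=None` should not.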
---
## Assignment 2, PROBLEM 4
Maximum Points = 1
First read and understand the following simple simulation (originally written by Jenny Harlow). Then you will modify the simulation to find the solution to this problem.
### A Simple Simulation
We could use the samplers we have made to do a very simple simulation. Suppose the inter-arrival times, in minutes, of Orbiter buses at an Orbiter stop in Christchurch follow an $Exponential(\lambda = 0.1)$ distribution. Also suppose that this is quite a popular bus stop, and the arrival of people is very predictable: one new person will arrive in each whole minute. This means that the longer the next bus takes to arrive, the more people arrive to join the queue. Also suppose that the number of free seats available on any bus follows a $de\, Moivre(k=40)$ distribution, i.e., it is equally likely that there are 1, or 2, or 3 ... or 40 spare seats. If there are more spare seats than people in the queue, everyone can get onto the bus and nobody is left waiting, but if there are not enough spare seats some people will be left waiting for the next bus. As they wait, more people arrive to join the queue....
This is not very realistic - we would want a better model for how many people arrive at the stop at least, and for the number of spare seats there will be on the bus. However, we are just using this as a simple example that you can do using the random variables you already know how to simulate samples from.
Try to code this example yourself, using our suggested steps. We have put our version of the code into a cell below, but you will get more out of this example by trying to do it yourself first.
#### Suggested steps:
- Get a list of 100 $Exponential(\lambda = 0.1)$ samples using the `exponentialSamples` function. Assign the list to a variable named something like `busTime`s. These are your 100 simulated bus inter-arrival times.
- Choose a value for the number of people who will be waiting at the busstop when you start the simulation. Call this something like `waiting`.
- Make a list called something like `leftWaiting`, which to begin with contains just the value assigned to `waiting`.
- Make an empty list called something like `boardBus`.
- Start a for loop which takes each element in `busTimes` in turn, i.e. each bus inter-arrival time, and within the for loop:
- Calculate the number of people arriving at the stop as the floor of the time taken for that bus to arrive (i.e., one person for each whole minute until the bus arrives)
- Add this to the number of people waiting (e.g., if the number of arrivals is assigned to a variable arrivals, then waiting = waiting + arrivals will increment the value assigned to the waiting variable by the value of arrivals).
- Simulate a value for the number of seats available on the bus as one simulation from a $de \, Moivre(k=40)$ RV (it may be easier to use `deMoivreFInverse` rather than `deMoivreSample` because you only need one value - remember that you will have to pass a simulated $u \thicksim Uniform(0,1)$ to `deMoivreFInverse` as well as the value of the parameter $k$).
- The number of people who can get on the bus is the minimum of the number of people waiting in the queue and the number of seats on the bus. Calculate this value and assign it to a variable called something like `getOnBus`.
- Append `getOnBus` to the list `boardBus`.
- Subtract `getOnBus` from the number of people waiting, waiting (e.g., `waiting = waiting - getOnBus` will decrement waiting by the number of people who get on the bus).
- Append the new value of `waiting` to the list `leftWaiting`.
- That is the end of the for loop: you now have two lists, one for the number of people waiting at the stop and one for the number of people who can board each bus as it arrives.
## YouTry
Here is our code to do the bus stop simulation.
Yours may be different - maybe it will be better!
*You are expected to find the needed functions from the latest notebook this assignment came from and be able to answer this question. Unless you can do it in your head.*
```
def busStopSimulation(buses, lam, seats):
'''A Simple Simulation - see description above!'''
BusTimes = exponentialSample(buses,lam)
waiting = 0 # how many people are waiting at the start of the simulation
BoardBus = [] # empty list
LeftWaiting = [waiting] # list with just waiting in it
for time in BusTimes: # for each bus inter-arrival time
arrivals = floor(time) # people who arrive at the stop before the bus gets there
waiting = waiting + arrivals # add them to the queue
busSeats = deMoivreFInverse(random(), seats) # how many seats available on the bus
getOnBus = min(waiting, busSeats) # how many people can get on the bus
BoardBus.append(getOnBus) # add to the list
waiting = waiting - getOnBus # take the people who board the bus out of the queue
LeftWaiting.append(waiting) # add to the list
return [LeftWaiting, BoardBus, BusTimes]
# let's simulate the people left waiting at the bus stop
set_random_seed(None) # replace None by an integer to fix the seed and the output of the simulation
buses = 100
lam = 0.1
seats = 40
leftWaiting, boardBus, busTimes = busStopSimulation(buses, lam, seats)
print(leftWaiting) # look at the leftWaiting list
print(boardBus) # board bus
print(busTimes)
```
We could do an interactive visualisation of this by evaluating the next cell. The heights of the lines on the plot show the number of people able to board the bus and the number of people left waiting at the bus stop.
```
@interact
def _(seed=[0,123,456], lam=[0.1,0.01], seats=[40,10,1000]):
set_random_seed(seed)
buses=100
leftWaiting, boardBus, busTimes = busStopSimulation(buses, lam,seats)
p1 = line([(0.5,0),(0.5,leftWaiting[0])])
from pylab import cumsum
csBusTimes=list(cumsum(busTimes))
for i in range(1, len(leftWaiting), 1):
p1+= line([(csBusTimes[i-1],0),(csBusTimes[i-1],boardBus[i-1])], rgbcolor='green')
p1+= line([(csBusTimes[i-1]+.01,0),(csBusTimes[i-1]+.01,leftWaiting[i])], rgbcolor='red')
t1 = text("Boarding the bus", (csBusTimes[len(busTimes)-1]/3,max(max(boardBus),max(leftWaiting))+1), \
rgbcolor='green',fontsize=10)
t2 = text("Waiting", (csBusTimes[len(busTimes)-1]*(2/3),max(max(boardBus),max(leftWaiting))+1), \
rgbcolor='red',fontsize=10)
xaxislabel = text("Time", (csBusTimes[len(busTimes)-1],-10),fontsize=10,color='black')
yaxislabel = text("People", (-50,max(max(boardBus),max(leftWaiting))+1),fontsize=10,color='black')
show(p1+t1+t2+xaxislabel+yaxislabel,figsize=[8,5])
```
Very briefly explain the effect of varying one of the three parameters:
- `seed`
- `lam`
- `seats`
while holding the other two parameters fixed on:
- the number of people waiting at the bus stop and
- the number of people boarding the bus
by using the dropdown menus in the `@interact` above. Think about whether the simulation makes sense and explain why. You can write down your answers by double-clicking this cell and writing between `---` and `---`.
---
---
#### Solution for CauchyRunningMeans
```
def hiddenCauchyRunningMeans(n):
'''Function to give a list of n running means from standardCauchy.
n is the number of running means to generate.'''
sample = cauchySample(n)
from pylab import cumsum
csSample = list(cumsum(sample))
samplesizes = range(1, n+1,1)
return [csSample[i]/samplesizes[i] for i in range(n)]
```
# Convolutional Neural Networks: Application
Welcome to Course 4's second assignment! In this notebook, you will:
- Implement helper functions that you will use when implementing a TensorFlow model
- Implement a fully functioning ConvNet using TensorFlow
**After this assignment you will be able to:**
- Build and train a ConvNet in TensorFlow for a classification problem
We assume here that you are already familiar with TensorFlow. If you are not, please refer to the *TensorFlow Tutorial* of the third week of Course 2 ("*Improving deep neural networks*").
### <font color='darkblue'> Updates to Assignment </font>
#### If you were working on a previous version
* The current notebook filename is version "1a".
* You can find your work in the file directory as version "1".
* To view the file directory, go to the menu "File->Open", and this will open a new tab that shows the file directory.
#### List of Updates
* `initialize_parameters`: added details about tf.get_variable, `eval`. Clarified test case.
* Added explanations for the kernel (filter) stride values, max pooling, and flatten functions.
* Added details about softmax cross entropy with logits.
* Added instructions for creating the Adam Optimizer.
* Added explanation of how to evaluate tensors (optimizer and cost).
* `forward_propagation`: clarified instructions, use "F" to store "flatten" layer.
* Updated print statements and 'expected output' for easier visual comparisons.
* Many thanks to Kevin P. Brown (mentor for the deep learning specialization) for his suggestions on the assignments in this course!
## 1.0 - TensorFlow model
In the previous assignment, you built helper functions using numpy to understand the mechanics behind convolutional neural networks. Most practical applications of deep learning today are built using programming frameworks, which have many built-in functions you can simply call.
As usual, we will start by loading in the packages.
```
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
import tensorflow as tf
from tensorflow.python.framework import ops
from cnn_utils import *
%matplotlib inline
np.random.seed(1)
```
Run the next cell to load the "SIGNS" dataset you are going to use.
```
# Loading the data (signs)
X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()
```
As a reminder, the SIGNS dataset is a collection of 6 signs representing numbers from 0 to 5.
<img src="images/SIGNS.png" style="width:800px;height:300px;">
The next cell will show you an example of a labelled image in the dataset. Feel free to change the value of `index` below and re-run to see different examples.
```
# Example of a picture
index = 6
plt.imshow(X_train_orig[index])
print ("y = " + str(np.squeeze(Y_train_orig[:, index])))
```
In Course 2, you had built a fully-connected network for this dataset. But since this is an image dataset, it is more natural to apply a ConvNet to it.
To get started, let's examine the shapes of your data.
```
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
conv_layers = {}
```
### 1.1 - Create placeholders
TensorFlow requires that you create placeholders for the input data that will be fed into the model when running the session.
**Exercise**: Implement the function below to create placeholders for the input image X and the output Y. You should not define the number of training examples for the moment. To do so, you could use "None" as the batch size; it will give you the flexibility to choose it later. Hence X should be of dimension **[None, n_H0, n_W0, n_C0]** and Y should be of dimension **[None, n_y]**. [Hint: search for the tf.placeholder documentation](https://www.tensorflow.org/api_docs/python/tf/placeholder).
```
# GRADED FUNCTION: create_placeholders
def create_placeholders(n_H0, n_W0, n_C0, n_y):
"""
Creates the placeholders for the tensorflow session.
Arguments:
n_H0 -- scalar, height of an input image
n_W0 -- scalar, width of an input image
n_C0 -- scalar, number of channels of the input
n_y -- scalar, number of classes
Returns:
X -- placeholder for the data input, of shape [None, n_H0, n_W0, n_C0] and dtype "float"
Y -- placeholder for the input labels, of shape [None, n_y] and dtype "float"
"""
### START CODE HERE ### (≈2 lines)
X = tf.placeholder(tf.float32, shape=(None, n_H0, n_W0, n_C0), name='X')
Y = tf.placeholder(tf.float32, shape=(None, n_y), name='Y')
### END CODE HERE ###
return X, Y
X, Y = create_placeholders(64, 64, 3, 6)
print ("X = " + str(X))
print ("Y = " + str(Y))
```
**Expected Output**
<table>
<tr>
<td>
X = Tensor("Placeholder:0", shape=(?, 64, 64, 3), dtype=float32)
</td>
</tr>
<tr>
<td>
Y = Tensor("Placeholder_1:0", shape=(?, 6), dtype=float32)
</td>
</tr>
</table>
### 1.2 - Initialize parameters
You will initialize weights/filters $W1$ and $W2$ using `tf.contrib.layers.xavier_initializer(seed = 0)`. You don't need to worry about bias variables as you will soon see that TensorFlow functions take care of the bias. Note also that you will only initialize the weights/filters for the conv2d functions. TensorFlow initializes the layers for the fully connected part automatically. We will talk more about that later in this assignment.
**Exercise:** Implement initialize_parameters(). The dimensions for each group of filters are provided below. Reminder - to initialize a parameter $W$ of shape [1,2,3,4] in Tensorflow, use:
```python
W = tf.get_variable("W", [1,2,3,4], initializer = ...)
```
#### tf.get_variable()
[Search for the tf.get_variable documentation](https://www.tensorflow.org/api_docs/python/tf/get_variable). Notice that the documentation says:
```
Gets an existing variable with these parameters or create a new one.
```
So we can use this function to create a tensorflow variable with the specified name, but if a variable with that name already exists, it will return the existing variable instead.
```
# GRADED FUNCTION: initialize_parameters
def initialize_parameters():
"""
Initializes weight parameters to build a neural network with tensorflow. The shapes are:
W1 : [4, 4, 3, 8]
W2 : [2, 2, 8, 16]
Note that we will hard code the shape values in the function to make the grading simpler.
Normally, functions should take values as inputs rather than hard coding.
Returns:
parameters -- a dictionary of tensors containing W1, W2
"""
tf.set_random_seed(1) # so that your "random" numbers match ours
### START CODE HERE ### (approx. 2 lines of code)
W1 = tf.get_variable('W1', shape=(4,4,3,8), initializer=tf.contrib.layers.xavier_initializer(seed=0))
W2 = tf.get_variable('W2', shape=(2,2,8,16), initializer=tf.contrib.layers.xavier_initializer(seed=0))
### END CODE HERE ###
parameters = {"W1": W1,
"W2": W2}
return parameters
tf.reset_default_graph()
with tf.Session() as sess_test:
parameters = initialize_parameters()
init = tf.global_variables_initializer()
sess_test.run(init)
print("W1[1,1,1] = \n" + str(parameters["W1"].eval()[1,1,1]))
print("W1.shape: " + str(parameters["W1"].shape))
print("\n")
print("W2[1,1,1] = \n" + str(parameters["W2"].eval()[1,1,1]))
print("W2.shape: " + str(parameters["W2"].shape))
```
**Expected Output:**
```
W1[1,1,1] =
[ 0.00131723 0.14176141 -0.04434952 0.09197326 0.14984085 -0.03514394
-0.06847463 0.05245192]
W1.shape: (4, 4, 3, 8)
W2[1,1,1] =
[-0.08566415 0.17750949 0.11974221 0.16773748 -0.0830943 -0.08058
-0.00577033 -0.14643836 0.24162132 -0.05857408 -0.19055021 0.1345228
-0.22779644 -0.1601823 -0.16117483 -0.10286498]
W2.shape: (2, 2, 8, 16)
```
### 1.3 - Forward propagation
In TensorFlow, there are built-in functions that implement the convolution steps for you.
- **tf.nn.conv2d(X,W, strides = [1,s,s,1], padding = 'SAME'):** given an input $X$ and a group of filters $W$, this function convolves $W$'s filters on X. The third parameter ([1,s,s,1]) represents the strides for each dimension of the input (m, n_H_prev, n_W_prev, n_C_prev). Normally, you'll choose a stride of 1 for the number of examples (the first value) and for the channels (the fourth value), which is why we wrote the value as `[1,s,s,1]`. You can read the full documentation on [conv2d](https://www.tensorflow.org/api_docs/python/tf/nn/conv2d).
- **tf.nn.max_pool(A, ksize = [1,f,f,1], strides = [1,s,s,1], padding = 'SAME'):** given an input A, this function uses a window of size (f, f) and strides of size (s, s) to carry out max pooling over each window. For max pooling, we usually operate on a single example at a time and a single channel at a time. So the first and fourth value in `[1,f,f,1]` are both 1. You can read the full documentation on [max_pool](https://www.tensorflow.org/api_docs/python/tf/nn/max_pool).
- **tf.nn.relu(Z):** computes the elementwise ReLU of Z (which can be any shape). You can read the full documentation on [relu](https://www.tensorflow.org/api_docs/python/tf/nn/relu).
- **tf.contrib.layers.flatten(P)**: given a tensor "P", this function takes each training (or test) example in the batch and flattens it into a 1D vector.
* If a tensor P has the shape (m,h,w,c), where m is the number of examples (the batch size), it returns a flattened tensor with shape (batch_size, k), where $k=h \times w \times c$. "k" equals the product of all the dimension sizes other than the first dimension.
* For example, given a tensor with dimensions [100,2,3,4], it flattens the tensor to be of shape [100, 24], where 24 = 2 * 3 * 4. You can read the full documentation on [flatten](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/flatten).
- **tf.contrib.layers.fully_connected(F, num_outputs):** given the flattened input F, it returns the output computed using a fully connected layer. You can read the full documentation on [full_connected](https://www.tensorflow.org/api_docs/python/tf/contrib/layers/fully_connected).
In the last function above (`tf.contrib.layers.fully_connected`), the fully connected layer automatically initializes weights in the graph and keeps on training them as you train the model. Hence, you did not need to initialize those weights when initializing the parameters.
#### Window, kernel, filter
The words "window", "kernel", and "filter" are used to refer to the same thing. This is why the parameter `ksize` refers to "kernel size", and we use `(f,f)` to refer to the filter size. Both "kernel" and "filter" refer to the "window."
**Exercise**
Implement the `forward_propagation` function below to build the following model: `CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED`. You should use the functions above.
In detail, we will use the following parameters for all the steps:
- Conv2D: stride 1, padding is "SAME"
- ReLU
- Max pool: Use an 8 by 8 filter size and an 8 by 8 stride, padding is "SAME"
- Conv2D: stride 1, padding is "SAME"
- ReLU
- Max pool: Use a 4 by 4 filter size and a 4 by 4 stride, padding is "SAME"
- Flatten the previous output.
- FULLYCONNECTED (FC) layer: Apply a fully connected layer without a non-linear activation function. Do not call the softmax here. This will result in 6 neurons in the output layer, which then get passed later to a softmax. In TensorFlow, the softmax and cost function are lumped together into a single function, which you'll call in a different function when computing the cost.
```
# GRADED FUNCTION: forward_propagation
def forward_propagation(X, parameters):
"""
Implements the forward propagation for the model:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Note that for simplicity and grading purposes, we'll hard-code some values
such as the stride and kernel (filter) sizes.
Normally, functions should take these values as function parameters.
Arguments:
X -- input dataset placeholder, of shape (input size, number of examples)
parameters -- python dictionary containing your parameters "W1", "W2"
the shapes are given in initialize_parameters
Returns:
Z3 -- the output of the last LINEAR unit
"""
# Retrieve the parameters from the dictionary "parameters"
W1 = parameters['W1']
W2 = parameters['W2']
### START CODE HERE ###
# CONV2D: stride of 1, padding 'SAME'
Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
# RELU
A1 = tf.nn.relu(Z1)
# MAXPOOL: window 8x8, stride 8, padding 'SAME'
P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
# CONV2D: filters W2, stride 1, padding 'SAME'
Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
# RELU
A2 = tf.nn.relu(Z2)
# MAXPOOL: window 4x4, stride 4, padding 'SAME'
P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
# FLATTEN
P2 = tf.contrib.layers.flatten(P2)
# FULLY-CONNECTED without non-linear activation function (do not call softmax).
# 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn=None)
### END CODE HERE ###
return Z3
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(Z3, {X: np.random.randn(2,64,64,3), Y: np.random.randn(2,6)})
print("Z3 = \n" + str(a))
```
**Expected Output**:
```
Z3 =
[[-0.44670227 -1.57208765 -1.53049231 -2.31013036 -1.29104376 0.46852064]
[-0.17601591 -1.57972014 -1.4737016 -2.61672091 -1.00810647 0.5747785 ]]
```
### 1.4 - Compute cost
Implement the compute cost function below. Remember that the cost function helps the neural network see how much the model's predictions differ from the correct labels. By adjusting the weights of the network to reduce the cost, the neural network can improve its predictions.
You might find these two functions helpful:
- **tf.nn.softmax_cross_entropy_with_logits(logits = Z, labels = Y):** computes the softmax entropy loss. This function both computes the softmax activation function as well as the resulting loss. You can check the full documentation [softmax_cross_entropy_with_logits](https://www.tensorflow.org/api_docs/python/tf/nn/softmax_cross_entropy_with_logits).
- **tf.reduce_mean:** computes the mean of elements across dimensions of a tensor. Use this to calculate the sum of the losses over all the examples to get the overall cost. You can check the full documentation [reduce_mean](https://www.tensorflow.org/api_docs/python/tf/reduce_mean).
#### Details on softmax_cross_entropy_with_logits (optional reading)
* Softmax is used to format outputs so that they can be used for classification. It assigns a value between 0 and 1 for each category, where the sum of all prediction values (across all possible categories) equals 1.
* Cross entropy compares the model's predicted classifications with the actual labels and results in a numerical value representing the "loss" of the model's predictions.
* "Logits" are the result of multiplying the weights and adding the biases. Logits are passed through an activation function (such as a relu), and the result is called the "activation."
* The function named `softmax_cross_entropy_with_logits` takes logits as input (not activations); it first applies softmax to get predictions and then compares those predictions with the true labels using cross entropy. Both steps are done in a single function to optimize the calculations, as sketched below.
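As a rough NumPy illustration (not part of the graded code), the two steps that the function fuses look like this for a single example; TensorFlow combines them internally for numerical stability:
```
import numpy as np

logits = np.array([2.0, 1.0, 0.1])   # raw scores: weights * inputs + biases
labels = np.array([1.0, 0.0, 0.0])   # one-hot true label

probs = np.exp(logits) / np.sum(np.exp(logits))  # softmax: entries in (0, 1) that sum to 1
loss = -np.sum(labels * np.log(probs))           # cross entropy against the true label
print(probs, loss)
```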
**Exercise**: Compute the cost below using the function above.
```
# GRADED FUNCTION: compute_cost
def compute_cost(Z3, Y):
"""
Computes the cost
Arguments:
Z3 -- output of forward propagation (output of the last LINEAR unit), of shape (number of examples, 6)
Y -- "true" labels vector placeholder, same shape as Z3
Returns:
cost - Tensor of the cost function
"""
### START CODE HERE ### (1 line of code)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=Z3, labels=Y))
### END CODE HERE ###
return cost
tf.reset_default_graph()
with tf.Session() as sess:
np.random.seed(1)
X, Y = create_placeholders(64, 64, 3, 6)
parameters = initialize_parameters()
Z3 = forward_propagation(X, parameters)
cost = compute_cost(Z3, Y)
init = tf.global_variables_initializer()
sess.run(init)
a = sess.run(cost, {X: np.random.randn(4,64,64,3), Y: np.random.randn(4,6)})
print("cost = " + str(a))
```
**Expected Output**:
```
cost = 2.91034
```
## 1.5 Model
Finally you will merge the helper functions you implemented above to build a model. You will train it on the SIGNS dataset.
**Exercise**: Complete the function below.
The model below should:
- create placeholders
- initialize parameters
- forward propagate
- compute the cost
- create an optimizer
Finally you will create a session and run a for loop for num_epochs, get the mini-batches, and then for each mini-batch you will optimize the function. [Hint for initializing the variables](https://www.tensorflow.org/api_docs/python/tf/global_variables_initializer)
#### Adam Optimizer
You can use `tf.train.AdamOptimizer(learning_rate = ...)` to create the optimizer. The optimizer has a `minimize(loss=...)` function that you'll call to set the cost function that the optimizer will minimize.
For details, check out the documentation for [Adam Optimizer](https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer)
#### Random mini batches
If you took course 2 of the deep learning specialization, you implemented `random_mini_batches()` in the "Optimization" programming assignment. This function returns a list of mini-batches. It is already implemented in the `cnn_utils.py` file and imported here, so you can call it like this:
```Python
minibatches = random_mini_batches(X, Y, mini_batch_size = 64, seed = 0)
```
(You will want to choose the correct variable names when you use it in your code).
#### Evaluating the optimizer and cost
Within a loop, for each mini-batch, you'll use the `tf.Session` object (named `sess`) to feed a mini-batch of inputs and labels into the neural network and evaluate the tensors for the optimizer as well as the cost. Remember that we built a graph data structure and need to feed it inputs and labels and use `sess.run()` in order to get values for the optimizer and cost.
You'll use this kind of syntax:
```
output_for_var1, output_for_var2 = sess.run(
fetches=[var1, var2],
feed_dict={var_inputs: the_batch_of_inputs,
var_labels: the_batch_of_labels}
)
```
* Notice that `sess.run` takes its first argument `fetches` as a list of objects that you want it to evaluate (in this case, we want to evaluate the optimizer and the cost).
* It also takes a dictionary for the `feed_dict` parameter.
* The keys are the `tf.placeholder` variables that we created in the `create_placeholders` function above.
* The values are the variables holding the actual numpy arrays for each mini-batch.
* The sess.run outputs a tuple of the evaluated tensors, in the same order as the list given to `fetches`.
For more information on how to use sess.run, see the [tf.Session#run](https://www.tensorflow.org/api_docs/python/tf/Session#run) documentation.
```
# GRADED FUNCTION: model
def model(X_train, Y_train, X_test, Y_test, learning_rate = 0.009,
num_epochs = 100, minibatch_size = 64, print_cost = True):
"""
Implements a three-layer ConvNet in Tensorflow:
CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED
Arguments:
X_train -- training set, of shape (None, 64, 64, 3)
Y_train -- training labels, of shape (None, n_y = 6)
X_test -- test set, of shape (None, 64, 64, 3)
Y_test -- test labels, of shape (None, n_y = 6)
learning_rate -- learning rate of the optimization
num_epochs -- number of epochs of the optimization loop
minibatch_size -- size of a minibatch
print_cost -- True to print the cost every 100 epochs
Returns:
train_accuracy -- real number, accuracy on the train set (X_train)
test_accuracy -- real number, testing accuracy on the test set (X_test)
parameters -- parameters learnt by the model. They can then be used to predict.
"""
ops.reset_default_graph() # to be able to rerun the model without overwriting tf variables
tf.set_random_seed(1) # to keep results consistent (tensorflow seed)
seed = 3 # to keep results consistent (numpy seed)
(m, n_H0, n_W0, n_C0) = X_train.shape
n_y = Y_train.shape[1]
costs = [] # To keep track of the cost
# Create Placeholders of the correct shape
### START CODE HERE ### (1 line)
X, Y = create_placeholders(n_H0, n_W0, n_C0, n_y)
### END CODE HERE ###
# Initialize parameters
### START CODE HERE ### (1 line)
parameters = initialize_parameters()
### END CODE HERE ###
# Forward propagation: Build the forward propagation in the tensorflow graph
### START CODE HERE ### (1 line)
Z3 = forward_propagation(X, parameters)
### END CODE HERE ###
# Cost function: Add cost function to tensorflow graph
### START CODE HERE ### (1 line)
cost = compute_cost(Z3, Y)
### END CODE HERE ###
# Backpropagation: Define the tensorflow optimizer. Use an AdamOptimizer that minimizes the cost.
### START CODE HERE ### (1 line)
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
### END CODE HERE ###
# Initialize all the variables globally
init = tf.global_variables_initializer()
# Start the session to compute the tensorflow graph
with tf.Session() as sess:
# Run the initialization
sess.run(init)
# Do the training loop
for epoch in range(num_epochs):
minibatch_cost = 0.
num_minibatches = int(m / minibatch_size) # number of minibatches of size minibatch_size in the train set
seed = seed + 1
minibatches = random_mini_batches(X_train, Y_train, minibatch_size, seed)
for minibatch in minibatches:
# Select a minibatch
(minibatch_X, minibatch_Y) = minibatch
"""
# IMPORTANT: The line that runs the graph on a minibatch.
# Run the session to execute the optimizer and the cost.
# The feed_dict should contain a minibatch for (X,Y).
"""
### START CODE HERE ### (1 line)
_ , temp_cost =sess.run([optimizer, cost], {X: minibatch_X, Y: minibatch_Y})
### END CODE HERE ###
minibatch_cost += temp_cost / num_minibatches
# Print the cost every 5 epochs and record it every epoch
if print_cost == True and epoch % 5 == 0:
print ("Cost after epoch %i: %f" % (epoch, minibatch_cost))
if print_cost == True and epoch % 1 == 0:
costs.append(minibatch_cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
# Calculate the correct predictions
predict_op = tf.argmax(Z3, 1)
correct_prediction = tf.equal(predict_op, tf.argmax(Y, 1))
# Calculate accuracy on the test set
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(accuracy)
train_accuracy = accuracy.eval({X: X_train, Y: Y_train})
test_accuracy = accuracy.eval({X: X_test, Y: Y_test})
print("Train Accuracy:", train_accuracy)
print("Test Accuracy:", test_accuracy)
return train_accuracy, test_accuracy, parameters
```
Run the following cell to train your model for 100 epochs. Check if your cost after epoch 0 and 5 matches our output. If not, stop the cell and go back to your code!
```
_, _, parameters = model(X_train, Y_train, X_test, Y_test)
```
**Expected output**: although it may not match perfectly, your expected output should be close to ours and your cost value should decrease.
<table>
<tr>
<td>
**Cost after epoch 0 =**
</td>
<td>
1.917929
</td>
</tr>
<tr>
<td>
**Cost after epoch 5 =**
</td>
<td>
1.506757
</td>
</tr>
<tr>
<td>
**Train Accuracy =**
</td>
<td>
0.940741
</td>
</tr>
<tr>
<td>
**Test Accuracy =**
</td>
<td>
0.783333
</td>
</tr>
</table>
Congratulations! You have finished the assignment and built a model that recognizes SIGN language with almost 80% accuracy on the test set. If you wish, feel free to play around with this dataset further. You can actually improve its accuracy by spending more time tuning the hyperparameters, or using regularization (as this model clearly has a high variance).
Once again, here's a thumbs up for your work!
```
fname = "images/thumbs_up.jpg"
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(64,64))
plt.imshow(my_image)
```
# k-Nearest Neighbor (kNN) implementation
*Credits: this notebook is deeply based on Stanford CS231n course assignment 1. Source link: http://cs231n.github.io/assignments2019/assignment1/*
The kNN classifier consists of two stages:
- During training, the classifier takes the training data and simply remembers it
- During testing, kNN classifies every test image by comparing to all training images and transferring the labels of the k most similar training examples
- The value of k is cross-validated
In this exercise you will implement these steps and understand the basic Image Classification pipeline and gain proficiency in writing efficient, vectorized code.
We will work with the handwritten digits dataset. Images will be flattened (8x8 sized image -> 64 sized vector) and treated as vectors.
```
'''
If you are using Google Colab, uncomment the next line to download `k_nearest_neighbor.py`.
You can open and change it in Colab using the "Files" sidebar on the left.
'''
# !wget https://raw.githubusercontent.com/girafe-ai/ml-mipt/basic_s20/homeworks_basic/assignment0_01_kNN/k_nearest_neighbor.py
from sklearn import datasets
dataset = datasets.load_digits()
print(dataset.DESCR)
# First 100 images will be used for testing. This dataset is not sorted by the labels, so it's ok
# to do the split this way.
# Please be careful when you split your data into train and test in general.
test_border = 100
X_train, y_train = dataset.data[test_border:], dataset.target[test_border:]
X_test, y_test = dataset.data[:test_border], dataset.target[:test_border]
print('Training data shape: ', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
num_test = X_test.shape[0]
# Run some setup code for this notebook.
import random
import numpy as np
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (14.0, 12.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = list(np.arange(10))
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].reshape((8, 8)).astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
```
Autoreload is great, but sometimes it does not work as intended. The code below aims to fix that. __Do not forget to save your changes in the `.py` file before reloading the `KNearestNeighbor` class.__
```
# This dirty hack might help if the autoreload has failed for some reason
try:
del KNearestNeighbor
except:
pass
from k_nearest_neighbor import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.fit(X_train, y_train)
X_train.shape
```
We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
1. First we must compute the distances between all test examples and all train examples.
2. Given these distances, for each test example we find the k nearest examples and have them vote for the label
Let's begin with computing the distance matrix between all training and test examples. For example, if there are **Ntr** training examples and **Nte** test examples, this stage should result in a **Nte x Ntr** matrix where each element (i,j) is the distance between the i-th test and j-th train example.
**Note: For the three distance computations that we require you to implement in this notebook, you may not use the np.linalg.norm() function that numpy provides.**
First, open `k_nearest_neighbor.py` and implement the function `compute_distances_two_loops` that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
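A minimal sketch of the idea (the graded version is a method of `KNearestNeighbor` in `k_nearest_neighbor.py` that uses `self.X_train`; the standalone function below is only illustrative):
```
import numpy as np

def compute_distances_two_loops_sketch(X_test, X_train):
    """Euclidean distance between every (test, train) pair, one element at a time."""
    num_test, num_train = X_test.shape[0], X_train.shape[0]
    dists = np.zeros((num_test, num_train))
    for i in range(num_test):
        for j in range(num_train):
            # note: no np.linalg.norm, as required by the assignment
            dists[i, j] = np.sqrt(np.sum((X_test[i] - X_train[j]) ** 2))
    return dists
```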
```
# Open k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_two_loops(X_test)
print(dists.shape)
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
```
**Inline Question 1**
Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
- What in the data is the cause behind the distinctly bright rows?
- What causes the columns?
$\color{blue}{\textit Your Answer:}$ *To my mind, if some point in the test data is noisy (we cannot recognize it), the corresponding row will be brighter. For the columns the situation is the same: if a noisy point exists in the train data, we will see a brighter column.*
```
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
```
You should expect to see approximately `95%` accuracy. Now let's try out a larger `k`, say `k = 5`:
```
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
```
Accuracy should slightly decrease with `k = 5` compared to `k = 1`.
**Inline Question 2**
We can also use other distance metrics such as L1 distance.
For pixel values $p_{ij}^{(k)}$ at location $(i,j)$ of some image $I_k$,
the mean $\mu$ across all pixels over all images is $$\mu=\frac{1}{nhw}\sum_{k=1}^n\sum_{i=1}^{h}\sum_{j=1}^{w}p_{ij}^{(k)}$$
And the pixel-wise mean $\mu_{ij}$ across all images is
$$\mu_{ij}=\frac{1}{n}\sum_{k=1}^np_{ij}^{(k)}.$$
The general standard deviation $\sigma$ and pixel-wise standard deviation $\sigma_{ij}$ is defined similarly.
Which of the following preprocessing steps will not change the performance of a Nearest Neighbor classifier that uses L1 distance? Select all that apply.
1. Subtracting the mean $\mu$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu$.)
2. Subtracting the per pixel mean $\mu_{ij}$ ($\tilde{p}_{ij}^{(k)}=p_{ij}^{(k)}-\mu_{ij}$.)
3. Subtracting the mean $\mu$ and dividing by the standard deviation $\sigma$.
4. Subtracting the pixel-wise mean $\mu_{ij}$ and dividing by the pixel-wise standard deviation $\sigma_{ij}$.
5. Rotating the coordinate axes of the data.
$\color{blue}{\textit Your Answer:}$ 1, 2, 3, 4
$\color{blue}{\textit Your Explanation:}$
1. We just subtract the same value from all points, which means every point is shifted by the same amount along each axis, so the L1 distances are unchanged.
2. The same.
3. We just scale all distances between points by the same factor, which doesn't change the performance.
4. The same.
5. Rotating the coordinate axes changes the performance of kNN with L1 distance, because the L1 distances between points can change under rotation.
```
# Now lets speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print('One loop difference was: %f' % (difference, ))
if difference < 0.001:
print('Good! The distance matrices are the same')
else:
print('Uh-oh! The distance matrices are different')
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print('No loop difference was: %f' % (difference, ))
if difference < 0.001:
print('Good! The distance matrices are the same')
else:
print('Uh-oh! The distance matrices are different')
```
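For reference, one common fully vectorized formulation expands $\lVert x-y \rVert^2 = \lVert x \rVert^2 - 2\,x \cdot y + \lVert y \rVert^2$ and computes all pairs at once via broadcasting (a sketch, not necessarily identical to the graded solution):
```
import numpy as np

def compute_distances_no_loops_sketch(X_test, X_train):
    test_sq = np.sum(X_test ** 2, axis=1).reshape(-1, 1)    # shape (num_test, 1)
    train_sq = np.sum(X_train ** 2, axis=1).reshape(1, -1)  # shape (1, num_train)
    cross = X_test.dot(X_train.T)                           # shape (num_test, num_train)
    # clip tiny negative values caused by floating point error before taking the sqrt
    return np.sqrt(np.maximum(test_sq - 2 * cross + train_sq, 0.0))
```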
### Comparing handcrafted and `sklearn` implementations
In this section we will just compare the performance of handcrafted and `sklearn` kNN algorithms. The predictions should be the same. No need to write any code in this section.
```
from sklearn import neighbors
implemented_knn = KNearestNeighbor()
implemented_knn.fit(X_train, y_train)
n_neighbors = 1
external_knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
external_knn.fit(X_train, y_train)
print('sklearn kNN (k=1) implementation achieves: {} accuracy on the test set'.format(
external_knn.score(X_test, y_test)
))
y_predicted = implemented_knn.predict(X_test, k=n_neighbors).astype(int)
accuracy_score = sum((y_predicted==y_test).astype(float)) / num_test
print('Handcrafted kNN (k=1) implementation achieves: {} accuracy on the test set'.format(accuracy_score))
assert np.array_equal(
external_knn.predict(X_test),
y_predicted
), 'Labels predicted by handcrafted and sklearn kNN implementations are different!'
print('\nsklearn and handcrafted kNN implementations provide same predictions')
print('_'*76)
n_neighbors = 5
external_knn = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors)
external_knn.fit(X_train, y_train)
print('sklearn kNN (k=5) implementation achieves: {} accuracy on the test set'.format(
external_knn.score(X_test, y_test)
))
y_predicted = implemented_knn.predict(X_test, k=n_neighbors).astype(int)
accuracy_score = sum((y_predicted==y_test).astype(float)) / num_test
print('Handcrafted kNN (k=5) implementation achieves: {} accuracy on the test set'.format(accuracy_score))
assert np.array_equal(
external_knn.predict(X_test),
y_predicted
), 'Labels predicted by handcrafted and sklearn kNN implementations are different!'
print('\nsklearn and handcrafted kNN implementations provide same predictions')
print('_'*76)
```
### Measuring the time
Finally let's compare how fast the implementations are.
To make the difference more noticeable, let's repeat the train and test objects (there is no purpose other than computing distances between more pairs).
```
X_train_big = np.vstack([X_train]*5)
X_test_big = np.vstack([X_test]*5)
y_train_big = np.hstack([y_train]*5)
y_test_big = np.hstack([y_test]*5)
classifier_big = KNearestNeighbor()
classifier_big.fit(X_train_big, y_train_big)
# Let's compare how fast the implementations are
def time_function(f, *args):
"""
Call a function f with args and return the time (in seconds) that it took to execute.
"""
import time
tic = time.time()
f(*args)
toc = time.time()
return toc - tic
two_loop_time = time_function(classifier_big.compute_distances_two_loops, X_test_big)
print('Two loop version took %f seconds' % two_loop_time)
one_loop_time = time_function(classifier_big.compute_distances_one_loop, X_test_big)
print('One loop version took %f seconds' % one_loop_time)
no_loop_time = time_function(classifier_big.compute_distances_no_loops, X_test_big)
print('No loop version took %f seconds' % no_loop_time)
# You should see significantly faster performance with the fully vectorized implementation!
# NOTE: depending on what machine you're using,
# you might not see a speedup when you go from two loops to one loop,
# and might even see a slow-down.
```
The improvement is significant. (On some hardware the one-loop version may take even more time than the two-loop version, but the no-loop version should definitely be the fastest.)
**Inline Question 3**
Which of the following statements about $k$-Nearest Neighbor ($k$-NN) are true in a classification setting, and for all $k$? Select all that apply.
1. The decision boundary (hyperplane between classes in feature space) of the k-NN classifier is linear.
2. The training error of a 1-NN will always be lower than that of 5-NN.
3. The test error of a 1-NN will always be lower than that of a 5-NN.
4. The time needed to classify a test example with the k-NN classifier grows with the size of the training set.
5. None of the above.
$\color{blue}{\textit Your Answer:}$ 2, 4
$\color{blue}{\textit Your Explanation:}$
1. The decision boundary depends on the distances to the k closest points; we make no assumption of linearity, so it is generally not linear.
2. Yes, because a 1-NN overfits to the single closest point.
3. Not always: in most cases the test error of a 1-NN will be higher than that of a 5-NN, because it uses just one point to predict the label of each test point.
4. Yes, because classifying a test example requires computing distances to all training points.
### Submitting your work
To submit your work you need to log into Yandex contest (link will be provided later) and upload the `k_nearest_neighbor.py` file for the corresponding problem
# Run Modes
Running MAGICC in different modes can be non-trivial. In this notebook we show how to set MAGICC's config flags so that it will run as desired for a few different cases.
```
# NBVAL_IGNORE_OUTPUT
from os.path import join
import datetime
import dateutil
from copy import deepcopy
import numpy as np
import pandas as pd
from pymagicc import MAGICC6, rcp26, zero_emissions
from pymagicc.io import MAGICCData
%matplotlib inline
from matplotlib import pyplot as plt
plt.style.use("ggplot")
plt.rcParams["figure.figsize"] = (12, 6)
```
## Concentration to emissions hybrid
This is MAGICC's default run mode. In this run mode, MAGICC will run with prescribed concentrations (or a quantity which scales linearly with radiative forcing for aerosol species) until a given point in time and will then switch to running in emissions driven mode.
```
with MAGICC6() as magicc:
res = magicc.run(rcp26)
# NBVAL_IGNORE_OUTPUT
res.head()
plt.figure()
res.filter(variable="Emis*CO2*", region="World").line_plot(hue="variable")
plt.figure()
res.filter(variable="Atmos*Conc*CO2", region="World").line_plot(hue="variable");
```
The switches which control the time at which MAGICC switches from concentrations driven to emissions driven are all in the form `GAS_SWITCHFROMXXX2EMIS_YEAR` e.g. `CO2_SWITCHFROMCONC2EMIS_YEAR` and `BCOC_SWITCHFROMRF2EMIS_YEAR`.
Changing the value of these switches will alter how MAGICC runs.
```
# NBVAL_IGNORE_OUTPUT
df = deepcopy(rcp26)
df["scenario"] = "RCP26_altered_co2_switch"
with MAGICC6() as magicc:
res = res.append(magicc.run(df, co2_switchfromconc2emis_year=1850))
plt.figure()
res.filter(variable="Emis*CO2*", region="World").line_plot(hue="variable")
plt.figure()
res.filter(variable="Atmos*Conc*CO2", region="World").line_plot(hue="variable");
# NBVAL_IGNORE_OUTPUT
res.timeseries()
```
As we can see, the emissions remain unchanged but the concentrations are altered as MAGICC is now running emissions driven from 1850 rather than 2005 (the default).
To get a fully emissions driven run, you need to change all of the relevant `GAS_SWITCHFROMXXX2EMIS_YEAR` flags, as sketched below.
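For illustration only, here is a hedged sketch using just the two switch names mentioned above (the remaining per-gas flags are not listed here):
```
# a sketch only: move the two switches named above to the scenario's first year;
# a truly fully emissions driven run needs the analogous flag for every gas
with MAGICC6() as magicc:
    res_fully_emis = magicc.run(
        rcp26,
        co2_switchfromconc2emis_year=min(rcp26["time"]).year,
        bcoc_switchfromrf2emis_year=min(rcp26["time"]).year,
    )
```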
## CO$_2$ Emissions Driven Only
We can get a CO$_2$ emissions only driven run like shown.
```
df = zero_emissions.timeseries()
time = zero_emissions["time"]
df.loc[
(
df.index.get_level_values("variable")
== "Emissions|CO2|MAGICC Fossil and Industrial"
),
:,
] = np.linspace(0, 30, len(time))
scen = MAGICCData(df)
scen.filter(variable="Em*CO2*Fossil*").line_plot(
x="time", label="CO2 Fossil", hue=None
)
scen.filter(variable="Em*CO2*Fossil*", keep=False).line_plot(
x="time", label="Everything else", hue=None
);
# NBVAL_IGNORE_OUTPUT
with MAGICC6() as magicc:
co2_only_res = magicc.run(
scen,
endyear=scen["time"].max().year,
rf_total_constantafteryr=5000,
rf_total_runmodus="CO2",
co2_switchfromconc2emis_year=min(scen["time"]).year,
)
for v in [
"Emis*CO2*",
"Atmos*Conc*CO2",
"Radiative Forcing",
"Surface Temperature",
]:
plt.figure()
co2_only_res.filter(variable=v, region="World").line_plot(hue="variable")
```
## Prescribed Forcing Driven Only
It is also possible to examine MAGICC's response to a prescribed radiative forcing only.
```
time = zero_emissions["time"]
forcing_external = 2.0 * np.arange(0, len(time)) / len(time)
forcing_ext = MAGICCData(
forcing_external,
index=time,
columns={
"scenario": ["idealised"],
"model": ["unspecified"],
"climate_model": ["unspecified"],
"variable": ["Radiative Forcing|Extra"],
"unit": ["W / m^2"],
"todo": ["SET"],
"region": ["World"],
},
)
forcing_ext.metadata = {
"header": "External radiative forcing with linear increase"
}
forcing_ext.line_plot(x="time");
with MAGICC6() as magicc:
forcing_ext_filename = "CUSTOM_EXTRA_RF.IN"
forcing_ext.write(
join(magicc.run_dir, forcing_ext_filename), magicc.version
)
ext_forc_only_res = magicc.run(
rf_extra_read=1,
file_extra_rf=forcing_ext_filename,
rf_total_runmodus="QEXTRA",
endyear=max(time).year,
rf_initialization_method="ZEROSTARTSHIFT", # this is default but to be sure
rf_total_constantafteryr=5000,
)
ext_forc_only_res.filter(
variable=["Radiative Forcing", "Surface Temperature"], region="World"
).line_plot(hue="variable")
```
## Zero Temperature Output
Getting MAGICC to return zero for its temperature output is surprisingly difficult. To help address this, we add the `set_zero_config` method to our MAGICC classes.
```
print(MAGICC6.set_zero_config.__doc__)
# NBVAL_IGNORE_OUTPUT
with MAGICC6() as magicc:
magicc.set_zero_config()
res_zero = magicc.run()
res_zero.filter(
variable=["Surface Temperature", "Radiative Forcing"], region="World"
).line_plot(x="time");
```
## CO$_2$ Emissions and Prescribed Forcing
It is also possible to run MAGICC in a mode which is CO$_2$ emissions driven but also includes a prescribed external forcing.
```
df = zero_emissions.timeseries()
time = zero_emissions["time"]
emms_fossil_co2 = (
np.linspace(0, 3, len(time))
- (1 + (np.arange(len(time)) - 500) / 500) ** 2
)
df.loc[
(
df.index.get_level_values("variable")
== "Emissions|CO2|MAGICC Fossil and Industrial"
),
:,
] = emms_fossil_co2
scen = MAGICCData(df)
scen.filter(variable="Em*CO2*Fossil*").line_plot(x="time", hue="variable")
scen.filter(variable="Em*CO2*Fossil*", keep=False).line_plot(
x="time", label="Everything Else"
)
forcing_external = 3.0 * np.arange(0, len(time)) / len(time)
forcing_ext = MAGICCData(
forcing_external,
index=time,
columns={
"scenario": ["idealised"],
"model": ["unspecified"],
"climate_model": ["unspecified"],
"variable": ["Radiative Forcing|Extra"],
"unit": ["W / m^2"],
"todo": ["SET"],
"region": ["World"],
},
)
forcing_ext.metadata = {
"header": "External radiative forcing with linear increase"
}
forcing_ext.line_plot(x="time", hue="variable");
# NBVAL_IGNORE_OUTPUT
scen.timeseries()
with MAGICC6() as magicc:
magicc.set_zero_config() # very important, try commenting this out and see what happens
forcing_ext_filename = "CUSTOM_EXTRA_RF.IN"
forcing_ext.write(
join(magicc.run_dir, forcing_ext_filename), magicc.version
)
co2_emms_ext_forc_res = magicc.run(
scen,
endyear=scen["time"].max().year,
co2_switchfromconc2emis_year=min(scen["time"]).year,
rf_extra_read=1,
file_extra_rf=forcing_ext_filename,
rf_total_runmodus="ALL", # default but just in case
rf_initialization_method="ZEROSTARTSHIFT", # this is default but to be sure
rf_total_constantafteryr=5000,
)
plt.figure()
co2_emms_ext_forc_res.filter(variable="Emis*CO2*", region="World").line_plot(
x="time", hue="variable"
)
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Atmos*Conc*CO2", region="World"
).line_plot(x="time")
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Radiative Forcing", region="World"
).line_plot(x="time")
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Surface Temperature", region="World"
).line_plot(x="time");
```
If we adjust MAGICC's CO$_2$ temperature feedback start year, it is easier to see what is going on.
```
with MAGICC6() as magicc:
magicc.set_zero_config()
forcing_ext_filename = "CUSTOM_EXTRA_RF.IN"
forcing_ext.write(
join(magicc.run_dir, forcing_ext_filename), magicc.version
)
for temp_feedback_year in [2000, 2100, 3000]:
scen["scenario"] = "idealised_{}_CO2_temperature_feedback".format(
temp_feedback_year
)
co2_emms_ext_forc_res.append(
magicc.run(
scen,
endyear=scen["time"].max().year,
co2_switchfromconc2emis_year=min(scen["time"]).year,
rf_extra_read=1,
file_extra_rf=forcing_ext_filename,
rf_total_runmodus="ALL",
rf_initialization_method="ZEROSTARTSHIFT",
rf_total_constantafteryr=5000,
co2_tempfeedback_yrstart=temp_feedback_year,
)
)
co2_emms_ext_forc_res.filter(variable="Emis*CO2*", region="World").line_plot(
x="time", hue="variable"
)
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Atmos*Conc*CO2", region="World"
).line_plot(x="time")
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Radiative Forcing", region="World"
).line_plot(x="time")
plt.figure()
co2_emms_ext_forc_res.filter(
variable="Surface Temperature", region="World"
).line_plot(x="time");
```
## CO$_2$ Concentrations Driven
```
time = zero_emissions["time"]
co2_concs = 278 * np.ones_like(time)
co2_concs[105:] = 278 * 1.01 ** (np.arange(0, len(time[105:])))
co2_concs = MAGICCData(
co2_concs,
index=time,
columns={
"scenario": ["1%/yr CO2"],
"model": ["unspecified"],
"climate_model": ["unspecified"],
"variable": ["Atmospheric Concentrations|CO2"],
"unit": ["ppm"],
"todo": ["SET"],
"region": ["World"],
},
)
co2_concs = co2_concs.filter(year=range(1700, 2001))
time = co2_concs["time"]
co2_concs.metadata = {"header": "1%/yr atmospheric CO2 concentration increase"}
co2_concs.line_plot(x="time");
with MAGICC6() as magicc:
co2_conc_filename = "1PCT_CO2_CONC.IN"
co2_concs.write(join(magicc.run_dir, co2_conc_filename), magicc.version)
co2_conc_driven_res = magicc.run(
file_co2_conc=co2_conc_filename,
co2_switchfromconc2emis_year=max(time).year,
co2_tempfeedback_switch=1,
co2_tempfeedback_yrstart=1870,
co2_fertilization_yrstart=1870,
rf_total_runmodus="CO2",
rf_total_constantafteryr=max(time).year,
endyear=max(time).year,
out_inverseemis=1,
)
plt.figure()
co2_conc_driven_res.filter(
variable="Inverse Emis*CO2*", region="World"
).line_plot()
plt.figure()
co2_conc_driven_res.filter(
variable="Atmos*Conc*CO2", region="World"
).line_plot()
plt.figure()
co2_conc_driven_res.filter(
variable="Radiative Forcing", region="World"
).line_plot()
plt.figure()
co2_conc_driven_res.filter(
variable="Surface Temperature", region="World"
).line_plot();
```
```
import pandas as pd
import numpy as np
import os
from matplotlib.pyplot import *
from IPython.display import display, HTML
import glob
import scanpy as sc
import seaborn as sns
import scipy.stats
%matplotlib inline
file = '/nfs/leia/research/stegle/dseaton/hipsci/singlecell_neuroseq/data/ipsc_singlecell_analysis/sarkar2019_yoruba_ipsc/version0/sarkar2019_yoruba_ipsc.scanpy.dimreduction.harmonyPCA.clustered.h5'
adata_clustered = sc.read(file)
file = '/nfs/leia/research/stegle/dseaton/hipsci/singlecell_neuroseq/data/ipsc_singlecell_analysis/sarkar2019_yoruba_ipsc/version0/sarkar2019_yoruba_ipsc.scanpy.h5'
adatafull = sc.read(file)
in_dir = os.path.dirname(file)
adatafull.obs['cluster_id'] = adata_clustered.obs['louvain'].astype(str)
adatafull.obsm['X_umap'] = adata_clustered.obsm['X_umap']
adatafull.obs['day'] = 'day0'
adatafull.obs['donor_long_id'] = adatafull.obs['chip_id']
adatafull.obs.head()
#subsample
fraction = 1.0
adata = sc.pp.subsample(adatafull, fraction, copy=True)
adata.raw = adata
fig_format = 'png'
# fig_format = 'pdf'
sc.set_figure_params(dpi_save=200,format=fig_format)
#rcParams['figure.figsize'] = 5,4
rcParams['figure.figsize'] = 5,4
plotting_fcn = sc.pl.umap
plotting_fcn(adata, color='cluster_id',size=10)
adata.var
# gene_list = ['NANOG','SOX2','POU5F1','UTF1','SP8']
# corresponding Ensembl gene IDs
# gene_list = ['ENSG00000111704','ENSG00000181449','ENSG00000204531','ENSG00000171794','ENSG00000164651']
gene_list = ['ENSG00000111704','ENSG00000181449','ENSG00000204531','ENSG00000171794','ENSG00000166863']
sc.pl.stacked_violin(adata, gene_list, groupby='cluster_id', figsize=(5,4))
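# fraction of cells in each cluster for every (donor, experiment) pair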
df = adata.obs.groupby(['donor_long_id','experiment','cluster_id'])[['day']].count().fillna(0.0).rename(columns={'day':'count'})
total_counts = adata.obs.groupby(['donor_long_id','experiment'])[['day']].count().rename(columns={'day':'total_count'})
df = df.reset_index()
#.join(donor_total_counts)
df['f_cells'] = df.apply(lambda x: x['count']/total_counts.loc[(x['donor_long_id'],x['experiment']),'total_count'], axis=1)
df = df.dropna()
df.head()
mydir = "/hps/nobackup/stegle/users/acuomo/all_scripts/sc_neuroseq/iPSC_scanpy/"
filename = mydir + 'Sarkar_cluster_cell_fractions_by_donor_experiment.csv'
df.to_csv(filename)
sc.tl.rank_genes_groups(adata, groupby='cluster_id', n_genes=1e6)
# group_names = pval_df.columns
group_names = [str(x) for x in range(4)]
df_list = []
for group_name in group_names:
column_names = ['names','pvals','pvals_adj','logfoldchanges','scores']
data = [pd.DataFrame(adata.uns['rank_genes_groups'][col])[group_name] for col in column_names]
temp_df = pd.DataFrame(data, index=column_names).transpose()
temp_df['cluster_id'] = group_name
df_list.append(temp_df)
diff_expression_df = pd.concat(df_list)
diff_expression_df.head()
diff_exp_file = mydir + 'Sarkar2019' + '.cluster_expression_markers.tsv'
diff_expression_df.to_csv(diff_exp_file, sep='\t', index=False)
diff_expression_df.query('cluster_id=="0"').to_csv(diff_exp_file.replace('.tsv','.cluster0.tsv'), sep='\t', index=False)
diff_expression_df.query('cluster_id=="1"').to_csv(diff_exp_file.replace('.tsv','.cluster1.tsv'), sep='\t', index=False)
diff_expression_df.query('cluster_id=="2"').to_csv(diff_exp_file.replace('.tsv','.cluster2.tsv'), sep='\t', index=False)
diff_expression_df.query('cluster_id=="3"').to_csv(diff_exp_file.replace('.tsv','.cluster3.tsv'), sep='\t', index=False)
```
|
github_jupyter
|
```
%load_ext autoreload
%autoreload 2
```
# Generate images
```
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
SMALL_SIZE = 15
MEDIUM_SIZE = 20
BIGGER_SIZE = 25
plt.rc("font", size=SMALL_SIZE)
plt.rc("axes", titlesize=SMALL_SIZE)
plt.rc("axes", labelsize=MEDIUM_SIZE)
plt.rc("xtick", labelsize=SMALL_SIZE)
plt.rc("ytick", labelsize=SMALL_SIZE)
plt.rc("legend", fontsize=SMALL_SIZE)
plt.rc("figure", titlesize=BIGGER_SIZE)
DATA_PATH = Path("../thesis/img/")
```
# DTW
```
from fastdtw import fastdtw
ts_0 = np.sin(np.logspace(0, np.log10(2 * np.pi), 30))
ts_1 = np.sin(np.linspace(1, 2 * np.pi, 30))
distance, warping_path = fastdtw(ts_0, ts_1)
fig, axs = plt.subplots(2, 1, figsize=(8, 8), sharex=True)
for name, ax in zip(["Euclidean distance", "Dynamic Time Warping"], axs):
ax.plot(ts_0 + 1, "o-", linewidth=3)
ax.plot(ts_1, "o-", linewidth=3)
ax.set_yticks([])
ax.set_xticks([])
ax.set_title(name)
for x, y in zip(zip(np.arange(30), np.arange(30)), zip(ts_0 + 1, ts_1)):
axs[0].plot(x, y, "r--", linewidth=2, alpha=0.5)
for x_0, x_1 in warping_path:
axs[1].plot([x_0, x_1], [ts_0[x_0] + 1, ts_1[x_1]], "r--", linewidth=2, alpha=0.5)
plt.tight_layout()
plt.savefig(DATA_PATH / "dtw_vs_euclid.svg")
plt.show()
matrix = (ts_0.reshape(-1, 1) - ts_1) ** 2
x = [x for x, _ in warping_path]
y = [y for _, y in warping_path]
# plt.close('all')
fig = plt.figure(figsize=(8, 8))
gs = fig.add_gridspec(
2,
2,
width_ratios=(1, 8),
height_ratios=(8, 1),
left=0.1,
right=0.9,
bottom=0.1,
top=0.9,
wspace=0.01,
hspace=0.01,
)
fig.tight_layout()
ax_ts_x = fig.add_subplot(gs[0, 0])
ax_ts_y = fig.add_subplot(gs[1, 1])
ax = fig.add_subplot(gs[0, 1], sharex=ax_ts_y, sharey=ax_ts_x)
ax.set_xticks([])
ax.set_yticks([])
ax.tick_params(axis="x", labelbottom=False)
ax.tick_params(axis="y", labelleft=False)
fig.suptitle("DTW calculated optimal warping path")
im = ax.imshow(np.log1p(matrix), origin="lower", cmap="bone_r")
ax.plot(y, x, "r", linewidth=4, label="Optimal warping path")
ax.plot(
[0, 29], [0, 29], "--", linewidth=3, color="black", label="Default warping path"
)
ax.legend()
ax_ts_x.plot(ts_0 * -1, np.arange(30), linewidth=4, color="#1f77b4")
# ax_ts_x.set_yticks(np.arange(30))
ax_ts_x.set_ylim(-0.5, 29.5)
ax_ts_x.set_xlim(-1.5, 1.5)
ax_ts_x.set_xticks([])
ax_ts_y.plot(ts_1, linewidth=4, color="#ff7f0e")
# ax_ts_y.set_xticks(np.arange(30))
ax_ts_y.set_xlim(-0.5, 29.5)
ax_ts_y.set_ylim(-1.5, 1.5)
ax_ts_y.set_yticks([])
# cbar = plt.colorbar(im, ax=ax, use_gridspec=False, panchor=False)
plt.savefig(DATA_PATH / "dtw_warping_path.svg")
plt.show()
```
# TSNE
```
import mpl_toolkits.mplot3d.axes3d as p3
from sklearn.datasets import make_s_curve, make_swiss_roll
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
n_samples = 1500
X, y = make_swiss_roll(n_samples, noise=0.1)
X, y = make_s_curve(n_samples, random_state=42)
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection="3d")
ax.view_init(20, -60)
# ax.set_title("S curve dataset", fontsize=18)
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)
ax.set_yticklabels([])
ax.set_xticklabels([])
ax.set_zticklabels([])
fig.tight_layout()
plt.savefig(DATA_PATH / "s_dataset.svg", bbox_inches=0)
plt.show()
X_pca = PCA(n_components=2, random_state=42).fit_transform(X)
X_tsne = TSNE(n_components=2, perplexity=30, init="pca", random_state=42).fit_transform(
X
)
fig = plt.figure(figsize=(10, 10))
# plt.title("PCA transformation", fontsize=18)
plt.scatter(X_pca[:, 0], X_pca[:, 1], c=y)
plt.xticks([])
plt.yticks([])
plt.savefig(DATA_PATH / "s_dataset_pca.svg")
plt.show()
fig = plt.figure(figsize=(10, 10))
# plt.title("t-SNE transformation", fontsize=18)
plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=y)
plt.xticks([])
plt.yticks([])
plt.savefig(DATA_PATH / "s_dataset_tsne.svg")
plt.show()
```
# Datashader
```
import datashader as ds
import datashader.transfer_functions as tf
import matplotlib.patches as mpatches
from lttb import downsample
np.random.seed(42)
signal = np.random.normal(0, 10, size=10 ** 6).cumsum() + np.sin(
np.linspace(0, 100 * np.pi, 10 ** 6)
) * np.random.normal(0, 1, size=10 ** 6)
s_frame = pd.DataFrame(signal, columns=["signal"]).reset_index()
x = 1500
y = 500
cvs = ds.Canvas(plot_height=y, plot_width=x)
line = cvs.line(s_frame, "index", "signal")
img = tf.shade(line).to_pil()
trans = downsample(s_frame.values, 100)
trans[:, 0] /= trans[:, 0].max()
trans[:, 0] *= x
trans[:, 1] *= -1
trans[:, 1] -= trans[:, 1].min()
trans[:, 1] /= trans[:, 1].max()
trans[:, 1] *= y
fig, ax = plt.subplots(figsize=(x / 60, y / 60))
plt.imshow(img, origin="upper")
plt.plot(*trans.T, "r", alpha=0.6, linewidth=2)
plt.legend(
handles=[
mpatches.Patch(color="blue", label="Datashader (10^6 points)"),
mpatches.Patch(color="red", label="LTTB (10^3 points)"),
],
prop={"size": 25},
)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.ylabel("Value", fontsize=25)
plt.xlabel("Time", fontsize=25)
plt.tight_layout()
plt.savefig(DATA_PATH / "datashader.png")
plt.show()
```
# LTTB
```
from matplotlib import cm
from matplotlib.colors import Normalize
from matplotlib.patches import Polygon
np.random.seed(42)
ns = np.random.normal(0, 1, size=26).cumsum()
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
plt.plot(ns, "-o", linewidth=2)
mapper = cm.ScalarMappable(Normalize(vmin=0, vmax=15, clip=True), cmap="autumn_r")
areas = []
for i, data in enumerate(ns[:-2], 1):
cors = [[i + ui, ns[i + ui]] for ui in range(-1, 2)]
x = [m[0] for m in cors]
y = [m[1] for m in cors]
ea = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) * 10
areas.append(ea)
color = mapper.to_rgba(ea)
plt.plot([i], [ns[i]], "o", color=color)
ax.add_patch(
Polygon(
cors,
closed=True,
fill=True,
alpha=0.3,
color=color,
)
)
cbar = plt.colorbar(mapper, alpha=0.3)
cbar.set_label("Effective Area Size")
fig.suptitle("Effective Area of Data Points")
plt.ylabel("Value")
plt.xlabel("Time")
plt.tight_layout()
plt.savefig(DATA_PATH / "effective-area.svg")
plt.savefig(DATA_PATH / "effective-area.png")
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
plt.plot(ns, "--o", linewidth=2, label="Original time series")
mapper = cm.ScalarMappable(Normalize(vmin=0, vmax=15, clip=True), cmap="autumn_r")
lotb = np.concatenate(
[[0], np.arange(1, 25, 3) + np.array(areas).reshape(-1, 3).argmax(axis=1), [25]]
)
for i, data in enumerate(ns[:-2], 1):
cors = [[i + ui, ns[i + ui]] for ui in range(-1, 2)]
x = [m[0] for m in cors]
y = [m[1] for m in cors]
ea = 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) * 10
color = mapper.to_rgba(ea) # cm.tab10.colors[i % 5 + 1]
plt.plot([i], [ns[i]], "o", color=color)
ax.add_patch(
Polygon(
cors,
closed=True,
fill=True,
alpha=0.3,
color=color,
)
)
plt.plot(
lotb, ns[lotb], "-x", linewidth=2, color="tab:purple", label="LTOB approximation"
)
cbar = plt.colorbar(mapper, alpha=0.3)
cbar.set_label("Effective Area Size")
plt.vlines(np.linspace(0.5, 24.5, 9), ns.min(), ns.max(), "black", "--", alpha=0.5)
plt.ylabel("Value")
plt.xlabel("Time")
fig.suptitle("LTOB downsampling")
plt.legend()
plt.tight_layout()
plt.savefig(DATA_PATH / "ltob.svg")
plt.savefig(DATA_PATH / "ltob.png")
plt.show()
fig, ax = plt.subplots(1, 1, figsize=(15, 5))
plt.plot(ns, "--o", linewidth=2, label="Original time series")
ds = downsample(np.vstack([np.arange(26), ns]).T, 10)
plt.plot(*ds.T, "-x", linewidth=2, label="LTTB approximation")
# plt.plot(ns, "x")
plt.vlines(np.linspace(0.5, 24.5, 9), ns.min(), ns.max(), "black", "--", alpha=0.5)
plt.ylabel("Value")
plt.xlabel("Time")
fig.suptitle("LTTB downsampling")
plt.legend()
plt.tight_layout()
plt.savefig(DATA_PATH / "lttb.svg")
plt.savefig(DATA_PATH / "lttb.png")
plt.show()
```
|
github_jupyter
|
# Processing Milwaukee Labels (~3K labels)
Building on `2020-03-24-EDA-Size.ipynb`
The goal is to prep a standard CSV that we can update and populate
```
import pandas as pd
import numpy as np
import os
import s3fs # for reading from S3FileSystem
import json # for working with JSON files
import matplotlib.pyplot as plt
pd.set_option('max_colwidth', -1)
# Import custom modules
import sys
SWKE_PATH = r'/home/ec2-user/SageMaker/classify-streetview/swke'
sys.path.append(SWKE_PATH)
import labelcrops
SAGEMAKER_PATH = r'/home/ec2-user/SageMaker'
SPLIT_PATH = os.path.join(SAGEMAKER_PATH, 'classify-streetview', 'split-train-test')
```
# Alternative Template - one row per label x crop it appears in (~3K labels)
* img_id
* heading
* crop_id
* label
* dist_x_left
* dist_x_right
* dist_y_top
* dist_y_bottom
```
df_labels = pd.read_csv(os.path.join(SPLIT_PATH, 'restructure_single_labels.csv'))
print(df_labels.shape)
df_labels.head()
df_coor = pd.read_csv('crop_coor.csv')
df_coor
df_outer = pd.merge(left=df_labels, right=df_coor, how='outer')
df_outer.shape
df_outer = pd.concat([df_labels, df_coor], axis = 1)
df_outer.head(10)
# Let's just use a for loop and join back together
# (note: this first attempt is missing .copy(), so every entry in list_dfs references
# the same df_labels object; the corrected version follows below)
list_dfs = []
coor_cols = list(df_coor.columns)
for index, row in df_coor.iterrows():
df_temp_labels = df_labels
for col in coor_cols:
df_temp_labels[col] = row[col]
list_dfs.append(df_temp_labels)
print(df_temp_labels.shape)
# Let's just use a for loop and join back together
list_dfs = []
coor_cols = list(df_coor.columns)
for index, row in df_coor.iterrows():
df_temp_labels = df_labels.copy()
for col in coor_cols:
df_temp_labels[col] = row[col]
list_dfs.append(df_temp_labels)
print(df_temp_labels.shape)
df_concat = pd.concat(list_dfs)
df_concat.shape
df_concat['corner_x'].value_counts()
df_concat.head()
df_concat.to_csv('merged_crops_template.csv', index = False)
df_concat.columns
```
## Take the differences
```
df_concat['xpt_minus_xleft'] = df_concat['sv_image_x'] - df_concat['x_crop_left']
df_concat['xright_minus_xpt'] = df_concat['x_crop_right'] - df_concat['sv_image_x']
df_concat['ypt_minus_ytop'] = df_concat['sv_image_y'] - df_concat['y_crop_top']
df_concat['ybottom_minus_ypt'] = df_concat['y_crop_bottom'] - df_concat['sv_image_y']
positive_mask = (df_concat['xpt_minus_xleft'] > 0) & (df_concat['xright_minus_xpt'] > 0) & (df_concat['ypt_minus_ytop'] > 0) & (df_concat['ybottom_minus_ypt'] > 0)
df_concat['label_in_crop'] = positive_mask
df_concat['label_in_crop'].value_counts()
df_incrop = df_concat.loc[df_concat['label_in_crop']]
df_incrop.shape
df_incrop['crop_number'].value_counts()
df_incrop.to_csv('Crops_with_Labels.csv', index = False)
7038 / 2851  # crops containing a label / unique labels ~= 2.47 crops per label
```
## Observations
* We have 12919 Null Crops
* We have 7038 Crops with a feature in them
* Three bottom crops (5, 6, 7) have the most points (these are the biggest)
* The 3 middle crops have the most for their row (2, 3, 6)
* Labels appear in an average of 2.47 image crops
# Visualize Label Locations
* xpt_minus_xleft - x location in the crop relative to bottom left (0, 0)
* ybottom_minus_ypt - y location in the crop relative to bottom left (0, 0)
```
fig = plt.figure(figsize = (12, 3))
colors_list = ['tab:red', 'orange', 'gold', 'forestgreen']
for crop_id in range(1, 5):
ax = fig.add_subplot(1, 4, crop_id)
x = df_incrop['xpt_minus_xleft'].loc[df_incrop['crop_number'] == crop_id]
y = df_incrop['ybottom_minus_ypt'].loc[df_incrop['crop_number'] == crop_id]
ax.plot(x, y, marker = '.', ls = 'none', alpha = 0.4, color = colors_list[int(crop_id -1)])
#ax.plot(x, y, marker = '.', ls = 'none', alpha = 0.4)
plt.ylim(0, 220)
plt.xlim(0, 220)
plt.title(f'Crop: {crop_id}')
ax.set_yticklabels([])
ax.set_xticklabels([])
plt.tight_layout()
fig2 = plt.figure(figsize = (12, 4))
# colors_list = ['forestgreen', 'indigo', 'mediumblue', 'gold', 'tab:red']
colors_list = ['blue', 'indigo', 'fuchsia']
for crop_id in range(5, 8):
plot_num = crop_id - 4
ax2 = fig2.add_subplot(1, 3, plot_num)
x = df_incrop['xpt_minus_xleft'].loc[df_incrop['crop_number'] == crop_id]
y = df_incrop['ybottom_minus_ypt'].loc[df_incrop['crop_number'] == crop_id]
ax2.plot(x, y, marker = '.', ls = 'none', alpha = 0.4, color = colors_list[int(plot_num - 1)])
#ax.plot(x, y, marker = '.', ls = 'none', alpha = 0.4)
plt.ylim(0, 300)
plt.xlim(0, 300)
plt.title(f'Crop: {crop_id}')
ax2.set_yticklabels([])
ax2.set_xticklabels([])
plt.tight_layout()
```
# Deep Dive into df_incrop
```
df_incrop.head()
df_incrop.columns
incrop_keep_cols = ['filename', 'crop_number', 'region_id', 'label_name', 'region_count', 'img_id',
'sv_image_x', 'sv_image_y','sv_image_y_bottom_origin', 'xpt_minus_xleft', 'xright_minus_xpt',
'ypt_minus_ytop', 'ybottom_minus_ypt']
df_incrop_short = df_incrop[incrop_keep_cols].copy()
df_incrop_short.head()
# Make some new ids
df_incrop_short['heading'] = df_incrop_short['filename'].str.extract('(.*)_(.*).jpg', expand = True)[1]
df_incrop_short.dtypes
#df_incrop_short['crop_name_id'] = df_incrop_short[['img_id', 'heading', 'crop_number']].apply(lambda x: '_'.join(str(x)), axis=1)
#df_incrop_short['label_id'] = df_incrop_short[['img_id', 'heading', 'region_id']].apply(lambda x: '_'.join(str(x)), axis=1)
df_incrop_short['crop_name_id'] = df_incrop_short['img_id'].astype(str) + '_' + df_incrop_short['heading'] + '_' + df_incrop_short['crop_number'].astype(str)
df_incrop_short['label_id'] = df_incrop_short['img_id'].astype(str) + '_' + df_incrop_short['heading'] + '_' + df_incrop_short['region_id'].astype(str)
df_incrop_short.head()
df_incrop_short['crop_name_id'].value_counts()
crop_label_counts = df_incrop_short['crop_name_id'].value_counts()
crop_label_counts.value_counts()
label_id_counts = df_incrop_short['label_id'].value_counts()
label_id_counts.value_counts()
label_id_counts.head(20)
506 * 7 * 4
14168 - 5254
df_incrop_short.to_csv('incrop_labels.csv', index = False)
```
# Desired End Template CSV for 506 x 7 x 4 image crops
* img_id
* heading
* crop_id
* combined_id
* primary_label - based on a hierarchy of importance (see the sketch after the code block below)
* 0_missing_count
* 1_null_count
* 2_obstacle_count
* 3_present_count
* 4_surface_prob_count
* 5_nosidewalk_count
```
unique_labels_list = list(df_incrop_short['label_name'].unique())
folders_list = ['3_present', '4_surface_prob', '2_obstacle', '0_missing', '6_occlusion', '5_nosidewalk']
# note: this zip assumes unique_labels_list comes back in the same order as folders_list
for label, folder in zip(unique_labels_list, folders_list):
label_mask = (df_incrop_short['label_name'].str.contains(label))
df_incrop_short[folder] = np.where(label_mask, 1, 0)
df_incrop_short.head()
df_group = df_incrop_short.groupby(['img_id', 'heading', 'crop_number'])[folders_list].sum()
df_group.head()
df_group['count_all'] = df_group[folders_list].values.sum(axis = 1)
df_group.head()
df_group.shape
df_group = df_group.reset_index()
df_group.head()
df_group.to_csv('img_heading_crop_labelcounts.csv', index = False)
df_group[folders_list].sum()
(df_group[folders_list] > 0).sum()
df_group[folders_list].sum()
```
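A minimal sketch of how `primary_label` could be filled in from these counts. It assumes the numeric prefix of the folder names encodes the importance hierarchy (lowest prefix wins); that ordering, and the helper name `pick_primary_label`, are assumptions rather than anything defined above.
```
# Assumed priority: lower numeric prefix = more important (e.g. 0_missing beats 3_present)
priority_order = sorted(folders_list)

def pick_primary_label(row):
    # return the highest-priority label that appears at least once in this crop
    for label in priority_order:
        if row[label] > 0:
            return label
    return '1_null'  # no labels at all -> treat as a null crop

df_group['primary_label'] = df_group.apply(pick_primary_label, axis=1)
df_group['primary_label'].value_counts()
```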
# Next Phase
* Grab a couple thousand null crops
* Find out which ones are null by creating an img_id x heading x crop_number list and then doing a join with df_group (see the sketch after this list)
* Then fill in the NAs with 0s and add a new column that if count_all == 0, then 1_null = 1
* Then merge with the test/train names by img_id
* Then move those crops into the test folder
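A minimal sketch of that join, assuming `df_group` and `folders_list` from above and crop numbers 1-7; the names `all_crops` and `df_all` are hypothetical, and the id grid could equally be built from the train/test image list instead of `df_group` itself.
```
import itertools
import numpy as np
import pandas as pd

# Full grid of every img_id x heading x crop_number combination
all_crops = pd.DataFrame(
    list(itertools.product(df_group['img_id'].unique(),
                           df_group['heading'].unique(),
                           range(1, 8))),  # crop numbers 1-7
    columns=['img_id', 'heading', 'crop_number'])

# Left-join the label counts; crops with no labels come back as NaN
count_cols = folders_list + ['count_all']
df_all = all_crops.merge(df_group, how='left', on=['img_id', 'heading', 'crop_number'])
df_all[count_cols] = df_all[count_cols].fillna(0)

# A crop with no labels at all is treated as the null class
df_all['1_null'] = np.where(df_all['count_all'] == 0, 1, 0)
df_all['1_null'].value_counts()
```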
|
github_jupyter
|
```
!nvidia-smi
# unrar x "/content/drive/MyDrive/IDC_regular_ps50_idx5.rar" "/content/drive/MyDrive/"
# !unzip "/content/drive/MyDrive/base_dir/train_dir/b_idc.zip" -d "/content/drive/MyDrive/base_dir/train_dir"
import os
! pip install -q kaggle
from google.colab import files
files.upload()
! mkdir ~/.kaggle
! cp kaggle.json ~/.kaggle/
! chmod 600 ~/.kaggle/kaggle.json
! kaggle datasets list
!kaggle datasets download -d paultimothymooney/breast-histopathology-images
! mkdir breast-histopathology-images
! unzip breast-histopathology-images.zip -d breast-histopathology-images
!pip install tensorflow-gpu
import pandas as pd
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Conv2D, MaxPooling2D, Flatten
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.metrics import categorical_crossentropy
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
import os
import cv2
import imageio
import skimage
import skimage.io
import skimage.transform
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
import itertools
import shutil
import matplotlib.pyplot as plt
%matplotlib inline
from google.colab import drive
drive.mount('/content/drive')
SAMPLE_SIZE = 78786
IMAGE_SIZE = 224
os.listdir('/content/breast-histopathology-images')
patient = os.listdir('/content/breast-histopathology-images/IDC_regular_ps50_idx5')
len(patient)
#Copy all images into one directory
#This will make it easier to work with this data.
# Create a new directory to store all available images
all_images = 'all_images'
os.mkdir(all_images)
patient_list = os.listdir('/content/breast-histopathology-images/IDC_regular_ps50_idx5')
print(patient_list)
for patient in patient_list:
# /content/IDC_regular_ps50_idx5/10253/0/10253_idx5_x1001_y1001_class0.png
path_0 = "/content/breast-histopathology-images/IDC_regular_ps50_idx5/"+str(patient)+"/0"
path_1 = "/content/breast-histopathology-images/IDC_regular_ps50_idx5/"+str(patient)+"/1"
#list for 0
file_list_0 = os.listdir(path_0)
#list for 1
file_list_1 = os.listdir(path_1)
#move all 0 related img of a patient to all_image directory
for fname in file_list_0:
#src path of image
src = os.path.join(path_0, fname)
#dst path for image
dst = os.path.join(all_images, fname)
#move the image to directory
shutil.copyfile(src, dst)
#move all 1 related img of a patient to all_image directory
for fname in file_list_1:
#src path of image
src = os.path.join(path_1, fname)
#dst path for image
dst = os.path.join(all_images, fname)
#move the image to directory
shutil.copyfile(src, dst)
len(os.listdir('all_images'))
image_list = os.listdir('all_images')
df_data = pd.DataFrame(image_list, columns=['image_id'])
df_data.head()
# Define Helper Functions
# Each file name has this format:
# '14211_idx5_x2401_y1301_class1.png'
def extract_patient_id(x):
# split into a list
a = x.split('_')
# the id is the first index in the list
patient_id = a[0]
return patient_id
def extract_target(x):
# split into a list
a = x.split('_')
# the target is part of the string in index 4
b = a[4]
    # the target i.e. 0 or 1 is the character at index 5 of that string --> 'class1'
target = b[5]
return target
# extract the patient id
# create a new column called 'patient_id'
df_data['patient_id'] = df_data['image_id'].apply(extract_patient_id)
# create a new column called 'target'
df_data['target'] = df_data['image_id'].apply(extract_target)
df_data.head(10)
df_data.shape
def draw_category_images(col_name,figure_cols, df, IMAGE_PATH):
"""
Give a column in a dataframe,
this function takes a sample of each class and displays that
sample on one row. The sample size is the same as figure_cols which
is the number of columns in the figure.
Because this function takes a random sample, each time the function is run it
displays different images.
"""
categories = (df.groupby([col_name])[col_name].nunique()).index
f, ax = plt.subplots(nrows=len(categories),ncols=figure_cols,
figsize=(4*figure_cols,4*len(categories))) # adjust size here
# draw a number of images for each location
for i, cat in enumerate(categories):
sample = df[df[col_name]==cat].sample(figure_cols) # figure_cols is also the sample size
for j in range(0,figure_cols):
file=IMAGE_PATH + sample.iloc[j]['image_id']
im=cv2.imread(file)
ax[i, j].imshow(im, resample=True, cmap='gray')
ax[i, j].set_title(cat, fontsize=16)
plt.tight_layout()
plt.show()
IMAGE_PATH = 'all_images/'
draw_category_images('target',4, df_data, IMAGE_PATH)
# What is the class distribution?
df_data['target'].value_counts()
# take a sample of the majority class 0 (total = 198738)
df_0 = df_data[df_data['target'] == '0'].sample(SAMPLE_SIZE, random_state=101)
# take a sample of class 1 (total = 78786)
df_1 = df_data[df_data['target'] == '1'].sample(SAMPLE_SIZE, random_state=101)
# concat the two dataframes
df_data = pd.concat([df_0, df_1], axis=0).reset_index(drop=True)
# Check the new class distribution
df_data['target'].value_counts()
# train_test_split
# stratify=y creates a balanced validation set.
y = df_data['target']
df_train, df_val = train_test_split(df_data, test_size=0.10, random_state=101, stratify=y)
print(df_train.shape)
print(df_val.shape)
df_train['target'].value_counts()
df_val['target'].value_counts()
# Create a new directory
base_dir = 'base_dir'
os.mkdir(base_dir)
#[CREATE FOLDERS INSIDE THE BASE DIRECTORY]
# now we create 2 folders inside 'base_dir':
# train_dir
# a_no_idc
# b_has_idc
# val_dir
# a_no_idc
# b_has_idc
# create a path to 'base_dir' to which we will join the names of the new folders
# train_dir
train_dir = os.path.join(base_dir, 'train_dir')
os.mkdir(train_dir)
# val_dir
val_dir = os.path.join(base_dir, 'val_dir')
os.mkdir(val_dir)
# [CREATE FOLDERS INSIDE THE TRAIN AND VALIDATION FOLDERS]
# Inside each folder we create separate folders for each class
# create new folders inside train_dir
a_no_idc = os.path.join(train_dir, 'a_no_idc')
os.mkdir(a_no_idc)
b_has_idc = os.path.join(train_dir, 'b_has_idc')
os.mkdir(b_has_idc)
# create new folders inside val_dir
a_no_idc = os.path.join(val_dir, 'a_no_idc')
os.mkdir(a_no_idc)
b_has_idc = os.path.join(val_dir, 'b_has_idc')
os.mkdir(b_has_idc)
# check that the folders have been created
os.listdir('base_dir/train_dir')
# Set the id as the index in df_data
df_data.set_index('image_id', inplace=True)
# Get a list of train and val images
train_list = list(df_train['image_id'])
val_list = list(df_val['image_id'])
# Transfer the train images
for image in train_list:
    # the image_id from os.listdir already includes the file extension
fname = image
# get the label for a certain image
target = df_data.loc[image,'target']
# these must match the folder names
if target == '0':
label = 'a_no_idc'
if target == '1':
label = 'b_has_idc'
# source path to image
src = os.path.join(all_images, fname)
# destination path to image
dst = os.path.join(train_dir, label, fname)
# move the image from the source to the destination
shutil.move(src, dst)
# Transfer the val images
for image in val_list:
    # the image_id from os.listdir already includes the file extension
fname = image
# get the label for a certain image
target = df_data.loc[image,'target']
# these must match the folder names
if target == '0':
label = 'a_no_idc'
if target == '1':
label = 'b_has_idc'
# source path to image
src = os.path.join(all_images, fname)
# destination path to image
dst = os.path.join(val_dir, label, fname)
# move the image from the source to the destination
shutil.move(src, dst)
# check how many train images we have in each folder
print(len(os.listdir('base_dir/train_dir/a_no_idc')))
print(len(os.listdir('base_dir/train_dir/b_has_idc')))
# check how many val images we have in each folder
print(len(os.listdir('base_dir/val_dir/a_no_idc')))
print(len(os.listdir('base_dir/val_dir/b_has_idc')))
train_path = 'base_dir/train_dir'
valid_path = 'base_dir/val_dir'
num_train_samples = len(df_train)
num_val_samples = len(df_val)
train_batch_size = 10
val_batch_size = 10
train_steps = np.ceil(num_train_samples / train_batch_size)
val_steps = np.ceil(num_val_samples / val_batch_size)
datagen = ImageDataGenerator(rescale=1.0/255)
train_gen = datagen.flow_from_directory(train_path,
target_size=(IMAGE_SIZE,IMAGE_SIZE),
batch_size=train_batch_size,
class_mode='categorical')
val_gen = datagen.flow_from_directory(valid_path,
target_size=(IMAGE_SIZE,IMAGE_SIZE),
batch_size=val_batch_size,
class_mode='categorical')
# Note: shuffle=False causes the test dataset to not be shuffled
test_gen = datagen.flow_from_directory(valid_path,
target_size=(IMAGE_SIZE,IMAGE_SIZE),
batch_size=1,
class_mode='categorical',
shuffle=False)
from tensorflow.keras.models import *
from sklearn.model_selection import *
from tensorflow.keras.applications import *
from tensorflow.keras.layers import *
base_Neural_Net= InceptionResNetV2(input_shape=(224,224,3), weights='imagenet', include_top=False)
model=Sequential()
model.add(base_Neural_Net)
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(256,kernel_initializer='he_uniform'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(2,activation='softmax'))
model.summary()
model.compile('adam', loss='binary_crossentropy',
metrics=['accuracy'])
filepath = "model.h5"
# tf.keras logs the metric as 'val_accuracy' (see history.history below), not 'val_acc'
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1,
                             save_best_only=True, mode='max')
reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.5, patience=3,
                              verbose=1, mode='max')
callbacks_list = [checkpoint, reduce_lr]
history = model.fit_generator(train_gen, steps_per_epoch=train_steps,
validation_data=val_gen,
validation_steps=val_steps,
epochs=10, verbose=1,
callbacks=callbacks_list)
# get the metric names so we can use evaluate
model.metrics_names
val_loss, val_acc = \
model.evaluate(test_gen,
steps=len(df_val))
print('val_loss:', val_loss)
print('val_acc:', val_acc)
import matplotlib.pyplot as plt
accuracy = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(accuracy) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.figure()
plt.plot(epochs, accuracy, 'bo', label='Training acc')
plt.plot(epochs, val_acc , 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
predictions = model.predict_generator(test_gen, steps=len(df_val), verbose=1)
predictions.shape
test_gen.class_indices
df_preds = pd.DataFrame(predictions, columns=['no_idc', 'has_idc'])
#df_preds.head()
df_preds
y_true = test_gen.classes
# Get the predicted labels as probabilities
y_pred = df_preds['has_idc']
from sklearn.metrics import roc_auc_score
roc_auc_score(y_true, y_pred)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
test_labels = test_gen.classes
test_labels.shape
cm = confusion_matrix(test_labels, predictions.argmax(axis=1))
test_gen.class_indices
cm_plot_labels = ['no_idc', 'has_idc']
plot_confusion_matrix(cm, cm_plot_labels, title='Confusion Matrix')
from sklearn.metrics import classification_report
# Generate a classification report
# For this to work we need y_pred as binary labels not as probabilities
y_pred_binary = predictions.argmax(axis=1)
report = classification_report(y_true, y_pred_binary, target_names=cm_plot_labels)
print(report)
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
model.save("/content/drive/MyDrive/InceptionResNetV2.h5")
yy = model.predict(test_gen)
len(yy)
yy
yy = np.argmax(yy, axis=1)
yy
```
|
github_jupyter
|
# Security Master Analysis
by @marketneutral
```
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import plotly.plotly as py
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
import cufflinks as cf
init_notebook_mode(connected=False)
cf.set_config_file(offline=True, world_readable=True, theme='polar')
plt.style.use('seaborn')
plt.rcParams['figure.figsize'] = 12, 7
```
# Why Sec Master Analysis?
Before you do anything exciting in financial data science, you **need to understand the nature of the universe of assets you are working with** and **how the data is presented**; otherwise, garbage in, garbage out. A "security master" refers to reference data about the lifetime of a particular asset, tracking ticker changes, name changes, etc over time. In finance, raw information is typically uninteresting and uninformative and you need to do substantial feature engineering and create either or both of time series and cross-sectional features. However **to do that without error requires that you deeply understand the nature of the asset universe.** This is not exciting fancy data science, but absolutely essential. Kaggle competitions are usually won in the third or fourth decimal place of the score so every detail matters.
### What are some questions we want to answer?
**Is `assetCode` a unique and permanent identifier?**
If you group by `assetCode` and make time-series features, are you assured to be referencing the same instrument? In the real world, the ticker symbol is not guaranteed to refer to the same company over time. Data providers usually provide a "permanent ID" so that you can keep track of this over time. This is not provided here (although in fact both Intrinio and Reuters provide this in the for-sale version of the data used in this competition).
The rules state:
> Each asset is identified by an assetCode (note that a single company may have multiple assetCodes). Depending on what you wish to do, you may use the assetCode, assetName, or time as a way to join the market data to news data.
>assetCode(object) - a unique id of an asset
So is it unique or not, and can we **always** join time-series features over time on `assetCode`?
**What about `assetName`? Is that unique or do names change over time?**
>assetName(category) - the name that corresponds to a group of assetCodes. These may be "Unknown" if the corresponding assetCode does not have any rows in the news data.
**What is the nature of missing data? What does it mean when data is missing?**
Let's explore and see.
```
# Make environment and get data
from kaggle.competitions import twosigmanews
env = twosigmanews.make_env()
(market_train_df, news_train_df) = env.get_training_data()
```
Let's define a valid "has_data" day for each asset as a day with reported trading `volume`.
```
df = market_train_df
df['has_data'] = df.volume.notnull().astype('int')
```
And let's see how long an asset is "alive" by looking at
- the distance between the first reported data point and the last
- and the number of days in that span that actually have data
```
lifetimes_df = df.groupby(
by='assetCode'
).agg(
{'time': [np.min, np.max],
'has_data': 'sum'
}
)
lifetimes_df.columns = lifetimes_df.columns.droplevel()
lifetimes_df.rename(columns={'sum': 'has_data_sum'}, inplace=True)
lifetimes_df['days_alive'] = np.busday_count(
lifetimes_df.amin.values.astype('datetime64[D]'),
lifetimes_df.amax.values.astype('datetime64[D]')
)
#plt.hist(lifetimes_df.days_alive.astype('int'), bins=25);
#plt.title('Histogram of Asset Lifetimes (business days)');
data = [go.Histogram(x=lifetimes_df.days_alive.astype('int'))]
layout = dict(title='Histogram of Asset Lifetimes (business days)',
xaxis=dict(title='Business Days'),
yaxis=dict(title='Asset Count')
)
fig = dict(data = data, layout = layout)
iplot(fig)
```
This was shocking to me. There are very many assets that only exist for, say, 50 days or less. When we look at the amount of data in these spans, it is even more surprising. Let's compare the asset lifetimes with the amount of data in those lifetimes. Here I calculate the difference between the number of business days in each span and the count of valid days; sorted by most "missing data".
```
lifetimes_df['alive_no_data'] = np.maximum(lifetimes_df['days_alive'] - lifetimes_df['has_data_sum'],0)
lifetimes_df.sort_values('alive_no_data', ascending=False ).head(10)
```
For example, ticker VNDA.O has its first data point on 2007-02-23 and its last on 2016-12-22, for a span of 2556 business days. However, in those 2556 days, there are only 115 that actually have data!
```
df.set_index('time').query('assetCode=="VNDA.O"').returnsOpenNextMktres10.iplot(kind='scatter',mode='markers', title='VNDA.O');
```
**It's not the case that VNDA.O didn't exist during those times; we just don't have data.**
Looking across the entire dataset, however, things look a little better.
```
#plt.hist(lifetimes_df['alive_no_data'], bins=25);
#plt.ylabel('Count of Assets');
#plt.xlabel('Count of missing days');
#plt.title('Missing Days in Asset Lifetime Spans');
data = [go.Histogram(x=lifetimes_df['alive_no_data'])]
layout = dict(title='Missing Days in Asset Lifetime Spans',
xaxis=dict(title='Count of missing days'),
yaxis=dict(title='Asset Count')
)
fig = dict(data = data, layout = layout)
iplot(fig)
```
Now let's look at whether tickers change over time. **Is either `assetCode` or `assetName` unique?**
```
df.groupby('assetName')['assetCode'].nunique().sort_values(ascending=False).head(20)
```
**So there are a number of companies that have more than one `assetCode` over their lifetime.** For example, 'T-Mobile US Inc':
```
df[df.assetName=='T-Mobile US Inc'].assetCode.unique()
```
And we can trace the lifetime of this company over multiple `assetCodes`.
```
lifetimes_df.loc[['PCS.N', 'TMUS.N', 'TMUS.O']]
```
The company started its life as PCS.N, was merged with TMUS.N (NYSE-listed) and then became Nasdaq-listed.
In this case, if you want to make long-horizon time-based features, you need to join on `assetName`.
```
(1+df[df.assetName=='T-Mobile US Inc'].set_index('time').returnsClosePrevRaw1).cumprod().plot(title='Time joined cumulative return');
```
**One gotcha I see is that I don't think `assetName` is correct "point-in-time".** This is hard to verify without proper commercial security master data, but:
- I don't think that the actual name of this company in 2007 was **T-Mobile**; it was something like **MetroPCS**. T-Mobile acquired MetroPCS on May 1, 2013 (google search "when did t-mobile acquire MetroPCS"). You can see this matches the lifetimes dataframe subset above.
- Therefore, the `assetName` must **not be point-in-time**, rather it looks like `assetName` is the name of the company when this dataset was created for Kaggle recently, and then backfilled.
- However, it would be very odd for the Reuters News Data to **not be point-in-time.** Let's see if we can find any news on this company back in 2007.
```
news_train_df[news_train_df.assetName=='T-Mobile US Inc'].T
```
What's fascinating here is that you can see in the article headlines that the company is named correctly, point-in-time, as "MetroPCS Communications Inc", whereas the `assetName` is listed as "T-Mobile US Inc.". So the organizers have also backfilled today's `assetName` into the news history.
This implies that **you cannot use NLP on the `headline` field in any way to join or infer asset clustering.** However, `assetName` continues to look like a consistent choice over time for a perm ID.
What about the other way around? Is `assetName` a unique identifier? In the real world, companies change their names all the time (a hilarious example of this is [here](https://www.businessinsider.com/long-blockchain-company-iced-tea-sec-stock-2018-8)). What about in this dataset?
```
df.groupby('assetCode')['assetName'].nunique().sort_values(ascending=False).head(20)
```
**YES!** We can conclude that, since no `assetCode` has ever been linked to more than one `assetName`, `assetName` could be a good choice for a permanent identifier. It is possible that a company changed its ticker *and* its name on the same day and therefore we would not be able to catch this, but let's assume this doesn't happen.
However, here is **a major gotcha**: dual-class stock. Though not very common, some companies issue more than one class of stock at the same time. Likely the best known is Google (called Alphabet Inc for its full life in this dataset); another is Comcast Corp.
```
df[df.assetName=='Alphabet Inc'].assetCode.unique()
lifetimes_df.loc[['GOOG.O', 'GOOGL.O']]
```
Because of this overlapping data, there is no way to be sure about how to link assets over time. You are stuck with one of two bad choices: link on `assetCode` and miss ticker changes and corporate actions, or link on `assetName` but get bad output in the case of dual-class shares.
## Making time-series features when row dates are missing
Let's say you want to make a rolling-window time-series feature, like a moving average on volume. As we saw above, it is not possible to do this 100% without error because we don't know the permanent identifier; we must make a tradeoff between the error of using `assetCode` or `assetName`. Given that `assetCode` will never overlap on time (and therefore allows using time as an index), I choose that here.
To make a rolling feature, it was my initial inclination to try something like:
```
df = market_train_df.reset_index().sort_values(['assetCode', 'time']).set_index(['assetCode','time'])
grp = df.groupby('assetCode')
df['volume_avg20'] = (
grp.apply(lambda x: x.volume.rolling(20).mean())
.reset_index(0, drop=True)
)
```
Let's see what we got:
```
(df.reset_index().set_index('time')
.query('assetCode=="VNDA.O"').loc['2007-03-15':'2009-06', ['volume', 'volume_avg20']]
)
```
Look at the time index... the result makes no sense... the rolling average of 20 days spans **the missing period of >2007-03-20 and <2009-06-26, which is not right in the context of financial time series.** Instead we need to account for rolling over business days. This will not be 100% accurate because we don't know exchange holidays, but it should be very close. **To do this correctly, you need to roll on business days**. However, pandas doesn't like to roll on business days (freq tag 'B') and will throw: `ValueError: <20 * BusinessDays> is a non-fixed frequency`. The next best thing is to roll on calendar days (freq tag 'D').
It took me a while to get this to work as pandas complains a lot on multi-indexes (this [issue](https://github.com/pandas-dev/pandas/issues/15584) helped a lot).
```
df = df.reset_index().sort_values(['assetCode', 'time']).reset_index(drop=True)
df['volume_avg20d'] = (df
.groupby('assetCode')
.rolling('20D', on='time') # Note the 'D' and on='time'
.volume
.mean()
.reset_index(drop=True)
)
df.reset_index().set_index('time').query('assetCode=="VNDA.O"').loc['2007-03-15':'2009-06', ['volume', 'volume_avg20', 'volume_avg20d']]
```
This is much better! Note that the default `min_periods` is 1 when you use a freq tag (e.g., '20D') to roll on. So even though we asked for a 20-day window, as long as there is at least 1 data point, we will get a windowed average. The result makes sense: if you look at 2009-06-26, you will see that the rolling average does **not** include any information from the year 2007; rather it is time-aware, and since the 19+ preceding rows are missing, it gives the 1-day windowed average.
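If you would rather not average over a nearly empty window, you can pass `min_periods` explicitly. A minimal sketch; the threshold of 5 and the column name `volume_avg20d_min5` are arbitrary choices for illustration, not part of the original analysis.
```
# Require at least 5 observations inside the 20-calendar-day window;
# windows with fewer rows produce NaN instead of a thin average
df['volume_avg20d_min5'] = (df
    .groupby('assetCode')
    .rolling('20D', on='time', min_periods=5)
    .volume
    .mean()
    .reset_index(drop=True)
)
```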
# Takeaways
- Security master issues are critical.
- You have to be very careful with time-based features because of missing data. Long-horizon features like, say, 12m momentum, may not produce sufficient asset coverage to be useful because so much data is missing.
- The fact that an asset is missing data *is not informative in itself*; it is an artifact of the data collection and delivery process. For example, you cannot calculate a true asset "age" (e.g., hypothesizing that days since IPO is a valid feature) and use that as a factor. This is unfortunate because you may hypothesize that news impact is a bigger driver of return variance during the early part of an asset's life due to lack of analyst coverage, lack of participation by quants, etc.
- `assetCode` is not consistent across time; the same economic entity can, and in many cases does, have a different `assetCode`; `assetCode` is not a permanent identifier.
- `assetName`, while consistent across time, can refer to more than one stock *at the same time* and therefore cannot be used to make time series features; `assetName` is not a unique permanent identifier.
- Missing time series data does not show up as `NaN` on the trading calendar; rather the rows are just missing. As such, to make time series features, you have to be careful with pandas rolling calculations and roll on calendar days, not naively on the count of rows.
|
github_jupyter
|
**Chapter 5 – Support Vector Machines**
_This notebook contains all the sample code and solutions to the exercises in chapter 5._
# Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
```
# To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "svm"
def save_fig(fig_id, tight_layout=True):
path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
print("Saving figure", fig_id)
if tight_layout:
plt.tight_layout()
plt.savefig(path, format='png', dpi=300)
```
# Large margin classification
The next few code cells generate the first figures in chapter 5. The first actual code sample comes after:
```
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]
# SVM Classifier model
svm_clf = SVC(kernel="linear", C=float("inf"))
svm_clf.fit(X, y)
# Bad models
x0 = np.linspace(0, 5.5, 200)
pred_1 = 5*x0 - 20
pred_2 = x0 - 1.8
pred_3 = 0.1 * x0 + 0.5
def plot_svc_decision_boundary(svm_clf, xmin, xmax):
w = svm_clf.coef_[0]
b = svm_clf.intercept_[0]
# At the decision boundary, w0*x0 + w1*x1 + b = 0
# => x1 = -w0/w1 * x0 - b/w1
x0 = np.linspace(xmin, xmax, 200)
decision_boundary = -w[0]/w[1] * x0 - b/w[1]
margin = 1/w[1]
gutter_up = decision_boundary + margin
gutter_down = decision_boundary - margin
svs = svm_clf.support_vectors_
plt.scatter(svs[:, 0], svs[:, 1], s=180, facecolors='#FFAAAA')
plt.plot(x0, decision_boundary, "k-", linewidth=2)
plt.plot(x0, gutter_up, "k--", linewidth=2)
plt.plot(x0, gutter_down, "k--", linewidth=2)
plt.figure(figsize=(12,2.7))
plt.subplot(121)
plt.plot(x0, pred_1, "g--", linewidth=2)
plt.plot(x0, pred_2, "m-", linewidth=2)
plt.plot(x0, pred_3, "r-", linewidth=2)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs", label="Iris-Versicolor")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo", label="Iris-Setosa")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.subplot(122)
plot_svc_decision_boundary(svm_clf, 0, 5.5)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo")
plt.xlabel("Petal length", fontsize=14)
plt.axis([0, 5.5, 0, 2])
save_fig("large_margin_classification_plot")
plt.show()
```
# Sensitivity to feature scales
```
Xs = np.array([[1, 50], [5, 20], [3, 80], [5, 60]]).astype(np.float64)
ys = np.array([0, 0, 1, 1])
svm_clf = SVC(kernel="linear", C=100)
svm_clf.fit(Xs, ys)
plt.figure(figsize=(12,3.2))
plt.subplot(121)
plt.plot(Xs[:, 0][ys==1], Xs[:, 1][ys==1], "bo")
plt.plot(Xs[:, 0][ys==0], Xs[:, 1][ys==0], "ms")
plot_svc_decision_boundary(svm_clf, 0, 6)
plt.xlabel("$x_0$", fontsize=20)
plt.ylabel("$x_1$ ", fontsize=20, rotation=0)
plt.title("Unscaled", fontsize=16)
plt.axis([0, 6, 0, 90])
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_scaled = scaler.fit_transform(Xs)
svm_clf.fit(X_scaled, ys)
plt.subplot(122)
plt.plot(X_scaled[:, 0][ys==1], X_scaled[:, 1][ys==1], "bo")
plt.plot(X_scaled[:, 0][ys==0], X_scaled[:, 1][ys==0], "ms")
plot_svc_decision_boundary(svm_clf, -2, 2)
plt.xlabel("$x_0$", fontsize=20)
plt.title("Scaled", fontsize=16)
plt.axis([-2, 2, -2, 2])
save_fig("sensitivity_to_feature_scales_plot")
```
# Sensitivity to outliers
```
X_outliers = np.array([[3.4, 1.3], [3.2, 0.8]])
y_outliers = np.array([0, 0])
Xo1 = np.concatenate([X, X_outliers[:1]], axis=0)
yo1 = np.concatenate([y, y_outliers[:1]], axis=0)
Xo2 = np.concatenate([X, X_outliers[1:]], axis=0)
yo2 = np.concatenate([y, y_outliers[1:]], axis=0)
svm_clf2 = SVC(kernel="linear", C=10**9)
svm_clf2.fit(Xo2, yo2)
plt.figure(figsize=(12,2.7))
plt.subplot(121)
plt.plot(Xo1[:, 0][yo1==1], Xo1[:, 1][yo1==1], "bs")
plt.plot(Xo1[:, 0][yo1==0], Xo1[:, 1][yo1==0], "yo")
plt.text(0.3, 1.0, "Impossible!", fontsize=24, color="red")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.annotate("Outlier",
xy=(X_outliers[0][0], X_outliers[0][1]),
xytext=(2.5, 1.7),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=16,
)
plt.axis([0, 5.5, 0, 2])
plt.subplot(122)
plt.plot(Xo2[:, 0][yo2==1], Xo2[:, 1][yo2==1], "bs")
plt.plot(Xo2[:, 0][yo2==0], Xo2[:, 1][yo2==0], "yo")
plot_svc_decision_boundary(svm_clf2, 0, 5.5)
plt.xlabel("Petal length", fontsize=14)
plt.annotate("Outlier",
xy=(X_outliers[1][0], X_outliers[1][1]),
xytext=(3.2, 0.08),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=16,
)
plt.axis([0, 5.5, 0, 2])
save_fig("sensitivity_to_outliers_plot")
plt.show()
```
# Large margin *vs* margin violations
This is the first code example in chapter 5:
```
import numpy as np
from sklearn import datasets
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica
svm_clf = Pipeline([
("scaler", StandardScaler()),
("linear_svc", LinearSVC(C=1, loss="hinge", random_state=42)),
])
svm_clf.fit(X, y)
svm_clf.predict([[5.5, 1.7]])
```
Now let's generate the graph comparing different regularization settings:
```
scaler = StandardScaler()
svm_clf1 = LinearSVC(C=1, loss="hinge", random_state=42)
svm_clf2 = LinearSVC(C=100, loss="hinge", random_state=42)
scaled_svm_clf1 = Pipeline([
("scaler", scaler),
("linear_svc", svm_clf1),
])
scaled_svm_clf2 = Pipeline([
("scaler", scaler),
("linear_svc", svm_clf2),
])
scaled_svm_clf1.fit(X, y)
scaled_svm_clf2.fit(X, y)
# Convert to unscaled parameters
b1 = svm_clf1.decision_function([-scaler.mean_ / scaler.scale_])
b2 = svm_clf2.decision_function([-scaler.mean_ / scaler.scale_])
w1 = svm_clf1.coef_[0] / scaler.scale_
w2 = svm_clf2.coef_[0] / scaler.scale_
svm_clf1.intercept_ = np.array([b1])
svm_clf2.intercept_ = np.array([b2])
svm_clf1.coef_ = np.array([w1])
svm_clf2.coef_ = np.array([w2])
# Find support vectors (LinearSVC does not do this automatically)
t = y * 2 - 1
support_vectors_idx1 = (t * (X.dot(w1) + b1) < 1).ravel()
support_vectors_idx2 = (t * (X.dot(w2) + b2) < 1).ravel()
svm_clf1.support_vectors_ = X[support_vectors_idx1]
svm_clf2.support_vectors_ = X[support_vectors_idx2]
plt.figure(figsize=(12,3.2))
plt.subplot(121)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^", label="Iris-Virginica")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs", label="Iris-Versicolor")
plot_svc_decision_boundary(svm_clf1, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.title("$C = {}$".format(svm_clf1.C), fontsize=16)
plt.axis([4, 6, 0.8, 2.8])
plt.subplot(122)
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plot_svc_decision_boundary(svm_clf2, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.title("$C = {}$".format(svm_clf2.C), fontsize=16)
plt.axis([4, 6, 0.8, 2.8])
save_fig("regularization_plot")
```
# Non-linear classification
```
X1D = np.linspace(-4, 4, 9).reshape(-1, 1)
X2D = np.c_[X1D, X1D**2]
y = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.plot(X1D[:, 0][y==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][y==1], np.zeros(5), "g^")
plt.gca().get_yaxis().set_ticks([])
plt.xlabel(r"$x_1$", fontsize=20)
plt.axis([-4.5, 4.5, -0.2, 0.2])
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(X2D[:, 0][y==0], X2D[:, 1][y==0], "bs")
plt.plot(X2D[:, 0][y==1], X2D[:, 1][y==1], "g^")
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plt.gca().get_yaxis().set_ticks([0, 4, 8, 12, 16])
plt.plot([-4.5, 4.5], [6.5, 6.5], "r--", linewidth=3)
plt.axis([-4.5, 4.5, -1, 17])
plt.subplots_adjust(right=1)
save_fig("higher_dimensions_plot", tight_layout=False)
plt.show()
from sklearn.datasets import make_moons
X, y = make_moons(n_samples=100, noise=0.15, random_state=42)
def plot_dataset(X, y, axes):
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
plt.axis(axes)
plt.grid(True, which='both')
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.show()
from sklearn.datasets import make_moons
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
polynomial_svm_clf = Pipeline([
("poly_features", PolynomialFeatures(degree=3)),
("scaler", StandardScaler()),
("svm_clf", LinearSVC(C=10, loss="hinge", random_state=42))
])
polynomial_svm_clf.fit(X, y)
def plot_predictions(clf, axes):
x0s = np.linspace(axes[0], axes[1], 100)
x1s = np.linspace(axes[2], axes[3], 100)
x0, x1 = np.meshgrid(x0s, x1s)
X = np.c_[x0.ravel(), x1.ravel()]
y_pred = clf.predict(X).reshape(x0.shape)
y_decision = clf.decision_function(X).reshape(x0.shape)
plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2)
plt.contourf(x0, x1, y_decision, cmap=plt.cm.brg, alpha=0.1)
plot_predictions(polynomial_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
save_fig("moons_polynomial_svc_plot")
plt.show()
from sklearn.svm import SVC
poly_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=3, coef0=1, C=5))
])
poly_kernel_svm_clf.fit(X, y)
poly100_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="poly", degree=10, coef0=100, C=5))
])
poly100_kernel_svm_clf.fit(X, y)
plt.figure(figsize=(11, 4))
plt.subplot(121)
plot_predictions(poly_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=3, r=1, C=5$", fontsize=18)
plt.subplot(122)
plot_predictions(poly100_kernel_svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
plt.title(r"$d=10, r=100, C=5$", fontsize=18)
save_fig("moons_kernelized_polynomial_svc_plot")
plt.show()
def gaussian_rbf(x, landmark, gamma):
return np.exp(-gamma * np.linalg.norm(x - landmark, axis=1)**2)
gamma = 0.3
x1s = np.linspace(-4.5, 4.5, 200).reshape(-1, 1)
x2s = gaussian_rbf(x1s, -2, gamma)
x3s = gaussian_rbf(x1s, 1, gamma)
XK = np.c_[gaussian_rbf(X1D, -2, gamma), gaussian_rbf(X1D, 1, gamma)]
yk = np.array([0, 0, 1, 1, 1, 1, 1, 0, 0])
plt.figure(figsize=(11, 4))
plt.subplot(121)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.scatter(x=[-2, 1], y=[0, 0], s=150, alpha=0.5, c="red")
plt.plot(X1D[:, 0][yk==0], np.zeros(4), "bs")
plt.plot(X1D[:, 0][yk==1], np.zeros(5), "g^")
plt.plot(x1s, x2s, "g--")
plt.plot(x1s, x3s, "b:")
plt.gca().get_yaxis().set_ticks([0, 0.25, 0.5, 0.75, 1])
plt.xlabel(r"$x_1$", fontsize=20)
plt.ylabel(r"Similarity", fontsize=14)
plt.annotate(r'$\mathbf{x}$',
xy=(X1D[3, 0], 0),
xytext=(-0.5, 0.20),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=18,
)
plt.text(-2, 0.9, "$x_2$", ha="center", fontsize=20)
plt.text(1, 0.9, "$x_3$", ha="center", fontsize=20)
plt.axis([-4.5, 4.5, -0.1, 1.1])
plt.subplot(122)
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot(XK[:, 0][yk==0], XK[:, 1][yk==0], "bs")
plt.plot(XK[:, 0][yk==1], XK[:, 1][yk==1], "g^")
plt.xlabel(r"$x_2$", fontsize=20)
plt.ylabel(r"$x_3$ ", fontsize=20, rotation=0)
plt.annotate(r'$\phi\left(\mathbf{x}\right)$',
xy=(XK[3, 0], XK[3, 1]),
xytext=(0.65, 0.50),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.1),
fontsize=18,
)
plt.plot([-0.1, 1.1], [0.57, -0.1], "r--", linewidth=3)
plt.axis([-0.1, 1.1, -0.1, 1.1])
plt.subplots_adjust(right=1)
save_fig("kernel_method_plot")
plt.show()
x1_example = X1D[3, 0]
for landmark in (-2, 1):
k = gaussian_rbf(np.array([[x1_example]]), np.array([[landmark]]), gamma)
print("Phi({}, {}) = {}".format(x1_example, landmark, k))
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001))
])
rbf_kernel_svm_clf.fit(X, y)
from sklearn.svm import SVC
gamma1, gamma2 = 0.1, 5
C1, C2 = 0.001, 1000
hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2)
svm_clfs = []
for gamma, C in hyperparams:
rbf_kernel_svm_clf = Pipeline([
("scaler", StandardScaler()),
("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C))
])
rbf_kernel_svm_clf.fit(X, y)
svm_clfs.append(rbf_kernel_svm_clf)
plt.figure(figsize=(11, 7))
for i, svm_clf in enumerate(svm_clfs):
plt.subplot(221 + i)
plot_predictions(svm_clf, [-1.5, 2.5, -1, 1.5])
plot_dataset(X, y, [-1.5, 2.5, -1, 1.5])
gamma, C = hyperparams[i]
plt.title(r"$\gamma = {}, C = {}$".format(gamma, C), fontsize=16)
save_fig("moons_rbf_svc_plot")
plt.show()
```
# Regression
```
np.random.seed(42)
m = 50
X = 2 * np.random.rand(m, 1)
y = (4 + 3 * X + np.random.randn(m, 1)).ravel()
from sklearn.svm import LinearSVR
svm_reg = LinearSVR(epsilon=1.5, random_state=42)
svm_reg.fit(X, y)
svm_reg1 = LinearSVR(epsilon=1.5, random_state=42)
svm_reg2 = LinearSVR(epsilon=0.5, random_state=42)
svm_reg1.fit(X, y)
svm_reg2.fit(X, y)
def find_support_vectors(svm_reg, X, y):
y_pred = svm_reg.predict(X)
off_margin = (np.abs(y - y_pred) >= svm_reg.epsilon)
return np.argwhere(off_margin)
svm_reg1.support_ = find_support_vectors(svm_reg1, X, y)
svm_reg2.support_ = find_support_vectors(svm_reg2, X, y)
eps_x1 = 1
eps_y_pred = svm_reg1.predict([[eps_x1]])
def plot_svm_regression(svm_reg, X, y, axes):
x1s = np.linspace(axes[0], axes[1], 100).reshape(100, 1)
y_pred = svm_reg.predict(x1s)
plt.plot(x1s, y_pred, "k-", linewidth=2, label=r"$\hat{y}$")
plt.plot(x1s, y_pred + svm_reg.epsilon, "k--")
plt.plot(x1s, y_pred - svm_reg.epsilon, "k--")
plt.scatter(X[svm_reg.support_], y[svm_reg.support_], s=180, facecolors='#FFAAAA')
plt.plot(X, y, "bo")
plt.xlabel(r"$x_1$", fontsize=18)
plt.legend(loc="upper left", fontsize=18)
plt.axis(axes)
plt.figure(figsize=(9, 4))
plt.subplot(121)
plot_svm_regression(svm_reg1, X, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)
#plt.plot([eps_x1, eps_x1], [eps_y_pred, eps_y_pred - svm_reg1.epsilon], "k-", linewidth=2)
plt.annotate(
'', xy=(eps_x1, eps_y_pred), xycoords='data',
xytext=(eps_x1, eps_y_pred - svm_reg1.epsilon),
textcoords='data', arrowprops={'arrowstyle': '<->', 'linewidth': 1.5}
)
plt.text(0.91, 5.6, r"$\epsilon$", fontsize=20)
plt.subplot(122)
plot_svm_regression(svm_reg2, X, y, [0, 2, 3, 11])
plt.title(r"$\epsilon = {}$".format(svm_reg2.epsilon), fontsize=18)
save_fig("svm_regression_plot")
plt.show()
np.random.seed(42)
m = 100
X = 2 * np.random.rand(m, 1) - 1
y = (0.2 + 0.1 * X + 0.5 * X**2 + np.random.randn(m, 1)/10).ravel()
```
**Warning**: the default value of `gamma` will change from `'auto'` to `'scale'` in version 0.22 to better account for unscaled features. To preserve the same results as in the book, we explicitly set it to `'auto'`, but you should probably just use the default in your own code.
```
from sklearn.svm import SVR
svm_poly_reg = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="auto")
svm_poly_reg.fit(X, y)
from sklearn.svm import SVR
svm_poly_reg1 = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="auto")
svm_poly_reg2 = SVR(kernel="poly", degree=2, C=0.01, epsilon=0.1, gamma="auto")
svm_poly_reg1.fit(X, y)
svm_poly_reg2.fit(X, y)
plt.figure(figsize=(9, 4))
plt.subplot(121)
plot_svm_regression(svm_poly_reg1, X, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg1.degree, svm_poly_reg1.C, svm_poly_reg1.epsilon), fontsize=18)
plt.ylabel(r"$y$", fontsize=18, rotation=0)
plt.subplot(122)
plot_svm_regression(svm_poly_reg2, X, y, [-1, 1, 0, 1])
plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg2.degree, svm_poly_reg2.C, svm_poly_reg2.epsilon), fontsize=18)
save_fig("svm_with_polynomial_kernel_plot")
plt.show()
```
# Under the hood
```
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica
from mpl_toolkits.mplot3d import Axes3D
def plot_3D_decision_function(ax, w, b, x1_lim=[4, 6], x2_lim=[0.8, 2.8]):
x1_in_bounds = (X[:, 0] > x1_lim[0]) & (X[:, 0] < x1_lim[1])
X_crop = X[x1_in_bounds]
y_crop = y[x1_in_bounds]
x1s = np.linspace(x1_lim[0], x1_lim[1], 20)
x2s = np.linspace(x2_lim[0], x2_lim[1], 20)
x1, x2 = np.meshgrid(x1s, x2s)
xs = np.c_[x1.ravel(), x2.ravel()]
df = (xs.dot(w) + b).reshape(x1.shape)
m = 1 / np.linalg.norm(w)
boundary_x2s = -x1s*(w[0]/w[1])-b/w[1]
margin_x2s_1 = -x1s*(w[0]/w[1])-(b-1)/w[1]
margin_x2s_2 = -x1s*(w[0]/w[1])-(b+1)/w[1]
ax.plot_surface(x1s, x2, np.zeros_like(x1),
color="b", alpha=0.2, cstride=100, rstride=100)
ax.plot(x1s, boundary_x2s, 0, "k-", linewidth=2, label=r"$h=0$")
ax.plot(x1s, margin_x2s_1, 0, "k--", linewidth=2, label=r"$h=\pm 1$")
ax.plot(x1s, margin_x2s_2, 0, "k--", linewidth=2)
ax.plot(X_crop[:, 0][y_crop==1], X_crop[:, 1][y_crop==1], 0, "g^")
ax.plot_wireframe(x1, x2, df, alpha=0.3, color="k")
ax.plot(X_crop[:, 0][y_crop==0], X_crop[:, 1][y_crop==0], 0, "bs")
ax.axis(x1_lim + x2_lim)
ax.text(4.5, 2.5, 3.8, "Decision function $h$", fontsize=15)
ax.set_xlabel(r"Petal length", fontsize=15)
ax.set_ylabel(r"Petal width", fontsize=15)
ax.set_zlabel(r"$h = \mathbf{w}^T \mathbf{x} + b$", fontsize=18)
ax.legend(loc="upper left", fontsize=16)
fig = plt.figure(figsize=(11, 6))
ax1 = fig.add_subplot(111, projection='3d')
plot_3D_decision_function(ax1, w=svm_clf2.coef_[0], b=svm_clf2.intercept_[0])
#save_fig("iris_3D_plot")
plt.show()
```
# Small weight vector results in a large margin
```
def plot_2D_decision_function(w, b, ylabel=True, x1_lim=[-3, 3]):
x1 = np.linspace(x1_lim[0], x1_lim[1], 200)
y = w * x1 + b
m = 1 / w
plt.plot(x1, y)
plt.plot(x1_lim, [1, 1], "k:")
plt.plot(x1_lim, [-1, -1], "k:")
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.plot([m, m], [0, 1], "k--")
plt.plot([-m, -m], [0, -1], "k--")
plt.plot([-m, m], [0, 0], "k-o", linewidth=3)
plt.axis(x1_lim + [-2, 2])
plt.xlabel(r"$x_1$", fontsize=16)
if ylabel:
plt.ylabel(r"$w_1 x_1$ ", rotation=0, fontsize=16)
plt.title(r"$w_1 = {}$".format(w), fontsize=16)
plt.figure(figsize=(12, 3.2))
plt.subplot(121)
plot_2D_decision_function(1, 0)
plt.subplot(122)
plot_2D_decision_function(0.5, 0, ylabel=False)
save_fig("small_w_large_margin_plot")
plt.show()
from sklearn.svm import SVC
from sklearn import datasets
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica
svm_clf = SVC(kernel="linear", C=1)
svm_clf.fit(X, y)
svm_clf.predict([[5.3, 1.3]])
```
# Hinge loss
```
t = np.linspace(-2, 4, 200)
h = np.where(1 - t < 0, 0, 1 - t) # max(0, 1-t)
plt.figure(figsize=(5,2.8))
plt.plot(t, h, "b-", linewidth=2, label="$max(0, 1 - t)$")
plt.grid(True, which='both')
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.yticks(np.arange(-1, 2.5, 1))
plt.xlabel("$t$", fontsize=16)
plt.axis([-2, 4, -1, 2.5])
plt.legend(loc="upper right", fontsize=16)
save_fig("hinge_plot")
plt.show()
```
# Extra material
## Training time
```
X, y = make_moons(n_samples=1000, noise=0.4, random_state=42)
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^")
import time
tol = 0.1
tols = []
times = []
for i in range(10):
svm_clf = SVC(kernel="poly", gamma=3, C=10, tol=tol, verbose=1)
t1 = time.time()
svm_clf.fit(X, y)
t2 = time.time()
times.append(t2-t1)
tols.append(tol)
print(i, tol, t2-t1)
tol /= 10
plt.semilogx(tols, times)
```
## Linear SVM classifier implementation using Batch Gradient Descent
```
# Training set
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.float64).reshape(-1, 1) # Iris-Virginica
from sklearn.base import BaseEstimator
class MyLinearSVC(BaseEstimator):
def __init__(self, C=1, eta0=1, eta_d=10000, n_epochs=1000, random_state=None):
self.C = C
self.eta0 = eta0
self.n_epochs = n_epochs
self.random_state = random_state
self.eta_d = eta_d
def eta(self, epoch):
return self.eta0 / (epoch + self.eta_d)
def fit(self, X, y):
# Random initialization
if self.random_state:
np.random.seed(self.random_state)
w = np.random.randn(X.shape[1], 1) # n feature weights
b = 0
m = len(X)
t = y * 2 - 1 # -1 if t==0, +1 if t==1
X_t = X * t
self.Js=[]
# Training
for epoch in range(self.n_epochs):
support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
X_t_sv = X_t[support_vectors_idx]
t_sv = t[support_vectors_idx]
J = 1/2 * np.sum(w * w) + self.C * (np.sum(1 - X_t_sv.dot(w)) - b * np.sum(t_sv))
self.Js.append(J)
w_gradient_vector = w - self.C * np.sum(X_t_sv, axis=0).reshape(-1, 1)
            b_derivative = -self.C * np.sum(t_sv)
w = w - self.eta(epoch) * w_gradient_vector
b = b - self.eta(epoch) * b_derivative
self.intercept_ = np.array([b])
self.coef_ = np.array([w])
support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
self.support_vectors_ = X[support_vectors_idx]
return self
def decision_function(self, X):
return X.dot(self.coef_[0]) + self.intercept_[0]
def predict(self, X):
return (self.decision_function(X) >= 0).astype(np.float64)
C=2
svm_clf = MyLinearSVC(C=C, eta0 = 10, eta_d = 1000, n_epochs=60000, random_state=2)
svm_clf.fit(X, y)
svm_clf.predict(np.array([[5, 2], [4, 1]]))
plt.plot(range(svm_clf.n_epochs), svm_clf.Js)
plt.axis([0, svm_clf.n_epochs, 0, 100])
print(svm_clf.intercept_, svm_clf.coef_)
svm_clf2 = SVC(kernel="linear", C=C)
svm_clf2.fit(X, y.ravel())
print(svm_clf2.intercept_, svm_clf2.coef_)
yr = y.ravel()
plt.figure(figsize=(12,3.2))
plt.subplot(121)
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^", label="Iris-Virginica")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs", label="Not Iris-Virginica")
plot_svc_decision_boundary(svm_clf, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.title("MyLinearSVC", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
plt.subplot(122)
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs")
plot_svc_decision_boundary(svm_clf2, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.title("SVC", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
from sklearn.linear_model import SGDClassifier
sgd_clf = SGDClassifier(loss="hinge", alpha = 0.017, max_iter = 50, tol=-np.infty, random_state=42)
sgd_clf.fit(X, y.ravel())
m = len(X)
t = y * 2 - 1 # -1 if t==0, +1 if t==1
X_b = np.c_[np.ones((m, 1)), X] # Add bias input x0=1
X_b_t = X_b * t
sgd_theta = np.r_[sgd_clf.intercept_[0], sgd_clf.coef_[0]]
print(sgd_theta)
support_vectors_idx = (X_b_t.dot(sgd_theta) < 1).ravel()
sgd_clf.support_vectors_ = X[support_vectors_idx]
sgd_clf.C = C
plt.figure(figsize=(5.5,3.2))
plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^")
plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs")
plot_svc_decision_boundary(sgd_clf, 4, 6)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.title("SGDClassifier", fontsize=14)
plt.axis([4, 6, 0.8, 2.8])
```
# Exercise solutions
## 1. to 7.
See appendix A.
# 8.
_Exercise: train a `LinearSVC` on a linearly separable dataset. Then train an `SVC` and a `SGDClassifier` on the same dataset. See if you can get them to produce roughly the same model._
Let's use the Iris dataset: the Iris Setosa and Iris Versicolor classes are linearly separable.
```
from sklearn import datasets
iris = datasets.load_iris()
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
setosa_or_versicolor = (y == 0) | (y == 1)
X = X[setosa_or_versicolor]
y = y[setosa_or_versicolor]
from sklearn.svm import SVC, LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.preprocessing import StandardScaler
C = 5
alpha = 1 / (C * len(X))
lin_clf = LinearSVC(loss="hinge", C=C, random_state=42)
svm_clf = SVC(kernel="linear", C=C)
sgd_clf = SGDClassifier(loss="hinge", learning_rate="constant", eta0=0.001, alpha=alpha,
max_iter=100000, tol=-np.infty, random_state=42)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
lin_clf.fit(X_scaled, y)
svm_clf.fit(X_scaled, y)
sgd_clf.fit(X_scaled, y)
print("LinearSVC: ", lin_clf.intercept_, lin_clf.coef_)
print("SVC: ", svm_clf.intercept_, svm_clf.coef_)
print("SGDClassifier(alpha={:.5f}):".format(sgd_clf.alpha), sgd_clf.intercept_, sgd_clf.coef_)
```
Let's plot the decision boundaries of these three models:
```
# Compute the slope and bias of each decision boundary
w1 = -lin_clf.coef_[0, 0]/lin_clf.coef_[0, 1]
b1 = -lin_clf.intercept_[0]/lin_clf.coef_[0, 1]
w2 = -svm_clf.coef_[0, 0]/svm_clf.coef_[0, 1]
b2 = -svm_clf.intercept_[0]/svm_clf.coef_[0, 1]
w3 = -sgd_clf.coef_[0, 0]/sgd_clf.coef_[0, 1]
b3 = -sgd_clf.intercept_[0]/sgd_clf.coef_[0, 1]
# Transform the decision boundary lines back to the original scale
line1 = scaler.inverse_transform([[-10, -10 * w1 + b1], [10, 10 * w1 + b1]])
line2 = scaler.inverse_transform([[-10, -10 * w2 + b2], [10, 10 * w2 + b2]])
line3 = scaler.inverse_transform([[-10, -10 * w3 + b3], [10, 10 * w3 + b3]])
# Plot all three decision boundaries
plt.figure(figsize=(11, 4))
plt.plot(line1[:, 0], line1[:, 1], "k:", label="LinearSVC")
plt.plot(line2[:, 0], line2[:, 1], "b--", linewidth=2, label="SVC")
plt.plot(line3[:, 0], line3[:, 1], "r-", label="SGDClassifier")
plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs") # label="Iris-Versicolor"
plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo") # label="Iris-Setosa"
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper center", fontsize=14)
plt.axis([0, 5.5, 0, 2])
plt.show()
```
Close enough!
# 9.
_Exercise: train an SVM classifier on the MNIST dataset. Since SVM classifiers are binary classifiers, you will need to use one-versus-all to classify all 10 digits. You may want to tune the hyperparameters using small validation sets to speed up the process. What accuracy can you reach?_
First, let's load the dataset and split it into a training set and a test set. We could use `train_test_split()` but people usually just take the first 60,000 instances for the training set, and the last 10,000 instances for the test set (this makes it possible to compare your model's performance with others):
```
try:
from sklearn.datasets import fetch_openml
mnist = fetch_openml('mnist_784', version=1, cache=True)
except ImportError:
from sklearn.datasets import fetch_mldata
mnist = fetch_mldata('MNIST original')
X = mnist["data"]
y = mnist["target"]
X_train = X[:60000]
y_train = y[:60000]
X_test = X[60000:]
y_test = y[60000:]
```
Many training algorithms are sensitive to the order of the training instances, so it's generally good practice to shuffle them first:
```
np.random.seed(42)
rnd_idx = np.random.permutation(60000)
X_train = X_train[rnd_idx]
y_train = y_train[rnd_idx]
```
Let's start simple, with a linear SVM classifier. It will automatically use the One-vs-All (also called One-vs-the-Rest, OvR) strategy, so there's nothing special we need to do. Easy!
```
lin_clf = LinearSVC(random_state=42)
lin_clf.fit(X_train, y_train)
```
Let's make predictions on the training set and measure the accuracy (we don't want to measure it on the test set yet, since we have not selected and trained the final model yet):
```
from sklearn.metrics import accuracy_score
y_pred = lin_clf.predict(X_train)
accuracy_score(y_train, y_pred)
```
Wow, 86% accuracy on MNIST is a really bad performance. This linear model is certainly too simple for MNIST, but perhaps we just needed to scale the data first:
```
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train.astype(np.float32))
X_test_scaled = scaler.transform(X_test.astype(np.float32))
lin_clf = LinearSVC(random_state=42)
lin_clf.fit(X_train_scaled, y_train)
y_pred = lin_clf.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
That's much better (we cut the error rate in two), but still not great at all for MNIST. If we want to use an SVM, we will have to use a kernel. Let's try an `SVC` with an RBF kernel (the default).
**Warning**: if you are using Scikit-Learn earlier than 0.19, the `SVC` class will use the One-vs-One (OvO) strategy by default, so you must explicitly set `decision_function_shape="ovr"` if you want to use the OvR strategy instead (OvR became the default in 0.19).
```
svm_clf = SVC(decision_function_shape="ovr", gamma="auto")
svm_clf.fit(X_train_scaled[:10000], y_train[:10000])
y_pred = svm_clf.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
That's promising, we get better performance even though we trained the model on 6 times less data. Let's tune the hyperparameters by doing a randomized search with cross validation. We will do this on a small dataset just to speed up the process:
```
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)}
rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3)
rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000])
rnd_search_cv.best_estimator_
rnd_search_cv.best_score_
```
This looks pretty low but remember we only trained the model on 1,000 instances. Let's retrain the best estimator on the whole training set (run this at night, it will take hours):
```
rnd_search_cv.best_estimator_.fit(X_train_scaled, y_train)
y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled)
accuracy_score(y_train, y_pred)
```
Ah, this looks good! Let's select this model. Now we can test it on the test set:
```
y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled)
accuracy_score(y_test, y_pred)
```
Not too bad, but apparently the model is overfitting slightly. It's tempting to tweak the hyperparameters a bit more (e.g. decreasing `C` and/or `gamma`), but we would run the risk of overfitting the test set. Other people have found that the hyperparameters `C=5` and `gamma=0.005` yield even better performance (over 98% accuracy). By running the randomized search for longer and on a larger part of the training set, you may be able to find this as well.
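If you want to try those reported values directly, here is a minimal sketch (it reuses `X_train_scaled`, `y_train`, `X_test_scaled` and `y_test` from the cells above, takes a long time to train on all 60,000 instances, and the ~98% figure is the value reported above rather than something verified here):
```
# Sketch: RBF-kernel SVC with the hyperparameters reported above (C=5, gamma=0.005)
svm_clf_tuned = SVC(kernel="rbf", C=5, gamma=0.005)
svm_clf_tuned.fit(X_train_scaled, y_train)     # slow: full 60,000-instance training set
y_pred = svm_clf_tuned.predict(X_test_scaled)
accuracy_score(y_test, y_pred)
```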
## 10.
_Exercise: train an SVM regressor on the California housing dataset._
Let's load the dataset using Scikit-Learn's `fetch_california_housing()` function:
```
from sklearn.datasets import fetch_california_housing
housing = fetch_california_housing()
X = housing["data"]
y = housing["target"]
```
Split it into a training set and a test set:
```
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
```
Don't forget to scale the data:
```
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
```
Let's train a simple `LinearSVR` first:
```
from sklearn.svm import LinearSVR
lin_svr = LinearSVR(random_state=42)
lin_svr.fit(X_train_scaled, y_train)
```
Let's see how it performs on the training set:
```
from sklearn.metrics import mean_squared_error
y_pred = lin_svr.predict(X_train_scaled)
mse = mean_squared_error(y_train, y_pred)
mse
```
Let's look at the RMSE:
```
np.sqrt(mse)
```
In this training set, the targets are median house values expressed in units of $100,000 (i.e., hundreds of thousands of dollars). The RMSE gives a rough idea of the kind of error you should expect (with a higher weight for large errors): so with this model, where the RMSE is close to 1.0, we can expect errors on the order of $100,000. Not great. Let's see if we can do better with an RBF kernel. We will use randomized search with cross validation to find the appropriate hyperparameter values for `C` and `gamma`:
```
from sklearn.svm import SVR
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import reciprocal, uniform
param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)}
rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=10, verbose=2, cv=3, random_state=42)
rnd_search_cv.fit(X_train_scaled, y_train)
rnd_search_cv.best_estimator_
```
Now let's measure the RMSE on the training set:
```
y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled)
mse = mean_squared_error(y_train, y_pred)
np.sqrt(mse)
```
Looks much better than the linear model. Let's select this model and evaluate it on the test set:
```
y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled)
mse = mean_squared_error(y_test, y_pred)
np.sqrt(mse)
import matplotlib
cmap = matplotlib.cm.get_cmap("jet")
from sklearn.datasets import fetch_openml
mnist = fetch_openml("mnist_784", version=1)
print(mnist.data.shape)
```
|
github_jupyter
|
# Introduction to Biomechanics
> Marcos Duarte
> Laboratory of Biomechanics and Motor Control ([http://demotu.org/](http://demotu.org/))
> Federal University of ABC, Brazil
## Biomechanics @ UFABC
```
from IPython.display import IFrame
IFrame('http://demotu.org', width='100%', height=500)
```
## Biomechanics
The origin of the word *Biomechanics* is evident:
$$ Biomechanics := bios \, (life) + mechanics $$
Professor Herbert Hatze, on a letter to the editors of the Journal of Biomechanics in 1974, proposed a (very good) definition for *the science called Biomechanics*:
> "*Biomechanics is the study of the structure and function of biological systems by means of the methods of mechanics.*"
Hatze H (1974) [The meaning of the term biomechanics](https://github.com/demotu/BMC/blob/master/courses/HatzeJB74biomechanics.pdf).
### Biomechanics & Mechanics
And Hatze, advocating for *Biomechanics to be a science of its own*, argues that Biomechanics **is not** simply Mechanics of (applied to) living systems:
> "*It would not be correct to state that 'Biomechanics is the study of the mechanical aspects of the structure and function of biological systems' because biological systems do not have mechanical aspects. They only have biomechanical aspects (otherwise mechanics, as it exists, would be sufficient to describe all phenomena which we now call biomechanical features of biological systems).*" Hatze (1974)
### Biomechanics vs. Mechanics
To support this argument, Hatze illustrates the difference between Biomechanics and the application of Mechanics, with an example of a javelin throw: studying the mechanics aspects of the javelin flight trajectory (use existing knowledge about aerodynamics and ballistics) vs. studying the biomechanical aspects of the phase before the javelin leaves the thrower’s hand (there are no established mechanical models for this system).
### Branches of Mechanics
Mechanics is a branch of the physical sciences that is concerned with the state of rest or motion of bodies that are subjected to the action of forces. In general, this subject can be subdivided into three branches: rigid-body mechanics, deformable-body mechanics, and fluid mechanics (Hibbeler, 2012).
In fact, only a subset of Mechanics matters to Biomechanics, the Classical Mechanics subset: the domain of mechanics for bodies with moderate speeds $(\ll 3\times10^8~m/s!)$ that are not very small $(\gg 3\times10^{-9}~m!)$, as shown in the following diagram (image from [Wikipedia](http://en.wikipedia.org/wiki/Classical_mechanics)):
<figure><img src="http://upload.wikimedia.org/wikipedia/commons/thumb/f/f0/Physicsdomains.svg/500px-Physicsdomains.svg.png" width=300 alt="Domains of mechanics"/>
### Biomechanics & other Sciences I
One last point about the excellent letter from Hatze, already in 1974 he points for the following problem:
> "*The use of the term biomechanics imposes rather severe restrictions on its meaning because of the established definition of the term, mechanics. This is unfortunate, since the synonym Biomechanics, as it is being understood by the majority of biomechanists today, has a much wider meaning.*" Hatze (1974)
### Biomechanics & other Sciences II
Although the term Biomechanics may sound new to you, it's not rare for people to regard the use of methods outside the realm of Mechanics as part of Biomechanics.
For instance, electromyography and thermography are two methods that, although they may be useful in Biomechanics (particularly the former), clearly have no relation with Mechanics; they belong to Electromagnetism and Thermodynamics, other [branches of Physics](https://en.wikipedia.org/wiki/Branches_of_physics).
### Biomechanics & Engineering
Even seeing Biomechanics as a field of Science, as argued by Hatze, it's also possible to refer to Engineering Biomechanics considering that Engineering is "*the application of scientific and mathematical principles to practical ends*" [[The Free Dictionary](http://www.thefreedictionary.com/engineering)] and particularly that "*Engineering Mechanics is the application of Mechanics to solve problems involving common engineering elements*" [[Wikibooks]](https://en.wikibooks.org/wiki/Engineering_Mechanics), and, last but not least, that Biomedical engineering is the application of engineering principles and design concepts to medicine and biology for healthcare purposes [[Wikipedia](https://en.wikipedia.org/wiki/Biomedical_engineering)].
### Applications of Biomechanics
Biomechanics matters to fields of science and technology related to biology and health and it's also relevant for the development of synthetic systems inspired on biological systems, as in robotics. To illustrate the variety of applications of Biomechanics, this is the current list of topics covered in the Journal of Biomechanics:
```
from IPython.display import IFrame
IFrame('http://www.jbiomech.com/aims', width='100%', height=500)
```
### On the branches of Mechanics and Biomechanics I
Nowadays, (Classical) Mechanics is typically partitioned into Statics and Dynamics. In turn, Dynamics is divided into Kinematics and Kinetics. This classification is clear: Dynamics is the study of the motions of bodies and Statics is the study of forces in the absence of changes in motion; Kinematics is the study of motion without considering its possible causes (forces) and Kinetics is the study of the possible causes of motion.
### On the branches of Mechanics and Biomechanics II
Nevertheless, it's common in Biomechanics to adopt a slightly different classification: to partition it between Kinematics and Kinetics, and then Kinetics into Statics and Dynamics (David Winter, Nigg & Herzog, and Vladimir Zatsiorsky, among others, use this classification in their books). The rationale is that we first separate the study of motion according to whether or not its causes (forces) are considered. Partitioning (Bio)Mechanics in this way is useful because it is simpler to study and describe (measure) the kinematics of human motion first and only then move on to the more complicated issue of understanding (measuring) the forces related to human motion.
Anyway, these different classifications reveal a certain contradiction between Mechanics (particularly from an engineering point of view) and Biomechanics; some scholars will say that this taxonomy in Biomechanics is simply wrong and it should be corrected to align with the Mechanics. Be aware.
### The future of Biomechanics
(Human) Movement Science combines many disciplines of science (such as, physiology, biomechanics, and psychology) for the study of human movement. Professor Benno Nigg claims that with the growing concern for the well-being of humankind, Movement Science will have an important role:
> Movement science will be one of the most important and most recognized science fields in the twenty-first century... The future discipline of movement science has a unique opportunity to become an important contributor to the well-being of mankind.
Nigg BM (1993) [Sport science in the twenty-first century](http://www.ncbi.nlm.nih.gov/pubmed/8230394). Journal of Sports Sciences, 77, 343-347.
And so Biomechanics will also become an important contributor to the well-being of humankind.
### Biomechanics and the Biomedical Engineering at UFABC (2017) I
At the university level, the study of Mechanics is typically done in the disciplines Statics and Dynamics (rigid-body mechanics), Strength of Materials (deformable-body mechanics), and Mechanics of Fluids (fluid mechanics). Consequently, the study on Biomechanics must also cover these topics for a greater understanding of the structure and function of biological systems.
### Biomechanics and the Biomedical Engineering at UFABC (2017) II
The Biomedical Engineering degree at UFABC covers these topics for the study of biological systems in different courses: Ciência dos Materiais Biocompatíveis, Modelagem e Simulação de Sistemas Biomédicos, Métodos de Elementos Finitos aplicados a Sistemas Biomédicos, Mecânica dos Fluidos, Caracterização de Biomateriais, Sistemas Biológicos, and last but not least, Biomecânica I & Biomecânica II.
How much of biological systems is in fact studied in these disciplines varies a lot. Anyway, none of these courses cover the study of human motion with implications to health, rehabilitation, and sports, except the last course. This is the reason why the courses Biomecânica I & II focus on the analysis of the human movement.
### More on Biomechanics
The Wikipedia page on biomechanics is a good place to read more about Biomechanics:
```
from IPython.display import IFrame
IFrame('http://en.m.wikipedia.org/wiki/Biomechanics', width='100%', height=400)
```
## History of Biomechanics
Biomechanics progressed basically with the advancements in Mechanics and with the invention of instrumentation for measuring mechanical quantities and for computing.
The development of Biomechanics was only possible because people became more interested in understanding the structure and function of biological systems and in applying these concepts to the progress of humankind.
## Aristotle (384-322 BC)
Aristotle was the first to have written about the movement of animals in his works *On the Motion of Animals (De Motu Animalium)* and *On the Gait of Animals (De Incessu Animalium)* [[Works by Aristotle]](http://classics.mit.edu/Browse/index-Aristotle.html).
Aristotle clearly already knew what we nowadays refer as Newton's third law of motion:
"*For as the pusher pushes so is the pushed pushed, and with equal force.*" [Part 3, [On the Motion of Animals](http://classics.mit.edu/Aristotle/motion_animals.html)]
### Aristotle & the Scientific Revolution I
Although Aristotle's contributions were invaluable to humankind, to make his discoveries he does not seem to have employed anything similar to what we today refer to as the [scientific method](https://en.wikipedia.org/wiki/Scientific_method) (systematic observation, measurement, and experiment, and the formulation, testing, and modification of hypotheses).
Most of the Physics of Aristotle was ambiguous or incorrect; for example, for him there was no motion without a force. He even deduced that speed was proportional to force and inversely proportional to resistance [[Book VII, Physics](http://classics.mit.edu/Aristotle/physics.7.vii.html)]. Perhaps Aristotle was too influenced by the observation of motion of a body under the action of a friction force, where this notion is not at all unreasonable.
### Aristotle & the Scientific Revolution II
If Aristotle performed any observation/experiment at all in his works, he probably was not good on that as, ironically, evinced in this part of his writing:
> "Males have more teeth than females in the case of men, sheep, goats, and swine; in the case of other animals observations have not yet been made". Aristotle [The History of Animals](http://classics.mit.edu/Aristotle/history_anim.html).
## Leonardo da Vinci (1452-1519)
<figure><img src='https://upload.wikimedia.org/wikipedia/commons/thumb/2/22/Da_Vinci_Vitruve_Luc_Viatour.jpg/353px-Da_Vinci_Vitruve_Luc_Viatour.jpg' width="240" alt="Vitruvian Man" style="float:right;margin: 0 0 0 20px;"/></figure>
Contributions of Leonardo to Biomechanics:
- Studies on the proportions of humans and animals
- Anatomy studies of the human body, especially the foot
- Studies on the mechanical function of muscles
<br><br>
*"Le proporzioni del corpo umano secondo Vitruvio", also known as the [Vitruvian Man](https://en.wikipedia.org/wiki/Vitruvian_Man), drawing by [Leonardo da Vinci](https://en.wikipedia.org/wiki/Leonardo_da_Vinci) circa 1490 based on the work of [Marcus Vitruvius Pollio](https://en.wikipedia.org/wiki/Vitruvius) (1st century BC), depicting a man in supposedly ideal human proportions (image from [Wikipedia](https://en.wikipedia.org/wiki/Vitruvian_Man)).
## Giovanni Alfonso Borelli (1608-1679)
<figure><img src='.\..\images\borelli.jpg' width="240" alt="Borelli" style="float:right;margin: 0 0 0 20px;"/></figure>
- [The father of biomechanics](https://en.wikipedia.org/wiki/Giovanni_Alfonso_Borelli); the first to apply the modern scientific method to 'Biomechanics', in his book [De Motu Animalium](http://www.e-rara.ch/doi/10.3931/e-rara-28707).
- Proposed that the levers of the musculoskeletal system magnify motion rather than force.
- Calculated the forces required for equilibrium in various joints of the human body before Newton published the laws of motion.
<br><br>
*Excerpt from the book De Motu Animalium*.
## More on the history of Biomechanics
See:
- <a href=http://courses.washington.edu/bioen520/notes/History_of_Biomechanics_(Martin_1999).pdf>http://courses.washington.edu/bioen520/notes/History_of_Biomechanics_(Martin_1999).pdf</a>
- [http://biomechanics.vtheatre.net/doc/history.html](http://biomechanics.vtheatre.net/doc/history.html)
- Chapter 1 of Nigg and Herzog (2006) [Biomechanics of the Musculo-skeletal System](https://books.google.com.br/books?id=hOIeAQAAIAAJ&dq=editions:ISBN0470017678)
### The International Society of Biomechanics
The biomechanics community has an official scientific society, the [International Society of Biomechanics](http://isbweb.org/), with a journal, the [Journal of Biomechanics](http://www.jbiomech.com), and an e-mail list, the [Biomch-L](http://biomch-l.isbweb.org):
```
from IPython.display import IFrame
IFrame('http://biomch-l.isbweb.org/forums/2-General-Discussion', width='100%', height=400)
```
### Examples of Biomechanics Classes around the World
```
from IPython.display import IFrame
IFrame('http://pages.uoregon.edu/karduna/biomechanics/bme.htm', width='100%', height=400)
```
## Problems
1. Go to [Biomechanics Classes on the Web](http://pages.uoregon.edu/karduna/biomechanics/) to visit websites of biomechanics classes around the world and find out how biomechanics is studied in different fields.
2. Find examples of applications of biomechanics in different areas.
3. Watch the video [The Weird World of Eadweard Muybridge](http://youtu.be/5Awo-P3t4Ho) to learn about [Eadweard Muybridge](http://en.wikipedia.org/wiki/Eadweard_Muybridge), an important person to the development of instrumentation for biomechanics.
4. Think about practical problems in nature that can be studied in biomechanics with simple approaches (simple modeling and low-tech methods) or very complicated approaches (complex modeling and high-tech methods).
5. What might the study of the biomechanics of athletes, children, the elderly, persons with disabilities, other animals, and computer animation for the cinema industry have in common, and how might they differ?
6. Visit the website of the Laboratory of Biomechanics and Motor Control at UFABC and find out what we do and if there is anything you are interested in.
7. Is there anything in biomechanics that interests you? How could you pursue this interest?
## References
- [Biomechanics - Wikipedia, the free encyclopedia](http://en.wikipedia.org/wiki/Biomechanics)
- [Mechanics - Wikipedia, the free encyclopedia](http://en.wikipedia.org/wiki/Mechanics)
- [International Society of Biomechanics](http://isbweb.org/)
- [Biomech-l, the biomechanics' e-mail list](http://biomch-l.isbweb.org/)
- [Journal of Biomechanics' aims](http://www.jbiomech.com/aims)
- <a href="http://courses.washington.edu/bioen520/notes/History_of_Biomechanics_(Martin_1999).pdf">A Genealogy of Biomechanics</a>
- Duarte M (2014) A física da bicicleta no futebol. Ciência Hoje, 53, 313, 16-21. [Online](http://www.cienciahoje.org.br/revista/materia/id/824/n/a_fisica_da_bicicleta_no_futebol), [PDF](http://demotu.org/pubs/CH14.pdf). [Biomechanics of the Bicycle Kick website](http://demotu.org/x/pele/)
- Hatze H (1974) [The meaning of the term biomechanics](https://github.com/demotu/BMC/blob/master/courses/HatzeJB74biomechanics.pdf). Journal of Biomechanics, 7, 189–190.
- Hibbeler RC (2012) [Engineering Mechanics: Statics](http://books.google.com.br/books?id=PSEvAAAAQBAJ). Prentice Hall; 13 edition.
- Nigg BM and Herzog W (2006) [Biomechanics of the Musculo-skeletal System](https://books.google.com.br/books?id=hOIeAQAAIAAJ&dq=editions:ISBN0470017678). 3rd Edition. Wiley.
- Winter DA (2009) [Biomechanics and motor control of human movement](http://books.google.com.br/books?id=_bFHL08IWfwC). 4 ed. Hoboken, EUA: Wiley.
- Zatsiorsky VM (1997) [Kinematics of Human Motion](http://books.google.com.br/books/about/Kinematics_of_Human_Motion.html?id=Pql_xXdbrMcC&redir_esc=y). Champaign, Human Kinetics.
- Zatsiorsky VM (2002) [Kinetics of human motion](http://books.google.com.br/books?id=wp3zt7oF8a0C). Human Kinetics.
|
github_jupyter
|
# Keras Exercise
## Predict political party based on votes
As a fun little example, we'll use a public data set of how US congressmen voted on 17 different issues in the year 1984. Let's see if we can figure out their political party based on their votes alone, using a deep neural network!
For those outside the United States, our two main political parties are "Democrat" and "Republican." In modern times they represent progressive and conservative ideologies, respectively.
Politics in 1984 weren't quite as polarized as they are today, but you should still be able to get over 90% accuracy without much trouble.
Since the point of this exercise is implementing neural networks in Keras, I'll help you to load and prepare the data.
Let's start by importing the raw CSV file using Pandas, and make a DataFrame out of it with nice column labels:
```
import pandas as pd
feature_names = ['party','handicapped-infants', 'water-project-cost-sharing',
'adoption-of-the-budget-resolution', 'physician-fee-freeze',
'el-salvador-aid', 'religious-groups-in-schools',
'anti-satellite-test-ban', 'aid-to-nicaraguan-contras',
'mx-missle', 'immigration', 'synfuels-corporation-cutback',
'education-spending', 'superfund-right-to-sue', 'crime',
'duty-free-exports', 'export-administration-act-south-africa']
voting_data = pd.read_csv('../datasets/house-votes-84.data.txt', na_values=['?'],
names = feature_names)
voting_data.head()
```
We can use describe() to get a feel of how the data looks in aggregate:
```
voting_data.describe()
```
We can see there's some missing data to deal with here; some politicians abstained on some votes, or just weren't present when the vote was taken. We will just drop the rows with missing data to keep it simple, but in practice you'd want to first make sure that doing so didn't introduce any sort of bias into your analysis (if one party abstains more than another, that could be problematic for example.)
```
voting_data.dropna(inplace=True)
voting_data.describe()
```
Our neural network needs normalized numbers, not strings, to work. So let's replace all the y's and n's with 1's and 0's, and represent the parties as 1's and 0's as well.
```
voting_data.replace(('y', 'n'), (1, 0), inplace=True)
voting_data.replace(('democrat', 'republican'), (1, 0), inplace=True)
voting_data.head()
```
Finally let's extract the features and labels in the form that Keras will expect:
```
all_features = voting_data[feature_names].drop('party', axis=1).values
all_classes = voting_data['party'].values
```
OK, so have a go at it! You'll want to refer back to the slide on using Keras with binary classification - there are only two parties, so this is a binary problem. This also saves us the hassle of representing classes with "one-hot" format like we had to do with MNIST; our output is just a single 0 or 1 value.
Also refer to the scikit_learn integration slide, and use cross_val_score to evaluate your resulting model with 10-fold cross-validation.
**If you're using tensorflow-gpu on a Windows machine** by the way, you probably *do* want to peek a little bit at my solution - if you run into memory allocation errors, there's a workaround there you can use.
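If you do hit GPU memory allocation errors with tensorflow-gpu, one common workaround (a general TensorFlow 2.x approach, not necessarily the exact one used in my solution) is to enable memory growth before building any model:
```
import tensorflow as tf

# Allocate GPU memory on demand instead of reserving it all up front.
# This must run before any Keras model is created.
for gpu in tf.config.experimental.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)
```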
Try out your code here:
## My implementation is below
```
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.models import Sequential
from sklearn.model_selection import cross_val_score
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
def create_model():
model = Sequential()
    # 16 feature inputs (votes) going into a 32-unit layer
model.add(Dense(32, input_dim=16, kernel_initializer='normal', activation='relu'))
# Adding Dropout layer to prevent overfitting
model.add(Dropout(0.5))
# Another hidden layer of 16 units
model.add(Dense(16, kernel_initializer='normal', activation='relu'))
#Adding another Dropout layer
model.add(Dropout(0.5))
# Output layer with a binary classification (Democrat or Republican political party)
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
# Compile model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
# Wrap our Keras model in an estimator compatible with scikit_learn
estimator = KerasClassifier(build_fn=create_model, epochs=100, verbose=0)
# Now we can use scikit_learn's cross_val_score to evaluate this model identically to the others
cv_scores = cross_val_score(estimator, all_features, all_classes, cv=10)
cv_scores.mean()
```
94% without even trying too hard! Did you do better? Maybe more neurons, more layers, or Dropout layers would help even more.
**Adding Dropout layers between the Dense layers increases accuracy to about 96%.**
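As a sketch of one variant to experiment with (wider layers plus an extra hidden layer; whether it actually beats the scores above is not guaranteed and would need to be checked with the same 10-fold cross-validation):
```
# Hypothetical deeper/wider variant of the model above
def create_deeper_model():
    model = Sequential()
    model.add(Dense(64, input_dim=16, kernel_initializer='normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(32, kernel_initializer='normal', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(16, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

estimator = KerasClassifier(build_fn=create_deeper_model, epochs=100, verbose=0)
cross_val_score(estimator, all_features, all_classes, cv=10).mean()
```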
|
github_jupyter
|
# 6. External Libraries
<a href="https://colab.research.google.com/github/chongsoon/intro-to-coding-with-python/blob/main/6-External-Libraries.ipynb" target="_parent">
<img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/>
</a>
Up till now, we have been using whatever is available to us in Python.
Sometimes, we need other people's help to solve our problem. For example, I may need help reading data from a website, or performing a specific calculation on the data given to me.
Instead of writing my own functions, I can use libraries/packages developed by other people specifically to solve my problem.
Lets look at some common libraries that I use.
## Installed Libraries/Packages in this Environment
Lets find out what has been installed on this environment by running the following code:
```
!conda list
#If this code block fails, try the next one.
!pip list
```
You can see that a lot of packages have been installed. Let us try some of them.
## Getting data from web pages/api (Requests)
Have you ever used apps, such as bus apps, that tell you when the bus will arrive? That information is actually retrieved from the LTA website.
Of course, in this practical we will use some open and free web APIs to get data.
We can use Requests package to get data from web pages and process them in Python.
Lets try it out.
First, we have to tell Python that we want to use this library. In order to do that, we have to "import" it into this program.
```
import requests
import json
```
Let us get data from Binance. Binance is a cryptocurrency exchange; think of it like a stock market for cryptocurrencies such as Bitcoin. It has a free public web API that we can get data from. We can start by declaring URL variables.
[Reference to Binance API](https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md)
```
url = 'https://api.binance.com/'
exchange_info_url = url + 'api/v3/exchangeInfo'
```
Next, we will use requests.get with the url as the parameter and execute the cell.
```
response = requests.get(exchange_info_url)
```
Then we will extract the data from the response into a dictionary.
```
response_data = response.json()
```
Lets explore what the keys are in the dictionary.
```
print(response_data.keys())
```
I wonder what is inside the "symbols" key.
```
print(type(response_data['symbols']))
```
Since it contains list, let us see what are the first 5 items in the list.
```
print(response_data['symbols'][:5])
```
That is still too much information, lets just inspect the first item.
```
print(response_data['symbols'][0])
```
### Try it yourself: Get the type of data
This is definitely more manageable. It seems like dictionary types are contained in the list. Are you able to confirm that through code? Print out what is the **type** of the **first** item in the list.
```
#Type in your code here to print the type of the first item in the list.
```
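If you get stuck, one possible solution (using the same `response_data` from above) is:
```
# The 'symbols' key holds a list; its first item turns out to be a dictionary
print(type(response_data['symbols'][0]))
```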
### Try it yourself: Find the crypto!
How can I find the crypto information in such a long list of items? Do you have any idea?
Find information on Shiba Inu Coin (Symbol: SHIBUSDT), since Elon Musk's [tweet](https://twitter.com/elonmusk/status/1444840184500129797?s=20) increased the price of the coin recently.
```
coin_list = response_data['symbols']
#Type your code below, get information on "SHIBUSDT" coin.
```
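If you get stuck, one possible approach is a simple loop over `coin_list`, matching on the `'symbol'` key (the first-item inspection above suggests each entry is a dictionary with such a key):
```
# Loop through the list of coin dictionaries and print the one whose symbol matches
for coin in coin_list:
    if coin['symbol'] == 'SHIBUSDT':
        print(coin)
```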
We can find the crypto, but it comes with a lot of information. If we only want the price, we can refer to this [link](https://github.com/binance/binance-spot-api-docs/blob/master/rest-api.md#symbol-price-ticker) to find the price of the crypto.
```
symbol_ticker_price_url = url + 'api/v3/ticker/price'
symbol_ticker_price_url
price_request = requests.get(symbol_ticker_price_url)
price_request.json()
```
Oh no, it is loading everything...Is there a way to just get the Shiba price? According to the documentation, we can add a parameter to find the price of a particular symbol. Let us see how we can do that.
Lets create a param payload.
```
symbol_parameter = {'symbol': 'SHIBUSDT'}
```
Then, use the same request, but add the symbol_parameter that we created.
```
price_request = requests.get(symbol_ticker_price_url, params=symbol_parameter)
price_request.json()
```
Cool, now we are able to see the price of Shiba Crypto.
So far, we have used "requests" package to get data from website. There are a lot of other packages out there that could solve the problems that you encounter. Feel free to explore.
- [Python Package Repository](https://pypi.org/)
- [Conda Package Repository](https://anaconda.org/anaconda/repo)
Proceed to the next tutorial (last one) to learn simple data analysis.
|
github_jupyter
|
<a href="http://landlab.github.io"><img style="float: left" src="../../../landlab_header.png"></a>
# The Implicit Kinematic Wave Overland Flow Component
<hr>
<small>For more Landlab tutorials, click here: <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html</a></small>
<hr>
## Overview
This notebook demonstrates the `KinwaveImplicitOverlandFlow` Landlab component. The component implements a two-dimensional kinematic wave model of overland flow, using a digital elevation model or other source of topography as the surface over which water flows.
### Theory
The kinematic wave equations are a simplified form of the 2D shallow-water equations in which energy slope is assumed to equal bed slope. Conservation of water mass is expressed in terms of the time derivative of the local water depth, $H$, and the spatial derivative (divergence) of the unit discharge vector $\mathbf{q} = UH$ (where $U$ is the 2D depth-averaged velocity vector):
$$\frac{\partial H}{\partial t} = R - \nabla\cdot \mathbf{q}$$
where $R$ is the local runoff rate [L/T] and $\mathbf{q}$ has dimensions of volume flow per time per width [L$^2$/T]. The discharge depends on the local depth, bed-surface gradient $\mathbf{S}=-\nabla\eta$ (this is the kinematic wave approximation; $\eta$ is land surface height), and a roughness factor $C_r$:
$$\mathbf{q} = \frac{1}{C_r} \mathbf{S} H^\alpha |S|^{-1/2}$$
Readers may recognize this as a form of the Manning, Chezy, or Darcy-Weisbach equation. If $\alpha = 5/3$ then we have the Manning equation, and $C_r = n$ is "Manning's n". If $\alpha = 3/2$ then we have the Chezy/Darcy-Weisbach equation, and $C_r = 1/C = (f/8g)^{1/2}$ represents the Chezy roughness factor $C$ and the equivalent Darcy-Weisbach factor $f$.
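As a quick numerical illustration of the Manning form (the values below are made up for this check only and are not taken from the examples later in this notebook):
```
# Illustrative check of the Manning form of the unit discharge: q = (1/n) H^(5/3) S^(1/2)
n = 0.01    # Manning roughness (s/m^(1/3)), assumed value
H = 0.005   # water depth (m), assumed value
S = 0.01    # slope (-), assumed value
q = (1.0 / n) * H ** (5.0 / 3.0) * S ** 0.5
print(q)    # unit discharge (m^2/s)
```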
### Numerical solution
The solution method used by this component is locally implicit, and works as follows. At each time step, we iterate from upstream to downstream over the topography. Because we are working downstream, we can assume that we know the total water inflow to a given cell. We solve the following mass conservation equation at each cell:
$$\frac{H^{t+1} - H^t}{\Delta t }= \frac{Q_{in}}{A} - \frac{Q_{out}}{A} + R$$
where $H$ is water depth at a given grid node, $t$ indicates time step number, $\Delta t$ is time step duration, $Q_{in}$ is total inflow discharge, $Q_{out}$ is total outflow discharge, $A$ is cell area, and $R$ is local runoff rate (precipitation minus infiltration; could be negative if runon infiltration is occurring).
The specific outflow discharge leaving a cell along one of its faces is:
$$q = (1/C_r) H^\alpha S^{1/2}$$
where $S$ is the downhill-positive gradient of the link that crosses this particular face. Outflow discharge is zero for links that are flat or "uphill" from the given node. Total discharge out of a cell is then the sum of (specific discharge x face width) over all outflow faces:
$$Q_{out} = \sum_{i=1}^N (1/C_r) H^\alpha S_i^{1/2} W_i$$
where $N$ is the number of outflow faces (i.e., faces where the ground slopes downhill away from the cell's node), and $W_i$ is the width of face $i$.
We use the depth at the cell's node, so this simplifies to:
$$Q_{out} = (1/C_r) H'^\alpha \sum_{i=1}^N S_i^{1/2} W_i$$
Notice that we know everything here except $H'$. The reason we know $Q_{out}$ is that it equals $Q_{in}$ (which is either zero or we calculated it previously) plus $RA$.
We define $H$ in the above as a weighted sum of the "old" (time step $t$) and "new" (time step $t+1$) depth values:
$$H' = w H^{t+1} + (1-w) H^t$$
If $w=1$, the method is fully implicit. If $w=0$, it is a simple forward explicit method.
When we combine these equations, we have an equation that includes the unknown $H^{t+1}$ and a bunch of terms that are known. If $w\ne 0$, it is a nonlinear equation in $H^{t+1}$, and must be solved iteratively. We do this using a root-finding method in the scipy.optimize library.
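To make the local solve concrete, here is a minimal sketch (not the component's actual code) of the implicit update for a single cell, using `scipy.optimize.brentq` and made-up numbers for the inflow, runoff, geometry, and roughness:
```
from scipy.optimize import brentq

# Made-up single-cell values, for illustration only
H_old = 0.01        # depth at time t (m)
Q_in = 0.002        # total inflow discharge (m^3/s)
R = 2.0e-5          # runoff rate (m/s)
A = 4.0             # cell area (m^2)
dt = 10.0           # time-step duration (s)
Cr, alpha, w = 0.01, 5.0 / 3.0, 1.0   # roughness, depth exponent, implicitness weight
sum_sqrt_S_W = 0.2  # sum of S_i^(1/2) * W_i over the downhill faces

def residual(H_new):
    """Mass balance: (H_new - H_old)/dt = Q_in/A - Q_out/A + R."""
    H_eff = w * H_new + (1.0 - w) * H_old
    Q_out = (1.0 / Cr) * H_eff ** alpha * sum_sqrt_S_W
    return (H_new - H_old) / dt - Q_in / A + Q_out / A - R

H_new = brentq(residual, 0.0, 1.0)  # bracket the root between 0 and 1 m
print(H_new)
```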
In order to implement the algorithm, we must already know which of each node's neighbors are lower than it, and what the slopes between them are. We accomplish this using the `FlowAccumulator` and `FlowDirectorMFD` components. Running the `FlowAccumulator` also generates a sorted list (array) of nodes in drainage order.
### The component
Import the needed libraries, then inspect the component's docstring:
```
import copy
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from landlab import RasterModelGrid, imshow_grid
from landlab.components.overland_flow import KinwaveImplicitOverlandFlow
from landlab.io.esri_ascii import read_esri_ascii
print(KinwaveImplicitOverlandFlow.__doc__)
```
The docstring for the `__init__` method will give us a list of parameters:
```
print(KinwaveImplicitOverlandFlow.__init__.__doc__)
```
## Example 1: downpour on a plane
The first example tests that the component can reproduce the expected steady flow pattern on a sloping plane, with a gradient of $S_p$. We'll adopt the Manning equation. Once the system comes into equilibrium, the discharge should increase with distance down the plane according to $q = Rx$. We can use this fact to solve for the corresponding water depth:
$$(1/n) H^{5/3} S^{1/2} = R x$$
which implies
$$H = \left( \frac{nRx}{S^{1/2}} \right)^{3/5}$$
This is the analytical solution against which to test the model.
Pick the initial and run conditions
```
# Process parameters
n = 0.01 # roughness coefficient, (s/m^(1/3))
dep_exp = 5.0 / 3.0 # depth exponent
S = 0.01 # slope of plane
R = 72.0 # runoff rate, mm/hr
# Run-control parameters
run_time = 240.0 # duration of run, (s)
nrows = 5 # number of node rows
ncols = 11 # number of node columns
dx = 2.0 # node spacing, m
dt = 10.0 # time-step size, s
plot_every = 60.0 # plot interval, s
# Derived parameters
num_steps = int(run_time / dt)
```
Create grid and fields:
```
# create and set up grid
grid = RasterModelGrid((nrows, ncols), xy_spacing=dx)
grid.set_closed_boundaries_at_grid_edges(False, True, True, True) # open only on east
# add required field
elev = grid.add_zeros('topographic__elevation', at='node')
# set topography
elev[grid.core_nodes] = S * (np.amax(grid.x_of_node) - grid.x_of_node[grid.core_nodes])
```
Plot topography, first in plan view...
```
imshow_grid(grid, elev)
```
...then as a cross-section:
```
plt.plot(grid.x_of_node, elev)
plt.xlabel('Distance (m)')
plt.ylabel('Height (m)')
plt.grid(True)
# Instantiate the component
olflow = KinwaveImplicitOverlandFlow(grid,
runoff_rate=R,
roughness=n,
depth_exp=dep_exp
)
# Helpful function to plot the profile
def plot_flow_profile(grid, olflow):
"""Plot the middle row of topography and water surface
for the overland flow model olflow."""
nc = grid.number_of_node_columns
nr = grid.number_of_node_rows
startnode = nc * (nr // 2) + 1
midrow = np.arange(startnode, startnode + nc - 1, dtype=int)
topo = grid.at_node['topographic__elevation']
plt.plot(grid.x_of_node[midrow],
topo[midrow] + grid.at_node['surface_water__depth'][midrow],
'b'
)
plt.plot(grid.x_of_node[midrow],
topo[midrow],
'k'
)
plt.xlabel('Distance (m)')
plt.ylabel('Ground and water surface height (m)')
```
Run the component forward in time, plotting the output in the form of a profile:
```
next_plot = plot_every
for i in range(num_steps):
olflow.run_one_step(dt)
if (i + 1) * dt >= next_plot:
plot_flow_profile(grid, olflow)
next_plot += plot_every
# Compare with analytical solution for depth
Rms = R / 3.6e6 # convert to m/s
nc = grid.number_of_node_columns
x = grid.x_of_node[grid.core_nodes][:nc - 2]
Hpred = (n * Rms * x / (S ** 0.5)) ** 0.6
plt.plot(x, Hpred, 'r', label='Analytical')
plt.plot(x,
grid.at_node['surface_water__depth'][grid.core_nodes][:nc - 2],
'b--',
label='Numerical'
)
plt.xlabel('Distance (m)')
plt.ylabel('Water depth (m)')
plt.grid(True)
plt.legend()
```
## Example 2: overland flow on a DEM
For this example, we'll import a small digital elevation model (DEM) for a site in New Mexico, USA.
```
# Process parameters
n = 0.1 # roughness coefficient, (s/m^(1/3))
dep_exp = 5.0 / 3.0 # depth exponent
R = 72.0 # runoff rate, mm/hr
# Run-control parameters
rain_duration = 240.0 # duration of rainfall, s
run_time = 480.0 # duration of run, s
dt = 10.0 # time-step size, s
dem_filename = '../hugo_site_filled.asc'
# Derived parameters
num_steps = int(run_time / dt)
# set up arrays to hold discharge and time
time_since_storm_start = np.arange(0.0, dt * (2 * num_steps + 1), dt)
discharge = np.zeros(2 * num_steps + 1)
# Read the DEM file as a grid with a 'topographic__elevation' field
(grid, elev) = read_esri_ascii(dem_filename, name='topographic__elevation')
# Configure the boundaries: valid right-edge nodes will be open;
# all NODATA (= -9999) nodes will be closed.
grid.status_at_node[grid.nodes_at_right_edge] = grid.BC_NODE_IS_FIXED_VALUE
grid.status_at_node[np.isclose(elev, -9999.)] = grid.BC_NODE_IS_CLOSED
# display the topography
cmap = copy.copy(mpl.cm.get_cmap('pink'))
imshow_grid(grid, elev, colorbar_label='Elevation (m)', cmap=cmap)
```
It would be nice to track discharge at the watershed outlet, but how do we find the outlet location? We actually have several valid nodes along the right-hand edge. We can identify them by the fact that they are (a) at the right-hand edge of the grid, and (b) have positive elevations (the ones with -9999 are outside of the watershed). We'll then keep track of the field `surface_water_inflow__discharge` at these nodes.
```
indices = np.where(elev[grid.nodes_at_right_edge] > 0.0)[0]
outlet_nodes = grid.nodes_at_right_edge[indices]
print('Outlet nodes:')
print(outlet_nodes)
print('Elevations of the outlet nodes:')
print(elev[outlet_nodes])
# Instantiate the component
olflow = KinwaveImplicitOverlandFlow(grid,
runoff_rate=R,
roughness=n,
depth_exp=dep_exp
)
discharge_field = grid.at_node['surface_water_inflow__discharge']
for i in range(num_steps):
olflow.run_one_step(dt)
discharge[i+1] = np.sum(discharge_field[outlet_nodes])
plt.plot(time_since_storm_start[:num_steps], discharge[:num_steps])
plt.xlabel('Time (s)')
plt.ylabel('Discharge (cms)')
plt.grid(True)
cmap = copy.copy(mpl.cm.get_cmap('Blues'))
imshow_grid(grid,
grid.at_node['surface_water__depth'],
cmap=cmap,
colorbar_label='Water depth (m)'
)
```
Now turn down the rain and run it a bit longer...
```
olflow.runoff_rate = 1.0 # just 1 mm/hr
for i in range(num_steps, 2 * num_steps):
olflow.run_one_step(dt)
discharge[i+1] = np.sum(discharge_field[outlet_nodes])
plt.plot(time_since_storm_start, discharge)
plt.xlabel('Time (s)')
plt.ylabel('Discharge (cms)')
plt.grid(True)
cmap = copy.copy(mpl.cm.get_cmap('Blues'))
imshow_grid(grid,
grid.at_node['surface_water__depth'],
cmap=cmap,
colorbar_label='Water depth (m)'
)
```
Voila! A fine hydrograph, and a water-depth map that shows deeper water in the channels (and highlights depressions in the topography).
### Click here for more <a href="https://landlab.readthedocs.io/en/latest/user_guide/tutorials.html">Landlab tutorials</a>
|
github_jupyter
|
# SageMaker Debugger Profiling Report
SageMaker Debugger auto generated this report. You can generate similar reports on all supported training jobs. The report provides summary of training job, system resource usage statistics, framework metrics, rules summary, and detailed analysis from each rule. The graphs and tables are interactive.
**Legal disclaimer:** This report and any recommendations are provided for informational purposes only and are not definitive. You are responsible for making your own independent assessment of the information.
```
import json
import pandas as pd
import glob
import matplotlib.pyplot as plt
import numpy as np
import datetime
from smdebug.profiler.utils import us_since_epoch_to_human_readable_time, ns_since_epoch_to_human_readable_time
from smdebug.core.utils import setup_profiler_report
import bokeh
from bokeh.io import output_notebook, show
from bokeh.layouts import column, row
from bokeh.plotting import figure
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.models import ColumnDataSource, PreText
from math import pi
from bokeh.transform import cumsum
import warnings
from bokeh.models.widgets import Paragraph
from bokeh.models import Legend
from bokeh.util.warnings import BokehDeprecationWarning, BokehUserWarning
warnings.simplefilter('ignore', BokehDeprecationWarning)
warnings.simplefilter('ignore', BokehUserWarning)
output_notebook(hide_banner=True)
processing_job_arn = ""
# Parameters
processing_job_arn = "arn:aws:sagemaker:us-east-1:264082167679:processing-job/pytorch-training-2022-01-2-profilerreport-73c47060"
setup_profiler_report(processing_job_arn)
def create_piechart(data_dict, title=None, height=400, width=400, x1=0, x2=0.1, radius=0.4, toolbar_location='right'):
plot = figure(plot_height=height,
plot_width=width,
toolbar_location=toolbar_location,
tools="hover,wheel_zoom,reset,pan",
tooltips="@phase:@value",
title=title,
x_range=(-radius-x1, radius+x2))
data = pd.Series(data_dict).reset_index(name='value').rename(columns={'index':'phase'})
data['angle'] = data['value']/data['value'].sum() * 2*pi
data['color'] = bokeh.palettes.viridis(len(data_dict))
plot.wedge(x=0, y=0., radius=radius,
start_angle=cumsum('angle', include_zero=True),
end_angle=cumsum('angle'),
line_color="white",
source=data,
fill_color='color',
legend='phase'
)
plot.legend.label_text_font_size = "8pt"
plot.legend.location = 'center_right'
plot.axis.axis_label=None
plot.axis.visible=False
plot.grid.grid_line_color = None
plot.outline_line_color = "white"
return plot
from IPython.display import display, HTML, Markdown, Image
def pretty_print(df):
raw_html = df.to_html().replace("\\n","<br>").replace('<tr>','<tr style="text-align: left;">')
return display(HTML(raw_html))
```
## Training job summary
```
def load_report(rule_name):
try:
report = json.load(open('/opt/ml/processing/output/rule/profiler-output/profiler-reports/'+rule_name+'.json'))
return report
except FileNotFoundError:
print (rule_name + ' not triggered')
job_statistics = {}
report = load_report('MaxInitializationTime')
if report:
if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]:
first_step = report['Details']["step_num"]["first"]
last_step = report['Details']["step_num"]["last"]
tmp = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000)
date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
job_statistics["Start time"] = f"{hour} {day}"
tmp = us_since_epoch_to_human_readable_time(report['Details']['job_end'] * 1000000)
date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
job_statistics["End time"] = f"{hour} {day}"
job_duration_in_seconds = int(report['Details']['job_end'] - report['Details']['job_start'])
job_statistics["Job duration"] = f"{job_duration_in_seconds} seconds"
if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]:
tmp = us_since_epoch_to_human_readable_time(first_step)
date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
job_statistics["Training loop start"] = f"{hour} {day}"
tmp = us_since_epoch_to_human_readable_time(last_step)
date = datetime.datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
job_statistics["Training loop end"] = f"{hour} {day}"
training_loop_duration_in_seconds = int((last_step - first_step) / 1000000)
job_statistics["Training loop duration"] = f"{training_loop_duration_in_seconds} seconds"
initialization_in_seconds = int(first_step/1000000 - report['Details']['job_start'])
job_statistics["Initialization time"] = f"{initialization_in_seconds} seconds"
finalization_in_seconds = int(np.abs(report['Details']['job_end'] - last_step/1000000))
job_statistics["Finalization time"] = f"{finalization_in_seconds} seconds"
initialization_perc = int(initialization_in_seconds / job_duration_in_seconds * 100)
job_statistics["Initialization"] = f"{initialization_perc} %"
training_loop_perc = int(training_loop_duration_in_seconds / job_duration_in_seconds * 100)
job_statistics["Training loop"] = f"{training_loop_perc} %"
finalization_perc = int(finalization_in_seconds / job_duration_in_seconds * 100)
job_statistics["Finalization"] = f"{finalization_perc} %"
if report:
text = """The following table gives a summary about the training job. The table includes information about when the training job started and ended, how much time initialization, training loop and finalization took."""
if len(job_statistics) > 0:
df = pd.DataFrame.from_dict(job_statistics, orient='index')
start_time = us_since_epoch_to_human_readable_time(report['Details']['job_start'] * 1000000)
date = datetime.datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
duration = job_duration_in_seconds
text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds."""
#pretty_print(df)
if "first" in report['Details']["step_num"] and "last" in report['Details']["step_num"]:
if finalization_perc < 0:
job_statistics["Finalization%"] = 0
if training_loop_perc < 0:
job_statistics["Training loop"] = 0
if initialization_perc < 0:
job_statistics["Initialization"] = 0
else:
text = f"""{text} \n Your training job started on {day} at {hour} and ran for {duration} seconds."""
if len(job_statistics) > 0:
df2 = df.reset_index()
df2.columns = ["0", "1"]
source = ColumnDataSource(data=df2)
columns = [TableColumn(field='0', title=""),
TableColumn(field='1', title="Job Statistics"),]
table = DataTable(source=source, columns=columns, width=450, height=380)
plot = None
if "Initialization" in job_statistics:
piechart_data = {}
piechart_data["Initialization"] = initialization_perc
piechart_data["Training loop"] = training_loop_perc
piechart_data["Finalization"] = finalization_perc
plot = create_piechart(piechart_data,
height=350,
width=500,
x1=0.15,
x2=0.15,
radius=0.15,
toolbar_location=None)
if plot != None:
paragraph = Paragraph(text=f"""{text}""", width = 800)
show(column(paragraph, row(table, plot)))
else:
paragraph = Paragraph(text=f"""{text}. No step information was profiled from your training job. The time spent on initialization and finalization cannot be computed.""" , width = 800)
show(column(paragraph, row(table)))
```
## System usage statistics
```
report = load_report('OverallSystemUsage')
text1 = ''
if report:
if "GPU" in report["Details"]:
for node_id in report["Details"]["GPU"]:
gpu_p95 = report["Details"]["GPU"][node_id]["p95"]
gpu_p50 = report["Details"]["GPU"][node_id]["p50"]
cpu_p95 = report["Details"]["CPU"][node_id]["p95"]
cpu_p50 = report["Details"]["CPU"][node_id]["p50"]
if gpu_p95 < 70 and cpu_p95 < 70:
text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%.
The 95th percentile of the total CPU utilization is only {int(cpu_p95)}%. Node {node_id} is underutilized.
You may want to consider switching to a smaller instance type."""
elif gpu_p95 < 70 and cpu_p95 > 70:
text1 = f"""{text1}The 95th percentile of the total GPU utilization on node {node_id} is only {int(gpu_p95)}%.
However, the 95th percentile of the total CPU utilization is {int(cpu_p95)}%. GPUs on node {node_id} are underutilized,
likely because of CPU bottlenecks."""
elif gpu_p50 > 70:
text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%.
GPUs on node {node_id} are well utilized."""
else:
text1 = f"""{text1}The median total GPU utilization on node {node_id} is {int(gpu_p50)}%.
The median total CPU utilization is {int(cpu_p50)}%."""
else:
for node_id in report["Details"]["CPU"]:
cpu_p95 = report["Details"]["CPU"][node_id]["p95"]
if cpu_p95 > 70:
text1 = f"""{text1}The 95th percentile of the total CPU utilization on node {node_id} is {int**(cpu_p95)}%. CPUs on node {node_id} are well utilized."""
text1 = Paragraph(text=f"""{text1}""", width=1100)
text2 = Paragraph(text=f"""The following table shows statistics of resource utilization per worker (node),
such as the total CPU and GPU utilization, and the memory utilization on CPU and GPU.
The table also includes the total I/O wait time and the total amount of data sent or received in bytes.
The table shows min and max values as well as the p99, p95, and p50 percentiles.""", width=900)
pd.set_option('display.float_format', lambda x: '%.2f' % x)
rows = []
units = {"CPU": "percentage", "CPU memory": "percentage", "GPU": "percentage", "Network": "bytes", "GPU memory": "percentage", "I/O": "percentage"}
if report:
for metric in report['Details']:
for node_id in report['Details'][metric]:
values = report['Details'][metric][node_id]
rows.append([node_id, metric, units[metric], values['max'], values['p99'], values['p95'], values['p50'], values['min']])
df = pd.DataFrame(rows)
df.columns = ['Node', 'metric', 'unit', 'max', 'p99', 'p95', 'p50', 'min']
df2 = df.reset_index()
source = ColumnDataSource(data=df2)
columns = [TableColumn(field='Node', title="node"),
TableColumn(field='metric', title="metric"),
TableColumn(field='unit', title="unit"),
TableColumn(field='max', title="max"),
TableColumn(field='p99', title="p99"),
TableColumn(field='p95', title="p95"),
TableColumn(field='p50', title="p50"),
TableColumn(field='min', title="min"),]
table = DataTable(source=source, columns=columns, width=800, height=df2.shape[0]*30)
show(column( text1, text2, row(table)))
report = load_report('OverallFrameworkMetrics')
if report:
if 'Details' in report:
display(Markdown(f"""## Framework metrics summary"""))
plots = []
text = ''
if 'phase' in report['Details']:
text = f"""The following two pie charts show the time spent on the TRAIN phase, the EVAL phase,
and others. The 'others' includes the time spent between steps (after one step has finished and before
the next step has started). Ideally, most of the training time should be spent on the
TRAIN and EVAL phases. If TRAIN/EVAL were not specified in the training script, steps will be recorded as
GLOBAL."""
if 'others' in report['Details']['phase']:
others = float(report['Details']['phase']['others'])
if others > 25:
text = f"""{text} Your training job spent quite a significant amount of time ({round(others,2)}%) in phase "others".
You should check what is happening in between the steps."""
plot = create_piechart(report['Details']['phase'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between the time spent on the TRAIN/EVAL phase and others")
plots.append(plot)
if 'forward_backward' in report['Details']:
event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)
perc = report['Details']['forward_backward'][event]
text = f"""{text} The pie chart on the right shows a more detailed breakdown.
It shows that {int(perc)}% of the time was spent in event "{event}"."""
if perc > 70:
text = f"""There is quite a significant difference between the time spent on forward and backward
pass."""
else:
text = f"""{text} It shows that {int(perc)}% of the training time
was spent on "{event}"."""
plot = create_piechart(report['Details']['forward_backward'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between forward and backward pass")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=1100)
show(column(paragraph, row(plots)))
plots = []
text=''
if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:
key = list(report['Details']['ratio'].keys())[0]
ratio = report['Details']['ratio'][key]
text = f"""The following piechart shows a breakdown of the CPU/GPU operators.
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}"."""
plot = create_piechart(report['Details']['ratio'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between the time spent on CPU/GPU operators")
plots.append(plot)
if 'general' in report['Details']:
event = max(report['Details']['general'], key=report['Details']['general'].get)
perc = report['Details']['general'][event]
plot = create_piechart(report['Details']['general'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General framework operations")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=1100)
show(column(paragraph, row(plots)))
plots = []
text = ''
if 'horovod' in report['Details']:
display(Markdown(f"""#### Overview: Horovod metrics"""))
event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)
perc = report['Details']['horovod'][event]
text = f"""{text} The following pie chart shows a detailed breakdown of the Horovod metrics profiled
from your training job. The most expensive function was "{event}" with {int(perc)}%."""
plot = create_piechart(report['Details']['horovod'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="Horovod metrics ")
paragraph = Paragraph(text=text, width=1100)
show(column(paragraph, row(plot)))
pd.set_option('display.float_format', lambda x: '%.2f' % x)
rows = []
values = []
if report:
if 'CPU_total' in report['Details']:
display(Markdown(f"""#### Overview: CPU operators"""))
event = max(report['Details']['CPU'], key=report['Details']['CPU'].get)
perc = report['Details']['CPU'][event]
for function in report['Details']['CPU']:
percentage = round(report['Details']['CPU'][function],2)
time = report['Details']['CPU_total'][function]
rows.append([percentage, time, function])
df = pd.DataFrame(rows)
df.columns = ['percentage', 'time', 'operator']
df = df.sort_values(by=['percentage'], ascending=False)
source = ColumnDataSource(data=df)
columns = [TableColumn(field='percentage', title="Percentage"),
TableColumn(field='time', title="Cumulative time in microseconds"),
TableColumn(field='operator', title="CPU operator"),]
table = DataTable(source=source, columns=columns, width=550, height=350)
text = Paragraph(text=f"""The following table shows a list of operators that ran on the CPUs.
The most expensive operator on the CPUs was "{event}" with {int(perc)} %.""")
plot = create_piechart(report['Details']['CPU'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
)
show(column(text, row(table, plot)))
pd.set_option('display.float_format', lambda x: '%.2f' % x)
rows = []
values = []
if report:
if 'GPU_total' in report['Details']:
display(Markdown(f"""#### Overview: GPU operators"""))
event = max(report['Details']['GPU'], key=report['Details']['GPU'].get)
perc = report['Details']['GPU'][event]
for function in report['Details']['GPU']:
percentage = round(report['Details']['GPU'][function],2)
time = report['Details']['GPU_total'][function]
rows.append([percentage, time, function])
df = pd.DataFrame(rows)
df.columns = ['percentage', 'time', 'operator']
df = df.sort_values(by=['percentage'], ascending=False)
source = ColumnDataSource(data=df)
columns = [TableColumn(field='percentage', title="Percentage"),
TableColumn(field='time', title="Cumulative time in microseconds"),
TableColumn(field='operator', title="GPU operator"),]
table = DataTable(source=source, columns=columns, width=450, height=350)
text = Paragraph(text=f"""The following table shows a list of operators that your training job ran on GPU.
The most expensive operator on GPU was "{event}" with {int(perc)} %""")
plot = create_piechart(report['Details']['GPU'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
)
show(column(text, row(table, plot)))
```
## Rules summary
```
description = {}
description['CPUBottleneck'] = 'Checks if the CPU utilization is high and the GPU utilization is low. \
It might indicate CPU bottlenecks, where the GPUs are waiting for data to arrive \
from the CPUs. The rule evaluates the CPU and GPU utilization rates, and triggers the issue \
if the time spent on the CPU bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.'
description['IOBottleneck'] = 'Checks if the data I/O wait time is high and the GPU utilization is low. \
It might indicate IO bottlenecks where GPU is waiting for data to arrive from storage. \
The rule evaluates the I/O and GPU utilization rates and triggers the issue \
if the time spent on the IO bottlenecks exceeds a threshold percent of the total training time. The default threshold is 50 percent.'
description['Dataloader'] = 'Checks how many data loaders are running in parallel and whether the total number is equal to the number \
of available CPU cores. The rule triggers if the number is much smaller or larger than the number of available cores. \
If too small, it might lead to low GPU utilization. If too large, it might impact other compute-intensive operations on the CPU.'
description['GPUMemoryIncrease'] = 'Measures the average GPU memory footprint and triggers if there is a large increase.'
description['BatchSize'] = 'Checks if GPUs are underutilized because the batch size is too small. \
To detect this problem, the rule analyzes the average GPU memory footprint, \
the CPU and the GPU utilization. '
description['LowGPUUtilization'] = 'Checks if the GPU utilization is low or fluctuating. \
This can happen due to bottlenecks, blocking calls for synchronizations, \
or a small batch size.'
description['MaxInitializationTime'] = 'Checks if the time spent on initialization exceeds a threshold percent of the total training time. \
The rule waits until the first step of training loop starts. The initialization can take longer \
if downloading the entire dataset from Amazon S3 in File mode. The default threshold is 20 minutes.'
description['LoadBalancing'] = 'Detects workload balancing issues across GPUs. \
Workload imbalance can occur in training jobs with data parallelism. \
The gradients are accumulated on a primary GPU, and this GPU might be overused \
relative to the other GPUs, reducing the efficiency of data parallelization.'
description['StepOutlier'] = 'Detects outliers in step duration. The step duration for forward and backward pass should be \
roughly the same throughout the training. If there are significant outliers, \
it may indicate a system stall or bottleneck issues.'
recommendation = {}
recommendation['CPUBottleneck'] = 'Consider increasing the number of data loaders \
or applying data pre-fetching.'
recommendation['IOBottleneck'] = 'Pre-fetch data or choose different file formats, such as binary formats that \
improve I/O performance.'
recommendation['Dataloader'] = 'Change the number of data loader processes.'
recommendation['GPUMemoryIncrease'] = 'Choose a larger instance type with more memory if footprint is close to maximum available memory.'
recommendation['BatchSize'] = 'The batch size is too small, and GPUs are underutilized. Consider running on a smaller instance type or increasing the batch size.'
recommendation['LowGPUUtilization'] = 'Check if there are bottlenecks, minimize blocking calls, \
change distributed training strategy, or increase the batch size.'
recommendation['MaxInitializationTime'] = 'Initialization takes too long. \
If using File mode, consider switching to Pipe mode if you are using the TensorFlow framework.'
recommendation['LoadBalancing'] = 'Choose a different distributed training strategy or \
a different distributed training framework.'
recommendation['StepOutlier'] = 'Check if there are any bottlenecks (CPU, I/O) correlated to the step outliers.'
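# Illustrative sketch (not part of the auto-generated report, kept as comments so this
# cell still runs): how the Dataloader, CPUBottleneck, and IOBottleneck recommendations
# above are often applied in a PyTorch training script. `train_dataset` and the batch
# size are placeholders.
#
#   import os
#   from torch.utils.data import DataLoader
#
#   loader = DataLoader(
#       train_dataset,               # hypothetical dataset object
#       batch_size=64,               # increase if the BatchSize rule flags underutilized GPUs
#       num_workers=os.cpu_count(),  # match dataloader workers to the available CPU cores
#       pin_memory=True,             # enables faster host-to-GPU transfers
#       prefetch_factor=2,           # pre-fetch batches ahead of time
#   )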
files = glob.glob('/opt/ml/processing/output/rule/profiler-output/profiler-reports/*json')
summary = {}
for i in files:
rule_name = i.split('/')[-1].replace('.json','')
if rule_name == "OverallSystemUsage" or rule_name == "OverallFrameworkMetrics":
continue
rule_report = json.load(open(i))
summary[rule_name] = {}
summary[rule_name]['Description'] = description[rule_name]
summary[rule_name]['Recommendation'] = recommendation[rule_name]
summary[rule_name]['Number of times rule triggered'] = rule_report['RuleTriggered']
#summary[rule_name]['Number of violations'] = rule_report['Violations']
summary[rule_name]['Number of datapoints'] = rule_report['Datapoints']
summary[rule_name]['Rule parameters'] = rule_report['RuleParameters']
df = pd.DataFrame.from_dict(summary, orient='index')
df = df.sort_values(by=['Number of times rule triggered'], ascending=False)
display(Markdown(f"""The following table shows a profiling summary of the Debugger built-in rules.
The table is sorted by the rules that triggered the most frequently. During your training job, the {df.index[0]} rule
was the most frequently triggered. It processed {df.values[0,3]} datapoints and was triggered {df.values[0,2]} times."""))
with pd.option_context('display.colheader_justify','left'):
pretty_print(df)
analyse_phase = "training"
if job_statistics and "initialization_in_seconds" in job_statistics:
if job_statistics["initialization_in_seconds"] > job_statistics["training_loop_duration_in_seconds"]:
analyse_phase = "initialization"
time = job_statistics["initialization_in_seconds"]
perc = job_statistics["initialization_%"]
display(Markdown(f"""The initialization phase took {int(time)} seconds, which is {int(perc)}%*
of the total training time. Since the training loop has taken the most time,
we dive deep into the events occurring during this phase"""))
display(Markdown("""## Analyzing initialization\n\n"""))
time = job_statistics["training_loop_duration_in_seconds"]
perc = job_statistics["training_loop_%"]
display(Markdown(f"""The training loop lasted for {int(time)} seconds which is {int(perc)}% of the training job time.
Since the training loop has taken the most time, we dive deep into the events occured during this phase."""))
if analyse_phase == 'training':
display(Markdown("""## Analyzing the training loop\n\n"""))
if analyse_phase == "initialization":
display(Markdown("""### MaxInitializationTime\n\nThis rule helps to detect if the training initialization is taking too much time. \nThe rule waits until first step is available. The rule takes the parameter `threshold` that defines how many minutes to wait for the first step to become available. Default is 20 minutes.\nYou can run the rule locally in the following way:
"""))
_ = load_report("MaxInitializationTime")
if analyse_phase == "training":
display(Markdown("""### Step duration analysis"""))
report = load_report('StepOutlier')
if report:
parameters = report['RuleParameters']
params = report['RuleParameters'].split('\n')
stddev = params[3].split(':')[1]
mode = params[1].split(':')[1]
n_outlier = params[2].split(':')[1]
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
text = f"""The StepOutlier rule measures step durations and checks for outliers. The rule
returns True if duration is larger than {stddev} times the standard deviation. The rule
also takes the parameter mode, that specifies whether steps from training or validation phase
should be checked. In your processing job mode was specified as {mode}.
Typically the first step is taking significantly more time and to avoid the
rule triggering immediately, one can use n_outliers to specify the number of outliers to ignore.
n_outliers was set to {n_outlier}.
The rule analysed {datapoints} datapoints and triggered {triggered} times.
"""
paragraph = Paragraph(text=text, width=900)
show(column(paragraph))
if report and len(report['Details']['step_details']) > 0:
for node_id in report['Details']['step_details']:
tmp = report['RuleParameters'].split('threshold:')
threshold = tmp[1].split('\n')[0]
n_outliers = report['Details']['step_details'][node_id]['number_of_outliers']
mean = report['Details']['step_details'][node_id]['step_stats']['mean']
stddev = report['Details']['step_details'][node_id]['stddev']
phase = report['Details']['step_details'][node_id]['phase']
display(Markdown(f"""**Step durations on node {node_id}:**"""))
display(Markdown(f"""The following table is a summary of the statistics of step durations measured on node {node_id}.
The rule has analyzed the step duration from {phase} phase.
The average step duration on node {node_id} was {round(mean, 2)}s.
The rule detected {n_outliers} outliers, where step duration was larger than {threshold} times the standard deviation of {stddev}s
\n"""))
step_stats_df = pd.DataFrame.from_dict(report['Details']['step_details'][node_id]['step_stats'], orient='index').T
step_stats_df.index = ['Step Durations in [s]']
pretty_print(step_stats_df)
display(Markdown(f"""The following histogram shows the step durations measured on the different nodes.
You can turn on or turn off the visualization of histograms by selecting or unselecting the labels in the legend."""))
plot = figure(plot_height=450,
plot_width=850,
title=f"""Step durations""")
colors = bokeh.palettes.viridis(len(report['Details']['step_details']))
for index, node_id in enumerate(report['Details']['step_details']):
probs = report['Details']['step_details'][node_id]['probs']
binedges = report['Details']['step_details'][node_id]['binedges']
plot.quad( top=probs,
bottom=0,
left=binedges[:-1],
right=binedges[1:],
line_color="white",
fill_color=colors[index],
fill_alpha=0.7,
legend=node_id)
plot.add_layout(Legend(), 'right')
plot.y_range.start = 0
plot.xaxis.axis_label = f"""Step durations in [s]"""
plot.yaxis.axis_label = "Occurrences"
plot.grid.grid_line_color = "white"
plot.legend.click_policy="hide"
plot.legend.location = 'center_right'
show(plot)
if report['RuleTriggered'] > 0:
text=f"""To get a better understanding of what may have caused those outliers,
we correlate the timestamps of step outliers with other framework metrics that happened at the same time.
The left chart shows how much time was spent in the different framework
metrics aggregated by event phase. The chart on the right shows the histogram of normal step durations (without
outliers). The following chart shows how much time was spent in the different
framework metrics when step outliers occurred. In this chart, framework metrics are not aggregated by phase."""
plots = []
if 'phase' in report['Details']:
text = f"""{text} The chart (in the middle) shows whether step outliers mainly happened during TRAIN or EVAL phase.
"""
plot = create_piechart(report['Details']['phase'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between the time spent on the TRAIN/EVAL phase")
plots.append(plot)
if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:
event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)
perc = report['Details']['forward_backward'][event]
text = f"""{text} The pie chart on the right shows a detailed breakdown.
It shows that {int(perc)}% of the training time was spent on event "{event}"."""
plot = create_piechart(report['Details']['forward_backward'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The Ratio between forward and backward pass")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:
key = list(report['Details']['ratio'].keys())[0]
ratio = report['Details']['ratio'][key]
text = f"""The following pie chart shows a breakdown of the CPU/GPU operators executed during the step outliers.
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}"."""
plot = create_piechart(report['Details']['ratio'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between CPU/GPU operators")
plots.append(plot)
if 'general' in report['Details'] and len(report['Details']['general']) > 0:
event = max(report['Details']['general'], key=report['Details']['general'].get)
perc = report['Details']['general'][event]
plot = create_piechart(report['Details']['general'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:
event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)
perc = report['Details']['horovod'][event]
text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been
recorded when step outliers happened. The most expensive function was "{event}" with {int(perc)}%."""
plot = create_piechart(report['Details']['horovod'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plot)))
if analyse_phase == "training":
display(Markdown("""### GPU utilization analysis\n\n"""))
display(Markdown("""**Usage per GPU** \n\n"""))
report = load_report('LowGPUUtilization')
if report:
params = report['RuleParameters'].split('\n')
threshold_p95 = params[0].split(':')[1]
threshold_p5 = params[1].split(':')[1]
window = params[2].split(':')[1]
patience = params[3].split(':')[1]
violations = report['Violations']
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
text=Paragraph(text=f"""The LowGPUUtilization rule checks for a low and fluctuating GPU usage. If the GPU usage is
consistently low, it might be caused by bottlenecks or a small batch size. If usage is heavily
fluctuating, it can be due to bottlenecks or blocking calls. The rule computed the 95th and 5th
percentile of GPU utilization on {window} continuous datapoints and found {violations} cases where
p95 was above {threshold_p95}% and p5 was below {threshold_p5}%. If p95 is high and p5 is low,
it might indicate that the GPU usage is highly fluctuating. If both values are very low,
it would mean that the machine is underutilized. During initialization, the GPU usage is likely zero,
so the rule skipped the first {patience} data points.
The rule analysed {datapoints} datapoints and triggered {triggered} times.""", width=800)
show(text)
if len(report['Details']) > 0:
timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])
date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider
to either switch to a smaller instance type or to increase the batch size.
The last time that the LowGPUUtilization rule was triggered in your training job was on {day} at {hour}.
The following boxplots are a snapshot from the timestamps.
They show the utilization per GPU (without outliers).
To get a better understanding of the workloads throughout the whole training,
you can check the workload histogram in the next section.""", width=800)
show(text)
del report['Details']['last_timestamp']
for node_id in report['Details']:
plot = figure(plot_height=350,
plot_width=1000,
toolbar_location='right',
tools="hover,wheel_zoom,reset,pan",
title=f"Node {node_id}",
x_range=(0,17),
)
for index, key in enumerate(report['Details'][node_id]):
display(Markdown(f"""**GPU utilization of {key} on node {node_id}:**"""))
text = ""
gpu_max = report['Details'][node_id][key]['gpu_max']
p_95 = report['Details'][node_id][key]['gpu_95']
p_5 = report['Details'][node_id][key]['gpu_5']
text = f"""{text} The max utilization of {key} on node {node_id} was {gpu_max}%"""
if p_95 < int(threshold_p95):
text = f"""{text} and the 95th percentile was only {p_95}%.
{key} on node {node_id} is underutilized"""
if p_5 < int(threshold_p5):
text = f"""{text} and the 5th percentile was only {p_5}%"""
if p_95 - p_5 > 50:
text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite
significant, which means that utilization on {key} is fluctuating quite a lot.\n"""
upper = report['Details'][node_id][key]['upper']
lower = report['Details'][node_id][key]['lower']
p75 = report['Details'][node_id][key]['p75']
p25 = report['Details'][node_id][key]['p25']
p50 = report['Details'][node_id][key]['p50']
plot.segment(index+1, upper, index+1, p75, line_color="black")
plot.segment(index+1, lower, index+1, p25, line_color="black")
plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black")
plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black")
plot.rect(index+1, lower, 0.2, 0.01, line_color="black")
plot.rect(index+1, upper, 0.2, 0.01, line_color="black")
plot.xaxis.major_label_overrides[index+1] = key
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = "white"
plot.grid.grid_line_width = 0
plot.xaxis.major_label_text_font_size="10px"
text=Paragraph(text=f"""{text}""", width=900)
show(text)
plot.yaxis.axis_label = "Utilization in %"
plot.xaxis.ticker = np.arange(index+2)
show(plot)
if analyse_phase == "training":
display(Markdown("""**Workload balancing**\n\n"""))
report = load_report('LoadBalancing')
if report:
params = report['RuleParameters'].split('\n')
threshold = params[0].split(':')[1]
patience = params[1].split(':')[1]
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
paragraph = Paragraph(text=f"""The LoadBalancing rule helps to detect issues in workload balancing
between multiple GPUs.
It computes a histogram of GPU utilization values for each GPU and then compares the
similarity between histograms. The rule checked whether the distance between histograms is larger than the
threshold of {threshold}.
During initialization utilization is likely zero, so the rule skipped the first {patience} data points.
""", width=900)
show(paragraph)
if len(report['Details']) > 0:
for node_id in report['Details']:
text = f"""The following histogram shows the workload per GPU on node {node_id}.
You can enable/disable the visualization of a workload by clicking on the label in the legend.
"""
if len(report['Details']) == 1 and len(report['Details'][node_id]['workloads']) == 1:
text = f"""{text} Your training job only used one GPU so there is no workload balancing issue."""
plot = figure(plot_height=450,
plot_width=850,
x_range=(-1,100),
title=f"""Workloads on node {node_id}""")
colors = bokeh.palettes.viridis(len(report['Details'][node_id]['workloads']))
for index, gpu_id2 in enumerate(report['Details'][node_id]['workloads']):
probs = report['Details'][node_id]['workloads'][gpu_id2]
plot.quad( top=probs,
bottom=0,
left=np.arange(0,98,2),
right=np.arange(2,100,2),
line_color="white",
fill_color=colors[index],
fill_alpha=0.8,
legend=gpu_id2 )
plot.y_range.start = 0
plot.xaxis.axis_label = f"""Utilization"""
plot.yaxis.axis_label = "Occurrences"
plot.grid.grid_line_color = "white"
plot.legend.click_policy="hide"
paragraph = Paragraph(text=text)
show(column(paragraph, plot))
if "distances" in report['Details'][node_id]:
text = f"""The rule identified workload balancing issues on node {node_id}
where workloads differed by more than the threshold of {threshold}.
"""
for index, gpu_id2 in enumerate(report['Details'][node_id]['distances']):
for gpu_id1 in report['Details'][node_id]['distances'][gpu_id2]:
distance = round(report['Details'][node_id]['distances'][gpu_id2][gpu_id1], 2)
text = f"""{text} The difference of workload between {gpu_id2} and {gpu_id1} is: {distance}."""
paragraph = Paragraph(text=f"""{text}""", width=900)
show(column(paragraph))
if analyse_phase == "training":
display(Markdown("""### Dataloading analysis\n\n"""))
report = load_report('Dataloader')
if report:
params = report['RuleParameters'].split("\n")
min_threshold = params[0].split(':')[1]
max_threshold = params[1].split(':')[1]
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
text=f"""The number of dataloader workers can greatly affect the overall performance
of your training job. The rule analyzed the number of dataloading processes that have been running in
parallel on the training instance and compares it against the total number of cores.
The rule checked if the number of processes is smaller than {min_threshold}% or larger than
{max_threshold}% the total number of cores. Having too few dataloader workers can slowdown data preprocessing and lead to GPU
underutilization. Having too many dataloader workers may hurt the
overall performance if you are running other compute intensive tasks on the CPU.
The rule analysed {datapoints} datapoints and triggered {triggered} times."""
paragraph = Paragraph(text=f"{text}", width=900)
show(paragraph)
text = ""
if 'cores' in report['Details']:
cores = int(report['Details']['cores'])
dataloaders = report['Details']['dataloaders']
if dataloaders < cores:
text=f"""{text} Your training instance provided {cores} CPU cores, however your training job only
ran on average {dataloaders} dataloader workers in parallel. We recommend you to increase the number of
dataloader workers."""
if dataloaders > cores:
text=f"""{text} Your training instance provided {cores} CPU cores, however your training job ran
on average {dataloaders} dataloader workers. We recommed you to decrease the number of dataloader
workers."""
if 'pin_memory' in report['Details'] and report['Details']['pin_memory'] == False:
text=f"""{text} Using pinned memory also improves performance because it enables fast data transfer to CUDA-enabled GPUs.
The rule detected that your training job was not using pinned memory.
If you are using the PyTorch DataLoader, you can enable this by setting pin_memory=True."""
if 'prefetch' in report['Details'] and report['Details']['prefetch'] == False:
text=f"""{text} It appears that your training job did not perform any data pre-fetching. Pre-fetching can improve your
data input pipeline as it produces the data ahead of time."""
paragraph = Paragraph(text=f"{text}", width=900)
show(paragraph)
colors=bokeh.palettes.viridis(10)
if "dataloading_time" in report['Details']:
median = round(report['Details']["dataloading_time"]['p50'],4)
p95 = round(report['Details']["dataloading_time"]['p95'],4)
p25 = round(report['Details']["dataloading_time"]['p25'],4)
binedges = report['Details']["dataloading_time"]['binedges']
probs = report['Details']["dataloading_time"]['probs']
text=f"""The following histogram shows the distribution of dataloading times that have been measured throughout your training job. The median dataloading time was {median}s.
The 95th percentile was {p95}s and the 25th percentile was {p25}s."""
plot = figure(plot_height=450,
plot_width=850,
toolbar_location='right',
tools="hover,wheel_zoom,reset,pan",
x_range=(binedges[0], binedges[-1])
)
plot.quad( top=probs,
bottom=0,
left=binedges[:-1],
right=binedges[1:],
line_color="white",
fill_color=colors[0],
fill_alpha=0.8,
legend="Dataloading events" )
plot.y_range.start = 0
plot.xaxis.axis_label = f"""Dataloading in [s]"""
plot.yaxis.axis_label = "Occurrences"
plot.grid.grid_line_color = "white"
plot.legend.click_policy="hide"
paragraph = Paragraph(text=f"{text}", width=900)
show(column(paragraph, plot))
if analyse_phase == "training":
display(Markdown(""" ### Batch size"""))
report = load_report('BatchSize')
if report:
params = report['RuleParameters'].split('\n')
cpu_threshold_p95 = int(params[0].split(':')[1])
gpu_threshold_p95 = int(params[1].split(':')[1])
gpu_memory_threshold_p95 = int(params[2].split(':')[1])
patience = int(params[3].split(':')[1])
window = int(params[4].split(':')[1])
violations = report['Violations']
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
text = Paragraph(text=f"""The BatchSize rule helps to detect if GPU is underutilized because of the batch size being
too small. To detect this the rule analyzes the GPU memory footprint, CPU and GPU utilization. The rule checked if the 95th percentile of CPU utilization is below cpu_threshold_p95 of
{cpu_threshold_p95}%, the 95th percentile of GPU utilization is below gpu_threshold_p95 of {gpu_threshold_p95}% and the 95th percentile of memory footprint \
below gpu_memory_threshold_p95 of {gpu_memory_threshold_p95}%. In your training job this happened {violations} times. \
The rule skipped the first {patience} datapoints. The rule computed the percentiles over window size of {window} continuous datapoints.\n
The rule analysed {datapoints} datapoints and triggered {triggered} times.
""", width=800)
show(text)
if len(report['Details']) >0:
timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])
date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
del report['Details']['last_timestamp']
text = Paragraph(text=f"""Your training job is underutilizing the instance. You may want to consider
either switch to a smaller instance type or to increase the batch size.
The last time the BatchSize rule triggered in your training job was on {day} at {hour}.
The following boxplots are a snapshot from the timestamps. They the total
CPU utilization, the GPU utilization, and the GPU memory usage per GPU (without outliers).""",
width=800)
show(text)
for node_id in report['Details']:
xmax = max(20, len(report['Details'][node_id]))
plot = figure(plot_height=350,
plot_width=1000,
toolbar_location='right',
tools="hover,wheel_zoom,reset,pan",
title=f"Node {node_id}",
x_range=(0,xmax)
)
for index, key in enumerate(report['Details'][node_id]):
upper = report['Details'][node_id][key]['upper']
lower = report['Details'][node_id][key]['lower']
p75 = report['Details'][node_id][key]['p75']
p25 = report['Details'][node_id][key]['p25']
p50 = report['Details'][node_id][key]['p50']
plot.segment(index+1, upper, index+1, p75, line_color="black")
plot.segment(index+1, lower, index+1, p25, line_color="black")
plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black")
plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black")
plot.rect(index+1, lower, 0.2, 0.01, line_color="black")
plot.rect(index+1, upper, 0.2, 0.01, line_color="black")
plot.xaxis.major_label_overrides[index+1] = key
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = "white"
plot.grid.grid_line_width = 0
plot.xaxis.major_label_text_font_size="10px"
plot.xaxis.ticker = np.arange(index+2)
plot.yaxis.axis_label = "Utilization in %"
show(plot)
if analyse_phase == "training":
display(Markdown("""### CPU bottlenecks\n\n"""))
report = load_report('CPUBottleneck')
if report:
params = report['RuleParameters'].split('\n')
threshold = int(params[0].split(':')[1])
cpu_threshold = int(params[1].split(':')[1])
gpu_threshold = int(params[2].split(':')[1])
patience = int(params[3].split(':')[1])
violations = report['Violations']
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
if report['Violations'] > 0:
perc = int(report['Violations']/report['Datapoints']*100)
else:
perc = 0
if perc < threshold:
string = 'below'
else:
string = 'above'
text = f"""The CPUBottleneck rule checked when the CPU utilization was above cpu_threshold of {cpu_threshold}%
and GPU utilization was below gpu_threshold of {gpu_threshold}%.
During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints.
With this configuration, the rule found {violations} CPU bottlenecks, which is {perc}% of the total time. This is {string} the threshold of {threshold}%.
The rule analysed {datapoints} data points and triggered {triggered} times."""
paragraph = Paragraph(text=text, width=900)
show(paragraph)
if report:
plots = []
text = ""
if report['RuleTriggered'] > 0:
low_gpu = report['Details']['low_gpu_utilization']
cpu_bottleneck = {}
cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"]
cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"])
cpu_bottleneck["Low GPU usage due to CPU bottlenecks"] = len(report["Details"]["bottlenecks"])
n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2)
text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}%
and how many of those datapoints were likely caused by a CPU bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization
below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by CPU bottlenecks.
"""
plot = create_piechart(cpu_bottleneck,
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="Low GPU usage caused by CPU bottlenecks")
plots.append(plot)
if 'phase' in report['Details']:
text = f"""{text} The chart (in the middle) shows whether CPU bottlenecks mainly
happened during train/validation phase.
"""
plot = create_piechart(report['Details']['phase'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between time spent on TRAIN/EVAL phase")
plots.append(plot)
if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:
event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)
perc = report['Details']['forward_backward'][event]
text = f"""{text} The pie charts on the right shows a more detailed breakdown.
It shows that {int(perc)}% of the training time was spent on event {event}"""
plot = create_piechart(report['Details']['forward_backward'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between forward and backward pass")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:
key = list(report['Details']['ratio'].keys())[0]
ratio = report['Details']['ratio'][key]
text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened during CPU bottlenecks.
It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}"."""
plot = create_piechart(report['Details']['ratio'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between CPU/GPU operators")
plots.append(plot)
if 'general' in report['Details'] and len(report['Details']['general']) > 0:
event = max(report['Details']['general'], key=report['Details']['general'].get)
perc = report['Details']['general'][event]
plot = create_piechart(report['Details']['general'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:
event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)
perc = report['Details']['horovod'][event]
text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics
that have been recorded when the CPU bottlenecks happened. The most expensive function was
"{event}" with {int(perc)}%."""
plot = create_piechart(report['Details']['horovod'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plot)))
if analyse_phase == "training":
display(Markdown("""### I/O bottlenecks\n\n"""))
report = load_report('IOBottleneck')
if report:
params = report['RuleParameters'].split('\n')
threshold = int(params[0].split(':')[1])
io_threshold = int(params[1].split(':')[1])
gpu_threshold = int(params[2].split(':')[1])
patience = int(params[3].split(':')[1])
violations = report['Violations']
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
if report['Violations'] > 0:
perc = int(report['Violations']/report['Datapoints']*100)
else:
perc = 0
if perc < threshold:
string = 'below'
else:
string = 'above'
text = f"""The IOBottleneck rule checked when I/O wait time was above io_threshold of {io_threshold}%
and GPU utilization was below gpu_threshold of {gpu_threshold}. During initialization utilization is likely to be zero, so the rule skipped the first {patience} datapoints.
With this configuration the rule found {violations} I/O bottlenecks which is {perc}% of the total time. This is {string} the threshold of {threshold}%.
The rule analysed {datapoints} datapoints and triggered {triggered} times."""
paragraph = Paragraph(text=text, width=900)
show(paragraph)
if report:
plots = []
text = ""
if report['RuleTriggered'] > 0:
low_gpu = report['Details']['low_gpu_utilization']
cpu_bottleneck = {}
cpu_bottleneck["GPU usage above threshold"] = report["Datapoints"] - report["Details"]["low_gpu_utilization"]
cpu_bottleneck["GPU usage below threshold"] = report["Details"]["low_gpu_utilization"] - len(report["Details"])
cpu_bottleneck["Low GPU usage due to I/O bottlenecks"] = len(report["Details"]["bottlenecks"])
n_bottlenecks = round(len(report['Details']['bottlenecks'])/datapoints * 100, 2)
text = f"""The following chart (left) shows how many datapoints were below the gpu_threshold of {gpu_threshold}%
and how many of those datapoints were likely caused by an I/O bottleneck. The rule found {low_gpu} out of {datapoints} datapoints which had a GPU utilization
below {gpu_threshold}%. Out of those datapoints {n_bottlenecks}% were likely caused by I/O bottlenecks.
"""
plot = create_piechart(cpu_bottleneck,
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="Low GPU usage caused by I/O bottlenecks")
plots.append(plot)
if 'phase' in report['Details']:
text = f"""{text} The chart (in the middle) shows whether I/O bottlenecks mainly happened during the training or validation phase.
"""
plot = create_piechart(report['Details']['phase'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between the time spent on the TRAIN/EVAL phase")
plots.append(plot)
if 'forward_backward' in report['Details'] and len(report['Details']['forward_backward']) > 0:
event = max(report['Details']['forward_backward'], key=report['Details']['forward_backward'].get)
perc = report['Details']['forward_backward'][event]
text = f"""{text} The pie charts on the right shows a more detailed breakdown.
It shows that {int(perc)}% of the training time was spent on event "{event}"."""
plot = create_piechart(report['Details']['forward_backward'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="The ratio between forward and backward pass")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'ratio' in report['Details'] and len(report['Details']['ratio']) > 0:
key = list(report['Details']['ratio'].keys())[0]
ratio = report['Details']['ratio'][key]
text = f"""The following pie chart shows a breakdown of the CPU/GPU operators that happened
during I/O bottlenecks. It shows that {int(ratio)}% of the training time was spent on executing operators in "{key}"."""
plot = create_piechart(report['Details']['ratio'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="Ratio between CPU/GPU operators")
plots.append(plot)
if 'general' in report['Details'] and len(report['Details']['general']) > 0:
event = max(report['Details']['general'], key=report['Details']['general'].get)
perc = report['Details']['general'][event]
plot = create_piechart(report['Details']['general'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
plots.append(plot)
if len(plots) > 0:
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plots)))
plots = []
text = ""
if 'horovod' in report['Details'] and len(report['Details']['horovod']) > 0:
event = max(report['Details']['horovod'], key=report['Details']['horovod'].get)
perc = report['Details']['horovod'][event]
text = f"""The following pie chart shows a detailed breakdown of the Horovod metrics that have been
recorded when the I/O bottlenecks happened. The most expensive function was "{event}" with {int(perc)}%."""
plot = create_piechart(report['Details']['horovod'],
height=350,
width=600,
x1=0.2,
x2=0.6,
radius=0.3,
title="General metrics recorded in framework ")
paragraph = Paragraph(text=text, width=900)
show(column(paragraph, row(plot)))
if analyse_phase == "training":
display(Markdown("""### GPU memory\n\n"""))
report = load_report('GPUMemoryIncrease')
if report:
params = report['RuleParameters'].split('\n')
increase = float(params[0].split(':')[1])
patience = params[1].split(':')[1]
window = params[2].split(':')[1]
violations = report['Violations']
triggered = report['RuleTriggered']
datapoints = report['Datapoints']
text=Paragraph(text=f"""The GPUMemoryIncrease rule helps to detect large increase in memory usage on GPUs.
The rule checked if the moving average of memory increased by more than {increase}%.
So if the moving average increased for instance from 10% to {11+increase}%,
the rule would have triggered. During initialization utilization is likely 0, so the rule skipped the first {patience} datapoints.
The moving average was computed on a window size of {window} continuous datapoints. The rule detected {violations} violations
where the moving average between previous and current time window increased by more than {increase}%.
The rule analysed {datapoints} datapoints and triggered {triggered} times.""",
width=900)
show(text)
if len(report['Details']) > 0:
timestamp = us_since_epoch_to_human_readable_time(report['Details']['last_timestamp'])
date = datetime.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S:%f')
day = date.date().strftime("%m/%d/%Y")
hour = date.time().strftime("%H:%M:%S")
text = Paragraph(text=f"""Your training job triggered memory spikes.
The last time the GPUMemoryIncrease rule triggered in your training job was on {day} at {hour}.
The following boxplots are a snapshot from the timestamps. They show for each node and GPU the corresponding
memory utilization (without outliers).""", width=900)
show(text)
del report['Details']['last_timestamp']
for node_id in report['Details']:
plot = figure(plot_height=350,
plot_width=1000,
toolbar_location='right',
tools="hover,wheel_zoom,reset,pan",
title=f"Node {node_id}",
x_range=(0,17),
)
for index, key in enumerate(report['Details'][node_id]):
display(Markdown(f"""**Memory utilization of {key} on node {node_id}:**"""))
text = ""
gpu_max = report['Details'][node_id][key]['gpu_max']
text = f"""{text} The max memory utilization of {key} on node {node_id} was {gpu_max}%."""
p_95 = int(report['Details'][node_id][key]['p95'])
p_5 = report['Details'][node_id][key]['p05']
if p_95 < 50:
text = f"""{text} The 95th percentile was only {p_95}%."""
if p_5 < 5:
text = f"""{text} The 5th percentile was only {p_5}%."""
if p_95 - p_5 > 50:
text = f"""{text} The difference between 5th percentile {p_5}% and 95th percentile {p_95}% is quite
significant, which means that memory utilization on {key} is fluctuating quite a lot."""
text = Paragraph(text=f"""{text}""", width=900)
show(text)
upper = report['Details'][node_id][key]['upper']
lower = report['Details'][node_id][key]['lower']
p75 = report['Details'][node_id][key]['p75']
p25 = report['Details'][node_id][key]['p25']
p50 = report['Details'][node_id][key]['p50']
plot.segment(index+1, upper, index+1, p75, line_color="black")
plot.segment(index+1, lower, index+1, p25, line_color="black")
plot.vbar(index+1, 0.7, p50, p75, fill_color="#FDE725", line_color="black")
plot.vbar(index+1, 0.7, p25, p50, fill_color="#440154", line_color="black")
plot.rect(index+1, lower, 0.2, 0.01, line_color="black")
plot.rect(index+1, upper, 0.2, 0.01, line_color="black")
plot.xaxis.major_label_overrides[index+1] = key
plot.xgrid.grid_line_color = None
plot.ygrid.grid_line_color = "white"
plot.grid.grid_line_width = 0
plot.xaxis.major_label_text_font_size="10px"
plot.xaxis.ticker = np.arange(index+2)
plot.yaxis.axis_label = "Utilization in %"
show(plot)
```
|
github_jupyter
|
# Overview
This lab has been adapted from the angr [motivating example](https://github.com/angr/angr-doc/tree/master/examples/fauxware). It shows the basic lifecycle and capabilities of the angr framework.
Note this lab (and other notebooks running angr) should be run with the Python 3 kernel!
Look at fauxware.c! This is the source code for a "faux firmware" (@zardus really likes the puns) that's meant to be a simple representation of a firmware that can authenticate users but also has a backdoor - the backdoor is that anybody who provides the string "SOSNEAKY" as their password will be automatically authenticated.
```
# import the python system and angr libraries
import angr
import sys
# We can use this as a basic demonstration of using angr for symbolic execution.
# First, we load the binary into an angr project.
p = angr.Project('/home/pac/Desktop/lab7/fauxware/fauxware')
# Now, we want to construct a representation of symbolic program state.
# SimState objects are what angr manipulates when it symbolically executes
# binary code.
# The entry_state constructor generates a SimState that is a very generic
# representation of the possible program states at the program's entry
# point. There are more constructors, like blank_state, which constructs a
# "blank slate" state that specifies as little concrete data as possible,
# or full_init_state, which performs a slow and pedantic initialization of
# program state as it would execute through the dynamic loader.
state = p.factory.entry_state()
# Now, in order to manage the symbolic execution process from a very high
# level, we have a SimulationManager. SimulationManager is just collections
# of states with various tags attached with a number of convenient
# interfaces for managing them.
sm = p.factory.simulation_manager(state)
# Now, we begin execution. This will symbolically execute the program until
# we reach a branch statement for which both branches are satisfiable.
sm.run(until=lambda sm_: len(sm_.active) > 1)
# If you look at the C code, you see that the first "if" statement that the
# program can come across is comparing the result of the strcmp with the
# backdoor password. So, we have halted execution with two states, each of
# which has taken a different arm of that conditional branch. If you drop
# an IPython shell here and examine sm.active[n].solver.constraints
# you will see the encoding of the condition that was added to the state to
# constrain it to going down this path, instead of the other one. These are
# the constraints that will eventually be passed to our constraint solver
# (z3) to produce a set of concrete inputs satisfying them.
# As a matter of fact, we'll do that now.
input_0 = sm.active[0].posix.dumps(0)
input_1 = sm.active[1].posix.dumps(0)
# We have used a utility function on the state's posix plugin to perform a
# quick and dirty concretization of the content in file descriptor zero,
# stdin. One of these strings should contain the substring "SOSNEAKY"!
if b'SOSNEAKY' in input_0:
analysis_result = input_0
else:
analysis_result = input_1
print("Result: " + str(analysis_result))
with open("/home/pac/Desktop/lab7/fauxware/analysis_result", "wb") as file:
file.write(analysis_result)
# You should be able to run this script and pipe its output to fauxware and
# fauxware will authenticate you!
import os
command = "/home/pac/Desktop/lab7/fauxware/fauxware < /home/pac/Desktop/lab7/fauxware/analysis_result"
print(os.popen(command).read())
```
|
github_jupyter
|
```
pip install mlxtend --upgrade --no-deps
import mlxtend
print(mlxtend.__version__)
from google.colab import drive
drive.mount('/content/gdrive')
import cv2
import skimage
import keras
import tensorflow
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mlxtend.evaluate import confusion_matrix
from mlxtend.plotting import plot_confusion_matrix
from keras.models import Sequential, Model, load_model
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, concatenate, Concatenate#, Dropout
from keras import regularizers
from keras.callbacks import ModelCheckpoint
from sklearn.metrics import classification_report
from skimage import color
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
img_width, img_height = 224, 224
batch_size = 1
epochs = 100
train_samples = 7200
validation_samples = 2400
test_samples = 2400
train_data_dir = 'path to train data'
validation_data_dir = 'path to validation data'
test_data_dir = 'path to test data'
def scale0to255(image):
converted_image = image
min_1 = np.min(converted_image[:,:,0])
max_1 = np.max(converted_image[:,:,0])
converted_image[:,:,0] = np.round(((converted_image[:,:,0] - min_1) / (max_1 - min_1)) * 255)
min_2 = np.min(converted_image[:,:,1])
max_2 = np.max(converted_image[:,:,1])
converted_image[:,:,1] = np.round(((converted_image[:,:,1] - min_2) / (max_2 - min_2)) * 255)
min_3 = np.min(converted_image[:,:,2])
max_3 = np.max(converted_image[:,:,2])
converted_image[:,:,2] = np.round(((converted_image[:,:,2] - min_3) / (max_3 - min_3)) * 255)
return converted_image
def log(image):
gaus_image = cv2.GaussianBlur(image,(3,3),0)
laplacian_image = cv2.Laplacian(np.uint8(gaus_image), cv2.CV_64F)
sharp_image = np.uint8(image + laplacian_image)
return sharp_image
def lch_colorFunction(image):
log_image = log(image)
lab_image = skimage.color.rgb2lab(log_image)
lch_image = skimage.color.lab2lch(lab_image)
scale_lch_image = scale0to255(lch_image)
return scale_lch_image
def hsv_colorFunction(image):
log_image = log(image)
hsv_image = skimage.color.rgb2hsv(log_image)
np.nan_to_num(hsv_image, copy=False, nan=0.0, posinf=None, neginf=None)
scale_hsv_image = scale0to255(hsv_image)
return scale_hsv_image
datagen_rgb = ImageDataGenerator()
datagen_lch = ImageDataGenerator(preprocessing_function = lch_colorFunction)
datagen_hsv = ImageDataGenerator(preprocessing_function = hsv_colorFunction)
def myGenerator(gen1, gen2, gen3):
# combine the RGB, LCH and HSV generators: yield the three image batches as a
# multi-input list, with the labels taken from the first generator
while True:
xy1 = gen1.next()
xy2 = gen2.next()
xy3 = gen3.next()
yield ([xy1[0], xy2[0], xy3[0]], xy1[1])
train_generator_rgb = datagen_rgb.flow_from_directory(
train_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
train_generator_lch = datagen_lch.flow_from_directory(
train_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
train_generator_hsv = datagen_hsv.flow_from_directory(
train_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
train_generator = myGenerator(train_generator_rgb, train_generator_lch, train_generator_hsv)#
validation_generator_rgb = datagen_rgb.flow_from_directory(
validation_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
validation_generator_lch = datagen_lch.flow_from_directory(
validation_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
validation_generator_hsv = datagen_hsv.flow_from_directory(
validation_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size=batch_size,
shuffle=False,
class_mode='categorical')
validation_generator = myGenerator(validation_generator_rgb, validation_generator_lch, validation_generator_hsv)#
test_generator_rgb = datagen_rgb.flow_from_directory(
test_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size= 1,
shuffle=False,
class_mode='categorical')
test_generator_lch = datagen_lch.flow_from_directory(
test_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size= 1,
shuffle=False,
class_mode='categorical')
test_generator_hsv = datagen_hsv.flow_from_directory(
test_data_dir,
color_mode="rgb",
target_size=(img_width, img_height),
batch_size= 1,
shuffle=False,
class_mode='categorical')
test_generator = myGenerator(test_generator_rgb, test_generator_lch, test_generator_hsv)#
model = load_model('path to mceffnet2_model.h5')
model.summary()
inp = model.input
out =model.layers[-1].output
model2 = Model(inp, out)
model2.summary()
keras.utils.plot_model(model2, "model.png", show_shapes=True)
train_pred = model2.predict_generator(train_generator,train_samples, verbose=1)
train_pred.shape
train_target = train_generator_rgb.classes
train_target.shape
val_pred = model2.predict_generator(validation_generator,validation_samples, verbose=1)
val_pred.shape
val_target = validation_generator_rgb.classes
val_target.shape
test_pred = model2.predict_generator(test_generator,test_samples, verbose=1)
test_pred.shape
test_target = test_generator_rgb.classes
test_target.shape
X = np.append(train_pred, val_pred, axis=0)
X = np.append(X, test_pred, axis=0)
np.save("path to save mceffnet_features.npy", X)
X.shape
y = np.append(train_target, val_target, axis=0)
y = np.append(y, test_target, axis=0)
np.save("path to save labels.npy", y)
y.shape
list_fams = ['gan', 'graphics', 'real']
list_fams
pip install tsne
import numpy as np
from numpy.random import RandomState
np.random.seed(1)
from tsne import bh_sne
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
import os
import os.path
import glob
from keras.preprocessing import image
print("Running t-SNE ...")
vis_eff_data = bh_sne(np.float64(X), d=2, perplexity=30., theta=0.5, random_state=RandomState(1))
np.save("path to save mceffnet_tsne_features.npy", vis_eff_data)
vis_eff_data.shape
vis_eff_data = np.load("path to mceffnet_tsne_features.npy")
y = np.load("path to labels.npy")
print("Plotting t-SNE ...")
figure = plt.gcf()
figure.set_size_inches(20, 17)
plt.scatter(vis_eff_data[y.astype(int)==0, 0], vis_eff_data[y.astype(int)==0, 1], c='green', marker='o', edgecolors="black", label="GAN")
plt.scatter(vis_eff_data[y.astype(int)==1, 0], vis_eff_data[y.astype(int)==1, 1], c='white', marker='s', edgecolors="blue", label="Graphics")
plt.scatter(vis_eff_data[y.astype(int)==2, 0], vis_eff_data[y.astype(int)==2, 1], c='red', marker='D', edgecolors="pink", label="Real")
plt.clim(-0.5, len(list_fams)-0.5)
frame1 = plt.gca()
frame1.axes.xaxis.set_ticklabels([])
frame1.axes.yaxis.set_ticklabels([])
frame1.axes.get_xaxis().set_visible(False)
frame1.axes.get_yaxis().set_visible(False)
plt.legend(loc="upper right", prop={'size': 35})
#plt.savefig('TSNE_EfficientNet_features_visualization_color_size_20_17.jpg', format='jpg')
plt.show()
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D1_RealNeurons/W3D1_Tutorial2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Neuromatch Academy: Week 3, Day 1, Tutorial 2
# Real Neurons: Effects of Input Correlation
__Content creators:__ Qinglong Gu, Songtin Li, John Murray, Richard Naud, Arvind Kumar
__Content reviewers:__ Maryam Vaziri-Pashkam, Ella Batty, Lorenzo Fontolan, Richard Gao, Matthew Krause, Spiros Chavlis, Michael Waskom
---
# Tutorial Objectives
In this tutorial, we will use the leaky integrate-and-fire (LIF) neuron model (see Tutorial 1) to study how they transform input correlations to output properties (transfer of correlations). In particular, we are going to write a few lines of code to:
- inject correlated GWN in a pair of neurons
- measure correlations between the spiking activity of the two neurons
- study how the transfer of correlation depends on the statistics of the input, i.e. mean and standard deviation.
---
# Setup
```
# Import libraries
import matplotlib.pyplot as plt
import numpy as np
import time
# @title Figure Settings
import ipywidgets as widgets # interactive display
%config InlineBackend.figure_format = 'retina'
# use NMA plot style
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
my_layout = widgets.Layout()
# @title Helper functions
def default_pars(**kwargs):
pars = {}
### typical neuron parameters###
pars['V_th'] = -55. # spike threshold [mV]
pars['V_reset'] = -75. # reset potential [mV]
pars['tau_m'] = 10. # membrane time constant [ms]
pars['g_L'] = 10. # leak conductance [nS]
pars['V_init'] = -75. # initial potential [mV]
pars['V_L'] = -75. # leak reversal potential [mV]
pars['tref'] = 2. # refractory time (ms)
### simulation parameters ###
pars['T'] = 400. # Total duration of simulation [ms]
pars['dt'] = .1 # Simulation time step [ms]
### external parameters if any ###
for k in kwargs:
pars[k] = kwargs[k]
pars['range_t'] = np.arange(0, pars['T'], pars['dt']) # Vector of discretized
# time points [ms]
return pars
def run_LIF(pars, Iinj):
"""
Simulate the LIF dynamics with external input current
Args:
pars : parameter dictionary
Iinj : input current [pA]. The injected current here can be a value or an array
Returns:
rec_spikes : spike times
rec_v : membrane potential
"""
# Set parameters
V_th, V_reset = pars['V_th'], pars['V_reset']
tau_m, g_L = pars['tau_m'], pars['g_L']
V_init, V_L = pars['V_init'], pars['V_L']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
tref = pars['tref']
# Initialize voltage and current
v = np.zeros(Lt)
v[0] = V_init
Iinj = Iinj * np.ones(Lt)
tr = 0.
# simulate the LIF dynamics
rec_spikes = [] # record spike times
for it in range(Lt - 1):
if tr > 0:
v[it] = V_reset
tr = tr - 1
elif v[it] >= V_th: # reset voltage and record spike event
rec_spikes.append(it)
v[it] = V_reset
tr = tref / dt
# calculate the increment of the membrane potential
dv = (-(v[it] - V_L) + Iinj[it] / g_L) * (dt / tau_m)
# update the membrane potential
v[it + 1] = v[it] + dv
rec_spikes = np.array(rec_spikes) * dt
return v, rec_spikes
def my_GWN(pars, sig, myseed=False):
"""
Function that calculates Gaussian white noise inputs
Args:
pars : parameter dictionary
sig : noise amplitude (standard deviation)
myseed : random seed. int or boolean
the same seed will give the same random number sequence
Returns:
I : Gaussian white noise input
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Set random seed. You can fix the seed of the random number generator so
# that the results are reproducible; however, when you want to generate multiple
# realizations, make sure that you change the seed for each new realization
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# generate GWN; the sqrt(tau_m / dt) factor scales the noise so that the size of the
# membrane-potential fluctuations it produces does not depend on the time step dt
I_GWN = sig * np.random.randn(Lt) * np.sqrt(pars['tau_m'] / dt)
return I_GWN
def Poisson_generator(pars, rate, n, myseed=False):
"""
Generates poisson trains
Args:
pars : parameter dictionary
rate : rate of the Poisson train [Hz]
n : number of Poisson trains
myseed : random seed. int or boolean
Returns:
pre_spike_train : spike train matrix, ith row represents whether
there is a spike in ith spike train over time
(1 if spike, 0 otherwise)
"""
# Retrieve simulation parameters
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# set random seed
if myseed:
np.random.seed(seed=myseed)
else:
np.random.seed()
# generate uniformly distributed random variables
u_rand = np.random.rand(n, Lt)
# generate Poisson train
poisson_train = 1. * (u_rand < rate * (dt / 1000.))
return poisson_train
def example_plot_myCC():
pars = default_pars(T=50000, dt=.1)
c = np.arange(10) * 0.1
r12 = np.zeros(10)
for i in range(10):
I1gL, I2gL = correlate_input(pars, mu=20.0, sig=7.5, c=c[i])
r12[i] = my_CC(I1gL, I2gL)
plt.figure()
plt.plot(c, r12, 'bo', alpha=0.7, label='Simulation', zorder=2)
plt.plot([-0.05, 0.95], [-0.05, 0.95], 'k--', label='y=x',
dashes=(2, 2), zorder=1)
plt.xlabel('True CC')
plt.ylabel('Sample CC')
plt.legend(loc='best')
def LIF_output_cc(pars, mu, sig, c, bin_size, n_trials=20):
""" Simulates two LIF neurons with correlated input and computes output correlation
Args:
pars : parameter dictionary
mu : noise baseline (mean)
sig : noise amplitude (standard deviation)
c : correlation coefficient ~[0, 1]
bin_size : bin size used for time series
n_trials : total simulation trials
Returns:
r : output corr. coe.
sp_rate : spike rate
sp1 : spike times of neuron 1 in the last trial
sp2 : spike times of neuron 2 in the last trial
"""
r12 = np.zeros(n_trials)
sp_rate = np.zeros(n_trials)
for i_trial in range(n_trials):
I1gL, I2gL = correlate_input(pars, mu, sig, c)
_, sp1 = run_LIF(pars, pars['g_L'] * I1gL)
_, sp2 = run_LIF(pars, pars['g_L'] * I2gL)
my_bin = np.arange(0, pars['T'], bin_size)
sp1_count, _ = np.histogram(sp1, bins=my_bin)
sp2_count, _ = np.histogram(sp2, bins=my_bin)
r12[i_trial] = my_CC(sp1_count[::20], sp2_count[::20])
sp_rate[i_trial] = len(sp1) / pars['T'] * 1000.
return r12.mean(), sp_rate.mean(), sp1, sp2
def plot_c_r_LIF(c, r, mycolor, mylabel):
z = np.polyfit(c, r, deg=1)
c_range = np.array([c.min() - 0.05, c.max() + 0.05])
plt.plot(c, r, 'o', color=mycolor, alpha=0.7, label=mylabel, zorder=2)
plt.plot(c_range, z[0] * c_range + z[1], color=mycolor, zorder=1)
```
The helper function contains the:
- Parameter dictionary: `default_pars( **kwargs)`
- LIF simulator: `run_LIF`
- Gaussian white noise generator: `my_GWN(pars, sig, myseed=False)`
- Poisson type spike train generator: `Poisson_generator(pars, rate, n, myseed=False)`
- Two LIF neurons with correlated inputs simulator: `LIF_output_cc(pars, mu, sig, c, bin_size, n_trials=20)`
- Some additional plotting utilities
---
# Section 1: Correlations (Synchrony)
Correlation or synchrony in neuronal activity can be described for any readout of brain activity. Here, we are concerned with the spiking activity of neurons.
In the simplest way, correlation/synchrony refers to coincident spiking of neurons, i.e., when two neurons spike together, they are firing in **synchrony** or are **correlated**. Neurons can be synchronous in their instantaneous activity, i.e., they spike together with some probability. However, it is also possible that spiking of a neuron at time $t$ is correlated with the spikes of another neuron with a delay (time-delayed synchrony).
## Origin of synchronous neuronal activity:
- Common inputs, i.e., two neurons receive input from the same sources. The more correlated the shared inputs are, the more correlated the neurons' output will be.
- Pooling from the same sources. Neurons do not share the same input neurons but are receiving inputs from neurons which themselves are correlated.
- Neurons are connected to each other (uni- or bi-directionally): This will only give rise to time-delayed synchrony. Neurons could also be connected via gap-junctions.
- Neurons have similar parameters and initial conditions.
## Implications of synchrony
When neurons spike together, they can have a stronger impact on downstream neurons. Synapses in the brain are sensitive to the temporal correlations (i.e., delay) between pre- and postsynaptic activity, and this, in turn, can lead to the formation of functional neuronal networks - the basis of unsupervised learning (we will study some of these concepts in a forthcoming tutorial).
Synchrony implies a reduction in the dimensionality of the system. In addition, correlations, in many cases, can impair the decoding of neuronal activity.
```
# @title Video 1: Input & output correlations
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="nsAYFBcAkes", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
## How to study the emergence of correlations
A simple model to study the emergence of correlations is to inject common inputs to a pair of neurons and measure the output correlation as a function of the fraction of common inputs.
Here, we are going to investigate the transfer of correlations by computing the correlation coefficient of spike trains recorded from two unconnected LIF neurons, which received correlated inputs.
The input current to LIF neuron $i$ $(i=1,2)$ is:
\begin{equation}
\frac{I_i}{g_L} =\mu_i + \sigma_i (\sqrt{1-c}\xi_i + \sqrt{c}\xi_c) \quad (1)
\end{equation}
where $\mu_i$ is the temporal average of the current. The Gaussian white noise $\xi_i$ is independent for each neuron, while $\xi_c$ is common to all neurons. The variable $c$ ($0\le c\le1$) controls the fraction of common and independent inputs. $\sigma_i$ sets the standard deviation of the total input.
So, first, we will generate correlated inputs.
```
# @title
#@markdown Execute this cell to get a function for generating correlated GWN inputs
def correlate_input(pars, mu=20., sig=7.5, c=0.3):
"""
Args:
pars : parameter dictionary
mu : noise baseline (mean)
sig : noise amplitute (standard deviation)
c. : correlation coefficient ~[0, 1]
Returns:
I1gL, I2gL : two correlated inputs with corr. coe. c
"""
# generate Gaussian white noise xi_1, xi_2, xi_c
xi_1 = my_GWN(pars, sig)
xi_2 = my_GWN(pars, sig)
xi_c = my_GWN(pars, sig)
# Generate two correlated inputs by Equation. (1)
I1gL = mu + np.sqrt(1. - c) * xi_1 + np.sqrt(c) * xi_c
I2gL = mu + np.sqrt(1. - c) * xi_2 + np.sqrt(c) * xi_c
return I1gL, I2gL
print(help(correlate_input))
```
### Exercise 1: Compute the correlation
The _sample correlation coefficient_ between two input currents $I_i$ and $I_j$ is defined as the sample covariance of $I_i$ and $I_j$ divided by the square root of the sample variance of $I_i$ multiplied with the square root of the sample variance of $I_j$. In equation form:
\begin{align}
r_{ij} &= \frac{cov(I_i, I_j)}{\sqrt{var(I_i)} \sqrt{var(I_j)}}\\
cov(I_i, I_j) &= \sum_{k=1}^L (I_i^k -\bar{I}_i)(I_j^k -\bar{I}_j) \\
var(I_i) &= \sum_{k=1}^L (I_i^k -\bar{I}_i)^2
\end{align}
where $\bar{I}_i$ is the sample mean, k is the time bin, and L is the length of $I$. This means that $I_i^k$ is current i at time $k\cdot dt$. Note that the equations above are not accurate for sample covariances and variances as they should be additionally divided by L-1 - we have dropped this term because it cancels out in the sample correlation coefficient formula.
The _sample correlation coefficient_ may also be referred to as the _sample Pearson correlation coefficient_. Here is a beautiful paper that explains multiple ways to calculate and understand correlations: [Rodgers and Nicewander 1988](https://www.stat.berkeley.edu/~rabbee/correlation.pdf).
In this exercise, we will create a function, `my_CC` to compute the sample correlation coefficient between two time series. Note that while we introduced this computation here in the context of input currents, the sample correlation coefficient is used to compute the correlation between any two time series - we will use it later on binned spike trains.
```
def my_CC(i, j):
"""
Args:
i, j : two time series with the same length
Returns:
rij : correlation coefficient
"""
########################################################################
## TODO for students: compute rxy, then remove the NotImplementedError #
# Tip1: array([a1, a2, a3])*array([b1, b2, b3]) = array([a1*b1, a2*b2, a3*b3])
# Tip2: np.sum(array([a1, a2, a3])) = a1+a2+a3
# Tip3: square root, np.sqrt()
# Fill out function and remove
raise NotImplementedError("Student exercise: compute the sample correlation coefficient")
########################################################################
# Calculate the covariance of i and j
cov = ...
# Calculate the variance of i
var_i = ...
# Calculate the variance of j
var_j = ...
# Calculate the correlation coefficient
rij = ...
return rij
# Uncomment the line after completing the my_CC function
# example_plot_myCC()
# to_remove solution
def my_CC(i, j):
"""
Args:
i, j : two time series with the same length
Returns:
rij : correlation coefficient
"""
# Calculate the covariance of i and j
cov = ((i - i.mean()) * (j - j.mean())).sum()
# Calculate the variance of i
var_i = ((i - i.mean()) * (i - i.mean())).sum()
# Calculate the variance of j
var_j = ((j - j.mean()) * (j - j.mean())).sum()
# Calculate the correlation coefficient
rij = cov / np.sqrt(var_i*var_j)
return rij
with plt.xkcd():
example_plot_myCC()
```
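As a quick sanity check (not part of the original exercise), `my_CC` should agree with NumPy's built-in Pearson correlation. The snippet below assumes the cells above have been run, so that `default_pars`, `correlate_input`, and the completed `my_CC` are available.
```
# Sanity check: my_CC should agree with np.corrcoef on the same pair of inputs
pars_check = default_pars(T=10000, dt=.1)
I1gL, I2gL = correlate_input(pars_check, mu=20.0, sig=7.5, c=0.4)

r_ours = my_CC(I1gL, I2gL)
r_numpy = np.corrcoef(I1gL, I2gL)[0, 1]
print(f"my_CC: {r_ours:.4f}, np.corrcoef: {r_numpy:.4f}")
```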
### Exercise 2: Measure the correlation between spike trains
After recording the spike times of the two neurons, how can we estimate their correlation coefficient?
In order to find this, we need to bin the spike times and obtain two time series. Each data point in the time series is the number of spikes in the corresponding time bin. You can use `np.histogram()` to bin the spike times.
Complete the code below to bin the spike times and calculate the correlation coefficient for two Poisson spike trains. Note that `c` here is the ground-truth correlation coefficient that we define.
```
# @title
# @markdown Execute this cell to get a function for generating correlated Poisson inputs (generate_corr_Poisson)
def generate_corr_Poisson(pars, poi_rate, c, myseed=False):
"""
function to generate correlated Poisson type spike trains
Args:
pars : parameter dictionary
poi_rate : rate of the Poisson train
c. : correlation coefficient ~[0, 1]
Returns:
sp1, sp2 : two correlated spike time trains with corr. coe. c
"""
range_t = pars['range_t']
mother_rate = poi_rate / c
mother_spike_train = Poisson_generator(pars, rate=mother_rate,
n=1, myseed=myseed)[0]
sp_mother = range_t[mother_spike_train > 0]
L_sp_mother = len(sp_mother)
sp_mother_id = np.arange(L_sp_mother)
L_sp_corr = int(L_sp_mother * c)
np.random.shuffle(sp_mother_id)
sp1 = np.sort(sp_mother[sp_mother_id[:L_sp_corr]])
np.random.shuffle(sp_mother_id)
sp2 = np.sort(sp_mother[sp_mother_id[:L_sp_corr]])
return sp1, sp2
print(help(generate_corr_Poisson))
def corr_coeff_pairs(pars, rate, c, trials, bins):
"""
Calculate the correlation coefficient of two spike trains, for different
realizations
Args:
pars : parameter dictionary
rate : rate of poisson inputs
c : correlation coefficient ~ [0, 1]
trials : number of realizations
bins : vector with bins for time discretization
Returns:
r12 : correlation coefficient of a pair of inputs
"""
r12 = np.zeros(trials)
for i in range(trials):
##############################################################
## TODO for students: Use np.histogram to bin the spike time #
## e.g., sp1_count, _= np.histogram(...)
# Use my_CC() to compute the corr. coe. and compare it with c
# Note that you can run multiple realizations and compute r_12 for each trial
# with the function defined above. The average r_12 over trials should get close to c.
# Note: change seed to generate different input per trial
# Fill out function and remove
raise NotImplementedError("Student exercise: compute the correlation coefficient")
##############################################################
# Generate correlated Poisson inputs
sp1, sp2 = generate_corr_Poisson(pars, ..., ..., myseed=2020+i)
# Bin the spike times of the first input
sp1_count, _ = np.histogram(..., bins=...)
# Bin the spike times of the second input
sp2_count, _ = np.histogram(..., bins=...)
# Calculate the correlation coefficient
r12[i] = my_CC(..., ...)
return r12
poi_rate = 20.
c = 0.2 # set true correlation
pars = default_pars(T=10000)
# bin the spike time
bin_size = 20 # [ms]
my_bin = np.arange(0, pars['T'], bin_size)
n_trials = 100 # 100 realizations
# Uncomment to test your function
# r12 = corr_coeff_pairs(pars, rate=poi_rate, c=c, trials=n_trials, bins=my_bin)
# print(f'True corr coe = {c:.3f}')
# print(f'Simu corr coe = {r12.mean():.3f}')
```
Sample output
```
True corr coe = 0.200
Simu corr coe = 0.197
```
```
# to_remove solution
def corr_coeff_pairs(pars, rate, c, trials, bins):
"""
Calculate the correlation coefficient of two spike trains, for different
realizations
Args:
pars : parameter dictionary
rate : rate of poisson inputs
c : correlation coefficient ~ [0, 1]
trials : number of realizations
bins : vector with bins for time discretization
Returns:
r12 : correlation coefficient of a pair of inputs
"""
r12 = np.zeros(trials)
for i in range(trials):
# Generate correlated Poisson inputs
sp1, sp2 = generate_corr_Poisson(pars, rate, c, myseed=2020+i)
# Bin the spike times of the first input
sp1_count, _ = np.histogram(sp1, bins=bins)
# Bin the spike times of the second input
sp2_count, _ = np.histogram(sp2, bins=bins)
# Calculate the correlation coefficient
r12[i] = my_CC(sp1_count, sp2_count)
return r12
poi_rate = 20.
c = 0.2 # set true correlation
pars = default_pars(T=10000)
# bin the spike time
bin_size = 20 # [ms]
my_bin = np.arange(0, pars['T'], bin_size)
n_trials = 100 # 100 realizations
r12 = corr_coeff_pairs(pars, rate=poi_rate, c=c, trials=n_trials, bins=my_bin)
print(f'True corr coe = {c:.3f}')
print(f'Simu corr coe = {r12.mean():.3f}')
```
---
# Section 2: Investigate the effect of input correlation on the output correlation
Now let's combine the two procedures described above. We first generate correlated inputs using Equation (1). Then we inject the correlated inputs $I_1, I_2$ into a pair of neurons and record their output spike times. Finally, we measure the correlation between the output spike trains and investigate how it relates to the input correlation.
## Drive a neuron with correlated inputs and visualize its output
In the following, you will inject correlated GWN in two neurons. You need to define the mean (`gwn_mean`), standard deviation (`gwn_std`), and input correlations (`c_in`).
We will simulate $10$ trials to get a better estimate of the output correlation. Change the values in the following cell for the above variables (and then run the next cell) to explore how they impact the output correlation.
```
# Play around with these parameters
pars = default_pars(T=80000, dt=1.) # get the parameters
c_in = 0.3 # set input correlation value
gwn_mean = 10.
gwn_std = 10.
# @title
# @markdown Do not forget to execute this cell to simulate the LIF
bin_size = 10. # ms
starttime = time.perf_counter() # time clock
r12_ss, sp_ss, sp1, sp2 = LIF_output_cc(pars, mu=gwn_mean, sig=gwn_std, c=c_in,
bin_size=bin_size, n_trials=10)
# just the time counter
endtime = time.perf_counter()
timecost = (endtime - starttime) / 60.
print(f"Simulation time = {timecost:.2f} min")
print(f"Input correlation = {c_in}")
print(f"Output correlation = {r12_ss}")
plt.figure(figsize=(12, 6))
plt.plot(sp1, np.ones(len(sp1)) * 1, '|', ms=20, label='neuron 1')
plt.plot(sp2, np.ones(len(sp2)) * 1.1, '|', ms=20, label='neuron 2')
plt.xlabel('time (ms)')
plt.ylabel('neuron id.')
plt.xlim(1000, 8000)
plt.ylim(0.9, 1.2)
plt.legend()
plt.show()
```
## Think!
- Is the output correlation always smaller than the input correlation? If yes, why?
- Should there be a systematic relationship between input and output correlations?
You will explore these questions in the next figure but try to develop your own intuitions first!
Let's vary `c_in` and plot the relationship between `c_in` and the output correlation. This might take some time depending on the number of trials.
```
#@title
#@markdown Don't forget to execute this cell!
pars = default_pars(T=80000, dt=1.) # get the parameters
bin_size = 10.
c_in = np.arange(0, 1.0, 0.1) # set the range for input CC
r12_ss = np.zeros(len(c_in)) # small mu, small sigma
starttime = time.perf_counter() # time clock
for ic in range(len(c_in)):
r12_ss[ic], sp_ss, sp1, sp2 = LIF_output_cc(pars, mu=10.0, sig=10.,
c=c_in[ic], bin_size=bin_size,
n_trials=10)
endtime = time.perf_counter()
timecost = (endtime - starttime) / 60.
print(f"Simulation time = {timecost:.2f} min")
plt.figure(figsize=(7, 6))
plot_c_r_LIF(c_in, r12_ss, mycolor='b', mylabel='Output CC')
plt.plot([c_in.min() - 0.05, c_in.max() + 0.05],
[c_in.min() - 0.05, c_in.max() + 0.05],
'k--', dashes=(2, 2), label='y=x')
plt.xlabel('Input CC')
plt.ylabel('Output CC')
plt.legend(loc='best', fontsize=16)
plt.show()
# to_remove explanation
"""
Discussion: The results above show that
- output correlation is smaller than input correlation
- output correlation varies linearly as a function of input correlation.
While the general result holds, this relationship might change depending on the neuron type.
""";
```
---
# Section 3: Correlation transfer function
The above plot of input correlation vs. output correlation is called the __correlation transfer function__ of the neurons.
## Section 3.1: How do the mean and standard deviation of the GWN affect the correlation transfer function?
The correlation transfer function appears to be linear. The plot above can be taken as the input/output transfer function of LIF neurons for correlations, analogous to the transfer function for input/output firing rates discussed in the previous tutorial (i.e., the F-I curve).
What would you expect to happen to the slope of the correlation transfer function if you vary the mean and/or the standard deviation of the GWN?
```
#@markdown Execute this cell to visualize correlation transfer functions
pars = default_pars(T=80000, dt=1.) # get the parameters
no_trial = 10
bin_size = 10.
c_in = np.arange(0., 1., 0.2) # set the range for input CC
r12_ss = np.zeros(len(c_in)) # small mu, small sigma
r12_ls = np.zeros(len(c_in)) # large mu, small sigma
r12_sl = np.zeros(len(c_in)) # small mu, large sigma
starttime = time.perf_counter() # time clock
for ic in range(len(c_in)):
r12_ss[ic], sp_ss, sp1, sp2 = LIF_output_cc(pars, mu=10.0, sig=10.,
c=c_in[ic], bin_size=bin_size,
n_trials=no_trial)
r12_ls[ic], sp_ls, sp1, sp2 = LIF_output_cc(pars, mu=18.0, sig=10.,
c=c_in[ic], bin_size=bin_size,
n_trials=no_trial)
r12_sl[ic], sp_sl, sp1, sp2 = LIF_output_cc(pars, mu=10.0, sig=20.,
c=c_in[ic], bin_size=bin_size,
n_trials=no_trial)
endtime = time.perf_counter()
timecost = (endtime - starttime) / 60.
print(f"Simulation time = {timecost:.2f} min")
plt.figure(figsize=(7, 6))
plot_c_r_LIF(c_in, r12_ss, mycolor='b', mylabel=r'Small $\mu$, small $\sigma$')
plot_c_r_LIF(c_in, r12_ls, mycolor='y', mylabel=r'Large $\mu$, small $\sigma$')
plot_c_r_LIF(c_in, r12_sl, mycolor='r', mylabel=r'Small $\mu$, large $\sigma$')
plt.plot([c_in.min() - 0.05, c_in.max() + 0.05],
[c_in.min() - 0.05, c_in.max() + 0.05],
'k--', dashes=(2, 2), label='y=x')
plt.xlabel('Input CC')
plt.ylabel('Output CC')
plt.legend(loc='best', fontsize=14)
plt.show()
```
### Think!
Why do both the mean and the standard deviation of the GWN affect the slope of the correlation transfer function?
```
# to_remove explanation
"""
Discussion: This has to do with which part of the input current distribution
is transferred to the spiking activity. An intuitive understanding is difficult,
but the dependence arises from non-linearities in the neuron's F-I curve. When
the F-I curve is linear, the output correlation is independent of the mean and
standard deviation; however, the dependence appears even in neurons with a
threshold-linear F-I curve.
Please see:
De La Rocha J, Doiron B, Shea-Brown E, Josić K, Reyes A. Correlation between
neural spike trains increases with firing rate. Nature. 2007 Aug;448(7155):802-6.
""";
```
## Section 3.2: What is the rationale behind varying $\mu$ and $\sigma$?
The mean and the variance of the synaptic current depends on the spike rate of a Poisson process. We can use [Campbell's theorem](https://en.wikipedia.org/wiki/Campbell%27s_theorem_(probability)) to estimate the mean and the variance of the synaptic current:
\begin{align}
\mu_{\rm syn} &= \lambda J \int P(t) \, dt \\
\sigma_{\rm syn}^2 &= \lambda J^2 \int P(t)^2 \, dt
\end{align}
where $\lambda$ is the firing rate of the Poisson input, $J$ the amplitude of the postsynaptic current and $P(t)$ is the shape of the postsynaptic current as a function of time.
Therefore, when we varied $\mu$ and/or $\sigma$ of the GWN, we mimicked a change in the input firing rate. Note that, if we change the firing rate, both $\mu$ and $\sigma$ will change simultaneously, not independently.
Here, since we observe an effect of $\mu$ and $\sigma$ on correlation transfer, this implies that the input rate has an impact on the correlation transfer function.
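As a small numerical illustration (not from the tutorial), Campbell's theorem can be evaluated for an assumed exponential postsynaptic current shape; the rate, amplitude, and time constant below are arbitrary example values.
```
# Illustration of Campbell's theorem for an assumed exponential PSC shape
lam = 1000.        # input rate [Hz] (example value)
J = 0.1            # PSC amplitude [nA] (example value)
tau_syn = 0.002    # synaptic time constant [s] (example value)

dt_s = 1e-5                          # integration step [s]
t = np.arange(0, 0.05, dt_s)         # time axis [s]
P = np.exp(-t / tau_syn)             # assumed exponential PSC shape P(t)

mu_syn = lam * J * np.sum(P) * dt_s           # mean synaptic current
var_syn = lam * J**2 * np.sum(P**2) * dt_s    # variance of the synaptic current
print(f"mu_syn = {mu_syn:.3f} nA, sigma_syn = {np.sqrt(var_syn):.3f} nA")
```
Increasing the input rate $\lambda$ raises both the mean and the variance of the synaptic current at the same time, which is exactly the coupling between $\mu$ and $\sigma$ described above.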
### Think!
- What are the factors that would make output correlations smaller than input correlations? (Notice that the colored lines are below the black dashed line)
- What does it mean for the correlation in the network?
- Here we have studied the transfer of correlations by injecting GWN. But in the previous tutorial, we mentioned that GWN is unphysiological. Indeed, neurons receive colored noise (i.e., Shot noise or OU process). How do these results obtained from injection of GWN apply to the case where correlated spiking inputs are injected in the two LIFs? Will the results be the same or different?
References
- De La Rocha, Jaime, et al. "Correlation between neural spike trains increases with firing rate." Nature (2007) (https://www.nature.com/articles/nature06028/)
- Bujan AF, Aertsen A, Kumar A. Role of input correlations in shaping the variability and noise correlations of evoked activity in the neocortex. Journal of Neuroscience. 2015 Jun 3;35(22):8611-25. (https://www.jneurosci.org/content/35/22/8611)
```
# to_remove explanation
"""
Discussion:
1. Anything that reduces the mean or the variance of the input: the mean can be
reduced by inhibition, and sigma can be reduced by the membrane time constant.
Obviously, if the two neurons have different parameters, that will decorrelate them.
More importantly, it is the slope of the neuron's transfer function that affects the
output correlation.
2. These observations pose an interesting problem at the network level. If the
output correlation is smaller than the input correlation, then network activity
should eventually converge to zero correlation. But that does not happen, so
something is missing from this model for understanding the origin of synchrony
in a network.
3. For spike trains, we do not have explicit control over mu and sigma; these two
variables are tied to the firing rate of the inputs, so the results will be
qualitatively similar. But when we consider multiple spike inputs, two different
types of correlations arise (see Bujan et al. 2015 for more info).
""";
```
---
# Summary
In this tutorial, we studied how the input correlation of two LIF neurons is mapped to their output correlation. Specifically, we:
- injected correlated GWN in a pair of neurons,
- measured correlations between the spiking activity of the two neurons, and
- studied how the transfer of correlation depends on the statistics of the input, i.e., mean and standard deviation.
Here, we were concerned with zero time lag correlation. For this reason, we restricted estimation of correlation to instantaneous correlations. If you are interested in time-lagged correlation, then we should estimate the cross-correlogram of the spike trains and find out the dominant peak and area under the peak to get an estimate of output correlations.
We leave this as a future to-do for you if you are interested.
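If you want to try it, here is a minimal sketch (not part of the tutorial code): it reuses `default_pars`, `generate_corr_Poisson`, and `my_CC` from above, bins two correlated spike trains, and computes the correlation coefficient over a range of time lags.
```
# Sketch of a cross-correlogram between two binned spike trains (not tutorial code)
pars_cc = default_pars(T=20000, dt=.1)
spA, spB = generate_corr_Poisson(pars_cc, poi_rate=20., c=0.3)

edges = np.arange(0, pars_cc['T'], 10.)     # 10 ms bins
nA, _ = np.histogram(spA, bins=edges)
nB, _ = np.histogram(spB, bins=edges)

max_lag = 20                                # in bins, i.e. up to +/- 200 ms
lags = np.arange(-max_lag, max_lag + 1)
ccg = np.array([my_CC(nA[max_lag:-max_lag],
                      np.roll(nB, lag)[max_lag:-max_lag]) for lag in lags])

print("peak lag (bins):", lags[np.argmax(ccg)], " peak CC:", ccg.max())
```
For the instantaneous correlations used in this tutorial, the peak should sit at zero lag; time-delayed synchrony would show up as a peak at a non-zero lag.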
---
# Bonus 1: Example of a conductance-based LIF model
Above, we have written code to generate correlated Poisson spike trains. You can write code to stimulate the LIF neuron with such correlated spike trains and study the correlation transfer function for spiking input and compare it to the correlation transfer function obtained by injecting correlated GWNs.
```
# @title Function to simulate conductance-based LIF
def run_LIF_cond(pars, I_inj, pre_spike_train_ex, pre_spike_train_in):
"""
conductance-based LIF dynamics
Args:
pars : parameter dictionary
I_inj : injected current [pA]. The injected current here can
be a value or an array
pre_spike_train_ex : spike train input from presynaptic excitatory neuron
pre_spike_train_in : spike train input from presynaptic inhibitory neuron
Returns:
rec_spikes : spike times
rec_v : membrane potential
gE : postsynaptic excitatory conductance
gI : postsynaptic inhibitory conductance
"""
# Retrieve parameters
V_th, V_reset = pars['V_th'], pars['V_reset']
tau_m, g_L = pars['tau_m'], pars['g_L']
V_init, E_L = pars['V_init'], pars['E_L']
gE_bar, gI_bar = pars['gE_bar'], pars['gI_bar']
VE, VI = pars['VE'], pars['VI']
tau_syn_E, tau_syn_I = pars['tau_syn_E'], pars['tau_syn_I']
tref = pars['tref']
dt, range_t = pars['dt'], pars['range_t']
Lt = range_t.size
# Initialize
tr = 0.
v = np.zeros(Lt)
v[0] = V_init
gE = np.zeros(Lt)
gI = np.zeros(Lt)
Iinj = I_inj * np.ones(Lt) # ensure I has length Lt
if pre_spike_train_ex.max() == 0:
pre_spike_train_ex_total = np.zeros(Lt)
else:
pre_spike_train_ex_total = pre_spike_train_ex * np.ones(Lt)
if pre_spike_train_in.max() == 0:
pre_spike_train_in_total = np.zeros(Lt)
else:
pre_spike_train_in_total = pre_spike_train_in * np.ones(Lt)
# simulation
rec_spikes = [] # recording spike times
for it in range(Lt - 1):
if tr > 0:
v[it] = V_reset
tr = tr - 1
elif v[it] >= V_th: # reset voltage and record spike event
rec_spikes.append(it)
v[it] = V_reset
tr = tref / dt
# update the synaptic conductance
gE[it+1] = gE[it] - (dt / tau_syn_E) * gE[it] + gE_bar * pre_spike_train_ex_total[it + 1]
gI[it+1] = gI[it] - (dt / tau_syn_I) * gI[it] + gI_bar * pre_spike_train_in_total[it + 1]
# calculate the increment of the membrane potential
dv = (-(v[it] - E_L) - (gE[it + 1] / g_L) * (v[it] - VE) - \
(gI[it + 1] / g_L) * (v[it] - VI) + Iinj[it] / g_L) * (dt / tau_m)
# update membrane potential
v[it + 1] = v[it] + dv
rec_spikes = np.array(rec_spikes) * dt
return v, rec_spikes, gE, gI
print(help(run_LIF_cond))
```
## Interactive Demo: Correlated spike input to an LIF neuron
In the following you can explore what happens when the neurons receive correlated spiking input.
You can vary the correlation between excitatory input spike trains. For simplicity, the correlation between inhibitory spike trains is set to 0.01.
Vary both excitatory rate and correlation and see how the output correlation changes. Check if the results are qualitatively similar to what you observed previously when you varied the $\mu$ and $\sigma$.
```
# @title
# @markdown Make sure you execute this cell to enable the widget!
my_layout.width = '450px'
@widgets.interact(
pwc_ee=widgets.FloatSlider(0.3, min=0.05, max=0.99, step=0.01,
layout=my_layout),
exc_rate=widgets.FloatSlider(1e3, min=500., max=5e3, step=50.,
layout=my_layout),
inh_rate=widgets.FloatSlider(500., min=300., max=5e3, step=5.,
layout=my_layout),
)
def EI_isi_regularity(pwc_ee, exc_rate, inh_rate):
pars = default_pars(T=1000.)
# Add parameters
pars['V_th'] = -55. # spike threshold [mV]
pars['V_reset'] = -75. # reset potential [mV]
pars['tau_m'] = 10. # membrane time constant [ms]
pars['g_L'] = 10. # leak conductance [nS]
pars['V_init'] = -65. # initial potential [mV]
pars['E_L'] = -75. # leak reversal potential [mV]
pars['tref'] = 2. # refractory time (ms)
pars['gE_bar'] = 4.0 # [nS]
pars['VE'] = 0. # [mV] excitatory reversal potential
pars['tau_syn_E'] = 2. # [ms]
pars['gI_bar'] = 2.4 # [nS]
pars['VI'] = -80. # [mV] inhibitory reversal potential
pars['tau_syn_I'] = 5. # [ms]
my_bin = np.arange(0, pars['T']+pars['dt'], .1) # 0.1 [ms] bins (same as the simulation time step)
# exc_rate = 1e3
# inh_rate = 0.4e3
# pwc_ee = 0.3
pwc_ii = 0.01
# generate two correlated spike trains for excitatory input
sp1e, sp2e = generate_corr_Poisson(pars, exc_rate, pwc_ee)
sp1_spike_train_ex, _ = np.histogram(sp1e, bins=my_bin)
sp2_spike_train_ex, _ = np.histogram(sp2e, bins=my_bin)
# generate two uncorrelated spike trains for inhibitory input
sp1i, sp2i = generate_corr_Poisson(pars, inh_rate, pwc_ii)
sp1_spike_train_in, _ = np.histogram(sp1i, bins=my_bin)
sp2_spike_train_in, _ = np.histogram(sp2i, bins=my_bin)
v1, rec_spikes1, gE, gI = run_LIF_cond(pars, 0, sp1_spike_train_ex, sp1_spike_train_in)
v2, rec_spikes2, gE, gI = run_LIF_cond(pars, 0, sp2_spike_train_ex, sp2_spike_train_in)
# bin the spike time
bin_size = 20 # [ms]
my_bin = np.arange(0, pars['T'], bin_size)
spk_1, _ = np.histogram(rec_spikes1, bins=my_bin)
spk_2, _ = np.histogram(rec_spikes2, bins=my_bin)
r12 = my_CC(spk_1, spk_2)
print(f"Input correlation = {pwc_ee}")
print(f"Output correlation = {r12}")
plt.figure(figsize=(14, 7))
plt.subplot(211)
plt.plot(sp1e, np.ones(len(sp1e)) * 1, '|', ms=20,
label='Exc. input 1')
plt.plot(sp2e, np.ones(len(sp2e)) * 1.1, '|', ms=20,
label='Exc. input 2')
plt.plot(sp1i, np.ones(len(sp1i)) * 1.3, '|k', ms=20,
label='Inh. input 1')
plt.plot(sp2i, np.ones(len(sp2i)) * 1.4, '|k', ms=20,
label='Inh. input 2')
plt.ylim(0.9, 1.5)
plt.legend()
plt.ylabel('neuron id.')
plt.subplot(212)
plt.plot(pars['range_t'], v1, label='neuron 1')
plt.plot(pars['range_t'], v2, label='neuron 2')
plt.xlabel('time (ms)')
plt.ylabel('membrane voltage $V_{m}$')
plt.tight_layout()
plt.show()
```
Above, we are estimating the output correlation for one trial. You can modify the code to get a trial average of output correlations.
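A minimal sketch of such a trial average is below (not part of the tutorial): it reuses `default_pars`, `generate_corr_Poisson`, `run_LIF_cond`, and `my_CC` from above, and simply repeats the body of `EI_isi_regularity` with the widget's default parameter values.
```
# Sketch: average the output correlation over several independent realizations
# of the correlated spiking input (parameter values mirror the widget defaults)
def trial_averaged_output_cc(pwc_ee=0.3, exc_rate=1e3, inh_rate=500., n_rep=10):
    r_trials = np.zeros(n_rep)
    for rep in range(n_rep):
        pars = default_pars(T=1000.)
        pars['V_th'], pars['V_reset'] = -55., -75.
        pars['tau_m'], pars['g_L'] = 10., 10.
        pars['V_init'], pars['E_L'], pars['tref'] = -65., -75., 2.
        pars['gE_bar'], pars['VE'], pars['tau_syn_E'] = 4.0, 0., 2.
        pars['gI_bar'], pars['VI'], pars['tau_syn_I'] = 2.4, -80., 5.
        fine_bin = np.arange(0, pars['T'] + pars['dt'], .1)

        # fresh correlated excitatory and weakly correlated inhibitory inputs
        sp1e, sp2e = generate_corr_Poisson(pars, exc_rate, pwc_ee)
        sp1i, sp2i = generate_corr_Poisson(pars, inh_rate, 0.01)
        ex1, _ = np.histogram(sp1e, bins=fine_bin)
        ex2, _ = np.histogram(sp2e, bins=fine_bin)
        in1, _ = np.histogram(sp1i, bins=fine_bin)
        in2, _ = np.histogram(sp2i, bins=fine_bin)

        _, spikes1, _, _ = run_LIF_cond(pars, 0, ex1, in1)
        _, spikes2, _, _ = run_LIF_cond(pars, 0, ex2, in2)

        coarse_bin = np.arange(0, pars['T'], 20)
        c1, _ = np.histogram(spikes1, bins=coarse_bin)
        c2, _ = np.histogram(spikes2, bins=coarse_bin)
        r_trials[rep] = my_CC(c1, c2)
    return r_trials.mean()

print(f"Trial-averaged output correlation: {trial_averaged_output_cc():.3f}")
```
Averaging over realizations reduces the variance of the estimated output correlation, which is quite noisy for a single one-second trial.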
---
# Bonus 2: Ensemble Response
Finally, there is a short BONUS lecture video on the firing response of an ensemble of neurons to time-varying input. There are no associated coding exercises - just enjoy.
```
#@title Video 2 (Bonus): Response of ensemble of neurons to time-varying input
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="78_dWa4VOIo", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
|
github_jupyter
|
# Initialize a game
```
from ConnectN import ConnectN
game_setting = {'size':(6,6), 'N':4, 'pie_rule':True}
game = ConnectN(**game_setting)
% matplotlib notebook
from Play import Play
gameplay=Play(ConnectN(**game_setting),
player1=None,
player2=None)
```
# Define our policy
Please go ahead and define your own policy! See if you can train it in under 1000 games, using only 1000 steps of exploration in each move.
```
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import *
import numpy as np
from ConnectN import ConnectN
game_setting = {'size':(6,6), 'N':4}
game = ConnectN(**game_setting)
class Policy(nn.Module):
def __init__(self, game):
super(Policy, self).__init__()
# input = 6x6 board
# convert to 5x5x16
self.conv1 = nn.Conv2d(1, 16, kernel_size=2, stride=1, bias=False)
# 5x5x16 to 3x3x32
self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, bias=False)
self.size=3*3*32
# the part for actions
self.fc_action1 = nn.Linear(self.size, self.size//4)
self.fc_action2 = nn.Linear(self.size//4, 36)
# the part for the value function
self.fc_value1 = nn.Linear(self.size, self.size//6)
self.fc_value2 = nn.Linear(self.size//6, 1)
self.tanh_value = nn.Tanh()
def forward(self, x):
y = F.leaky_relu(self.conv1(x))
y = F.leaky_relu(self.conv2(y))
y = y.view(-1, self.size)
# action head
a = self.fc_action2(F.leaky_relu(self.fc_action1(y)))
avail = (torch.abs(x.squeeze())!=1).type(torch.FloatTensor)
avail = avail.view(-1, 36)
maxa = torch.max(a)
exp = avail*torch.exp(a-maxa)
prob = exp/torch.sum(exp)
# value head
value = self.tanh_value(self.fc_value2(F.leaky_relu( self.fc_value1(y) )))
return prob.view(6,6), value
policy = Policy(game)
```
# Define a MCTS player for Play
```
import MCTS
from copy import copy
def Policy_Player_MCTS(game):
mytree = MCTS.Node(copy(game))
for _ in range(1000):
mytree.explore(policy)
mytreenext, (v, nn_v, p, nn_p) = mytree.next(temperature=0.1)
return mytreenext.game.last_move
import random
def Random_Player(game):
return random.choice(game.available_moves())
```
# Play a game against a random policy
```
% matplotlib notebook
from Play import Play
gameplay=Play(ConnectN(**game_setting),
player1=Policy_Player_MCTS,
player2=None)
```
# Training
```
# initialize our alphazero agent and optimizer
import torch.optim as optim
game=ConnectN(**game_setting)
policy = Policy(game)
optimizer = optim.Adam(policy.parameters(), lr=.01, weight_decay=1.e-5)
! pip install progressbar
```
Beware, training is **VERY VERY** slow!!
```
# train our agent
from collections import deque
import MCTS
# try a higher number
episodes = 2000
import progressbar as pb
widget = ['training loop: ', pb.Percentage(), ' ',
pb.Bar(), ' ', pb.ETA() ]
timer = pb.ProgressBar(widgets=widget, maxval=episodes).start()
outcomes = []
policy_loss = []
Nmax = 1000
for e in range(episodes):
mytree = MCTS.Node(game)
logterm = []
vterm = []
while mytree.outcome is None:
for _ in range(Nmax):
mytree.explore(policy)
if mytree.N >= Nmax:
break
current_player = mytree.game.player
mytree, (v, nn_v, p, nn_p) = mytree.next()
mytree.detach_mother()
loglist = torch.log(nn_p)*p
constant = torch.where(p>0, p*torch.log(p),torch.tensor(0.))
logterm.append(-torch.sum(loglist-constant))
vterm.append(nn_v*current_player)
# we compute the "policy_loss" (AlphaZero-style) for the gradient step:
# squared error between the predicted values and the final game outcome, plus
# the KL divergence between the MCTS visit probabilities p and the network policy nn_p
outcome = mytree.outcome
outcomes.append(outcome)
loss = torch.sum( (torch.stack(vterm)-outcome)**2 + torch.stack(logterm) )
optimizer.zero_grad()
loss.backward()
policy_loss.append(float(loss))
optimizer.step()
if e%10==0:
print("game: ",e+1, ", mean loss: {:3.2f}".format(np.mean(policy_loss[-20:])),
", recent outcomes: ", outcomes[-10:])
if e%500==0:
torch.save(policy,'6-6-4-pie-{:d}.mypolicy'.format(e))
del loss
timer.update(e+1)
timer.finish()
```
# setup environment to pit your AI against the challenge policy '6-6-4-pie.policy'
```
challenge_policy = torch.load('6-6-4-pie.policy')
def Challenge_Player_MCTS(game):
mytree = MCTS.Node(copy(game))
for _ in range(1000):
mytree.explore(challenge_policy)
mytreenext, (v, nn_v, p, nn_p) = mytree.next(temperature=0.1)
return mytreenext.game.last_move
```
# Let the game begin!
```
% matplotlib notebook
gameplay=Play(ConnectN(**game_setting),
player2=Policy_Player_MCTS,
player1=Challenge_Player_MCTS)
```
|
github_jupyter
|
# Cleaning Your Data
Let's take a web access log, and figure out the most-viewed pages on a website from it! Sounds easy, right?
Let's set up a regex that lets us parse an Apache access log line:
```
import re
format_pat= re.compile(
r"(?P<host>[\d\.]+)\s"
r"(?P<identity>\S*)\s"
r"(?P<user>\S*)\s"
r"\[(?P<time>.*?)\]\s"
r'"(?P<request>.*?)"\s'
r"(?P<status>\d+)\s"
r"(?P<bytes>\S*)\s"
r'"(?P<referer>.*?)"\s'
r'"(?P<user_agent>.*?)"\s*'
)
```
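As a quick aside (not in the original walkthrough), you can check the pattern on a single line before running it over the whole file; the sample line below is the xmlrpc.php entry quoted later in this notebook.
```
# Quick check of the pattern on one log line; each field lands in a named group
sample = ('46.166.139.20 - - [05/Dec/2015:05:19:35 +0000] "POST /xmlrpc.php HTTP/1.0" '
          '200 370 "-" "Mozilla/4.0 (compatible: MSIE 7.0; Windows NT 6.0)"')
m = format_pat.match(sample)
if m:
    print(m.groupdict()['request'])   # -> POST /xmlrpc.php HTTP/1.0
```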
Here's the path to the log file I'm analyzing:
```
logPath = "access_log.txt"
```
Now we'll whip up a little script to extract the URL in each access, and use a dictionary to count up the number of times each one appears. Then we'll sort it and print out the top 20 pages. What could go wrong?
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
request = access['request']
(action, URL, protocol) = request.split()
if URL in URLCounts:
URLCounts[URL] = URLCounts[URL] + 1
else:
URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
print(result + ": " + str(URLCounts[result]))
```
Hm. The 'request' part of the line is supposed to look something like this:
GET /blog/ HTTP/1.1
There should be an HTTP action, the URL, and the protocol. But it seems that's not always happening. Let's print out requests that don't contain three items:
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
request = access['request']
fields = request.split()
if (len(fields) != 3):
print(fields)
```
Huh. In addition to empty fields, there's one that just contains garbage. Well, let's modify our script to check for that case:
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
request = access['request']
fields = request.split()
if (len(fields) == 3):
URL = fields[1]
if URL in URLCounts:
URLCounts[URL] = URLCounts[URL] + 1
else:
URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
print(result + ": " + str(URLCounts[result]))
```
It worked! But, the results don't really make sense. What we really want is pages accessed by real humans looking for news from our little news site. What the heck is xmlrpc.php? A look at the log itself turns up a lot of entries like this:
46.166.139.20 - - [05/Dec/2015:05:19:35 +0000] "POST /xmlrpc.php HTTP/1.0" 200 370 "-" "Mozilla/4.0 (compatible: MSIE 7.0; Windows NT 6.0)"
I'm not entirely sure what the script does, but it points out that we're not just processing GET actions. We don't want POSTS, so let's filter those out:
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
request = access['request']
fields = request.split()
if (len(fields) == 3):
(action, URL, protocol) = fields
if (action == 'GET'):
if URL in URLCounts:
URLCounts[URL] = URLCounts[URL] + 1
else:
URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
print(result + ": " + str(URLCounts[result]))
```
That's starting to look better. But, this is a news site - are people really reading the little blog on it instead of news pages? That doesn't make sense. Let's look at a typical /blog/ entry in the log:
54.165.199.171 - - [05/Dec/2015:09:32:05 +0000] "GET /blog/ HTTP/1.0" 200 31670 "-" "-"
Hm. Why is the user agent blank? Seems like some sort of malicious scraper or something. Let's figure out what user agents we are dealing with:
```
UserAgents = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
agent = access['user_agent']
if agent in UserAgents:
UserAgents[agent] = UserAgents[agent] + 1
else:
UserAgents[agent] = 1
results = sorted(UserAgents, key=lambda i: int(UserAgents[i]), reverse=True)
for result in results:
print(result + ": " + str(UserAgents[result]))
```
Yikes! In addition to '-', there are also a million different web robots accessing the site and polluting my data. Filtering out all of them is really hard, but getting rid of the ones significantly polluting my data in this case should be a matter of getting rid of '-', anything containing "bot" or "spider", and W3 Total Cache.
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
agent = access['user_agent']
if (not('bot' in agent or 'spider' in agent or
'Bot' in agent or 'Spider' in agent or
'W3 Total Cache' in agent or agent =='-')):
request = access['request']
fields = request.split()
if (len(fields) == 3):
(action, URL, protocol) = fields
if (action == 'GET'):
if URL in URLCounts:
URLCounts[URL] = URLCounts[URL] + 1
else:
URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
print(result + ": " + str(URLCounts[result]))
```
Now, our new problem is that we're getting a bunch of hits on things that aren't web pages. We're not interested in those, so let's filter out any URL that doesn't end in / (all of the pages on my site are accessed in that manner - again this is applying knowledge about my data to the analysis!)
```
URLCounts = {}
with open(logPath, "r") as f:
for line in (l.rstrip() for l in f):
match= format_pat.match(line)
if match:
access = match.groupdict()
agent = access['user_agent']
if (not('bot' in agent or 'spider' in agent or
'Bot' in agent or 'Spider' in agent or
'W3 Total Cache' in agent or agent =='-')):
request = access['request']
fields = request.split()
if (len(fields) == 3):
(action, URL, protocol) = fields
if (URL.endswith("/")):
if (action == 'GET'):
if URL in URLCounts:
URLCounts[URL] = URLCounts[URL] + 1
else:
URLCounts[URL] = 1
results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)
for result in results[:20]:
print(result + ": " + str(URLCounts[result]))
```
This is starting to look more believable! But if you were to dig even deeper, you'd find that the /feed/ pages are suspect, and some robots are still slipping through. However, it is accurate to say that Orlando news, world news, and comics are the most popular pages accessed by a real human on this day.
The moral of the story is - know your data! And always question and scrutinize your results before making decisions based on them. If your business makes a bad decision because you provided an analysis of bad source data, you could get into real trouble.
Be sure the decisions you make while cleaning your data are justifiable too - don't strip out data just because it doesn't support the results you want!
## Activity
These results still aren't perfect; URL's that include "feed" aren't actually pages viewed by humans. Modify this code further to strip out URL's that include "/feed". Even better, extract some log entries for these pages and understand where these views are coming from.
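One possible starting point (a sketch, not the only answer) is to add a "/feed" check to the final script's URL filter:
```
URLCounts = {}

with open(logPath, "r") as f:
    for line in (l.rstrip() for l in f):
        match = format_pat.match(line)
        if match:
            access = match.groupdict()
            agent = access['user_agent']
            if (not('bot' in agent or 'spider' in agent or
                    'Bot' in agent or 'Spider' in agent or
                    'W3 Total Cache' in agent or agent == '-')):
                request = access['request']
                fields = request.split()
                if (len(fields) == 3):
                    (action, URL, protocol) = fields
                    # new condition: skip RSS/Atom feed URL's as well
                    if (action == 'GET' and URL.endswith("/") and '/feed' not in URL):
                        if URL in URLCounts:
                            URLCounts[URL] = URLCounts[URL] + 1
                        else:
                            URLCounts[URL] = 1

results = sorted(URLCounts, key=lambda i: int(URLCounts[i]), reverse=True)

for result in results[:20]:
    print(result + ": " + str(URLCounts[result]))
```
To understand where the feed views come from, print the user agents and referers for just those lines before deciding whether to drop them.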
|
github_jupyter
|
# CORDIS FP7
```
import json
import re
import urllib
from titlecase import titlecase
import pandas as pd
pd.set_option('display.max_columns', 50)
```
## Read in Data
```
all_projects = pd.read_excel('input/fp7/cordis-fp7projects.xlsx')
all_projects.shape
all_organizations = pd.read_excel('input/fp7/cordis-fp7organizations.xlsx')
all_organizations.shape
all_briefs = pd.read_excel('input/fp7/cordis-fp7briefs.xlsx')
all_briefs.shape
```
## Count Organisations and Countries
It is useful to know the total number of organisations and the number of countries involved, to deal with cases where the contribution of each organisation is unknown.
```
all_organizations[['projectRcn', 'id', 'country']].count()
[
all_organizations.country.isna().sum(),
(all_organizations.country[~all_organizations.country.isna()] !=
all_organizations.country[~all_organizations.country.isna()].str.strip()).sum(),
(all_organizations.country[~all_organizations.country.isna()] !=
all_organizations.country[~all_organizations.country.isna()].str.upper()).sum(),
]
project_num_organizations = all_organizations.groupby('projectRcn').\
id.nunique().reset_index().rename(columns={'id': 'num_organizations'})
project_num_organizations.shape
project_num_countries = all_organizations.groupby('projectRcn').\
country.nunique().reset_index().rename(columns={'country': 'num_countries'})
project_num_countries.shape
project_num_organizations_and_countries = pd.merge(
project_num_countries, project_num_organizations,
on='projectRcn', validate='1:1'
)
project_num_organizations_and_countries.shape
project_num_organizations_and_countries.head()
```
## Restrict to UK
We are only interested in projects and organizations where the coordinator or at least one participant institution is in the UK.
```
uk_organizations = all_organizations[all_organizations.country == 'UK']
uk_organizations.shape
uk_organizations.head()
uk_projects = all_projects[all_projects.id.isin(uk_organizations.projectID)]
uk_projects.shape
uk_projects.head()
uk_briefs = all_briefs[all_briefs.projectRcn.isin(uk_projects.rcn)]
uk_briefs.shape
uk_briefs.head()
```
## Examples
### Coordinator outside UK
The UK has two participant institutions. It appears that `projects.ecMaxContribution` is the sum of all `organizations.ecContribution`s for all coordinator and participant institutions.
```
uk_projects[uk_projects.rcn == 101244]
uk_organizations[uk_organizations.projectRcn == 101244]
all_organizations[all_organizations.projectRcn == 101244].ecContribution.max()
all_organizations[all_organizations.projectRcn == 101244].ecContribution.sum()
all_briefs[all_briefs.projectRcn == 101244]
```
### Coordinator in UK
This one is also interesting in that it seems to have a lot of duplicate records that don't have titles, for some reason. We will need to filter those out.
```
uk_projects[uk_projects.rcn == 99464]
uk_organizations[uk_organizations.projectRcn == 99464]
uk_organizations[uk_organizations.projectRcn == 99464].ecContribution.unique().sum()
all_briefs[all_briefs.projectRcn == 99464]
```
## Duplicate Projects
It looks like it's safe to just drop projects without titles; those seem to be the only duplicates.
```
[uk_projects.rcn.nunique(), uk_projects.id.nunique(), uk_projects.shape]
uk_projects[uk_projects.duplicated('rcn', keep=False)]
uk_projects[pd.isnull(uk_projects.title)]
clean_projects = uk_projects[~pd.isnull(uk_projects.title)].copy()
# Could include coordinator and participants... would need some extra cleaning.
clean_projects.drop([
'id', 'programme', 'topics', 'frameworkProgramme', 'call',
'fundingScheme', 'coordinator', 'participants', 'subjects'
], axis=1, inplace=True)
clean_projects.rename(columns={
'startDate': 'start_date',
'endDate': 'end_date',
'projectUrl': 'project_url',
'totalCost': 'total_cost_eur',
'ecMaxContribution': 'max_contribution_eur',
'coordinatorCountry': 'coordinator_country',
'participantCountries': 'participant_countries'
}, inplace=True)
clean_projects.shape
clean_projects.describe()
clean_projects.head()
```
## Check Project Columns
```
clean_projects.count()
```
### Acronym
Just missing one.
```
clean_projects[clean_projects.acronym.isna()]
```
### Status
Some projects are listed as cancelled. It's not clear what this means exactly. Spot checks reveal that some of them apparently received at least partial funding and delivered some results, so it does not seem appropriate to remove them altogether.
- https://cordis.europa.eu/result/rcn/237795_en.html (TORTELLEX)
- https://cordis.europa.eu/result/rcn/196663_en.html (YSCHILLER)
- https://cordis.europa.eu/project/rcn/188111_en.html (MICARTREGEN) - no results
```
clean_projects.status.value_counts()
clean_projects[clean_projects.status == 'CAN'].head()
```
### Title
```
(clean_projects.title.str.strip() != clean_projects.title).sum()
```
### Start and End Dates
Some are missing. Discard for now. There is some overlap with the cancelled projects, but it is not exact.
```
(clean_projects.start_date.isna() | clean_projects.end_date.isna()).sum()
((clean_projects.status == 'CAN') & (clean_projects.start_date.isna() | clean_projects.end_date.isna())).sum()
((clean_projects.status != 'CAN') & (clean_projects.start_date.isna() | clean_projects.end_date.isna())).sum()
# keep only projects that have both a start and an end date
clean_projects = clean_projects[
    ~(clean_projects.start_date.isna() | clean_projects.end_date.isna())
]
clean_projects.shape
(clean_projects.start_date > clean_projects.end_date).sum()
```
### Project URL
Looks pretty clean.
```
(~clean_projects.project_url.isna()).sum()
def is_valid_url(url):
result = urllib.parse.urlparse(str(url))
return bool((result.scheme == 'http' or result.scheme == 'https') and result.netloc)
project_url_bad = ~clean_projects.project_url.isna() & ~clean_projects.project_url.apply(is_valid_url)
project_url_bad.sum()
clean_projects[project_url_bad]
clean_projects.loc[project_url_bad, 'project_url'] = 'http://' + clean_projects.loc[project_url_bad, 'project_url']
(~clean_projects.project_url.isna() & ~clean_projects.project_url.apply(is_valid_url)).sum()
```
### Objective
```
(clean_projects.objective.str.strip() != clean_projects.objective).sum()
clean_projects.objective = clean_projects.objective.str.strip()
```
### Total Cost and EC Max Contribution
```
clean_projects.total_cost_eur.describe()
clean_projects.max_contribution_eur.describe()
(clean_projects.max_contribution_eur > clean_projects.total_cost_eur).sum()
```
## Clean Up Organizations
I notice several issues:
- Some are missing IDs (but do have postcodes)
- Some are missing postcodes
- Some postcodes are clearly typo'd (digit substitutions, etc);
- Some postcodes have been terminated (searched for them with google)
There are only 2993 unique organization IDs, so this is probably the result of a join.
For now, drop all organizations that don't have both an ID and a valid postcode. (It does look possible to match names to find IDs, and many without postcodes still have addresses, which we could geocode.)
It would be interesting to try this: https://codereview.stackexchange.com/questions/117801/uk-postcode-validation-and-format-correction-tool; a rough format check along those lines is sketched below.
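As an illustration of the idea (not the full tool from that link), a simple format check might look like this sketch. The regex is a simplified approximation of the UK postcode format rather than an authoritative validator:
```
import re

# simplified approximation of the UK "outward code + space + inward code" format
UK_POSTCODE_RE = re.compile(r'^[A-Z]{1,2}[0-9][A-Z0-9]? [0-9][A-Z]{2}$')

def looks_like_uk_postcode(postcode):
    # normalise the same way as the cleaning step below
    normalized = re.sub(r'[^A-Z0-9]', '', str(postcode).upper())
    normalized = re.sub(r'^(\S+)([0-9][A-Z]{2})$', r'\1 \2', normalized)
    return bool(UK_POSTCODE_RE.match(normalized))

# e.g. looks_like_uk_postcode(' eh8-9yl ') -> True
```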
```
[
uk_organizations.shape,
uk_organizations.id.notna().sum(),
uk_organizations.id.isna().sum(),
uk_organizations.id[uk_organizations.id.notna()].nunique(),
uk_organizations.postCode.isna().sum(),
uk_organizations.postCode[uk_organizations.postCode.notna()].nunique()
]
organizations = uk_organizations[uk_organizations.id.notna() & uk_organizations.postCode.notna()].copy()
organizations.id = organizations.id.astype('int64')
organizations.postCode = organizations.postCode.astype('str')
[
organizations.shape,
organizations.id.nunique(),
organizations.postCode.nunique()
]
ukpostcodes = pd.read_csv('../postcodes/input/ukpostcodes.csv.gz')
ukpostcodes.shape
organizations.postCode.isin(ukpostcodes.postcode).sum()
organizations['cleanPostcode'] = organizations.postCode.\
str.upper().\
str.strip().\
str.replace(r'[^A-Z0-9]', '').\
str.replace(r'^(\S+)([0-9][A-Z]{2})$', r'\1 \2')
organizations.cleanPostcode.isin(ukpostcodes.postcode).sum()
organizations.cleanPostcode[~organizations.cleanPostcode.isin(ukpostcodes.postcode)].unique()
organizations = organizations[organizations.cleanPostcode.isin(ukpostcodes.postcode)]
organizations.shape
clean_projects = clean_projects[clean_projects.rcn.isin(organizations.projectRcn)]
clean_projects.shape
```
## Clean Up Duplicate Organizations
I think there is also a join on the contacts, because we get multiple rows for some project-organization pairs. The main thing is that we want the `ecContribution` to be consistent. Otherwise, any row will do.
```
organizations.sort_values(['projectRcn', 'id']).\
groupby(['projectRcn', 'id']).\
filter(lambda x: x.shape[0] > 1)
organizations.groupby(['projectRcn', 'id']).\
filter(lambda x: x.ecContribution.nunique() > 1).shape
clean_organizations = organizations.groupby(['projectRcn', 'id']).first()
clean_organizations.reset_index(inplace=True)
clean_organizations.drop([
'projectID', 'projectAcronym', 'shortName', 'activityType', 'endOfParticipation',
'country', 'street', 'city', 'postCode',
'contactType', 'contactTitle', 'contactFirstNames', 'contactLastNames',
'contactFunction', 'contactTelephoneNumber', 'contactFaxNumber', 'contactEmail'
], axis=1, inplace=True)
clean_organizations.rename({
'projectRcn': 'project_rcn',
'id': 'organization_id',
'ecContribution': 'contribution_eur',
'organizationUrl': 'organization_url',
'cleanPostcode': 'postcode'
}, axis=1, inplace=True)
clean_organizations.name = clean_organizations.name.apply(titlecase)
clean_organizations.shape
clean_organizations.head()
```
## Check Organisations
```
clean_organizations.count()
```
### Role
```
clean_organizations.role.value_counts()
```
### Name
```
(clean_organizations.name.str.strip() != clean_organizations.name).sum()
```
### Contribution EUR
Missing for some organisations.
```
clean_organizations.contribution_eur.describe()
clean_organizations.contribution_eur.isna().sum()
```
### Organisation URL
Mostly clean. Found a couple with a `;` delimiting two URLs, neither of which resolved, so we can get rid of those.
```
(~clean_organizations.organization_url.isna()).sum()
organization_url_bad = ~clean_organizations.organization_url.isna() & \
~clean_organizations.organization_url.apply(is_valid_url)
organization_url_bad.sum()
clean_organizations.loc[organization_url_bad, 'organization_url'] = \
'http://' + clean_organizations.loc[organization_url_bad, 'organization_url']
organization_url_bad = ~clean_organizations.organization_url.isna() & \
~clean_organizations.organization_url.apply(is_valid_url)
organization_url_bad.sum()
clean_organizations[
~clean_organizations.organization_url.isna() & \
clean_organizations.organization_url.str.match('http.*http')].organization_url.unique()
clean_organizations.loc[
~clean_organizations.organization_url.isna() & \
clean_organizations.organization_url.str.match('http.*http'), 'organization_url'] = float('nan')
```
## Briefs
Might as well merge these into the projects where we have them. We have a few duplicates to take care of.
```
clean_briefs = uk_briefs[
uk_briefs.projectRcn.isin(clean_projects.rcn) &\
(uk_briefs.title.notna() | uk_briefs.teaser.notna() | uk_briefs.article.notna())
].copy()
clean_briefs.shape
clean_briefs[clean_briefs.projectRcn.duplicated(keep=False)]
clean_briefs = clean_briefs.sort_values('lastUpdateDate')
clean_briefs = clean_briefs[~clean_briefs.projectRcn.duplicated(keep='last')]
clean_briefs.shape
clean_briefs.drop([
'rcn', 'language', 'lastUpdateDate', 'country', 'projectAcronym',
'programme', 'topics', 'relatedReportRcn'
], axis=1, inplace=True)
clean_briefs.rename({
'projectRcn': 'rcn',
'title': 'brief_title',
'relatedReportTitle': 'related_report_title',
'imageUri': 'image_path'
}, axis=1, inplace=True)
clean_briefs.head()
clean_projects_with_briefs = pd.merge(
clean_projects, clean_briefs, on='rcn', how='left', validate='1:1'
)
clean_projects_with_briefs.head()
```
## Checks
```
clean_organizations[clean_organizations.project_rcn == 101244]
clean_projects_with_briefs[clean_projects_with_briefs.rcn == 101244]
clean_organizations[clean_organizations.project_rcn == 99464]
clean_projects_with_briefs[clean_projects_with_briefs.rcn == 99464]
project_organizations = pd.merge(
clean_projects_with_briefs, clean_organizations,
left_on='rcn', right_on='project_rcn', validate='1:m')
project_organizations.drop(['project_rcn'], axis=1, inplace=True)
project_organizations.shape
project_organizations.head()
uk_contributions = project_organizations.groupby('rcn').aggregate({'contribution_eur': sum})
uk_contributions.reset_index(inplace=True)
uk_contributions.head()
project_uk_contributions = pd.merge(
clean_projects_with_briefs,
uk_contributions,
on='rcn', validate='1:1')
project_uk_contributions.head()
project_uk_contributions[project_uk_contributions.contribution_eur > project_uk_contributions.max_contribution_eur + 0.1].shape
project_organization_uk_contributions = pd.merge(
project_uk_contributions, clean_organizations,
left_on='rcn', right_on='project_rcn', validate='1:m'
)
project_organization_uk_contributions = pd.merge(
project_organization_uk_contributions, ukpostcodes, on='postcode', validate='m:1'
)
project_organization_uk_contributions.shape
project_organization_uk_contributions.head()
(project_uk_contributions.contribution_eur < 1000).value_counts()
```
### Add Numbers of Organisations and Countries
Add these back on and do a sanity check against the `participant_countries` field. They mostly match up, except for a few relatively small discrepancies.
```
clean_projects_with_briefs.shape
clean_projects_with_briefs = pd.merge(
clean_projects_with_briefs, project_num_organizations_and_countries,
left_on='rcn', right_on='projectRcn', validate='1:1')
clean_projects_with_briefs.drop('projectRcn', axis=1, inplace=True)
clean_projects_with_briefs.shape
clean_projects_with_briefs.head()
[
clean_projects_with_briefs.num_countries.isna().sum(),
clean_projects_with_briefs.coordinator_country.isna().sum(),
clean_projects_with_briefs.participant_countries.isna().sum()
]
def check_num_countries():
ccs = clean_projects_with_briefs.coordinator_country
pcs = clean_projects_with_briefs.participant_countries
ncs = clean_projects_with_briefs.num_countries
pcs_isna = pcs.isna()
coordinator_mismatch = clean_projects_with_briefs[pcs_isna][ncs[pcs_isna] != 1].copy()
coordinator_mismatch['check'] = 1
cs = ccs[~pcs_isna] + ';' + pcs[~pcs_isna]
check_ncs = cs.apply(lambda x: len(set(x.split(';'))))
participant_mismatch = clean_projects_with_briefs[~pcs_isna][ncs[~pcs_isna] != check_ncs].copy()
participant_mismatch['check'] = check_ncs
return pd.concat([coordinator_mismatch, participant_mismatch])\
[['rcn', 'coordinator_country', 'participant_countries', 'num_countries', 'check', 'num_organizations']]
check_num_countries()
all_organizations.country[all_organizations.projectRcn == 100467].unique()
all_organizations.country[all_organizations.projectRcn == 203681].unique()
all_organizations.country[all_organizations.projectRcn == 90982].unique()
```
I suspect a problem with handling of `NA`; that is a valid code (Namibia), but maybe in some cases it is being used for Not Available.
### Convert to GBP
```
eur_gbp = pd.read_pickle('../exchange_rates/output/exchange_rates.pkl.gz')
eur_gbp.tail()
def find_average_eur_gbp_rate(row):
# create timeseries from start to end
days = pd.date_range(row.start_date, row.end_date, closed='left')
daily = pd.DataFrame({
'month_start': days,
'weight': 1.0 / days.shape[0]
})
monthly = daily.resample('MS', on='month_start').sum()
monthly = pd.merge(monthly, eur_gbp, on='month_start', validate='1:1')
return (monthly.weight * monthly.rate).sum()
clean_projects_with_briefs['eur_gbp'] = \
clean_projects_with_briefs.apply(
find_average_eur_gbp_rate, axis=1, result_type='reduce')
clean_projects_with_briefs.head()
```
## Save Data
```
clean_projects_with_briefs.to_pickle('output/fp7_projects.pkl.gz')
clean_organizations.to_pickle('output/fp7_organizations.pkl.gz')
```
|
github_jupyter
|
# Step 2: Building GTFS graphs and merging it with a walking graph
We heavily follow Kuan Butts's Calculating Betweenness Centrality with GTFS blog post: https://gist.github.com/kuanb/c54d0ae7ee353cac3d56371d3491cf56
### The peartree (https://github.com/kuanb/peartree) source code was modified. Until code is merged you should use code from this fork: https://github.com/d3netxer/peartree
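One way to get that fork onto your path (a sketch that assumes you work from a clone of the repository, mirroring the `sys.path.append` call used below):
```
# clone the fork and point Python at the repository root
!git clone https://github.com/d3netxer/peartree.git
import sys
sys.path.append("peartree")  # the repository root contains the peartree package
```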
```
%load_ext autoreload
%autoreload 2
import matplotlib.pyplot as plt
%matplotlib inline
import osmnx as ox
import pandas as pd
import geopandas as gpd
import networkx as nx
import numpy as np
from shapely.geometry import Point
import partridge as ptg
import os, sys
sys.path.append(r"C:\repos\peartree")
import peartree as pt
print(pt.__file__)
path = r'input_folder/cap_haitien_gtfs.zip'
```
### Build a graph from service_0001
service_0001 runs on weekends, so below we are choosing a date that falls on a weekend
```
# from: http://simplistic.me/playing-with-gtfs.html
import datetime
service_ids_by_date = ptg.read_service_ids_by_date(path)
service_ids = service_ids_by_date[datetime.date(2019, 6, 29)]
print(f"service_ids is {service_ids}")
# view lets you filter before you load the feed. For example, below you are filtering by the service_ids
feed_0001 = ptg.load_feed(path, view={
'trips.txt': {
'service_id': service_ids,
},
})
feed_0001.calendar
```
### give all trips a direction of 0
PearTree wants directions assigned
```
feed_0001.trips['direction_id'] = 0
```
### Preview the GTFS network
```
# Set a target time period to summarize impedance
start = 0 # 0:00
end = 24*60*60 # 24:00
# Converts feed subset into a directed
# network multigraph
G = pt.load_feed_as_graph(feed_0001, start, end, add_trips_per_edge=True)
fig, ax = ox.plot_graph(G,
figsize=(12,12),
show=False,
close=False,
node_color='#8aedfc',
node_size=5,
edge_color='#e2dede',
edge_alpha=0.25,
bgcolor='black')
# peartree prepends the stop ids with a code that is different each time it loads a graph
list(G.edges)
#list(G.edges(data='True'))
len(G.nodes)
```
### Inspect edge data, and you should see the length attribute, which is the time in seconds needed to traverse an edge. The trips attribute represents how many trips cross that edge.
```
for edge in list(G.edges):
print(G.get_edge_data(edge[0],edge[1]))
```
### get feed 2
```
service_ids_by_date = ptg.read_service_ids_by_date(path)
service_ids = service_ids_by_date[datetime.date(2019,8,6)]
print(f"service_ids is {service_ids}")
# view lets you filter before you load the feed. For example, below you are filtering by the service_ids
feed_0002 = ptg.load_feed(path, view={
'trips.txt': {
'service_id': service_ids,
},
})
```
### Inspect graph as a shapefile
Used for testing
```
# Get reference to GOSTNets
#sys.path.append(r'C:\repos\GOSTnets')
#import GOSTnets as gn
#gn.save(G,"gtfs_export_cap_haitien_service0001",r"temp")
#gn.save(G,"gtfs_export_cap_haitien_service0002",r"temp")
# Also these saved edges will be used in the optional PostProcessing notebook to compare differences between the two graphs
```
Note: on inspection, the edges have a length field. This length field is the average traversal time per edge, in seconds, based on the GTFS data.
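As a quick sanity check of those values (a small sketch, assuming the GTFS graph `G` built above is still in memory), you can summarise the per-edge traversal times:
```
edge_times = [data['length'] for _, _, data in G.edges(data=True)]
print(f"number of edges: {len(edge_times)}")
print(f"mean traversal time: {np.mean(edge_times):.1f} seconds")
print(f"max traversal time: {np.max(edge_times):.1f} seconds")
```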
## Merge a walk network
following this blog post: http://kuanbutts.com/2018/12/24/peartree-with-walk-network/
```
# load existing walk/ferry graph from step 1
G = nx.read_gpickle(r"temp\cap_haitien_walk_w_ferries_via_osmnx_origins_adv_snap.pickle")
#G = nx.read_gpickle(r"temp\cap_haitien_walk_w_ferries_via_osmnx_salted.pickle")
print(nx.info(G))
list(G.edges(data=True))[:10]
```
### Assign traversal times in seconds to edges
Since peartree represents edge length (that is, the impedance value associated with the edge) in seconds, we will need to convert the edge values that are in meters into seconds:
```
walk_speed = 3.5 #km per hour; about 3 miles per hour
ferry_speed = 15
# Make a copy of the graph in case we make a mistake
G_adj = G.copy()
# Iterate through and convert lengths to seconds
for from_node, to_node, edge in G_adj.edges(data=True):
orig_len = edge['length']
# Note that this is a MultiDiGraph so there could
# be multiple indices here, I naively assume this is not the case
G_adj[from_node][to_node][0]['orig_length'] = orig_len
try:
# if ferry
if 'ferry' in G_adj[from_node][to_node][0]:
ferry_var = G_adj[from_node][to_node][0]['ferry']
# if ferry does not have nan as a value
# if it is a string then it will produce an error and go to the except statement
# print('print ferry_var')
# print(ferry_var)
# print(type(ferry_var))
# print(np.isnan(ferry_var))
if not np.isnan(ferry_var):
print(G_adj[from_node][to_node][0]['ferry'])
print(G_adj[from_node][to_node][0])
                # Conversion from meters into seconds of travel at ferry speed
kmph = (orig_len / 1000) / ferry_speed
in_seconds = kmph * 60 * 60
G_adj[from_node][to_node][0]['length'] = in_seconds
# And state the mode, too
G_adj[from_node][to_node][0]['mode'] = 'ferry'
else:
# Conversion of walk speed and into seconds from meters
kmph = (orig_len / 1000) / walk_speed
in_seconds = kmph * 60 * 60
G_adj[from_node][to_node][0]['length'] = in_seconds
# And state the mode, too
G_adj[from_node][to_node][0]['mode'] = 'walk'
except:
# Conversion of walk speed and into seconds from meters
kmph = (orig_len / 1000) / walk_speed
in_seconds = kmph * 60 * 60
G_adj[from_node][to_node][0]['length'] = in_seconds
# And state the mode, too
G_adj[from_node][to_node][0]['mode'] = 'walk'
G_adj.nodes[330530920]
G_adj.nodes[6770195160]
# So this should be easy - just go through all nodes
# and make them have a 0 cost to board
for i, node in G_adj.nodes(data=True):
G_adj.nodes[i]['boarding_cost'] = 0
# testing
list(G_adj.edges(data=True))[1]
```
### save the graph again to be used for the isochrones notebook
```
sys.path.append(r'C:\repos\GOSTnets')
import GOSTnets as gn
gn.save(G_adj, "cap_haitien_walk_w_ferries_via_osmnx_w_time_adv_snap", r"temp")  # save the time-converted graph G_adj
```
## Loading the feeds as graphs with the walking graph as the existing graph
Now that the two graphs have the same internal structures, we can load the walk network onto the transit network with the following peartree helper method.
```
# Now that we have a formatted walk network
# it should be easy to reload the peartree graph
# and stack it on the walk network
start = 0 # 0:00
end = 24*60*60 # 24:00
feeds = {'service0001':feed_0001,'service0002':feed_0002}
#feeds = {'service0002':feed_0002}
for feed in feeds.items():
G_adj_copy = G_adj.copy()
# Note this will be a little slow - an optimization here would be
# to have coalesced the walk network
%time G = pt.load_feed_as_graph(feed[1], start, end, existing_graph=G_adj_copy, impute_walk_transfers=True, add_trips_per_edge=True)
# compatible with NetworkX 2.4
list_of_subgraphs = list(G.subgraph(c).copy() for c in nx.weakly_connected_components(G))
max_graph = None
max_edges = 0
for i in list_of_subgraphs:
if i.number_of_edges() > max_edges:
max_edges = i.number_of_edges()
max_graph = i
# set your graph equal to the largest sub-graph
G = max_graph
# save again and inspect
gn.save(G,f"gtfs_export_cap_haitien_merged_impute_walk_adv_snap_{feed[0]}",r"temp")
#gn.save(G,f"gtfs_export_cap_haitien_merged_impute_walk_salted_{feed[0]}",r"temp")
```
## Visualize the last merged feed in the loop
```
G.graph['crs'] = 'epsg:4326'
G.graph
G.nodes[6770195160]
fig, ax = ox.plot_graph(G,
figsize=(12,12),
show=False,
close=False,
node_color='#8aedfc',
node_size=5,
edge_color='#e2dede',
edge_alpha=0.25,
bgcolor='black')
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/mengwangk/dl-projects/blob/master/04_02_auto_ml_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Automated ML
```
COLAB = True
if COLAB:
!sudo apt-get install git-lfs && git lfs install
!rm -rf dl-projects
!git clone https://github.com/mengwangk/dl-projects
#!cd dl-projects && ls -l --block-size=M
if COLAB:
!cp dl-projects/utils* .
!cp dl-projects/preprocess* .
%reload_ext autoreload
%autoreload 2
%matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import scipy.stats as ss
import math
import matplotlib
from scipy import stats
from collections import Counter
from pathlib import Path
plt.style.use('fivethirtyeight')
sns.set(style="ticks")
# Automated feature engineering
import featuretools as ft
# Machine learning
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.impute import SimpleImputer
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, precision_recall_curve, roc_curve
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.ensemble import RandomForestClassifier
from IPython.display import display
from utils import *
from preprocess import *
# The Answer to the Ultimate Question of Life, the Universe, and Everything.
np.random.seed(42)
%aimport
```
## Preparation
```
if COLAB:
from google.colab import drive
drive.mount('/content/gdrive')
GDRIVE_DATASET_FOLDER = Path('gdrive/My Drive/datasets/')
if COLAB:
DATASET_PATH = GDRIVE_DATASET_FOLDER
ORIGIN_DATASET_PATH = Path('dl-projects/datasets')
else:
DATASET_PATH = Path("datasets")
ORIGIN_DATASET_PATH = Path('datasets')
DATASET = DATASET_PATH/"feature_matrix.csv"
ORIGIN_DATASET = ORIGIN_DATASET_PATH/'4D.zip'
if COLAB:
!ls -l gdrive/"My Drive"/datasets/ --block-size=M
!ls -l dl-projects/datasets --block-size=M
data = pd.read_csv(DATASET, header=0, sep=',', quotechar='"', parse_dates=['time'])
origin_data = format_tabular(ORIGIN_DATASET)
data.info()
```
## Preliminary Modeling
```
feature_matrix = data
feature_matrix.columns
feature_matrix.head(4).T
origin_data[origin_data['LuckyNo']==0].head(10)
# feature_matrix.drop(columns=['MODE(Results.PrizeType)_1stPrizeNo',
# 'MODE(Results.PrizeType)_2ndPrizeNo',
# 'MODE(Results.PrizeType)_3rdPrizeNo',
# 'MODE(Results.PrizeType)_ConsolationNo1',
# 'MODE(Results.PrizeType)_ConsolationNo10',
# 'MODE(Results.PrizeType)_ConsolationNo2',
# 'MODE(Results.PrizeType)_ConsolationNo3',
# 'MODE(Results.PrizeType)_ConsolationNo4',
# 'MODE(Results.PrizeType)_ConsolationNo5',
# 'MODE(Results.PrizeType)_ConsolationNo6',
# 'MODE(Results.PrizeType)_ConsolationNo7',
# 'MODE(Results.PrizeType)_ConsolationNo8',
# 'MODE(Results.PrizeType)_ConsolationNo9',
# 'MODE(Results.PrizeType)_SpecialNo1',
# 'MODE(Results.PrizeType)_SpecialNo10',
# 'MODE(Results.PrizeType)_SpecialNo2',
# 'MODE(Results.PrizeType)_SpecialNo3',
# 'MODE(Results.PrizeType)_SpecialNo4',
# 'MODE(Results.PrizeType)_SpecialNo5',
# 'MODE(Results.PrizeType)_SpecialNo6',
# 'MODE(Results.PrizeType)_SpecialNo7',
# 'MODE(Results.PrizeType)_SpecialNo8',
# 'MODE(Results.PrizeType)_SpecialNo9'], inplace=True)
feature_matrix.groupby('time')['COUNT(Results)'].mean().plot()
plt.title('Average Monthly Count of Results')
plt.ylabel('Strike Per Number')
```
## Correlations
```
feature_matrix.shape
corrs = feature_matrix.corr().sort_values('TotalStrike')
corrs['TotalStrike'].head()
corrs['TotalStrike'].dropna().tail()
```
### Random Forest
```
model = RandomForestClassifier(n_estimators = 1000,
random_state = 50,
n_jobs = -1)
def predict_dt(dt, feature_matrix, return_probs = False):
feature_matrix['date'] = feature_matrix['time']
# Subset labels
test_labels = feature_matrix.loc[feature_matrix['date'] == dt, 'Label']
train_labels = feature_matrix.loc[feature_matrix['date'] < dt, 'Label']
print(f"Size of test labels {len(test_labels)}")
print(f"Size of train labels {len(train_labels)}")
# Features
X_train = feature_matrix[feature_matrix['date'] < dt].drop(columns = ['NumberId', 'time',
'date', 'Label', 'TotalStrike', 'month', 'year'])
X_test = feature_matrix[feature_matrix['date'] == dt].drop(columns = ['NumberId', 'time',
'date', 'Label', 'TotalStrike', 'month', 'year'])
print(f"Size of X train {len(X_train)}")
print(f"Size of X test {len(X_test)}")
feature_names = list(X_train.columns)
# Impute and scale features
pipeline = Pipeline([('imputer', SimpleImputer(strategy = 'median')),
('scaler', MinMaxScaler())])
# Fit and transform training data
X_train = pipeline.fit_transform(X_train)
X_test = pipeline.transform(X_test)
# Labels
y_train = np.array(train_labels).reshape((-1, ))
y_test = np.array(test_labels).reshape((-1, ))
print('Training on {} observations.'.format(len(X_train)))
print('Testing on {} observations.\n'.format(len(X_test)))
# Train
model.fit(X_train, y_train)
# Make predictions
predictions = model.predict(X_test)
probs = model.predict_proba(X_test)[:, 1]
# Calculate metrics
p = precision_score(y_test, predictions)
r = recall_score(y_test, predictions)
f = f1_score(y_test, predictions)
auc = roc_auc_score(y_test, probs)
print(f'Precision: {round(p, 5)}')
print(f'Recall: {round(r, 5)}')
print(f'F1 Score: {round(f, 5)}')
print(f'ROC AUC: {round(auc, 5)}')
# Feature importances
fi = pd.DataFrame({'feature': feature_names, 'importance': model.feature_importances_})
if return_probs:
return fi, probs
return fi
# All the months
len(feature_matrix['time'].unique()), feature_matrix['time'].unique()
june_2019 = predict_dt(pd.Timestamp(2019, 6, 1), feature_matrix)
from utils import plot_feature_importances
norm_june_fi = plot_feature_importances(june_2019)
```
## Comparison to Baseline
|
github_jupyter
|
# Print Compact Transitivity Tables
```
import qualreas as qr
import os
import json
path = os.path.join(os.getenv('PYPROJ'), 'qualreas')
```
## Algebras from Original Files
## Algebras from Compact Files
```
alg = qr.Algebra(os.path.join(path, "Algebras/Misc/Linear_Interval_Algebra.json"))
alg.summary()
alg.check_composition_identity()
alg.is_associative()
algX = qr.Algebra(os.path.join(path, "Algebras/Misc/Extended_Linear_Interval_Algebra.json"))
algX.summary()
algX.check_composition_identity()
algX.is_associative()
algR = qr.Algebra(os.path.join(path, "Algebras/Misc/Right_Branching_Interval_Algebra.json"))
algR.summary()
algR.check_composition_identity()
algR.is_associative()
algL = qr.Algebra(os.path.join(path, "Algebras/Misc/Left_Branching_Interval_Algebra.json"))
algL.summary()
algL.check_composition_identity()
algL.is_associative()
rcc8 = qr.Algebra(os.path.join(path, "Algebras/Misc/RCC8_Algebra.json"))
rcc8.summary()
rcc8.check_composition_identity()
rcc8.is_associative()
ptalg = qr.Algebra(os.path.join(path, "Algebras/Misc/Linear_Point_Algebra.json"))
ptalg.summary()
ptalg.check_composition_identity()
ptalg.is_associative()
ptalgR = qr.Algebra(os.path.join(path, "Algebras/Misc/Right_Branching_Point_Algebra.json"))
ptalgR.summary()
ptalgR.check_composition_identity()
ptalgR.is_associative()
ptalgL = qr.Algebra(os.path.join(path, "Algebras/Misc/Left_Branching_Point_Algebra.json"))
ptalgL.summary()
ptalgL.check_composition_identity()
ptalgL.is_associative()
```
## Print Compact Tables
The following function definition was added, as a method, to the definition of an Algebra.
```
def print_compact_transitivity_table(alg):
num_elements = len(alg.elements)
print(" \"TransTable\": {")
outer_count = num_elements # Used to avoid printing last comma in outer list
for rel1 in alg.transitivity_table:
outer_count -= 1
print(f" \"{rel1}\": {{")
inner_count = num_elements # Used to avoid printing last comma in inner list
for rel2 in alg.transitivity_table[rel1]:
inner_count -= 1
if inner_count > 0:
print(f" \"{rel2}\": \"{alg.transitivity_table[rel1][rel2]}\",")
else:
print(f" \"{rel2}\": \"{alg.transitivity_table[rel1][rel2]}\"")
if outer_count > 0:
print(f" }},")
else:
print(f" }}")
print(" }")
```
### Linear Point Algebra
```
print_compact_transitivity_table(ptalg)
```
### Right-Branching Point Algebra
```
print_compact_transitivity_table(ptalgR)
```
### Left-Branching Point Algebra
```
print_compact_transitivity_table(ptalgL)
```
### Linear Interval Algebra
```
print_compact_transitivity_table(alg)
```
### Extended Linear Interval Algebra
```
print_compact_transitivity_table(algX)
```
### Right-Branching Linear Interval Algebra
```
print_compact_transitivity_table(algR)
```
### Left-Branching Linear Interval Algebra
```
print_compact_transitivity_table(algL)
```
### Region Connection Calculus 8
```
print_compact_transitivity_table(rcc8)
```
|
github_jupyter
|
# Logistic Regression With Linear Boundary Demo
> ☝Before moving on with this demo you might want to take a look at:
> - 📗[Math behind the Logistic Regression](https://github.com/trekhleb/homemade-machine-learning/tree/master/homemade/logistic_regression)
> - ⚙️[Logistic Regression Source Code](https://github.com/trekhleb/homemade-machine-learning/blob/master/homemade/logistic_regression/logistic_regression.py)
**Logistic regression** is the appropriate regression analysis to conduct when the dependent variable is dichotomous (binary). Like all regression analyses, logistic regression is a predictive analysis. Logistic regression is used to describe data and to explain the relationship between one dependent binary variable and one or more nominal, ordinal, interval or ratio-level independent variables.
Logistic Regression is used when the dependent variable (target) is categorical.
For example:
- To predict whether an email is spam (`1`) or (`0`).
- Whether online transaction is fraudulent (`1`) or not (`0`).
- Whether the tumor is malignant (`1`) or not (`0`).
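Under the hood, the model passes a linear combination of the features through the sigmoid function and thresholds the result at 0.5. A minimal sketch of that idea in plain NumPy (the parameter values below are made up for illustration and are independent of the custom implementation used in this notebook):
```
import numpy as np

def sigmoid(z):
    # squashes any real number into the (0, 1) range
    return 1 / (1 + np.exp(-z))

# hypothetical learned parameters and one example (first feature is the bias term)
theta = np.array([-0.5, 1.2, 0.8])
x = np.array([1.0, 0.3, 0.7])

probability = sigmoid(theta @ x)           # P(class = 1 | x)
predicted_class = int(probability >= 0.5)  # threshold at 0.5
print(probability, predicted_class)
```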
> **Demo Project:** In this example we will try to classify Iris flowers into three categories (`Iris setosa`, `Iris virginica` and `Iris versicolor`) based on `petal_length` and `petal_width` parameters.
```
# To make debugging of logistic_regression module easier we enable imported modules autoreloading feature.
# By doing this you may change the code of logistic_regression library and all these changes will be available here.
%load_ext autoreload
%autoreload 2
# Add project root folder to module loading paths.
import sys
sys.path.append('../..')
```
### Import Dependencies
- [pandas](https://pandas.pydata.org/) - library that we will use for loading and displaying the data in a table
- [numpy](http://www.numpy.org/) - library that we will use for linear algebra operations
- [matplotlib](https://matplotlib.org/) - library that we will use for plotting the data
- [logistic_regression](https://github.com/trekhleb/homemade-machine-learning/blob/master/homemade/logistic_regression/logistic_regression.py) - custom implementation of logistic regression
```
# Import 3rd party dependencies.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Import custom logistic regression implementation.
from homemade.logistic_regression import LogisticRegression
```
### Load the Data
In this demo we will use [Iris data set](http://archive.ics.uci.edu/ml/datasets/Iris).
The data set consists of several samples from each of three species of Iris (`Iris setosa`, `Iris virginica` and `Iris versicolor`). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters. Based on the combination of these four features, [Ronald Fisher](https://en.wikipedia.org/wiki/Iris_flower_data_set) developed a linear discriminant model to distinguish the species from each other.
```
# Load the data.
data = pd.read_csv('../../data/iris.csv')
# Print the data table.
data.head(10)
```
### Plot the Data
Let's take two parameters `petal_length` and `petal_width` for each flower into consideration and plot the dependency of the Iris class on these two parameters.
```
# List of supported Iris classes.
iris_types = ['SETOSA', 'VERSICOLOR', 'VIRGINICA']
# Pick the Iris parameters for consideration.
x_axis = 'petal_length'
y_axis = 'petal_width'
# Plot the scatter for every type of Iris.
for iris_type in iris_types:
plt.scatter(
data[x_axis][data['class'] == iris_type],
data[y_axis][data['class'] == iris_type],
label=iris_type
)
# Plot the data.
plt.xlabel(x_axis + ' (cm)')
plt.ylabel(y_axis + ' (cm)')
plt.title('Iris Types')
plt.legend()
plt.show()
```
### Prepare the Data for Training
Let's extract the `petal_length` and `petal_width` data to form a training feature set, and let's also form our training labels set.
```
# Get total number of Iris examples.
num_examples = data.shape[0]
# Get features.
x_train = data[[x_axis, y_axis]].values.reshape((num_examples, 2))
# Get labels.
y_train = data['class'].values.reshape((num_examples, 1))
```
### Init and Train Logistic Regression Model
> ☝🏻This is the place where you might want to play with model configuration.
- `max_iterations` - this is the maximum number of iterations that the gradient descent algorithm will use to find the minimum of the cost function. Low numbers may prevent gradient descent from reaching the minimum. High numbers will make the algorithm work longer without improving its accuracy.
- `regularization_param` - parameter that will fight overfitting. The higher the parameter, the simpler the model will be.
- `polynomial_degree` - the degree of additional polynomial features (`x1^2 * x2, x1^2 * x2^2, ...`). This will allow you to curve the predictions; the more features you add, the more curved the decision boundary will be.
- `sinusoid_degree` - the degree of sinusoid parameter multipliers of additional features (`sin(x), sin(2*x), ...`). This will allow you to curve the predictions by adding a sinusoidal component to the prediction curve.
```
# Set up linear regression parameters.
max_iterations = 1000 # Max number of gradient descent iterations.
regularization_param = 0 # Helps to fight model overfitting.
polynomial_degree = 0 # The degree of additional polynomial features.
sinusoid_degree = 0 # The degree of sinusoid parameter multipliers of additional features.
# Init logistic regression instance.
logistic_regression = LogisticRegression(x_train, y_train, polynomial_degree, sinusoid_degree)
# Train logistic regression.
(thetas, costs) = logistic_regression.train(regularization_param, max_iterations)
# Print model parameters that have been learned.
pd.DataFrame(thetas, columns=['Theta 1', 'Theta 2', 'Theta 3'], index=['SETOSA', 'VERSICOLOR', 'VIRGINICA'])
```
### Analyze Gradient Descent Progress
The plot below illustrates how the cost function value changes over each iteration. You should see it decreasing.
If the cost function value increases, it may mean that gradient descent overshot the minimum of the cost function and is moving further away from it with each step.
From this plot you may also get an understanding of how many iterations you need to get an optimal value of the cost function.
```
# Draw gradient descent progress for each label.
labels = logistic_regression.unique_labels
plt.plot(range(len(costs[0])), costs[0], label=labels[0])
plt.plot(range(len(costs[1])), costs[1], label=labels[1])
plt.plot(range(len(costs[2])), costs[2], label=labels[2])
plt.xlabel('Gradient Steps')
plt.ylabel('Cost')
plt.legend()
plt.show()
```
### Calculate Model Training Precision
Calculate how many flowers from the training set have been guessed correctly.
```
# Make training set predictions.
y_train_predictions = logistic_regression.predict(x_train)
# Check what percentage of them are actually correct.
precision = np.sum(y_train_predictions == y_train) / y_train.shape[0] * 100
print('Precision: {:5.4f}%'.format(precision))
```
### Draw Decision Boundaries
Let's build our decision boundaries. These are the lines that distinguish classes from each other. This will give us a pretty clear overview of how successful our training process was. You should see a clear separation of the data plane into three sectors.
```
# Get the number of training examples.
num_examples = x_train.shape[0]
# Set up how many calculations we want to do along every axis.
samples = 150
# Generate test ranges for x and y axis.
x_min = np.min(x_train[:, 0])
x_max = np.max(x_train[:, 0])
y_min = np.min(x_train[:, 1])
y_max = np.max(x_train[:, 1])
X = np.linspace(x_min, x_max, samples)
Y = np.linspace(y_min, y_max, samples)
# z axis will contain our predictions. So let's get predictions for every pair of x and y.
Z_setosa = np.zeros((samples, samples))
Z_versicolor = np.zeros((samples, samples))
Z_virginica = np.zeros((samples, samples))
for x_index, x in enumerate(X):
for y_index, y in enumerate(Y):
data = np.array([[x, y]])
prediction = logistic_regression.predict(data)[0][0]
if prediction == 'SETOSA':
Z_setosa[x_index][y_index] = 1
elif prediction == 'VERSICOLOR':
Z_versicolor[x_index][y_index] = 1
elif prediction == 'VIRGINICA':
Z_virginica[x_index][y_index] = 1
# Now, when we have x, y and z axes being setup and calculated we may print decision boundaries.
for iris_type in iris_types:
plt.scatter(
x_train[(y_train == iris_type).flatten(), 0],
x_train[(y_train == iris_type).flatten(), 1],
label=iris_type
)
plt.contour(X, Y, Z_setosa)
plt.contour(X, Y, Z_versicolor)
plt.contour(X, Y, Z_virginica)
plt.xlabel(x_axis + ' (cm)')
plt.ylabel(y_axis + ' (cm)')
plt.title('Iris Types')
plt.legend()
plt.show()
```
|
github_jupyter
|
# Working with Tensorforce to Train a Reinforcement-Learning Agent
This notebook serves as an educational introduction to the usage of Tensorforce using a gym-electric-motor (GEM) environment. The goal of this notebook is to give an understanding of what tensorforce is and how to use it to train and evaluate a reinforcement learning agent that can solve a current control problem of the GEM toolbox.
## 1. Installation
Before you can start you need to make sure that you have both gym-electric-motor and tensorforce installed. You can install both easily using pip:
- ```pip install gym-electric-motor```
- ```pip install tensorforce```
Alternatively, you can install their latest developer version directly from GitHub:
- [GitHub Gym-Electric-Motor](https://github.com/upb-lea/gym-electric-motor)
- [GitHub Tensorforce](https://github.com/tensorforce/tensorforce)
For this notebook, the following cell will do the job:
```
!pip install -q git+https://github.com/upb-lea/gym-electric-motor.git tensorforce==0.5.5
%matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
```
## 2. Setting up a GEM Environment
The basic idea behind reinforcement learning is to create a so-called agent, that should learn by itself to solve a specified task in a given environment.
This environment gives the agent feedback on its actions and reinforces the targeted behavior.
In this notebook, the task is to train a controller for the current control of a *permanent magnet synchronous motor* (*PMSM*).
In the following, the used GEM-environment is briefly presented, but this notebook does not focus directly on the detailed usage of GEM. If you are new to the used environment and interested in finding out what it does and how to use it, you should take a look at the [GEM cookbook](https://colab.research.google.com/github/upb-lea/gym-electric-motor/blob/master/examples/example_notebooks/GEM_cookbook.ipynb).
To save some space in this notebook, there is a function defined in an external python file called **getting_environment.py**. If you want to know how the environment's parameters are defined you can take a look at that file. By simply calling the **get_env()** function from the external file, you can set up an environment for a *PMSM* with discrete inputs.
The basic idea of the control setup from the GEM-environment is displayed in the following figure.

The agent controls the converter, which converts the supply currents to the currents flowing into the motor - for the *PMSM*: $i_{sq}$ and $i_{sd}$.
In the continuous case, the agent's action equals a duty cycle which will be modulated into a corresponding voltage.
In the discrete case, the agent's actions denote switching states of the converter at the given instant. Here, only a discrete number of options is available. In this notebook, the *discrete B6 bridge converter* with six switches is utilized by default for the PMSM. This converter provides a total of eight possible actions.
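Where the eight comes from: each of the three converter legs can connect its phase to either the upper or the lower DC rail, giving $2^3 = 8$ switching combinations. A tiny sketch (plain Python, independent of GEM) that enumerates them:
```
import itertools

# each of the three half-bridges can be switched to -1 (lower rail) or +1 (upper rail)
switching_states = list(itertools.product([-1, 1], repeat=3))
print(len(switching_states))  # 8
print(switching_states)       # includes (-1, -1, -1) and (1, 1, 1), which both give a zero voltage vector
```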

The motor schematic is the following:

And the electrical ODEs for that motor are:
<h3 align="center">
<!-- $\frac{\mathrm{d}i_{sq}}{\mathrm{d}t} = \frac{u_{sq}-pL_d\omega_{me}i_{sd}-R_si_{sq}}{L_q}$
$\frac{\mathrm{d}i_{sd}}{\mathrm{d}t} = \frac{u_{sd}-pL_q\omega_{me}i_{sq}-R_si_{sd}}{L_d}$
$\frac{\mathrm{d}\epsilon_{el}}{\mathrm{d}t} = p\omega_{me}$
-->
$ \frac{\mathrm{d}i_{sd}}{\mathrm{d}t}=\frac{u_{sd} + p\omega_{me}L_q i_{sq} - R_s i_{sd}}{L_d} $ <br><br>
$\frac{\mathrm{d} i_{sq}}{\mathrm{d} t}=\frac{u_{sq} - p \omega_{me} (L_d i_{sd} + \mathit{\Psi}_p) - R_s i_{sq}}{L_q}$ <br><br>
$\frac{\mathrm{d}\epsilon_{el}}{\mathrm{d}t} = p\omega_{me}$
</h3>
The target for the agent is now to learn to control the currents. For this, a reference generator produces a trajectory that the agent has to follow.
Therefore, it has to learn a function (policy) from given states, references and rewards to appropriate actions.
For a deeper understanding of the used models behind the environment see the [documentation](https://upb-lea.github.io/gym-electric-motor/).
Comprehensive learning material to RL is also [freely available](https://github.com/upb-lea/reinforcement_learning_course_materials).
```
import numpy as np
from pathlib import Path
import gym_electric_motor as gem
from gym_electric_motor.reference_generators import \
MultipleReferenceGenerator,\
WienerProcessReferenceGenerator
from gym_electric_motor.visualization import MotorDashboard
from gym_electric_motor.core import Callback
from gym.spaces import Discrete, Box
from gym.wrappers import FlattenObservation, TimeLimit
from gym import ObservationWrapper
# helper functions and classes
class FeatureWrapper(ObservationWrapper):
"""
Wrapper class which wraps the environment to change its observation. Serves
the purpose to improve the agent's learning speed.
It changes epsilon to cos(epsilon) and sin(epsilon). This serves the purpose
to have the angles -pi and pi close to each other numerically without losing
any information on the angle.
Additionally, this wrapper adds a new observation i_sd**2 + i_sq**2. This should
help the agent to easier detect incoming limit violations.
"""
def __init__(self, env, epsilon_idx, i_sd_idx, i_sq_idx):
"""
Changes the observation space to fit the new features
Args:
env(GEM env): GEM environment to wrap
epsilon_idx(integer): Epsilon's index in the observation array
i_sd_idx(integer): I_sd's index in the observation array
i_sq_idx(integer): I_sq's index in the observation array
"""
super(FeatureWrapper, self).__init__(env)
self.EPSILON_IDX = epsilon_idx
self.I_SQ_IDX = i_sq_idx
self.I_SD_IDX = i_sd_idx
new_low = np.concatenate((self.env.observation_space.low[
:self.EPSILON_IDX], np.array([-1.]),
self.env.observation_space.low[
self.EPSILON_IDX:], np.array([0.])))
new_high = np.concatenate((self.env.observation_space.high[
:self.EPSILON_IDX], np.array([1.]),
self.env.observation_space.high[
self.EPSILON_IDX:],np.array([1.])))
self.observation_space = Box(new_low, new_high)
def observation(self, observation):
"""
Gets called at each return of an observation. Adds the new features to the
observation and removes original epsilon.
"""
cos_eps = np.cos(observation[self.EPSILON_IDX] * np.pi)
sin_eps = np.sin(observation[self.EPSILON_IDX] * np.pi)
currents_squared = observation[self.I_SQ_IDX]**2 + observation[self.I_SD_IDX]**2
observation = np.concatenate((observation[:self.EPSILON_IDX],
np.array([cos_eps, sin_eps]),
observation[self.EPSILON_IDX + 1:],
np.array([currents_squared])))
return observation
# define motor arguments
motor_parameter = dict(p=3, # [p] = 1, nb of pole pairs
r_s=17.932e-3, # [r_s] = Ohm, stator resistance
l_d=0.37e-3, # [l_d] = H, d-axis inductance
l_q=1.2e-3, # [l_q] = H, q-axis inductance
psi_p=65.65e-3, # [psi_p] = Vs, magnetic flux of the permanent magnet
)
# supply voltage
u_sup = 350
# nominal and absolute state limitations
nominal_values=dict(omega=4000*2*np.pi/60,
i=230,
u=u_sup
)
limit_values=dict(omega=4000*2*np.pi/60,
i=1.5*230,
u=u_sup
)
# defining reference-generators
q_generator = WienerProcessReferenceGenerator(reference_state='i_sq')
d_generator = WienerProcessReferenceGenerator(reference_state='i_sd')
rg = MultipleReferenceGenerator([q_generator, d_generator])
# defining sampling interval
tau = 1e-5
# defining maximal episode steps
max_eps_steps = 10_000
motor_initializer={'random_init': 'uniform', 'interval': [[-230, 230], [-230, 230], [-np.pi, np.pi]]}
reward_function=gem.reward_functions.WeightedSumOfErrors(
reward_weights={'i_sq': 10, 'i_sd': 10},
gamma=0.99, # discount rate
reward_power=1)
# creating gem environment
env = gem.make( # define a PMSM with discrete action space
"PMSMDisc-v1",
# visualize the results
visualization=MotorDashboard(state_plots=['i_sq', 'i_sd'], reward_plot=True),
# parameterize the PMSM and update limitations
motor_parameter=motor_parameter,
limit_values=limit_values, nominal_values=nominal_values,
# define the random initialisation for load and motor
load='ConstSpeedLoad',
load_initializer={'random_init': 'uniform', },
motor_initializer=motor_initializer,
reward_function=reward_function,
# define the duration of one sampling step
tau=tau, u_sup=u_sup,
# turn off terminations via limit violation, parameterize the rew-fct
reference_generator=rg, ode_solver='euler',
)
# remove one action from the action space to help the agent speed up its training
# this can be done as both switching states (1,1,1) and (-1,-1,-1) - which are encoded
# by action 0 and 7 - both lead to the same zero voltage vector in alpha/beta-coordinates
env.action_space = Discrete(7)
# applying wrappers
eps_idx = env.physical_system.state_names.index('epsilon')
i_sd_idx = env.physical_system.state_names.index('i_sd')
i_sq_idx = env.physical_system.state_names.index('i_sq')
env = TimeLimit(FeatureWrapper(FlattenObservation(env),
eps_idx, i_sd_idx, i_sq_idx),
max_eps_steps)
```
## 3. Using Tensorforce
To take advantage of some already implemented deep-RL agents, we use the *tensorforce-framework*. It is built on *TensorFlow* and offers agents based on deep Q-networks, policy gradients, or actor-critic algorithms.
For more information to specific agents or different modules that can be used, some good explanations can be found in the corresponding [documentation](https://tensorforce.readthedocs.io/en/latest/).
For the control task with a discrete action space we will use a [deep Q-network (DQN)](https://www.nature.com/articles/nature14236).
### 3.1 Defining a Tensorforce-Environment
Tensorforce requires you to define a *tensorforce-environment*. This is done simply by using the ```Environment.create``` interface, which acts as a wrapper around usual [gym](https://github.com/openai/gym) instances.
```
from tensorforce.environments import Environment
# creating tensorforce environment
tf_env = Environment.create(environment=env,
max_episode_timesteps=max_eps_steps)
```
### 3.2 Setting-up a Tensorforce-Agent
The Agent is created just like the environment. The agent's parameters can be passed as arguments to the ```create()``` function or via a configuration as a dictionary or as *.json* file.
In the following, the way via a dictionary is demonstrated.
With the *tensorforce-framework* it is possible to define your own network architectures, as shown below.
For some parameters, it can be useful to have a decaying value during the training. A possible way for this is also shown in the following code.
The exact meaning of the used parameters can be found in the already mentioned tensorforce documentation.
```
# using a parameter decay for the exploration
epsilon_decay = {'type': 'decaying',
'decay': 'polynomial',
'decay_steps': 50000,
'unit': 'timesteps',
'initial_value': 1.0,
'decay_rate': 5e-2,
'final_value': 5e-2,
'power': 3.0}
# defining a simple network architecture: 2 dense-layers with 64 nodes each
net = [
dict(type='dense', size=64, activation='relu'),
dict(type='dense', size=64, activation='relu'),
]
# defining the parameters of a dqn-agent
agent_config = {
'agent': 'dqn',
'memory': 200000,
'batch_size': 25,
'network': net,
'update_frequency': 1,
'start_updating': 10000,
'learning_rate': 1e-4,
'discount': 0.99,
'exploration': epsilon_decay,
'target_sync_frequency': 1000,
'target_update_weight': 1.0,
}
from tensorforce.agents import Agent
tau = 1e-5
simulation_time = 2 # seconds
training_steps = int(simulation_time // tau)
# creating agent via dictionary
dqn_agent = Agent.create(agent=agent_config, environment=tf_env)
```
### 3.3 Training the Agent
Training the agent is executed with the **tensorforce-runner**. The runner stores metrics during the training, like the reward per episode, and can be used to save learned agents. If you just want to experiment a little with an already trained agent, it is possible to skip the next cells and just load a pre-trained agent.
```
from tensorforce.execution import Runner
# create and train the agent
runner = Runner(agent=dqn_agent, environment=tf_env)
runner.run(num_timesteps=training_steps)
```
Accessing the metrics saved by the runner, it is possible to have a look at the mean reward per episode or the corresponding episode length.
```
# accessing the metrics from the runner
rewards = np.asarray(runner.episode_rewards)
episode_length = np.asarray(runner.episode_timesteps)
# calculating the mean-reward per episode
mean_reward = rewards/episode_length
num_episodes = len(mean_reward)
# plotting mean-reward over episodes
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(20,10))
ax1.plot(range(num_episodes), mean_reward, linewidth=3)
#plt.xticks(fontsize=15)
ax1.set_ylabel('mean-reward', fontsize=22)
ax1.grid(True)
ax1.tick_params(axis="y", labelsize=15)
# plotting episode length over episodes
ax2.plot(range(num_episodes), episode_length, linewidth=3)
ax2.set_xlabel('# episodes', fontsize=22)
ax2.set_ylabel('episode-length', fontsize=22)
ax2.tick_params(axis="y", labelsize=15)
ax2.tick_params(axis="x", labelsize=15)
ax2.grid(True)
plt.show()
print('number of episodes during training: ', len(rewards))
```
Saving the agent's trained model makes it available for a separate evaluation and further usage.
```
agent_path = Path('saved_agents')
agent_path.mkdir(parents=True, exist_ok=True)
agent_name = 'dqn_agent_tensorforce'
runner.agent.save(directory=str(agent_path), filename=agent_name)
print('\n agent saved \n')
runner.close()
```
## 4. Evaluating the Trained Agent
### 4.1 Loading a Model
If a previously saved agent is available, it can be restored by using the runner to load the model with the ```load()``` function. To load the agent it is necessary to pass the directory, the filename, the environment, and the agent configuration used for the training.
```
from tensorforce import Agent
dqn_agent = Agent.load(
directory=str(agent_path),
filename=agent_name,
environment=tf_env,
**agent_config
)
print('\n agent loaded \n')
```
### 4.2 Evaluating the Agent
To use the trained agent as a controller, a typical loop to interact with the environment can be used, which is displayed in the cell below.
Now the agent takes the observations from the environment and reacts with an action, which is used to control the environment. To get an impression of how the trained agent performs, the trajectory of the control-states can be observed. A live-plot will be displayed in a jupyter-notebook. If you are using jupyter-lab, the following cell could cause problems regarding the visualization.
```
%matplotlib notebook
# currently the visualization crashes for values larger than the one defined here
visualization_steps = int(9e4)
obs = env.reset()
for step in range(visualization_steps):
# getting the next action from the agent
actions = dqn_agent.act(obs, evaluation=True)
    # the env returns the next state, the reward and the information whether the state is terminal
obs, reward, done, _ = env.step(action=actions)
# activating the visualization
env.render()
if done:
        # resetting the env if a terminal state is reached
obs = env.reset()
```
In the next example a classic *environment-interaction loop* can be extended to access different metrics and values, e.g. the cumulated reward over all steps. The number of evaluation-steps can be reduced, but a higher variance of the evaluation result must then be accepted.
```
# test agent
steps = 250000
rewards = []
episode_lens = []
obs = env.reset()
terminal = False
cumulated_rew = 0
step_counter = 0
episode_rew = 0
for step in (range(steps)):
actions = dqn_agent.act(obs, evaluation=True)
obs, reward, done, _ = env.step(action=actions)
cumulated_rew += reward
episode_rew += reward
step_counter += 1
if done:
rewards.append(episode_rew)
episode_lens.append(step_counter)
episode_rew = 0
step_counter = 0
obs = env.reset()
done = False
print(f' \n Cumulated reward per step is {cumulated_rew/steps} \n')
print(f' \n Number of evaluation episodes: {len(episode_lens)} \n')
%matplotlib inline
# accessing the metrics from the evaluation run
rewards = np.asarray(rewards)
episode_length = np.asarray(episode_lens)
# calculating the mean-reward per episode
mean_reward = rewards/episode_length
num_episodes = len(rewards)
# plotting mean-reward over episodes
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True, figsize=(20, 10))
ax1.plot(range(num_episodes), mean_reward, linewidth=3)
#plt.xticks(fontsize=15)
ax1.set_ylabel('reward', fontsize=22)
ax1.grid(True)
ax1.tick_params(axis="y", labelsize=15)
# plotting episode length over episodes
ax2.plot(range(num_episodes), episode_length, linewidth=3)
ax2.set_xlabel('# episodes', fontsize=22)
ax2.set_ylabel('episode-length', fontsize=20)
ax2.tick_params(axis="y", labelsize=15)
ax2.tick_params(axis="x", labelsize=15)
ax2.grid(True)
plt.show()
print('number of episodes during evaluation: ', len(episode_lens))
```
|
github_jupyter
|
<a href="https://colab.research.google.com/github/martin-fabbri/colab-notebooks/blob/master/deeplearning.ai/nlp/c3_w1_03_trax_intro_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Trax : Ungraded Lecture Notebook
In this notebook you'll get to know about the Trax framework and learn about some of its basic building blocks.
## Background
### Why Trax and not TensorFlow or PyTorch?
TensorFlow and PyTorch are both extensive frameworks that can do almost anything in deep learning. They offer a lot of flexibility, but that often means verbosity of syntax and extra time to code.
Trax is much more concise. It runs on a TensorFlow backend but allows you to train models with one-line commands. Trax also runs end to end, letting you get data, build a model, and train it with single terse statements. This means you can focus on learning, instead of spending hours on the idiosyncrasies of a big framework's implementation.
### Why not Keras then?
Keras is now part of TensorFlow itself from 2.0 onwards. Also, Trax is well suited to implementing new state-of-the-art algorithms like Transformers, Reformers, and BERT because it is actively maintained by the Google Brain team for advanced deep learning tasks. It runs smoothly on CPUs, GPUs, and TPUs with comparatively few code modifications.
### How to Code in Trax
Building models in Trax relies on two key concepts: **layers** and **combinators**.
Trax layers are simple objects that process data and perform computations. They can be chained together into composite layers using Trax combinators, allowing you to build layers and models of any complexity.
### Trax, JAX, TensorFlow and Tensor2Tensor
You already know that Trax uses TensorFlow as a backend, but it also uses the JAX library to speed up computation. You can view JAX as an enhanced and optimized version of numpy.
**Watch out for assignments which use `import trax.fastmath.numpy as np`. If you see this line, remember that when calling `np` you are really calling Trax’s version of numpy, which is compatible with JAX.**
As a result of this, where you used to encounter the type `numpy.ndarray` now you will find the type `jax.interpreters.xla.DeviceArray`.
Tensor2Tensor is another name you might have heard. It started as an end-to-end solution much like Trax, but it grew unwieldy and complicated. You can view Trax as the new, improved version that is much faster and simpler to use.
### Resources
- Trax source code can be found on Github: [Trax](https://github.com/google/trax)
- JAX library: [JAX](https://jax.readthedocs.io/en/latest/index.html)
## Installing Trax
Trax depends on JAX and some related libraries that are not yet supported on [Windows](https://github.com/google/jax/blob/1bc5896ee4eab5d7bb4ec6f161d8b2abb30557be/README.md#installation) but work well on Ubuntu and macOS. If you are working on Windows, we suggest installing Trax on WSL2.
Officially maintained documentation: [trax-ml](https://trax-ml.readthedocs.io/en/latest/), not to be confused with this [TraX](https://trax.readthedocs.io/en/latest/index.html)
```
%%capture
!pip install trax==1.3.1
```
## Imports
```
%%capture
import numpy as np # regular ol' numpy
from trax import layers as tl # core building block
from trax import shapes # data signatures: dimensionality and type
from trax import fastmath # uses jax, offers numpy on steroids
# Trax version 1.3.1 or better
!pip list | grep trax
```
## Layers
Layers are the core building blocks in Trax or, as mentioned in the lectures, the base classes.
They take inputs, perform computations or custom calculations, and return outputs.
You can also inspect layer properties. Let me show you some examples.
### Relu Layer
First I'll show you how to build a relu activation function as a layer. A layer like this is one of the simplest types. Notice there is no object initialization so it works just like a math function.
**Note: Activation functions are also layers in Trax, which might look odd if you have been using other frameworks for a longer time.**
```
# Layers
# Create a relu trax layer
relu = tl.Relu()
# Inspect properties
print("-- Properties --")
print("name :", relu.name)
print("expected inputs :", relu.n_in)
print("promised outputs :", relu.n_out, "\n")
# Inputs
x = np.array([-2, -1, 0, 1, 2])
print("-- Inputs --")
print("x :", x, "\n")
# Outputs
y = relu(x)
print("-- Outputs --")
print("y :", y)
```
### Concatenate Layer
Now I'll show you how to build a layer that takes 2 inputs. Notice the change in the expected inputs property from 1 to 2.
```
# Create a concatenate trax layer
concat = tl.Concatenate()
print("-- Properties --")
print("name :", concat.name)
print("expected inputs :", concat.n_in)
print("promised outputs :", concat.n_out, "\n")
# Inputs
x1 = np.array([-10, -20, -30])
x2 = x1 / -10
print("-- Inputs --")
print("x1 :", x1)
print("x2 :", x2, "\n")
# Outputs
y = concat([x1, x2])
print("-- Outputs --")
print("y :", y)
```
## Layers are Configurable
You can change the default settings of layers. For example, you can change the expected inputs for a concatenate layer from 2 to 3 using the optional parameter `n_items`.
```
# Configure a concatenate layer
concat_3 = tl.Concatenate(n_items=3) # configure the layer's expected inputs
print("-- Properties --")
print("name :", concat_3.name)
print("expected inputs :", concat_3.n_in)
print("promised outputs :", concat_3.n_out, "\n")
# Inputs
x1 = np.array([-10, -20, -30])
x2 = x1 / -10
x3 = x2 * 0.99
print("-- Inputs --")
print("x1 :", x1)
print("x2 :", x2)
print("x3 :", x3, "\n")
# Outputs
y = concat_3([x1, x2, x3])
print("-- Outputs --")
print("y :", y)
```
**Note: At any point, if you want help with a function, look up the [documentation](https://trax-ml.readthedocs.io/en/latest/) or use the `help` function.**
```
#help(tl.Concatenate) # Uncomment this to see the function docstring with an explanation
```
## Layers can have Weights
Some layer types include mutable weights and biases that are used in computation and training. Layers of this type require initialization before use.
For example, the `LayerNorm` layer calculates normalized data that is also scaled by weights and biases. During initialization you pass the data shape and data type of the inputs, so the layer can initialize compatible arrays of weights and biases.
```
# Uncomment any of them to see information regarding the function
# help(tl.LayerNorm)
# help(shapes.signature)
# Layer initialization
norm = tl.LayerNorm()
# You first must know what the input data will look like
x = np.array([0, 1, 2, 3], dtype="float")
# Use the input data signature to get shape and type for initializing weights and biases
# We need to convert the input datatype from usual tuple to trax ShapeDtype
norm.init(shapes.signature(x))
print("Normal shape:",x.shape, "Data Type:",type(x.shape))
print("Shapes Trax:",shapes.signature(x),"Data Type:",type(shapes.signature(x)))
# Inspect properties
print("-- Properties --")
print("name :", norm.name)
print("expected inputs :", norm.n_in)
print("promised outputs :", norm.n_out)
# Weights and biases
print("weights :", norm.weights[0])
print("biases :", norm.weights[1], "\n")
# Inputs
print("-- Inputs --")
print("x :", x)
# Outputs
y = norm(x)
print("-- Outputs --")
print("y :", y)
```
## Custom Layers
This is where things start getting more interesting!
You can create your own custom layers too and define custom functions for computations by using `tl.Fn`. Let me show you how.
```
#help(tl.Fn)
# Define a custom layer
# In this example you will create a layer to calculate the input times 2
def TimesTwo():
layer_name = "TimesTwo"
def func(x):
return x * 2
return tl.Fn(layer_name, func)
# Test it
times_two = TimesTwo()
# Inspect properties
print("-- Properties --")
print("name :", times_two.name)
print("expected inputs :", times_two.n_in)
print("promised outputs :", times_two.n_out, "\n")
# Inputs
x = np.array([1, 2, 3])
print("-- Inputs --")
print("x :", x, "\n")
# Outputs
y = times_two(x)
print("-- Outputs --")
print("y :", y)
```
## Combinators
You can combine layers to build more complex layers. Trax provides a set of objects named combinator layers to make this happen. Combinators are themselves layers, so they behave like any other layer.
### Serial Combinator
This is the most common and easiest to use. For example, you could build a simple neural network by combining layers into a single layer using the `Serial` combinator. This new layer then acts just like a single layer, so you can inspect inputs, outputs and weights, or even combine it into another layer! Combinators can then be used as trainable models. _Try adding more layers_
**Note: As you may have guessed, if there is a serial combinator, there is a parallel combinator as well. Do explore the combinators and other layers in the Trax documentation, and look at the repo to understand how these layers are written.**
```
# help(tl.Serial)
# help(tl.Parallel)
# Serial combinator
serial = tl.Serial(
tl.LayerNorm(),
tl.Relu(),
times_two,
)
# Initialization
x = np.array([-2, -1, 0, 1, 2])
serial.init(shapes.signature(x))
print("-- Serial Model --")
print(serial,"\n")
print("-- Properties --")
print("name :", serial.name)
print("sublayers :", serial.sublayers)
print("expected inputs :", serial.n_in)
print("promised outputs :", serial.n_out)
print("weights & biases:", serial.weights, "\n")
# Inputs
print("-- Inputs --")
print("x :", x, "\n")
# Outputs
y = serial(x)
print("-- Outputs --")
print("y :", y)
```
## JAX
Just remember to look out for which numpy you are using: the regular ol' numpy or Trax's JAX-compatible numpy. Both tend to use the alias `np`, so watch those import blocks.
**Note: Certain things that are possible in regular numpy are still not possible in fastmath.numpy, so in the assignments you will see us switch between them to get our work done.**
```
# Numpy vs fastmath.numpy have different data types
# Regular ol' numpy
x_numpy = np.array([1, 2, 3])
print("good old numpy : ", type(x_numpy), "\n")
# Fastmath and jax numpy
x_jax = fastmath.numpy.array([1, 2, 3])
print("jax trax numpy : ", type(x_jax))
```
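As a small follow-up sketch (assuming the cells above have been run), operations from `fastmath.numpy` keep returning JAX-backed arrays, while `np.asarray` can convert one back to a regular numpy array:
```
# operations on jax arrays stay in jax land
y_jax = fastmath.numpy.dot(x_jax, x_jax)
print("dot product type :", type(y_jax))
# converting back to good old numpy
x_back = np.asarray(x_jax)
print("converted type   :", type(x_back))
```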
## Summary
Trax is a concise framework, built on TensorFlow, for end-to-end machine learning. The key building blocks are layers and combinators. This notebook is just a taste, but it sets you up with some key intuitions to take forward into the rest of the course and the assignments, where you will build end-to-end models.
|
github_jupyter
|
# XGBoost model for Bike sharing dataset
```
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# preprocessing methods
from sklearn.preprocessing import StandardScaler
# accuracy measures and data spliting
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
# gradient boosting library and plotting support
import xgboost as xgb
import graphviz
plt.style.use('fivethirtyeight')
plt.rcParams['figure.figsize'] = 15, 7
```
## 1. Data import
```
DATADIR = '../data/bike/'
MODELDIR = '../checkpoints/bike-sharing/xgb/'
data_path = os.path.join(DATADIR, 'bike-sharing-processed.csv')
data = pd.read_csv(data_path)
data.set_index('date', inplace=True)
data.sort_index(inplace=True)
data.head()
plt.plot(data.cnt, '.')
plt.title('Bike sharing count')
plt.xlabel('sample id')
plt.ylabel('count')
plt.show()
```
## 2. Train test split
```
y = data[['cnt']].copy()
X = data.drop(columns=['cnt'], axis=1)
print(f'X and y shape:')
print(X.shape, y.shape)
# date selection
datelist = data.index.unique()
# two month data for testset
print(f'Test start date: {datelist[-61]}')
# Train test split : last 60 days for test set
X_train = X[X.index < datelist[-61]]
X_test = X[X.index >= datelist[-61]]
y_train = y[y.index < datelist[-61]]
y_test = y[y.index >= datelist[-61]]
print(f'Size of train and test set respectively:')
print(X_train.shape,X_test.shape, y_train.shape, y_test.shape)
```
## 3. Parameter selection using grid search
```
def xgb_parameter_selection(X, y, grid_param, xgb_param):
xgb_grid = GridSearchCV(estimator=xgb.XGBRegressor(**xgb_param, seed=seed),
param_grid=grid_param, cv=3)
xgb_grid.fit(X, y)
return xgb_grid
```
### 3.1 Depth and child weight selection
```
seed = 42
# max depth and child weight selection
grid_param_1 = {'max_depth': [3, 5],
'min_child_weight': [3, 5, 7]
}
xgb_param_1 = {'objective' :'reg:linear',
'silent' : 1,
'n_estimators': 100,
'learning_rate' : 0.1}
model_1 = xgb_parameter_selection(X_train, y_train, grid_param_1, xgb_param_1)
# print(f'Best estimator : {model_1.best_estimator_}')
print(f'Best parameter : {model_1.best_params_}')
print(f'Best score : {model_1.best_score_}')
```
### 3.2 colsample_bytree and subsample selection
```
# column and sample selection parameter
grid_param_2 = {'colsample_bytree' : [0.7, 1.0],
'subsample' : [0.8, 1]
}
xgb_param_2 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight':7,
'n_estimators': 100,
'learning_rate' : 0.1,
'eval_metric' : 'mae' }
model_2 = xgb_parameter_selection(X_train, y_train, grid_param_2, xgb_param_2)
print(f'Best parameter : {model_2.best_params_}')
print(f'Best score : {model_2.best_score_}')
```
### 3.3 gamma selection
```
# gamma selection
grid_param_3 = {'gamma' : [0, 0.1, 0.2, 5]
}
xgb_param_3 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight': 7,
'n_estimators': 100,
'learning_rate' : 0.1,
'colsample_bytree' : 0.7,
'subsample' : 1}
model_3 = xgb_parameter_selection(X_train, y_train, grid_param_3, xgb_param_3)
print(f'Best parameter : {model_3.best_params_}')
print(f'Best score : {model_3.best_score_}')
```
### 3.4 learning rate
```
# learning_rate selection
grid_param_4 = {'learning_rate' : [0.1, 0.01, 0.001]
}
xgb_param_4 = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight': 7,
'n_estimators': 100,
'learning_rate' : 0.1,
'colsample_bytree' : 0.7,
'subsample' : 1,
'gamma' : 0}
model_4 = xgb_parameter_selection(X_train, y_train, grid_param_4, xgb_param_4)
print(f'Best parameter : {model_4.best_params_}')
print(f'Best score : {model_4.best_score_}')
```
## 4. Final model training
```
final_param = {'objective' :'reg:linear',
'silent' : 1,
'max_depth': 5,
'min_child_weight': 7,
'n_estimators': 100,
'learning_rate' : 0.1,
'colsample_bytree' : 0.7,
'subsample' : 1,
'gamma' : 0,
'eval_metric' : 'mae'}
def xgb_final(X_train, y_train, param, MODELDIR):
model = xgb.XGBRegressor(**param)
model.fit(X_train, y_train, verbose=True)
# directory for saving model
if os.path.exists(MODELDIR):
pass
else:
os.makedirs(MODELDIR)
model.save_model(os.path.join(MODELDIR, 'xgb-v1.model'))
return model
model = xgb_final(X_train, y_train, final_param, MODELDIR)
```
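The saved model can also be reloaded later without retraining. A minimal sketch, assuming a reasonably recent xgboost version:
```
# reload the persisted model and sanity-check it on the test set
loaded_model = xgb.XGBRegressor()
loaded_model.load_model(os.path.join(MODELDIR, 'xgb-v1.model'))
print(loaded_model.predict(X_test)[:5])
```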
## 5. Model evaluation
```
def model_evaluation(X_train, X_test, y_train, y_test):
# predict and tranform to original scale
y_train_pred = model.predict(X_train)
y_test_pred = model.predict(X_test)
# MAE and NRMSE calculation
train_rmse = np.sqrt(mean_squared_error(y_train, y_train_pred))
train_mae = mean_absolute_error(y_train, y_train_pred)
train_nrmse = train_rmse/np.std(y_train.values)
test_rmse = np.sqrt(mean_squared_error(y_test, y_test_pred))
test_mae = mean_absolute_error(y_test, y_test_pred)
test_nrmse = test_rmse/np.std(y_test.values)
print(f'Training MAE: {np.round(train_mae, 3)}')
    print(f'Training NRMSE: {np.round(train_nrmse, 3)}')
print()
    print(f'Test MAE: {np.round(test_mae, 3)}')
    print(f'Test NRMSE: {np.round(test_nrmse, 3)}')
return y_train_pred, y_test_pred
y_train_pred, y_test_pred = model_evaluation(X_train, X_test, y_train, y_test)
```
## 6. Result plotting
```
plt.plot(y_train.values, label='actual')
plt.plot(y_train_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on training data using XGBoost')
plt.legend()
plt.tight_layout()
plt.show()
plt.plot(y_test.values, label='actual')
plt.plot(y_test_pred, label='predicted')
plt.ylabel('count')
plt.xlabel('sample id')
plt.title('Actual vs Predicted on test data using XGBoost', fontsize=14)
plt.legend()
plt.tight_layout()
plt.show()
```
## 7. Variable importance
```
xgb.plot_importance(model)
plt.show()
```
|
github_jupyter
|
# Weather Data Collection
```
import pandas as pd
import numpy as np
from selenium import webdriver
import time
races = pd.read_csv('./data/races.csv')
races.head()
races.shape
weather = races.iloc[:,[0,1,2]]
info = []
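# For each race page, look for a 'Weather' row in the first few tables returned by
# pd.read_html; if none of them contains it, fall back to Selenium and read the value
# from the Italian-language version of the page. Failures are recorded as 'not found'.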
for link in races.url:
try:
df = pd.read_html(link)[0]
if 'Weather' in list(df.iloc[:,0]):
n = list(df.iloc[:,0]).index('Weather')
info.append(df.iloc[n,1])
else:
df = pd.read_html(link)[1]
if 'Weather' in list(df.iloc[:,0]):
n = list(df.iloc[:,0]).index('Weather')
info.append(df.iloc[n,1])
else:
df = pd.read_html(link)[2]
if 'Weather' in list(df.iloc[:,0]):
n = list(df.iloc[:,0]).index('Weather')
info.append(df.iloc[n,1])
else:
df = pd.read_html(link)[3]
if 'Weather' in list(df.iloc[:,0]):
n = list(df.iloc[:,0]).index('Weather')
info.append(df.iloc[n,1])
else:
driver = webdriver.Chrome()
driver.get(link)
# click language button
button = driver.find_element_by_link_text('Italiano')
button.click()
clima = driver.find_element_by_xpath('//*[@id="mw-content-text"]/div/table[1]/tbody/tr[9]/td').text
info.append(clima)
except:
info.append('not found')
len(info)
weather['weather'] = info
weather.head()
weather.tail()
weather_dict = {'weather_warm': ['soleggiato', 'clear', 'warm', 'hot', 'sunny', 'fine', 'mild', 'sereno'],
'weather_cold': ['cold', 'fresh', 'chilly', 'cool'],
'weather_dry': ['dry', 'asciutto'],
'weather_wet': ['showers', 'wet', 'rain', 'pioggia', 'damp', 'thunderstorms', 'rainy'],
'weather_cloudy': ['overcast', 'nuvoloso', 'clouds', 'cloudy', 'grey', 'coperto']}
weather_df = pd.DataFrame(columns = weather_dict.keys())
for col in weather_df:
weather_df[col] = weather['weather'].map(lambda x: 1 if any(i in weather_dict[col] for i in x.lower().split()) else 0)
weather_df.head()
weather_info = pd.concat([weather, weather_df], axis = 1)
weather_info.shape
weather_info.head()
weather_info.tail()
weather_info.to_csv('./data/weather.csv', index= False)
```
|
github_jupyter
|
# Temporal Congruency Experiments
```
from scripts.imports import *
from scripts.df_styles import df_highlighter
out = Exporter(paths['outdir'], 'clause')
# redefine df_sg to include adverbs
df_sg = df[df.n_times == 1]
df_sg.columns
```
# Tense Collocations with tokens
```
token_ct = df_sg.pivot_table(
index=['lex_token'],
columns='verbtense',
aggfunc='size',
fill_value=0,
)
# pare down to tokens and tenses that occur at least twice
token_ct = token_ct.loc[token_ct.index[token_ct.sum(1) >= 2]]
token_ct = token_ct[token_ct.columns[token_ct.sum(0) >= 2]]
# sorting
token_ct = token_ct.loc[token_ct.sum(1).sort_values(ascending=False).index]
token_ct
token_dp = sig.apply_deltaP(token_ct, 0, 1)
token_dp = token_dp.dropna()
token_dp.head()
token_fs, token_odds = sig.apply_fishers(token_ct, 0, 1)
token_fs
```
## PCA Analysis
```
vtense_pca, vtense_loadings = apply_pca(token_dp, 0, 1, components=4)
fig, ax = plt.subplots(figsize=(8, 8))
x, y = (vtense_pca['PC1'], vtense_pca['PC2'])
ax.scatter(x, y, s=15)
fig, ax = plt.subplots(figsize=(8, 8))
s = 70
x, y = (vtense_pca.iloc[:,0], vtense_pca.iloc[:,1])
ax.scatter(x, y, facecolor=[], s=2)
texts = []
for lex_tok in vtense_pca.index:
tx, ty = vtense_pca.loc[idx[lex_tok]][:2]
show_lex = get_display(lex_tok)
texts.append(plt.text(tx, ty, show_lex, size=12,
fontfamily='SBL Biblit'))
offsets = {}
top_loadings = vtense_loadings.abs().sum().sort_values(ascending=False).index[:8]
texts = []
for feature in top_loadings:
x_off, y_off, size = offsets.get(feature, (0,0,15)) # config offsets / size
fx, fy = vtense_loadings[feature][:2] * 2
plt.arrow(0, 0, fx, fy, color='#808080', linewidth=1, head_width=0)
show_text = get_display(feature) # handle bidirectional
texts.append(plt.text(fx+x_off, fy+y_off, show_text, color='#808080', size=size, fontfamily='SBL Biblit'))
out.plot('tense_PCA')
```
## PCA Analysis (with Fisher's)
```
vtense_pca2, vtense_loadings2 = apply_pca(token_fs, 0, 1, components=4)
fig, ax = plt.subplots(figsize=(8, 8))
x, y = (vtense_pca2['PC1'], vtense_pca2['PC2'])
ax.scatter(x, y, s=15)
fig, ax = plt.subplots(figsize=(8, 8))
s = 70
x, y = (vtense_pca2.iloc[:,0], vtense_pca2.iloc[:,1])
ax.scatter(x, y, facecolor=[], s=2)
texts = []
for lex_tok in vtense_pca2.index:
tx, ty = vtense_pca2.loc[idx[lex_tok]][:2]
show_lex = get_display(lex_tok)
texts.append(plt.text(tx, ty, show_lex, size=12,
fontfamily='SBL Biblit'))
#adjust_text(texts)
#out.plot('pca2_durVSloc_TENSE_text')
offsets = {}
top_loadings2 = vtense_loadings2.abs().sum().sort_values(ascending=False).index[:5]
texts = []
for feature in top_loadings2:
x_off, y_off, size = offsets.get(feature, (0,0,15)) # config offsets / size
fx, fy = vtense_loadings2[feature][:2] * 2
plt.arrow(0, 0, fx, fy, color='#808080', linewidth=1, head_width=0)
show_text = get_display(feature) # handle bidirectional
texts.append(plt.text(fx+x_off, fy+y_off, show_text, color='#808080', size=size, fontfamily='SBL Biblit'))
```
## With Demonstratives (generally)
```
df_sg.demon_type.value_counts()
df_sg.columns
demon_ct = df_sg[df_sg.DEMON == 1].pivot_table(
index=['front', 'demon_type'],
columns=['verbtense'],
aggfunc='size',
fill_value=0,
)
demon_ct
df_sg[
(df_sg.verbtense == 'PRES')
& (df_sg.demon_type == 'THAT')
][['verse', 'clause']]
```
## Tense + Verbform + Modifiers
```
modi_ct = df_sg.pivot_table(
index=['verbform', 'verbtense'],
values=['DEF', 'ORDN', 'QUANT', 'PL', 'NUM', 'DEMON', 'SFX', 'unmodified'],
aggfunc='sum',
fill_value=0,
)
modi_ct
modi_fs, modi_odds = sig.apply_fishers(modi_ct, 0, 1)
df_highlighter(modi_fs, rule='fishers')
```
# With Tagged Tenses
```
tense_advbs = df_sg[
(df_sg.is_advb == 1)
]
tense_advbs.shape
# get counts about which adverbs are being tagged as what
lex_tense_ct = tense_advbs.pivot_table(
index=['TA Heads'],
columns=['tense'],
aggfunc='size',
fill_value=0,
)
lex_tense_ct = lex_tense_ct.loc[lex_tense_ct.sum(1).sort_values(ascending=False).index]
lex_tense_ct
# first look at it with adverbs only
at_counts = tense_advbs.pivot_table(
index=['tense'],
columns=['verbform'],
aggfunc='size',
fill_value=0,
)
at_counts.drop('infc', axis=1, inplace=True)
# sort
at_counts = at_counts.loc[at_counts.sum(1).sort_values(ascending=False).index]
at_counts = at_counts[at_counts.sum().sort_values(ascending=False).index]
out.table(
at_counts,
'advb_tense_ct',
caption='Tense and Hebrew Verb Collocation Frequencies (adverbs)'
)
at_counts
at_pr = at_counts.div(at_counts.sum(1), 0).round(2)
out.table(
at_pr,
'advb_tense_pr',
caption='Tense and Hebrew Verb Collocation Proportions (adverbs)'
)
at_pr
```
#### Now look across non-adverbial versions
```
tense_np = df_sg[
(df_sg.is_advb == 0)
& (df_sg.function == 'simultaneous')
]
np_ct = df_sg[df_sg.is_advb == 0].pivot_table(
index=['tense'],
columns=['verbform'],
aggfunc='size',
fill_value=0,
)
np_ct.drop(['infc', 'infa'], axis=1, inplace=True)
# sort in accord with the adverb DF
np_ct = np_ct.loc[at_pr.index]
np_ct = np_ct[at_pr.columns]
out.table(
np_ct,
'np_tense_ct',
caption='Tense and Hebrew Verb Collocation Frequencies (NP-based adverbials)'
)
np_ct
np_pr = np_ct.div(np_ct.sum(1), 0).round(2)
out.table(
np_pr,
'np_tense_pr',
caption='Tense and Hebrew Verb Collocation Proportions (NP-based adverbials)'
)
np_pr
# how much more frequent is weqatal future than in adverb set?
wqtl_diff = np_pr.loc['FUT']['wqtl'] - at_pr.loc['FUT']['wqtl']
out.number(wqtl_diff*100, 'wqtl_diff')
wqtl_diff
out.number(
np_pr['wayq']['PAST']*100,
'NP_past_wayq_perc'
)
df_sg[
(df_sg.tense == 'FUT')
& (df_sg.verbform == 'qtl')
][['verse', 'clause']]
df_sg[
(df_sg.tense == 'PAST')
& (df_sg.verbform == 'wqtl')
][['verse', 'clause']]
df_sg[
(df_sg.tense == 'PAST')
& (df_sg.verbform == 'yqtl')
][['verse', 'clause']]
df_sg[
(df_sg.tense == 'PRES')
& (df_sg.verbform == 'wqtl')
][['verse', 'clause']]
df_sg[
(df_sg.tense == 'PAST')
& (df_sg.verbform == 'ptcp')
][['verse', 'clause']]
df_sg[
(df_sg.tense == 'FUT')
& (df_sg.verbform == 'ptcp')
][['verse', 'clause']]
fig, axs = plt.subplots(2, 3, figsize=(12, 8))
axs = axs.ravel()
tensenames = {'PRES':'Present', 'FUT': 'Future', 'PAST': 'Past'}
i = 0
for table, kind in ([at_pr, 'Adverb'], [np_pr, 'Adverbial']):
for tense in table.index:
ax = axs[i]
i += 1
data = table.loc[tense]
if kind == 'Adverb':
ct_data = at_counts.loc[tense]
else:
ct_data = np_ct.loc[tense]
tensename = tensenames[tense]
kwargs = {}
if tensename == 'Present' and kind == 'Adverbial':
tensename = '"Today"'
if kind == 'Adverbial':
kwargs['color'] = 'orange'
data.plot(ax=ax, kind='bar', edgecolor='black', **kwargs)
ax.set_xticklabels(ax.get_xticklabels(), rotation=0)
ax.set_title(f'Collocations with {tensename} Time {kind} (N={ct_data.sum()})', size=10)
ax.set_ylabel('proportion')
ax.set_ylim((0, 0.7))
ax.grid(True, axis='y')
ax.set_axisbelow(True)
fig.tight_layout()
out.plot(
'advb_np_prs'
)
```
|
github_jupyter
|
## APIs
Let's start by looking at [OMDb API](https://www.omdbapi.com/).
The OMDb API is a free web service to obtain movie information, all content and images on the site are contributed and maintained by users.
The Python package [urllib](https://docs.python.org/3/howto/urllib2.html) can be used to fetch resources from the internet.
OMDb tells us what kinds of requests we can make. We are going to do a title search. As you can see below, we have an additional parameter "&Season=1" which does not appear in the parameter tables. If you read through the change log, you will see it documented there.
Using the urllib and json packages allow us to call an API and store the results locally.
```
import json
import urllib.request
data = json.loads(urllib.request.urlopen('http://www.omdbapi.com/?t=Game%20of%20Thrones&Season=1').read().\
decode('utf8'))
```
What should we expect the type to be for the variable data?
```
print(type(data))
```
What do you think the data will look like?
```
data.keys()
data
```
We now have a dictionary object of our data. We can use python to manipulate it in a variety of ways. For example, we can print all the titles of the episodes.
```
for episode in data['Episodes']:
print(episode['Title'], episode['imdbRating'])
```
We can use pandas to convert the episode information to a dataframe.
```
import pandas as pd
df = pd.DataFrame.from_dict(data['Episodes'])
df
```
And, we can save our data locally to use later.
```
with open('tutorial_output/omdb_api_data.json', 'w') as f:
json.dump(data, f)
```
Let's try an API that requires an API key!
"The [Digital Public Library of America](https://dp.la/) brings together the riches of America’s libraries, archives, and museums, and makes them freely available to the world. It strives to contain the full breadth of human expression, from the written word, to works of art and culture, to records of America’s heritage, to the efforts and data of science."
And, they have an [API](https://dp.la/info/developers/codex/api-basics/).
In order to use the API, you need to [request a key](https://dp.la/info/developers/codex/policies/#get-a-key). You can do this with an HTTP POST request.
If you are using **OS X or Linux**, replace "[email protected]" in the cell below with your email address and execute the cell. This will send the request to DPLA and they will email your API key to the email address you provided. To successfully query the API, you must include the ?api_key= parameter with the 32-character hash following.
```
# execute this on OS X or Linux by removing '#' on the next line and executing the cell
#! curl -v -XPOST http://api.dp.la/v2/api_key/[email protected]
```
If you are on **Windows 7 or 10**, [open PowerShell](http://www.tenforums.com/tutorials/25581-windows-powershell-open-windows-10-a.html). Replace "[email protected]" in the cell below with your email address. Copy the code and paste it at the command prompt in PowerShell. This will send the request to DPLA and they will email your API key to the email address you provided. To successfully query the API, you must include the ?api_key= parameter with the 32-character hash following.
```
#execute this on Windows by running the line below, without the leading '#', in PowerShell
#Invoke-WebRequest -Uri ("http://api.dp.la/v2/api_key/[email protected]") -Method POST -Verbose -usebasicparsing
```
You will get a response similar to what is shown below and will receive an email fairly quickly from DPLA with your key.
shell-init: error retrieving current directory: getcwd: cannot access parent directories: No such file or directory
* Trying 52.2.169.251...
* Connected to api.dp.la (52.2.169.251) port 80 (#0)
> POST /v2/api_key/[email protected] HTTP/1.1
> Host: api.dp.la
> User-Agent: curl/7.43.0
> Accept: */*
>
< HTTP/1.1 201 Created
< Access-Control-Allow-Origin: *
< Cache-Control: max-age=0, private, must-revalidate
< Content-Type: application/json; charset=utf-8
< Date: Thu, 20 Oct 2016 20:53:24 GMT
< ETag: "8b66d9fe7ded79e3151d5a22f0580d99"
< Server: nginx/1.1.19
< Status: 201 Created
< X-Request-Id: d61618751a376452ac3540b3157dcf48
< X-Runtime: 0.179920
< X-UA-Compatible: IE=Edge,chrome=1
< Content-Length: 89
< Connection: keep-alive
<
* Connection #0 to host api.dp.la left intact
{"message":"API key created and sent via email. Be sure to check your Spam folder, too."}
It is good practice not to put your keys in your code. You can store them in a file and read them in from there. If you are pushing your code to GitHub, make sure you put your key files in .gitignore.
I created a file on my drive called "config_secret.json". The contents of the file look like this:
{
"api_key" : "my api key here"
}
I can then write code to read the information in.
A template called config_secret_template.json has been provided for you to add your keys to.
```
with open("./dpla_config_secret.json") as key_file:
key = json.load(key_file)
key
```
Then, when I create my API query, I can use a variable in place of my actual key.
The Requests library allows us to build urls with different parameters. You build the parameters as a dictionary that contains key/value pairs for everything after the '?' in your url.
```
import requests
# we are specifying our url and parameters here as variables
url = 'http://api.dp.la/v2/items/'
params = {'api_key' : key['api_key'], 'q' : 'goats+AND+cats'}
# we are creating a response object, r
r = requests.get(url, params=params)
type(r)
# we can look at the url that was created by requests with our specified variables
r.url
# we can check the status code of our request
r.status_code
```
[HTTP Status Codes](http://www.restapitutorial.com/httpstatuscodes.html)
```
# we can look at the content of our request
print(r.content)
```
By default, DPLA returns 10 items at a time. We can see from the count value that our query has 29 results. DPLA does give us a parameter we can set to change this and get up to 500 items at a time.
```
params = {'api_key' : key['api_key'], 'q' : 'goats+AND+cats', 'page_size': 500}
r = requests.get(url, params=params)
print(r.content)
```
If we were working with an API that limited us to only 10 items at a time, we could write a loop to pull our data.
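As a sketch (this assumes the same `url` and `key` variables, the `count`/`docs` fields seen in the responses above, and a `page` parameter for paging; adjust these for whatever API you are actually using), such a loop might look like this:
```
# sketch: request results 10 at a time and advance the page parameter until done
all_docs = []
page = 1
while True:
    params = {'api_key': key['api_key'], 'q': 'goats+AND+cats',
              'page_size': 10, 'page': page}
    r = requests.get(url, params=params)
    result = r.json()
    all_docs.extend(result['docs'])
    if len(all_docs) >= result['count']:   # stop once every item has been collected
        break
    page += 1
len(all_docs)
```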
The file [seeclickfix_api.py](./seeclickfix_api.py) in the api folder of this repo is an example of how you can pull multiple pages of data from an API. It uses the [SeeClickFix API](http://dev.seeclickfix.com/). "[SeeClickFix](https://seeclickfix.com/) allows you to play an integral role in public services — routing neighborhood concerns like potholes and light outages to the right official with the right information."
|
github_jupyter
|
CWPK \#34: A Python Module, Part II: Packaging and The Structure Extractor
=======================================
Moving from Notebook to Package Proved Perplexing
--------------------------
<div style="float: left; width: 305px; margin-right: 10px;">
<img src="http://kbpedia.org/cwpk-files/cooking-with-kbpedia-305.png" title="Cooking with KBpedia" width="305" />
</div>
This installment of the [*Cooking with Python and KBpedia*](https://www.mkbergman.com/cooking-with-python-and-kbpedia/) series is the second of a three-part mini-series on writing and packaging a formal Python project. The previous installment described a [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself) (don't repeat yourself) approach to how to generalize our annotation extraction routine. This installment describes how to transition that code from [Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Jupyter_Notebook) interactive code to a formally organized Python package. We also extend our generalized approach to the structure extractor.
In this installment I am working with the notebook and the [Spyder](https://en.wikipedia.org/wiki/Spyder_(software)) IDE in tandem. The notebook is the source of the initial prototype code. It is also the testbed for seeing if the package may be imported and is working properly. We use Spyder for all of the final code development, including moving into functions and classes and organizing by files. We also start to learn some of its [IDE](https://en.wikipedia.org/wiki/Integrated_development_environment) features, such as auto-complete, which is a nice way to test questions about namespaces and local and global variables.
As noted in earlier installments, a Python 'module' is a single script file (in the form of <code>my_file.py</code>) that itself may contain multiple functions, variable declarations, class (object) definitions, and the like, kept in this single file because of their related functionality. A 'package' in Python is a directory with at least one module and (generally) a standard <code>\_\_init\_\_.py</code> file that informs Python a package is available and its name. Python packages and modules are named with lower case. A package name is best when short and without underscores. A module may use underscores to better convey its purpose, such as <code>do_something.py</code>.
For our project based on _Cooking with Python and KBpedia_ (**CWPK**), we will pick up on this acronym and name our project '*cowpoke*'. The functional module we are starting the project with is <code>extract.py</code>, the module for the extraction routines we have been developing over the past few installments.
### Perplexing Questions
While it is true the Python organization has some thorough tutorials, referenced in the concluding **Additional Documentation**, I found it surprisingly difficult to figure out how to move my [Jupyter Notebook](https://en.wikipedia.org/wiki/Project_Jupyter#Jupyter_Notebook) prototypes to a packaged [Python](https://en.wikipedia.org/wiki/Python_(programming_language)) program. I could see that logical modules (single Python scripts, <code>\*.py</code>) made sense, and that there were going to be shared functions across those modules. I could also see that I wanted to use a standard set of variable descriptions in order to specify 'record-like' inputs to the routines. My hope was to segregate all of the input information required for a new major exercise of *cowpoke* into the editing of a single file. That would make configuring a new run a simple process.
I read and tried many tutorials trying to figure out an architecture and design for this packaging. I found the tutorials helpful at a broad, structural level of what goes into a package and how to refer and import other parts, but the nuances of where and how to use classes and functions and how to best share some variables and specifications across modules remained opaque to me. Here are some of the questions and answers I needed to discover before I could make progress:
***1. Where do I put the files to be seen by the notebook and the project?***
After installing Python and setting up the environment noted in installments [**CWPK #9**](https://www.mkbergman.com/2336/cwpk-9-installing-python/) - [**#11**](https://www.mkbergman.com/2338/cwpk-11-installing-a-python-ide/) you should have many packages already on your system, including for Spyder and Jupyter Notebook. There are at least two listings of full packages in different locations. To re-discover what your Python paths are, Run this cell:
```
import sys
print(sys.path)
```
You want to find the site packages directory under your Python library (mine is <code>C:\\1-PythonProjects\\Python\\lib\\site-packages</code>). We will define the '*cowpoke*' directory under this parent and also point our Spyder project to it. (**NB:** Of course, you can locate your package directory anywhere you want, but you would need to add that location to your path as well, and later configuration steps may also require customization.)
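If you do choose a different location, you would add it to the search path before importing. A minimal sketch (the directory name here is hypothetical):
```
import sys
sys.path.append(r'C:\1-PythonProjects\my-packages')   # hypothetical alternate package location
print(r'C:\1-PythonProjects\my-packages' in sys.path)
```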
***2. What is the role of class and defined variables?***
I know the major functions I have been prototyping, such as the annotation extractor from the last [**CWPK #33**](https://www.mkbergman.com/2370/cwpk-33-a-python-package-part-i-the-annotation-extractor/) installment, need to be formalized as a defined function (the <code>def *function_name*</code> statement). Going into this packaging, however, it is not clear to me whether I should package multiple function definitions under one class (some tutorials seem to so suggest) or where and how I need to declare variables such as *loop* that are part of a run configuration.
One advantage of putting both variables and functions under a single class is that they can be handled as a unit. On the other hand, having a separate class of only input variables seems to be the best arrangement for a record orientation (see next question #4). In practice, I chose to embrace both types.
***3. What is the role of <code>self</code> and where to introduce or use?***
The question of the role of <code>self</code> perplexed me for some time. On the one hand, <code>self</code> is not a reserved keyword in Python, but it is used frequently by convention. Class variables come in two flavors. One flavor is when the variable value is universal to all instances of class. Every instance of this class will share the same value for this variable. It is declared simply after first defining the class and outside of any methods:
<pre>variable = my_variable</pre>
In contrast, instance variables, which is where <code>self</code> is used, are variables with values specific to each instance of class. The values of one instance typically vary from the values of another instance. Class instance variables should be declared within a method, often with this kind of form, as this example from the **Additional Documentation** shows:
<pre>
class SomeClass:
    variable_1 = "This is a class variable"
variable_2 = 100 #this is also a class variable.
def __init__(self, param1, param2):
        self.instance_var1 = param1
        #instance_var1 is an instance variable
        self.instance_var2 = param2
        #instance_var2 is an instance variable
</pre>
In this recipe, we are assigning <code>self</code> by convention to the first parameter of the function (method). We can then access the values of the instance variable as declared in the definition via the <code>self</code> convention, also without the need to pass additional arguments or parameters, making for simpler use and declarations. (**NB:** You may name this first parameter something other than <code>self</code>, but that is likely confusing since it goes against the convention.)
Importantly, we may use this same approach to assign <code>self</code> as the first parameter of instance methods, in addition to instance variables. For either instance variables or methods, Python passes the current instance (<code>self</code>) as the first argument of the call, along with any other arguments you supply.
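As a minimal sketch (the class and method names here are hypothetical, not part of <i>cowpoke</i>), an instance method declared with <code>self</code> can read the instance variables set in <code>\_\_init\_\_</code> without any extra arguments at the call site:
```
class ExtractRun:
    def __init__(self, out_file):
        self.out_file = out_file          # instance variable, unique to each instance

    def describe(self):                   # 'self' receives the current instance
        return f'writing output to {self.out_file}'

run = ExtractRun('prop_annot_out.csv')
print(run.describe())                     # Python passes 'run' as 'self' automatically
```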
At any rate, for our interest of being able to pass variable assignments from a separate <code>config.py</code> file to a local extraction routine, the approach using the universal class variable is the right form. But, is it the best form?
***4. What is the best practice for initializing a record?***
If one stands back and thinks about what we are trying to do with our annotation extraction routine (as with other build or extraction steps), we see that we are trying to set a number of key parameters for what data we use and what branches we take during the routine. These parameters are, in effect, keywords used in the routines, the specific values of which (sources of data, what to loop over, etc.) vary by the specific instance of the extraction or build run we are currently invoking. This set-up sounds very much like a kind of 'record' format where we have certain method fields (such as output file or source of the looping data) that vary by run. This is equivalent to a *key:value* pair. In other words, we can treat our configuration specification as the input to a given run of the annotation extractor as a dictionary (<code>dict</code>) as we discussed in the last installment. The <code>dict</code> form looks to be the best form for our objective. We'll see this use below.
***5. What are the special privileges about <code>\_\_main\_\_.py</code>?***
Another thing I saw while reading the background tutorials was reference to a more-or-less standard <code>\_\_main\_\_.py</code> file. However, in looking at many of the packages installed in my current Python installation, I saw that this construct is by no means universally used, though some packages do use it. Should I be using this format or not?
For two reasons my general desire was to remove this file. The first reason is that this file can be confused with the <code>\_\_main\_\_</code> module. The second is that I could find no clear guidance about best practices for the file except to keep it simple. That seemed to me thin gruel for keeping something I did not fully understand and found confusing. So, I initially decided not to use this form.
However, I found things broke when I tried to remove it. I assume with greater knowledge or more experience I might find the compelling recipe for simplifying this file away. But, it is easier to keep it and move on rather than get stuck on a question not central to our project.
***6. What is the best practice for arranging internal imports across a project?***
I think one of the reasons I did not see a simple answer to the above question is the fact I have not yet fully understood the relationships between global and local variables and module functions and inheritance, all of which require a sort of grokking, I suppose, of namespaces.
I plan to continue to return to these questions as I learn more with subsequent installments and code development. If I encounter new insights or better ways to do things, my current intent is to return to any prior installments, leave the existing text as is, and then add annotations as to what I learned. If you have not seen any of these notices by now, I guess I have not later discovered better approaches. (**Note**: I think I began to get a better understanding about namespaces on the return leg of our build 'roundtrip', roughly about **CWPK #40** from now, but I still have questions, even from that later vantage point.)
### New File Definitions
As one may imagine, the transition from notebook to module package has resulted in some changes to the code. The first change, of course, was to split the code into the starting pieces, including adding the <code>\_\_init\_\_.py</code> that signals the available *cowpoke* package. Here is the new file structure:
<pre>
|-- PythonProject
|-- Python
|-- [Anaconda3 distribution]
|-- Lib
|-- site-packages # location to store files
|-- alot
|-- cowpoke # new project directory
|-- __init__.py # four new files here
|-- __main__.py
|-- config.py
|-- extract.py
|-- TBA
|-- TBA
</pre>
At the top of each file we place our import statements, including references to other modules within the *cowpoke* project. Here is the statement at the top of <code>\_\_init\_\_.py</code> (which also includes some package identification boilerplate):
<pre>
from cowpoke.__main__ import *
from cowpoke.config import *
from cowpoke.extract import *
</pre>
I should note that the asterisk (\*) character above tells the system to import all objects within the file, a practice that is generally not encouraged, though it is common. It is discouraged because of the number of objects brought into the current working space, which may cause name conflicts or burden the system for larger projects. However, since our system is quite small and I do not foresee unmanageable namespace complexity, I use this simpler shorthand.
Our <code>\_\_main\_\_.py</code> contains the standard start-up script that we have recently been using for many installments. You can see this code and the entire file by Running the next cell (assuming you have been following this entire **CWPK** series and have stored earlier distribution files):
<div style="background-color:#eee; border:1px dotted #aaa; vertical-align:middle; margin:15px 60px; padding:8px;"><strong>Which environment?</strong> The specific load routine you should choose below depends on whether you are using the online MyBinder service (the 'raw' version) or local files. The example below is based on using local files (though replace with your own local directory specification). If loading from MyBinder, replace with the lines that are commented (<code>#</code>) out.</div>
```
with open(r'C:\1-PythonProjects\Python\Lib\site-packages\cowpoke\__main__.py', 'r') as f:
print(f.read())
```
(**NB:** Remember the '<code>r</code>' switch on the file name is to treat the string as 'raw'.)
We move our dictionary definitions to the <code>config.py</code>. Go ahead and inspect it in the next cell, but realize that much has been added to this file due to subsequent coding steps in our project installments:
```
with open(r'C:\1-PythonProjects\Python\Lib\site-packages\cowpoke\config.py', 'r') as f:
print(f.read())
```
We already had the class and property dictionaries as presented in the [**CWPK #33**](https://www.mkbergman.com/2370/cwpk-33-a-python-package-part-i-the-annotation-extractor/) installment. The key change of note for <code>config.py</code>, which, remember, is where we enter the run specifications for a new run (build or extract) of the code, was to pull out our specifications for the annotation extractor. This new dictionary, the <code>extract_deck</code>, is expanded later to embrace other run parameters for additional functions. At the time of this initial set-up, however, the dictionary contained these relatively few entries:
<pre>
# This is the dictionary for the specifications of each
# extraction run; what is its run deck.
extract_deck = {
'property_loop' : '',
'class_loop' : '',
'loop' : 'property_loop',
'loop_list' : prop_dict.values(),
'out_file' : 'C:/1-PythonProjects/kbpedia/sandbox/prop_annot_out.csv',
}
</pre>
These are the values passed to the new annotation extraction function, <code>def annot_extractor</code>, now migrated to the <code>extract.py</code> module. Here is the commented code block (which will not run on its own as a cell):
```
def annot_extractor(**extract_deck): # define the method here, see note
print('Beginning annotation extraction . . .')
loop_list = extract_deck.get('loop_list') # notice we are passing run_deck to current vars
loop = extract_deck.get('loop')
out_file = extract_deck.get('out_file')
class_loop = extract_deck.get('class_loop')
property_loop = extract_deck.get('property_loop')
a_dom = ''
a_rng = ''
a_func = ''
""" These are internal counters used in this module's methods """
p_set = ''
x = 1
cur_list = []
with open(out_file, mode='w', encoding='utf8', newline='') as output:
csv_out = csv.writer(output)
... # remainder of code as prior installment . . .
```
**Note:** Normally, a function definition is followed by its arguments in parentheses. The special notation of the double asterisks (\*\*) signals to expect a variable list of keywords (more often in tutorials shown as '<code>\*\*kwargs</code>'), which is how we make the connection to the values of the keys in the <code>extract_deck</code> dictionary. We retrieve these values based on the <code>.get()</code> method shown in the next assignments. Note, as well, that positional arguments can also be treated in a similar way using the single asterisk (<code>\*</code>) notation ('<code>\*args</code>').
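As a small illustrative sketch (the names here are hypothetical, not part of <i>cowpoke</i>), this is how a keyword dictionary gets unpacked into a function and read back out with <code>.get()</code>:
```
def show_run(**deck):
    out_file = deck.get('out_file')       # retrieve values by key
    loop = deck.get('loop')
    print('out_file:', out_file, '| loop:', loop)

demo_deck = {'out_file': 'prop_annot_out.csv', 'loop': 'property_loop'}
show_run(**demo_deck)                     # ** unpacks the dict into keyword arguments
```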
At the command line or in an interactive notebook, we can run <code>annot_extractor</code> with the following call:
<pre>
import cowpoke
cowpoke.annot_extractor(**cowpoke.extract_deck)
</pre>
We are not calling it here given that your local <code>config.py</code> is not set up with the proper configuration parameters for this specific example.
These efforts complete our initial set-up on the Python *cowpoke* package.
### Generalizing and Moving the Structure Extractor
You may want to relate the modified code in this section to the last state of our structure extraction routine, shown as the last code cell in [**CWPK #32**](https://www.mkbergman.com/2368/cwpk-32-iterating-over-a-full-extraction/).
We took that code, applied the generalization approaches discussed earlier, and added a <code>set.union</code> method for getting the unique list from a very large list of large sets. This approach using sets (which can be hashed) sped up what had been a linear lookup by about 10x. We also moved the general parameters to share the same <code>extract_deck</code> dictionary.
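As a quick sketch of why this matters, building the unique membership via hashed sets avoids repeated linear scans of a growing list:
```
# deduplicating with set.union instead of checking 'if item not in list' each time
batches = [[1, 2, 3], [2, 3, 4], [3, 4, 5]]
s_set = set()
for batch in batches:
    s_set = s_set.union(set(batch))       # set lookups are hashed, so this stays fast
print(sorted(s_set))                      # [1, 2, 3, 4, 5]
```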
We made the same accommodations for processing properties v classes (and typologies). We wrapped the resulting code block into a defined function wrapper, similar for what we did for annotations, only now for (is-a) structure:
```
from owlready2 import *
from cowpoke.config import *
from cowpoke.__main__ import *
import csv
import types
world = World()
kko = []
kb = []
rc = []
core = []
skos = []
kb_src = master_deck.get('kb_src') # we get the build setting from config.py
if kb_src is None:
kb_src = 'standard'
if kb_src == 'sandbox':
kbpedia = 'C:/1-PythonProjects/kbpedia/sandbox/kbpedia_reference_concepts.owl'
kko_file = 'C:/1-PythonProjects/kbpedia/sandbox/kko.owl'
elif kb_src == 'standard':
kbpedia = 'C:/1-PythonProjects/kbpedia/v300/targets/ontologies/kbpedia_reference_concepts.owl'
kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kko.owl'
elif kb_src == 'extract':
kbpedia = 'C:/1-PythonProjects/kbpedia/v300/build_ins/ontologies/kbpedia_reference_concepts.owl'
kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/ontologies/kko.owl'
elif kb_src == 'full':
kbpedia = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kbpedia_rc_stub.owl'
kko_file = 'C:/1-PythonProjects/kbpedia/v300/build_ins/stubs/kko.owl'
else:
print('You have entered an inaccurate source parameter for the build.')
skos_file = 'http://www.w3.org/2004/02/skos/core'
kb = world.get_ontology(kbpedia).load()
rc = kb.get_namespace('http://kbpedia.org/kko/rc/')
skos = world.get_ontology(skos_file).load()
kb.imported_ontologies.append(skos)
core = world.get_namespace('http://www.w3.org/2004/02/skos/core#')
kko = world.get_ontology(kko_file).load()
kb.imported_ontologies.append(kko)
kko = kb.get_namespace('http://kbpedia.org/ontologies/kko#')
def struct_extractor(**extract_deck):
print('Beginning structure extraction . . .')
loop_list = extract_deck.get('loop_list')
loop = extract_deck.get('loop')
out_file = extract_deck.get('out_file')
class_loop = extract_deck.get('class_loop')
property_loop = extract_deck.get('property_loop')
x = 1
cur_list = []
a_set = []
s_set = []
# r_default = '' # Series of variables needed later
# r_label = '' #
# r_iri = '' #
# render = '' #
new_class = 'owl:Thing'
with open(out_file, mode='w', encoding='utf8', newline='') as output:
csv_out = csv.writer(output)
if loop == class_loop:
header = ['id', 'subClassOf', 'parent']
p_item = 'rdfs:subClassOf'
else:
header = ['id', 'subPropertyOf', 'parent']
p_item = 'rdfs:subPropertyOf'
csv_out.writerow(header)
for value in loop_list:
print(' . . . processing', value)
root = eval(value)
a_set = root.descendants()
a_set = set(a_set)
s_set = a_set.union(s_set)
print(' . . . processing consolidated set.')
for s_item in s_set:
o_set = s_item.is_a
for o_item in o_set:
row_out = (s_item,p_item,o_item)
csv_out.writerow(row_out)
if loop == class_loop:
if s_item not in cur_list:
row_out = (s_item,p_item,new_class)
csv_out.writerow(row_out)
cur_list.append(s_item)
x = x + 1
print('Total rows written to file:', x)
struct_extractor(**extract_deck)
```
Again, since we can not guarantee the operating circumstance, you can try this on your own instance with the command:
<pre>
cowpoke.struct_extractor(**cowpoke.extract_deck)
</pre>
Note we're using a prefixed *cowpoke* function to make the generic dictionary request. All we need to do before the run is to go to the <code>config.py</code> file, and make the value (right-hand side) changes to the <code>extract_deck</code> dictionary. Save the file, make sure your current notebook instance has been cleared, and enter the command above.
There aren't any commercial-grade checks here to make sure you are not inadvertently overwriting a desired file. Loose code and routines such as what we are developing in this **CWPK** series warrant making frequent backups, and scrutinizing your <code>config.py</code> assignments before kicking off a run.
### Additional Documentation
Here are additional guides resulting from the research in today's installation:
- Python's [Class and Instance Variable](https://docs.python.org/3/tutorial/classes.html#class-and-instance-variables) documentation
- [Understanding self in Python](https://medium.com/quick-code/understanding-self-in-python-a3704319e5f0)
- PythonTips' [The self variable in python explained](https://pythontips.com/2013/08/07/the-self-variable-in-python-explained/)
- DEV's [class v instance variables](https://dev.to/ogwurujohnson/distinguishing-instance-variables-from-class-variables-in-python-81)
- Programiz' [self in Python, Demystified](https://www.programiz.com/article/python-self-why)
- StackOverflow's [What is \_\_main\_\_.py?](https://stackoverflow.com/questions/4042905/what-is-main-py)
- See StackOverflow for a nice example of the advantage of [using sets to find unique items](https://stackoverflow.com/questions/12897374/get-unique-values-from-a-list-in-python) in a listing.
<div style="background-color:#efefff; border:1px dotted #ceceff; vertical-align:middle; margin:15px 60px; padding:8px;">
<span style="font-weight: bold;">NOTE:</span> This article is part of the <a href="https://www.mkbergman.com/cooking-with-python-and-kbpedia/" style="font-style: italic;">Cooking with Python and KBpedia</a> series. See the <a href="https://www.mkbergman.com/cooking-with-python-and-kbpedia/"><strong>CWPK</strong> listing</a> for other articles in the series. <a href="http://kbpedia.org/">KBpedia</a> has its own Web site.
</div>
<div style="background-color:#ebf8e2; border:1px dotted #71c837; vertical-align:middle; margin:15px 60px; padding:8px;">
<span style="font-weight: bold;">NOTE:</span> This <strong>CWPK
installment</strong> is available both as an online interactive
file <a href="https://mybinder.org/v2/gh/Cognonto/CWPK/master" ><img src="https://mybinder.org/badge_logo.svg" style="display:inline-block; vertical-align: middle;" /></a> or as a <a href="https://github.com/Cognonto/CWPK" title="CWPK notebook" alt="CWPK notebook">direct download</a> to use locally. Make sure and pick the correct installment number. For the online interactive option, pick the <code>*.ipynb</code> file. It may take a bit of time for the interactive option to load.</div>
<div style="background-color:#feeedc; border:1px dotted #f7941d; vertical-align:middle; margin:15px 60px; padding:8px;">
<div style="float: left; margin-right: 5px;"><img src="http://kbpedia.org/cwpk-files/warning.png" title="Caution!" width="32" /></div>I am at best an amateur with Python. There are likely more efficient methods for coding these steps than what I provide. I encourage you to experiment -- which is part of the fun of Python -- and to <a href="mailto:[email protected]">notify me</a> should you make improvements.
</div>
|
github_jupyter
|
```
############## PLEASE RUN THIS CELL FIRST! ###################
# import everything and define a test runner function
from importlib import reload
from helper import run
import ecc, helper, tx, script
# Signing Example
from ecc import G, N
from helper import hash256
secret = 1800555555518005555555
z = int.from_bytes(hash256(b'ECDSA is awesome!'), 'big')
k = 12345
r = (k*G).x.num
s = (z+r*secret) * pow(k, -1, N) % N
print(hex(z), hex(r), hex(s))
print(secret*G)
# Verification Example
from ecc import S256Point, G, N
z = 0xbc62d4b80d9e36da29c16c5d4d9f11731f36052c72401a76c23c0fb5a9b74423
r = 0x37206a0610995c58074999cb9767b87af4c4978db68c06e8e6e81d282047a7c6
s = 0x8ca63759c1157ebeaec0d03cecca119fc9a75bf8e6d0fa65c841c8e2738cdaec
point = S256Point(0x04519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574,
0x82b51eab8c27c66e26c858a079bcdf4f1ada34cec420cafc7eac1a42216fb6c4)
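# ECDSA verification: u = z/s and v = r/s (mod N); the signature is valid if (u*G + v*point).x == r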
u = z * pow(s, -1, N) % N
v = r * pow(s, -1, N) % N
print((u*G + v*point).x.num == r)
```
### Exercise 1
Which sigs are valid?
```
P = (887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c,
61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34)
z, r, s = ec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60,
ac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395,
68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4
z, r, s = 7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d,
eff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c,
c7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6
```
```
# Exercise 1
from ecc import S256Point, G, N
px = 0x887387e452b8eacc4acfde10d9aaf7f6d9a0f975aabb10d006e4da568744d06c
py = 0x61de6d95231cd89026e286df3b6ae4a894a3378e393e93a0f45b666329a0ae34
signatures = (
# (z, r, s)
(0xec208baa0fc1c19f708a9ca96fdeff3ac3f230bb4a7ba4aede4942ad003c0f60,
0xac8d1c87e51d0d441be8b3dd5b05c8795b48875dffe00b7ffcfac23010d3a395,
0x68342ceff8935ededd102dd876ffd6ba72d6a427a3edb13d26eb0781cb423c4),
(0x7c076ff316692a3d7eb3c3bb0f8b1488cf72e1afcd929e29307032997a838a3d,
0xeff69ef2b1bd93a66ed5219add4fb51e11a840f404876325a1e8ffe0529a2c,
0xc7207fee197d27c618aea621406f6bf5ef6fca38681d82b2f06fddbdce6feab6),
)
# initialize the public point
# use: S256Point(x-coordinate, y-coordinate)
point = S256Point(px, py)
# iterate over signatures
for z, r, s in signatures:
# u = z / s, v = r / s
u = z * pow(s, -1, N) % N
v = r * pow(s, -1, N) % N
# finally, uG+vP should have the x-coordinate equal to r
print((u*G+v*point).x.num == r)
```
### Exercise 2
#### Make [this test](/edit/session3/ecc.py) pass: `ecc.py:S256Test:test_verify`
```
# Exercise 2
reload(ecc)
run(ecc.S256Test('test_verify'))
```
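The verification math above maps almost line-for-line onto the method this test expects; one possible sketch of the logic (assuming the `Signature` object exposes `r` and `s` attributes; the graded version belongs in `ecc.py` as a method on `S256Point`):
```
from ecc import S256Point, G, N
# One possible sketch of the verify logic (illustration only)
def verify(point, z, sig):
    s_inv = pow(sig.s, -1, N)       # 1/s mod N
    u = z * s_inv % N               # u = z/s
    v = sig.r * s_inv % N           # v = r/s
    return (u*G + v*point).x.num == sig.r
```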
### Exercise 3
#### Make [this test](/edit/session3/ecc.py) pass: `ecc.py:PrivateKeyTest:test_sign`
```
# Exercise 3
reload(ecc)
run(ecc.PrivateKeyTest('test_sign'))
```
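Similarly, the signing example at the top of the notebook suggests the shape of the signing code; a rough sketch (assumptions: the private key stores its secret, and `Signature(r, s)` exists; production code should derive `k` deterministically per RFC 6979 rather than with `randint`):
```
from random import randint
from ecc import G, N, Signature
# Rough sketch of the signing logic (illustration only; ecc.py holds the graded version)
def sign(secret, z):
    k = randint(1, N - 1)                  # a repeated or predictable nonce leaks the secret
    r = (k*G).x.num
    s = (z + r*secret) * pow(k, -1, N) % N
    return Signature(r, s)
```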
### Exercise 4
Verify the DER signature for the hash of "ECDSA is awesome!" for the given SEC pubkey
`z = int.from_bytes(hash256(b'ECDSA is awesome!'), 'big')`
Public Key in SEC Format:
0204519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574
Signature in DER Format: 304402201f62993ee03fca342fcb45929993fa6ee885e00ddad8de154f268d98f083991402201e1ca12ad140c04e0e022c38f7ce31da426b8009d02832f0b44f39a6b178b7a1
```
# Exercise 4
from ecc import S256Point, Signature
from helper import hash256
der = bytes.fromhex('304402201f62993ee03fca342fcb45929993fa6ee885e00ddad8de154f268d98f083991402201e1ca12ad140c04e0e022c38f7ce31da426b8009d02832f0b44f39a6b178b7a1')
sec = bytes.fromhex('0204519fac3d910ca7e7138f7013706f619fa8f033e6ec6e09370ea38cee6a7574')
# message is the hash256 of the message "ECDSA is awesome!"
z = int.from_bytes(hash256(b'ECDSA is awesome!'), 'big')
# parse the der format to get the signature
sig = Signature.parse(der)
# parse the sec format to get the public key
point = S256Point.parse(sec)
# use the verify method on S256Point to validate the signature
print(point.verify(z, sig))
```
### Exercise 5
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_version`
```
# Exercise 5
reload(tx)
run(tx.TxTest('test_parse_version'))
```
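For orientation, the version is just the first four bytes of the serialized transaction, read as a little-endian integer; a standalone sketch (my own helper, not the library's `Tx.parse`):
```
from io import BytesIO
# Sketch: the version field is the first 4 bytes, little-endian
def parse_version(stream):
    return int.from_bytes(stream.read(4), 'little')
print(parse_version(BytesIO(bytes.fromhex('01000000'))))  # 1
```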
### Exercise 6
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_inputs`
```
# Exercise 6
reload(tx)
run(tx.TxTest('test_parse_inputs'))
```
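Parsing the inputs starts with a varint that says how many inputs follow; a minimal sketch of Bitcoin's varint encoding (the session's `helper` module presumably ships an equivalent `read_varint`):
```
from io import BytesIO
# Sketch: Bitcoin varint is a single byte, or a 0xfd/0xfe/0xff marker followed by 2/4/8 little-endian bytes
def read_varint(stream):
    i = stream.read(1)[0]
    if i == 0xfd:
        return int.from_bytes(stream.read(2), 'little')
    elif i == 0xfe:
        return int.from_bytes(stream.read(4), 'little')
    elif i == 0xff:
        return int.from_bytes(stream.read(8), 'little')
    return i
print(read_varint(BytesIO(bytes.fromhex('fd0401'))))  # 260
```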
### Exercise 7
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_outputs`
```
# Exercise 7
reload(tx)
run(tx.TxTest('test_parse_outputs'))
```
### Exercise 8
#### Make [this test](/edit/session3/tx.py) pass: `tx.py:TxTest:test_parse_locktime`
```
# Exercise 8
reload(tx)
run(tx.TxTest('test_parse_locktime'))
```
### Exercise 9
What is the scriptSig from the second input in this tx? What is the scriptPubKey and amount of the first output in this tx? What is the amount for the second output?
```
010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600
```
```
# Exercise 9
from io import BytesIO
from tx import Tx
hex_transaction = '010000000456919960ac691763688d3d3bcea9ad6ecaf875df5339e148a1fc61c6ed7a069e010000006a47304402204585bcdef85e6b1c6af5c2669d4830ff86e42dd205c0e089bc2a821657e951c002201024a10366077f87d6bce1f7100ad8cfa8a064b39d4e8fe4ea13a7b71aa8180f012102f0da57e85eec2934a82a585ea337ce2f4998b50ae699dd79f5880e253dafafb7feffffffeb8f51f4038dc17e6313cf831d4f02281c2a468bde0fafd37f1bf882729e7fd3000000006a47304402207899531a52d59a6de200179928ca900254a36b8dff8bb75f5f5d71b1cdc26125022008b422690b8461cb52c3cc30330b23d574351872b7c361e9aae3649071c1a7160121035d5c93d9ac96881f19ba1f686f15f009ded7c62efe85a872e6a19b43c15a2937feffffff567bf40595119d1bb8a3037c356efd56170b64cbcc160fb028fa10704b45d775000000006a47304402204c7c7818424c7f7911da6cddc59655a70af1cb5eaf17c69dadbfc74ffa0b662f02207599e08bc8023693ad4e9527dc42c34210f7a7d1d1ddfc8492b654a11e7620a0012102158b46fbdff65d0172b7989aec8850aa0dae49abfb84c81ae6e5b251a58ace5cfeffffffd63a5e6c16e620f86f375925b21cabaf736c779f88fd04dcad51d26690f7f345010000006a47304402200633ea0d3314bea0d95b3cd8dadb2ef79ea8331ffe1e61f762c0f6daea0fabde022029f23b3e9c30f080446150b23852028751635dcee2be669c2a1686a4b5edf304012103ffd6f4a67e94aba353a00882e563ff2722eb4cff0ad6006e86ee20dfe7520d55feffffff0251430f00000000001976a914ab0c0b2e98b1ab6dbf67d4750b0a56244948a87988ac005a6202000000001976a9143c82d7df364eb6c75be8c80df2b3eda8db57397088ac46430600'
# bytes.fromhex to get the binary representation
bin_transaction = bytes.fromhex(hex_transaction)
# create a stream using BytesIO()
stream = BytesIO(bin_transaction)
# Tx.parse() the stream
tx_obj = Tx.parse(stream)
# print tx's second input's scriptSig
print(tx_obj.tx_ins[1].script_sig)
# print tx's first output's scriptPubKey
print(tx_obj.tx_outs[0].script_pubkey)
# print tx's second output's amount
print(tx_obj.tx_outs[1].amount)
```
|
github_jupyter
|
# Advanced Matplotlib Concepts Lecture
In this lecture we cover some more advanced topics which you won't usually use as often. You can always reference the documentation for more resources!
### Logarithmic Scale
* It is also possible to set a logarithmic scale for one or both axes. This functionality is in fact only one application of a more general transformation system in Matplotlib. Each axis scale is set separately using the `set_xscale` and `set_yscale` methods, which accept one parameter (the value "log" in this case):
```
import matplotlib.pyplot as plt
import matplotlib as mp
%matplotlib inline
import numpy as np
x = np.linspace(0,5,11) # We go from 0 to 5 and grab 11 points which are linearly spaced.
y = x ** 2
fig, axes = plt.subplots(1, 2, figsize=(10,4))
axes[0].plot(x, x**2, x, np.exp(x))
axes[0].set_title("Normal scale")
axes[1].plot(x, x**2, x, np.exp(x))
axes[1].set_yscale("log")
axes[1].set_title("Logarithmic scale (y)");
```
### Placement of ticks and custom tick labels
* We can explicitly determine where we want the axis ticks with `set_xticks` and `set_yticks`, which both take a list of values for where on the axis the ticks are to be placed. We can also use the `set_xticklabels` and `set_yticklabels` methods to provide a list of custom text labels for each tick location:
```
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, x**2, x, x**3, lw=2)
ax.set_xticks([1, 2, 3, 4, 5])
ax.set_xticklabels([r'$\alpha$', r'$\beta$', r'$\gamma$', r'$\delta$', r'$\epsilon$'], fontsize=18)
yticks = [0, 50, 100, 150]
ax.set_yticks(yticks)
ax.set_yticklabels(["$%.1f$" % y for y in yticks], fontsize=18); # use LaTeX formatted labels
```
There are a number of more advanced methods for controlling major and minor tick placement in matplotlib figures, such as automatic placement according to different policies. See http://matplotlib.org/api/ticker_api.html for details.
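For example, the locator classes in `matplotlib.ticker` place ticks according to a policy rather than an explicit list (a small sketch; `MultipleLocator`, `AutoMinorLocator`, and `MaxNLocator` are standard locators in that module):
```
from matplotlib import ticker
fig, ax = plt.subplots(figsize=(10, 4))
ax.plot(x, x**2, x, x**3, lw=2)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1.0))  # a major tick every 1.0 data units
ax.xaxis.set_minor_locator(ticker.AutoMinorLocator(4))   # 4 minor intervals per major interval
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))        # at most ~5 'nice' major ticks
```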
#### Scientific notation
With large numbers on axes, it is often better to use scientific notation:
```
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_title("scientific notation")
ax.set_yticks([0, 50, 100, 150])
from matplotlib import ticker
formatter = ticker.ScalarFormatter(useMathText=True)
formatter.set_scientific(True)
formatter.set_powerlimits((-1,1))
ax.yaxis.set_major_formatter(formatter)
```
## Axis number and axis label spacing
```
# distance between x and y axis and the numbers on the axes
mp.rcParams['xtick.major.pad'] = 5
mp.rcParams['ytick.major.pad'] = 5
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("label and axis spacing")
# padding between axis label and axis numbers
ax.xaxis.labelpad = 5
ax.yaxis.labelpad = 5
ax.set_xlabel("x")
ax.set_ylabel("y")
# restore defaults
mp.rcParams['xtick.major.pad'] = 3
mp.rcParams['ytick.major.pad'] = 3
```
#### Axis position adjustments
Unfortunately, when saving figures the labels are sometimes clipped, and it can be necessary to adjust the positions of axes a little bit. This can be done using `subplots_adjust`:
```
fig, ax = plt.subplots(1, 1)
ax.plot(x, x**2, x, np.exp(x))
ax.set_yticks([0, 50, 100, 150])
ax.set_title("title")
ax.set_xlabel("x")
ax.set_ylabel("y")
fig.subplots_adjust(left=0.15, right=.9, bottom=0.1, top=0.9);
```
### Axis grid
With the `grid` method in the axis object, we can turn on and off grid lines. We can also customize the appearance of the grid lines using the same keyword arguments as the `plot` function:
```
fig, axes = plt.subplots(1, 2, figsize=(10,3))
# default grid appearance
axes[0].plot(x, x**2, x, x**3, lw=2)
axes[0].grid(True)
# custom grid appearance
axes[1].plot(x, x**2, x, x**3, lw=2)
axes[1].grid(color='b', alpha=0.5, linestyle='dashed', linewidth=0.5)
```
### Axis spines
* We can also change the properties of axis spines:
```
fig, ax = plt.subplots(figsize=(6,2))
ax.spines['bottom'].set_color('blue')
ax.spines['top'].set_color('blue')
ax.spines['left'].set_color('red')
ax.spines['left'].set_linewidth(2)
# turn off axis spine to the right
ax.spines['right'].set_color("none")
ax.yaxis.tick_left() # only ticks on the left side
```
### Twin axes
Sometimes it is useful to have dual x or y axes in a figure; for example, when plotting curves with different units together. Matplotlib supports this with the `twinx` and `twiny` functions:
```
fig, ax1 = plt.subplots()
ax1.plot(x, x**2, lw=2, color="blue")
ax1.set_ylabel(r"area $(m^2)$", fontsize=18, color="blue")
for label in ax1.get_yticklabels():
label.set_color("blue")
ax2 = ax1.twinx()
ax2.plot(x, x**3, lw=2, color="red")
ax2.set_ylabel(r"volume $(m^3)$", fontsize=18, color="red")
for label in ax2.get_yticklabels():
label.set_color("red")
```
### Axes where x and y is zero
```
fig, ax = plt.subplots()
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.spines['bottom'].set_position(('data',0)) # set position of x spine to x=0
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data',0)) # set position of y spine to y=0
xx = np.linspace(-0.75, 1., 100)
ax.plot(xx, xx**3);
```
## Other 2D plot styles
In addition to the regular `plot` method, there are a number of other functions for generating different kinds of plots. See the matplotlib plot gallery for a complete list of available plot types: http://matplotlib.org/gallery.html. Some of the more useful ones are shown below:
```
n = np.array([0,1,2,3,4,5])
fig, axes = plt.subplots(1, 4, figsize=(12,3))
axes[0].scatter(xx, xx + 0.25*np.random.randn(len(xx)))
axes[0].set_title("scatter")
axes[1].step(n, n**2, lw=2)
axes[1].set_title("step")
axes[2].bar(n, n**2, align="center", width=0.5, alpha=0.5)
axes[2].set_title("bar")
axes[3].fill_between(x, x**2, x**3, color="green", alpha=0.5);
axes[3].set_title("fill_between");
```
### Text annotation
* Annotating text in matplotlib figures can be done using the `text` function. It supports LaTeX formatting just like axis label texts and titles:
```
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
ax.text(0.15, 0.2, r"$y=x^2$", fontsize=20, color="blue")
ax.text(0.65, 0.1, r"$y=x^3$", fontsize=20, color="green");
```
### Figures with multiple subplots and insets
* Axes can be added to a matplotlib Figure canvas manually using `fig.add_axes` or using a sub-figure layout manager such as `subplots`, `subplot2grid`, or `gridspec`:
#### subplots
```
fig,ax = plt.subplots(2,3)
fig.tight_layout()
```
### subplot2grid
```
fig = plt.figure()
ax1 = plt.subplot2grid((3,3), (0,0), colspan=3)
ax2 = plt.subplot2grid((3,3), (1,0), colspan=2)
ax3 = plt.subplot2grid((3,3), (1,2), rowspan=2)
ax4 = plt.subplot2grid((3,3), (2,0))
ax5 = plt.subplot2grid((3,3), (2,1))
fig.tight_layout()
```
## gridspec
```
import matplotlib.gridspec as gridspec
fig = plt.figure()
gs = gridspec.GridSpec(2, 3, height_ratios=[2,1], width_ratios=[1,2,1])
for g in gs:
ax = fig.add_subplot(g)
fig.tight_layout()
```
### add axes
* Manually adding axes with `add_axes` is useful for adding insets to figures:
```
fig, ax = plt.subplots()
ax.plot(xx, xx**2, xx, xx**3)
fig.tight_layout()
# inset
inset_ax = fig.add_axes([0.2, 0.55, 0.35, 0.35]) # X, Y, width, height
inset_ax.plot(xx, xx**2, xx, xx**3)
inset_ax.set_title('zoom near origin')
# set axis range
inset_ax.set_xlim(-.2, .2)
inset_ax.set_ylim(-.005, .01)
# set axis tick locations
inset_ax.set_yticks([0, 0.005, 0.01])
inset_ax.set_xticks([-0.1,0,.1]);
```
### Colormap and contour figures
* Colormaps and contour figures are useful for plotting functions of two variables. In most of these functions we will use a colormap to encode one dimension of the data. There are a number of predefined colormaps. It is relatively straightforward to define custom colormaps. For a list of pre-defined colormaps, see: http://www.scipy.org/Cookbook/Matplotlib/Show_colormaps
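Defining a custom colormap can be as simple as interpolating between a few named colors (a quick sketch using `LinearSegmentedColormap.from_list`; not part of the original lecture code):
```
from matplotlib.colors import LinearSegmentedColormap
# custom colormap interpolating white -> steel blue -> dark red
custom_cmap = LinearSegmentedColormap.from_list("white_blue_red",
                                                ["white", "steelblue", "darkred"])
```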
```
alpha = 0.7
phi_ext = 2 * np.pi * 0.5
def flux_qubit_potential(phi_m, phi_p):
return 2 + alpha - 2 * np.cos(phi_p) * np.cos(phi_m) - alpha * np.cos(phi_ext - 2*phi_p)
phi_m = np.linspace(0, 2*np.pi, 100)
phi_p = np.linspace(0, 2*np.pi, 100)
X,Y = np.meshgrid(phi_p, phi_m)
Z = flux_qubit_potential(X, Y).T
```
#### pcolor
```
fig, ax = plt.subplots()
p = ax.pcolor(X/(2*np.pi), Y/(2*np.pi), Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max())
cb = fig.colorbar(p, ax=ax)
```
#### imshow
```
fig, ax = plt.subplots()
im = ax.imshow(Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
im.set_interpolation('bilinear')
cb = fig.colorbar(im, ax=ax)
```
## Contour
```
fig, ax = plt.subplots()
cnt = ax.contour(Z, cmap=mp.cm.RdBu, vmin=abs(Z).min(), vmax=abs(Z).max(), extent=[0, 1, 0, 1])
```
## 3D figures
* To use 3D graphics in matplotlib, we first need to create an instance of the `Axes3D` class. 3D axes can be added to a matplotlib figure canvas in exactly the same way as 2D axes; or, more conveniently, by passing a `projection='3d'` keyword argument to the `add_axes` or `add_subplot` methods.
```
from mpl_toolkits.mplot3d.axes3d import Axes3D
```
#### Surface plots
```
fig = plt.figure(figsize=(14,6))
# `ax` is a 3D-aware axis instance because of the projection='3d' keyword argument to add_subplot
ax = fig.add_subplot(1, 2, 1, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=4, cstride=4, linewidth=0)
# surface_plot with color grading and color bar
ax = fig.add_subplot(1, 2, 2, projection='3d')
p = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=mp.cm.coolwarm, linewidth=0, antialiased=False)
cb = fig.colorbar(p, shrink=0.5)
```
## Wire-frame plot
```
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1, 1, 1, projection='3d')
p = ax.plot_wireframe(X, Y, Z, rstride=4, cstride=4,color='teal')
```
#### Contour plots with projections
```
fig = plt.figure(figsize=(8,6))
ax = fig.add_subplot(1,1,1, projection='3d')
ax.plot_surface(X, Y, Z, rstride=4, cstride=4, alpha=0.25)
cset = ax.contour(X, Y, Z, zdir='z', offset=-np.pi, cmap=mp.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='x', offset=-np.pi, cmap=mp.cm.coolwarm)
cset = ax.contour(X, Y, Z, zdir='y', offset=3*np.pi, cmap=mp.cm.coolwarm)
ax.set_xlim3d(-np.pi, 2*np.pi);
ax.set_ylim3d(0, 3*np.pi);
ax.set_zlim3d(-np.pi, 2*np.pi);
```
## FURTHER READING :
* http://www.matplotlib.org - The project web page for matplotlib.
* https://github.com/matplotlib/matplotlib - The source code for matplotlib.
* http://matplotlib.org/gallery.html - A large gallery showcasing various types of plots matplotlib can create. Highly recommended!
* http://www.loria.fr/~rougier/teaching/matplotlib - A good matplotlib tutorial.
* http://scipy-lectures.github.io/matplotlib/matplotlib.html - Another good matplotlib reference.
|
github_jupyter
|
# Practice Exercise: Exploring data (Exploratory Data Analysis)
## Context:
- The data includes 120 years (1896 to 2016) of Olympic games with information about athletes and medal results.
- We'll focus on practicing the summary statistics and data visualization techniques that we've learned in the course.
- In general, this dataset is popular to explore how the Olympics have evolved over time, including the participation and performance of different genders, different countries, in various sports and events.
- Check out the original source if you are interested in using this data for other purposes (https://www.kaggle.com/heesoo37/120-years-of-olympic-history-athletes-and-results)
## Dataset Description:
We'll work on the data within athlete_events.csv.
Each row corresponds to an individual athlete competing in an individual Olympic event.
The columns are:
- **ID**: Unique number for each athlete
- **Name**: Athlete's name
- **Sex**: M or F
- **Age**: Integer
- **Height**: In centimeters
- **Weight**: In kilograms
- **Team**: Team name
- **NOC**: National Olympic Committee 3-letter code
- **Games**: Year and season
- **Year**: Integer
- **Season**: Summer or Winter
- **City**: Host city
- **Sport**: Sport
- **Event**: Event
- **Medal**: Gold, Silver, Bronze, or NA
## Objective:
- Examine/clean the dataset
- Explore distributions of single numerical and categorical features via statistics and plots
- Explore relationships of multiple features via statistics and plots
We are only going to explore part of the dataset, please feel free to explore more if you are interested.
### 1. Import the libraries `Pandas` and `Seaborn`
```
import pandas as pd
import seaborn as sns
```
### 2. Import the data from the csv file as DataFrame `olympics`
```
olympics = pd.read_csv('athlete_events.csv')
```
### 3. Look at the info summary, head of the DataFrame
```
olympics.info()
olympics.head()
```
### 4. Impute the missing data
#### Use `IterativeImputer` in `sklearn` to impute based on columns `Year`, `Age`, `Height`, `Weight`
##### Import libraries
```
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
```
##### Build a list of columns that will be used for imputation, which are `Year`, `Age`, `Height`, `Weight`
The column `Year` doesn't have missing values, but we include it since it might be helpful for modeling the other three columns: age, height, and weight can change across years.
```
cols_to_impute = ['Year', 'Age', 'Height', 'Weight']
```
##### Create an `IterativeImputer` object and set its `min_value` and `max_value` parameters to be the minimum and maximum of the corresponding columns
```
iter_imp = IterativeImputer(min_value=olympics[cols_to_impute].min(), max_value=olympics[cols_to_impute].max())
```
##### Apply the imputer to fit and transform the columns to an imputed NumPy array
```
imputed_cols = iter_imp.fit_transform(olympics[cols_to_impute])
```
##### Assign the imputed array back to the original DataFrame's columns
```
olympics[cols_to_impute] = imputed_cols
```
#### Fill the missing values in the column `Medal` with string of 'NA'
```
olympics['Medal'] = olympics['Medal'].fillna('NA')
```
#### Double check that the columns are all imputed
```
olympics.isna().sum()
```
### 5. Use the `describe` method to check the numerical columns
```
olympics.describe()
```
### 6. Plot the histograms of the numerical columns using `Pandas`
```
olympics.hist(figsize=(15, 10))
```
Notice that there could be outliers for `Age`, `Weight`, `Height`. But we'll only focus on `Age`.
### 7. Plot the histogram with a rug plot of the column `Age` using `Seaborn`, with both 20 and 50 bins
```
sns.displot(data=olympics, x='Age', bins=20, rug=True)
sns.displot(data=olympics, x='Age', bins=50, rug=True)
```
Notice how the apparent distribution of `Age` changes slightly when the number of bins changes.
### 8. Plot the boxplot of the column `Age` using `Pandas`
```
olympics['Age'].plot(kind='box')
```
### 9. Plot the boxplot of the column `Age` using `Seaborn`
```
sns.catplot(data=olympics, y='Age', kind='box')
```
### 10. Calculate the first quartile, third quartile, and IQR of the column `Age`
```
Q1 = olympics['Age'].quantile(0.25)
Q3 = olympics['Age'].quantile(0.75)
IQR = Q3 - Q1
print(Q1)
print(Q3)
print(IQR)
```
### 11. Print out the lower and upper thresholds for outliers based on IQR for the column `Age`
```
print(f'Low age outlier threshold: {Q1 - 1.5*IQR}')
print(f'High age outlier threshold: {Q3 + 1.5*IQR}')
```
### 12. What are the `Sport` for the athletes of really young age
#### Filter for the column `Sport` when the column `Age` has outliers of lower values
```
msk_lower = (olympics['Age'] < (Q1 - 1.5*IQR))
olympics.loc[msk_lower,'Sport']
```
#### Look at the unique values of `Sport` and their counts when `Age` are low-valued outliers
Did you find any sports popular for really young athletes?
```
olympics.loc[msk_lower,'Sport'].value_counts()
```
There are specific sports with very young athletes, e.g., Swimming and Figure Skating.
### 13. What are the `Sport` for the athletes of older age
#### Filter for the column `Sport` when the column `Age` has outliers of higher values
```
msk_upper = (olympics['Age'] > (Q3 + 1.5*IQR))
olympics.loc[msk_upper,'Sport']
```
#### Look at the unique values of `Sport` and their counts when `Age` are high-valued outliers
Did you find any sports popular for older age athletes?
```
olympics.loc[msk_upper,'Sport'].value_counts()
```
There are specific sports that are popular among older athletes. They tend to rely more on skill and experience than on raw physical effort.
### 14. Check for the number of unique values in each column
```
olympics.nunique()
```
The Olympics is a large event! There are many distinct `Name`, `Team`, `NOC`, `Games`, `Year`, `City`, `Sport`, and `Event` values!
### 15. Use the `describe` method to check the non-numerical columns
```
olympics.describe(exclude='number')
```
### 16. Apply the `value_counts` method for each non-numerical column, check for their unique values and counts
```
cat_cols = olympics.select_dtypes(exclude='number').columns
cat_cols
for col in cat_cols:
print(olympics[col].value_counts())
print()
```
### 17. Check the first record within the dataset for each Olympic `Sport`
*Hint: sort the DataFrame by `Year`, then groupby by `Sport`*
```
olympics.sort_values('Year').groupby('Sport').first()
```
### 18. What are the average `Age`, `Height`, `Weight` of female versus male Olympic athletes
```
olympics.groupby('Sex')[['Age','Height','Weight']].mean()
```
### 19. What are the minimum, average, maximum `Age`, `Height`, `Weight` of athletes in different `Year`
```
olympics.groupby('Year')[['Age','Height','Weight']].agg(['min', 'mean', 'max'])
```
### 20. What are the minimum, average, median, maximum `Age` of athletes for different `Season` and `Sex` combinations
```
olympics.groupby(['Season', 'Sex'])['Age'].agg(['min', 'mean', 'median', 'max'])
```
### 21. What are the average `Age` of athletes, and numbers of unique `Team`, `Sport`, `Event`, for different `Season` and `Sex` combinations
```
olympics.groupby(['Season', 'Sex']).agg({'Age': 'mean', 'Team': 'nunique', 'Sport': 'nunique', 'Event': 'nunique'})
```
### 22. What are the average `Age`, `Height`, `Weight` of athletes, for different `Medal`, `Season`, `Sex` combinations
```
olympics.groupby(['Medal', 'Season', 'Sex'])[['Age', 'Height', 'Weight']].mean()
```
### 23. Plot the scatterplot of `Height` and `Weight`
```
sns.relplot(data=olympics, x='Height', y='Weight', kind='scatter')
```
### 24. Plot the scatterplot of `Height` and `Weight`, using different colors and styles of dots for different `Sex`
```
sns.relplot(data=olympics, x='Height', y='Weight', hue='Sex', style='Sex')
```
### 25. Plot the pairwise relationships of `Age`, `Height`, `Weight`
```
sns.pairplot(olympics[['Age', 'Height', 'Weight']])
```
### 26. Plot the pairwise relationships of `Age`, `Height`, `Weight`, with different colors for `Sex`
```
sns.pairplot(olympics[['Age', 'Height', 'Weight', 'Sex']], hue='Sex')
```
### 27. Print out the correlation matrix of `Age`, `Height`, `Weight`
```
olympics[['Age', 'Height', 'Weight']].corr()
```
Notice the strong positive relationship between `Height` and `Weight`, which is intuitive.
### 28. Use heatmap to demonstrate the correlation matrix of `Age`, `Height`, `Weight`, use a colormap (`cmap`) of 'crest'
```
sns.heatmap(olympics[['Age', 'Height', 'Weight']].corr(), cmap='crest')
```
### 29. Plot the histograms of `Age`, with different colors for different `Sex`
```
sns.displot(data=olympics, x='Age', hue='Sex', aspect=2)
```
### 30. Plot the histograms of `Age`, on separate plots for different `Sex`
```
sns.displot(data=olympics, x='Age', col='Sex', aspect=2)
```
### 31. Look at the changes of average `Age` across `Year` by line charts, with separate lines for different `Season` using different colors
```
sns.relplot(data=olympics, x='Year', y='Age', hue='Season', kind='line', aspect=2)
```
### 32. Look at the distributions of `Age` for different `Sex` using boxplots
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='box')
```
### 33. Look at the distributions of `Age` for different `Sex` using violin plots
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='violin')
```
### 34. Look at the distributions of `Age` for different `Sex` using boxplots, with different colors of plots for different `Season`
```
sns.catplot(data=olympics, x='Sex', y='Age', kind='box', hue='Season')
```
### 35. Use count plots to look at the changes of number of athlete-events across `Year`, for different `Sex` by colors, and different `Season` on separate plots
```
sns.catplot(data=olympics, x='Year', hue='Sex', kind='count', col='Season', col_wrap=1, aspect=4)
```
Notice the clear increase in female athlete-events at the Olympics across the years.
|
github_jupyter
|
```
# hide
%load_ext nb_black
# default_exp clients
from will_it_saturate.clients import BaseClient
from will_it_saturate.registry import register_model
# export
import os
import math
import time
import httpx
import asyncio
import aiohttp
import subprocess
from pathlib import Path
from datetime import datetime
from multiprocessing import Pool
from multiprocessing import set_start_method
# from will_it_saturate.old_core import Benchmark
from will_it_saturate.servers import BaseServer
# os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
# set_start_method("fork")
# print(os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"])
```
## Caveats
On macOS, increase the open file limit with:
```
ulimit -n 2048
```
before starting the FastAPI server with:
```
uvicorn will_it_saturate.fastapi.main:app --reload
```
It's not really possible to test the forked client from this notebook. I don't know why. It works in the 03_run_benchmark script. Here I have to use `set_start_method("fork")` and other ugly stuff; a sketch of that workaround is below.
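For reference, roughly what that workaround looks like on macOS (a sketch based on the commented-out lines in the import cell above; the environment variable is macOS-specific and must be set before any worker processes start):
```
import os
from multiprocessing import set_start_method
# macOS only: disable the Objective-C fork-safety check that kills forked workers
os.environ["OBJC_DISABLE_INITIALIZE_FORK_SAFETY"] = "YES"
try:
    set_start_method("fork")  # multiprocessing defaults to "spawn" on macOS
except RuntimeError:
    pass  # the start method can only be set once per process
```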
```
# dont_test
byte = 8
gigabit = 10 ** 9
bandwidth = gigabit / byte
# file_sizes = [10 ** 7, 10 ** 6]
file_sizes = [10 ** 7, 10 ** 6, 10 ** 5]
# file_sizes = [10 ** 7]
# benchmark = Benchmark(
# bandwidth=bandwidth,
# duration=3,
# file_sizes=file_sizes,
# )
# benchmark.create_epochs()
# export
# just here because of broken nbdev confusing lua with python
counter = 0
request = None
@register_model
class HttpxClient(BaseClient):
async def measure_server(self, epoch):
print("measure server")
print(epoch.urls[0])
max_connections = min(epoch.number_of_connections, 50)
print("max_connections: ", max_connections)
# max_connections = 10
limits = httpx.Limits(
max_keepalive_connections=10, max_connections=max_connections
)
timeout = httpx.Timeout(30.0, connect=60.0)
start = time.perf_counter()
async with httpx.AsyncClient(limits=limits, timeout=timeout) as client:
responses = await asyncio.gather(*[client.get(url) for url in epoch.urls])
elapsed = time.perf_counter() - start
print("done: ", elapsed)
print("responses status: ", responses[0].status_code)
return elapsed, responses
def measure_in_new_process(self, epoch):
print("new process")
elapsed, responses = asyncio.run(self.measure_server(epoch))
self.verify_checksums(epoch, responses)
return elapsed
def measure(self, epoch):
print("measure")
with Pool(1) as p:
[result] = p.map(self.measure_in_new_process, [epoch])
return result
# def run_httpx():
# byte = 8
# gigabit = 10 ** 9
# bandwidth = gigabit / byte
#
# # file_sizes = [10 ** 7, 10 ** 6]
# # file_sizes = [10 ** 7, 10 ** 6, 10 ** 5]
# file_sizes = [10 ** 7]
#
# benchmark = Benchmark(
# bandwidth=bandwidth,
# duration=3,
# file_sizes=file_sizes,
# servers=[BenchmarkServer(name="uvicorn")],
# clients=[HttpxClient(name="httpx")],
# )
# benchmark.create_rows()
# benchmark.run()
# print(benchmark.results_frame)
# export
import sys
import typer
from will_it_saturate.hosts import Host
from will_it_saturate.epochs import Epoch
from will_it_saturate.servers import BaseServer
from will_it_saturate.control.client import ControlClient
def run_httpx_with_args(exponent: int):
print("running httpx")
typer.echo(f"exponent {exponent}")
control_server_port, server_port = 8100, 5100
server_host_name = "192.168.178.113"
server = BaseServer(host=server_host_name, port=server_port)
server_control_host = Host(name=server_host_name, port=control_server_port)
server_control_client = ControlClient(host=server_control_host)
epoch = Epoch(file_size=10 ** exponent, duration=10)
epoch.files = server_control_client.get_or_create_files(epoch)
epoch.create_urls_from_files(server)
benchmark_client = HttpxClient(name="httpx", host=server_host_name, port=server_port)
elapsed = benchmark_client.measure(epoch)
print(f"elapsed: {elapsed}")
def run_httpx():
typer.run(run_httpx_with_args)
# dont_test
# client = HttpxClient()
# elapsed, responses = await client.measure_server(benchmark.epochs[0])
# print(elapsed)
```
## aiohttp
```
# export
class AioHttpResponse:
def __init__(self, url, content, started, stopped):
self.url = url
self.content = content
self.started = started
self.stopped = stopped
@register_model
class AioHttpClient(BaseClient):
timestamps = []
def set_timestamps(self, responses):
for response in responses:
self.timestamps.append((response.started, response.stopped))
async def fetch_page(self, session, url):
async with session.get(url) as response:
started = datetime.now()
content = await response.read()
stopped = datetime.now()
return AioHttpResponse(url, content, started, stopped)
async def measure_server(self, epoch):
print("measure server")
print(epoch.urls[0])
urls = epoch.urls
max_connections = min(epoch.number_of_connections, 200)
conn = aiohttp.TCPConnector(limit=max_connections)
responses = []
start = time.perf_counter()
async with aiohttp.ClientSession(connector=conn) as session:
tasks = [asyncio.create_task(self.fetch_page(session, url)) for url in urls]
responses = await asyncio.gather(*tasks)
elapsed = time.perf_counter() - start
return elapsed, responses
def measure_in_new_process(self, epoch):
elapsed, responses = asyncio.run(self.measure_server(epoch))
self.verify_checksums(epoch, responses)
self.set_timestamps(responses)
print("timestamps: ", len(self.timestamps))
return elapsed, self.timestamps
def measure(self, epoch):
with Pool(1) as p:
            [(result, timestamps)] = p.map(self.measure_in_new_process, [epoch])
return result, timestamps
# dont_test
client = AioHttpClient()
elapsed, responses = await client.measure_server(benchmark.epochs[0])
print(elapsed)
```
## wrk
```
# export
@register_model
class WrkClient(BaseClient):
connections: int = 20
# set duration to two minutes since it is 10 seconds by default and kills the benchmark
duration: int = 120
threads: int = 1
host: str = "localhost"
port: str = "8000"
def create_urls_string(self, epoch):
urls = []
for bf in epoch.files:
urls.append(f' {{path = "/{bf.path}"}},')
return "\n".join(urls)
def create_lua_script(self, epoch):
requests_head = "requests = {"
requests_tail = "}"
lua_body = """
print(requests[1])
if #requests <= 0 then
print("multiplerequests: No requests found.")
os.exit()
end
print("multiplerequests: Found " .. #requests .. " requests")
counter = 1
request = function()
-- Get the next requests array element
local request_object = requests[counter]
-- Increment the counter
counter = counter + 1
-- If the counter is longer than the requests array length -> stop and exit
if counter > #requests then
wrk.thread:stop()
os.exit()
end
-- Return the request object with the current URL path
return wrk.format(request_object.method, request_object.path, request_object.headers, request_object.body)
end
"""
urls = self.create_urls_string(epoch)
lua = "\n".join([requests_head, urls, requests_tail, lua_body])
with Path(f"wrk.lua").open("w") as f:
f.write(lua)
def run_wrk(self):
kwargs = {"capture_output": True, "text": True}
start = time.perf_counter()
command = [
"wrk",
"-d",
str(self.duration),
"-c",
str(self.connections),
"-t",
str(self.threads),
"-s",
"wrk.lua",
f"http://{self.host}:{self.port}",
]
print("command: ", " ".join(command))
output = subprocess.run(
command,
**kwargs,
)
elapsed = time.perf_counter() - start
return elapsed
def measure(self, epoch):
print("measure? wtf?")
self.create_lua_script(epoch)
elapsed = self.run_wrk()
return elapsed
```
## Wrk CLI command
```shell
time wrk -d 30 -c 20 -t 1 -s wrk.lua http://staging.wersdoerfer.de:5001
```
```
%%time
# dont_test
kwargs = {"capture_output": True, "text": True}
output = subprocess.run(["wrk", "-c20", "-t1", "-d2", "-s", "wrk.lua", "http://localhost:8000"], **kwargs)
# output = subprocess.run(["wrk", "-d2", "http://localhost:8000"], **kwargs)
# dont_test
print(output.stdout)
# dont_test
client = WrkClient()
elapsed = client.measure(benchmark.epochs[0])
print(elapsed)
# hide
# dont_test
from nbdev.export import notebook2script
notebook2script()
```
|
github_jupyter
|
##### Copyright 2020 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/io/tutorials/genome"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">View source on GitHub</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/io/tutorials/genome.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード/a0}</a></td>
</table>
## Overview
This tutorial goes over the <code>tfio.genome</code> package, which provides commonly used genomics IO functionality, namely reading several genomics file formats and providing some common operations for preparing the data (for example, one-hot encoding or parsing Phred quality scores into probabilities).
This package uses the [Google Nucleus](https://github.com/google/nucleus) library to provide some of its core functionality.
## Setup
```
try:
%tensorflow_version 2.x
except Exception:
pass
!pip install tensorflow-io
import tensorflow_io as tfio
import tensorflow as tf
```
## FASTQ data
FASTQ is a common genomics file format that stores sequence information in addition to per-base quality information.
First, download a sample `fastq` file.
```
# Download some sample data:
!curl -OL https://raw.githubusercontent.com/tensorflow/io/master/tests/test_genome/test.fastq
```
### Read FASTQ data
Use `tfio.genome.read_fastq` to read this file (note: a `tf.data` API is coming soon).
```
fastq_data = tfio.genome.read_fastq(filename="test.fastq")
print(fastq_data.sequences)
print(fastq_data.raw_quality)
```
As you can see, the returned `fastq_data` has `fastq_data.sequences`, a string tensor of all of the sequences in the fastq file (each of which can be a different size), along with `fastq_data.raw_quality`, which contains Phred-encoded quality information about the quality of each base read in the sequence.
### Quality
If you are interested, you can use a helper op to convert this quality information into probabilities.
```
quality = tfio.genome.phred_sequences_to_probability(fastq_data.raw_quality)
print(quality.shape)
print(quality.row_lengths().numpy())
print(quality)
```
### One-hot encoding
You may also want to encode the genome sequence data (which consists of the bases `A`, `T`, `C`, and `G`) using a one-hot encoder. There is a built-in op that can help with this.
```
print(tfio.genome.sequences_to_onehot.__doc__)
```
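A possible usage sketch (an assumption on my part that the op accepts the string tensor returned by `read_fastq` directly):
```
onehot = tfio.genome.sequences_to_onehot(fastq_data.sequences)
print(onehot)
```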
|
github_jupyter
|
```
import netCDF4
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import numpy as np
import sys
import math
import os
import glob
import pandas
import re
from scipy.interpolate import griddata
%matplotlib inline
plt.rcParams["figure.figsize"] = (10,6)
plt.rcParams.update({'font.size': 20})
data_path = "/path/netcdf/"
fname = "20200515.ssp585.TEST_SSP585_DEBUG.ne30_oECv3_ICG.grizzly.cam.h2.2015-01-01-00000.nc"
def print_data_info(data):
# Print some data info
###############################
print (data.variables.keys())
print (data)
for d in data.dimensions.items():
print (d)
## http://schubert.atmos.colostate.edu/~cslocum/netcdf_example.html
print (data.data_model)
nc_attrs = data.ncattrs()
for nc_attr in nc_attrs:
print ('\t%s:' % nc_attr, repr(data.getncattr(nc_attr)))
print ("NetCDF dimension information:")
nc_dims = [dim for dim in data.dimensions] # list of nc dimensions
for dim in nc_dims:
print ("\tName:", dim)
print ("\t\tsize:", len(data.dimensions[dim]))
nc_vars = [var for var in data.variables] # list of nc variables
print ("NetCDF variable information:")
for var in nc_vars:
if var not in nc_dims:
print ('\tName:', var)
print ("\t\tdimensions:", data.variables[var].dimensions)
print ("\t\tsize:", data.variables[var].size)
def load_data(filename):
data = Dataset(filename)
return data
## Load data
data = load_data(data_path+fname)
#print_data_info(data)
tsteps_per_month = len(data.variables['time'][:])
var_name = 'T001'
tstep = 100
lon_array = np.asarray(data.variables['lon'][:])
lat_array = np.asarray(data.variables['lat'][:])
uvel = np.asarray(data.variables[var_name][:])
uvel = np.asarray(uvel[tstep,:])
print (np.min(lon_array),np.max(lon_array))
print (np.min(lat_array),np.max(lat_array))
import matplotlib.colors as matcolors
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.patches import Rectangle
top = plt.get_cmap('twilight_shifted', 256)
top_cmp = matcolors.ListedColormap(top(np.linspace(0.55, 1, 256)))
bottom = cm.get_cmap('twilight_shifted', 256)
bottom_cmp = matcolors.ListedColormap(bottom(np.linspace(0.05,0.45,256)))
white = np.array(([256/256, 256/256, 256/256, 1]))
newcolors = np.vstack((bottom_cmp(np.linspace(0, 1, 256)),
top_cmp(np.linspace(0, 1, 256))))
newcmp = matcolors.ListedColormap(newcolors, name='OrangeBlue')
newcmp2 = matcolors.ListedColormap(newcmp(np.linspace(0.0, 0.64, 512)))
## Render using Python griddata
lon_dim = 360
lat_dim = 180
points = np.column_stack((lon_array, lat_array))
## create 2D regular grid
grid_x, grid_y = np.mgrid[0:360:360j, -89:89:180j] ## grid for whole world
cur_loc = np.zeros((lat_dim*lon_dim,2),dtype='float')  # flattened (lon, lat) target points for griddata
ind = 0
for j in range(lat_dim):
for i in range(lon_dim):
cur_loc[ind,:] = np.array([grid_x[i][j],grid_y[i][j]])
ind = ind+1
print(len(points))
grid_z0 = griddata(points, uvel, cur_loc, method='linear')
grid_z0_2d = grid_z0.reshape((lat_dim,lon_dim))
plt.imshow(grid_z0_2d, origin='lower',cmap=plt.get_cmap(newcmp2))
plt.colorbar(orientation="vertical", shrink=0.74, label="Kelvin")
plt.xlabel("Longitude")
plt.ylabel("Latitude")
plt.yticks(np.arange(0, 190, 90))
plt.savefig('out.png')
```
|
github_jupyter
|
```
! pip install fastcore --upgrade -qq
! pip install fastai --upgrade -qq
from fastai.vision.all import *
import fastai
from sys import exit
from operator import itemgetter
import re
import torch
from torch.nn import functional as F
import numpy as np
from time import process_time_ns, process_time
import gc
def scale(val, spec="#0.4G"):
PREFIXES = np.array([c for c in u"yzafpnµm kMGTPEZY"])
exp = np.int8(np.log10(np.abs(val)) // 3 * 3 * np.sign(val))
val /= 10.**exp
prefix = PREFIXES[exp//3 + len(PREFIXES)//2]
return f"{val:{spec}}{prefix}"
def display_times(times):
return f"{scale(times.mean())}s ± {scale(times.std())}s, {scale(times.min())}s, {scale(times.max())}s"
def profile_cpu(func, inp, n_repeat=100, warmup=10):
fwd_times,bwd_times = [],[]
for i in range(n_repeat + warmup):
start = process_time()
res = func(inp)
end = process_time()
if i >= warmup: fwd_times.append(end-start)
inp = inp.clone().requires_grad_()
y = func(inp)
l = y.mean()
start = process_time()
_ = torch.autograd.grad(l, inp)
end = process_time()
if i >= warmup: bwd_times.append(end-start)
return (np.array(fwd_times), # Elapsed time is in seconds
np.array(bwd_times))
def profile_cuda(func, inp, n_repeat=100, warmup=10):
fwd_times,bwd_times = [],[]
for i in range(n_repeat + warmup):
start,end = (torch.cuda.Event(enable_timing=True) for _ in range(2))
start.record()
res = func(inp)
end.record()
torch.cuda.synchronize()
if i >= warmup: fwd_times.append(start.elapsed_time(end))
start,end = (torch.cuda.Event(enable_timing=True) for _ in range(2))
inp = inp.clone().requires_grad_()
y = func(inp)
l = y.mean()
start.record()
_ = torch.autograd.grad(l, inp)
end.record()
torch.cuda.synchronize()
if i >= warmup: bwd_times.append(start.elapsed_time(end))
return (np.array(fwd_times)/1000, # Elapsed time is in ms
np.array(bwd_times)/1000)
mish_pt = lambda x: x.mul(torch.tanh(F.softplus(x)))
def profile(device='cuda', n_repeat=100, warmup=10, size='(16,10,256,256)', baseline=True, types='all'):
if types == 'all':
dtypes = [torch.float16, torch.bfloat16, torch.float32, torch.float64]
else:
        if not hasattr(torch, types): exit(f"Invalid data type, expected torch type or 'all', got {types}")
dtypes = [getattr(torch, types)]
dev = torch.device(type=device)
sz_str = size.replace(' ','')
if not re.match(r"[\(\[]\d+(,\d+)*[\)\]]", sz_str):
exit("Badly formatted size, should be a list or tuple such as \"(1,2,3)\".")
sz = list(map(int, sz_str[1:-1].split(',')))
print(f"Profiling over {n_repeat} runs after {warmup} warmup runs.")
for dtype in dtypes:
if len(dtypes) > 1:
print(f"Testing on {dtype}:")
ind = ' '
else: ind = ''
inp = torch.randn(*sz, dtype=dtype, device=dev)
timings = []
funcs = {}
funcs.update(relu = torch.nn.functional.relu,
leaky_relu = torch.nn.functional.leaky_relu,
softplus = torch.nn.functional.softplus,
silu_jit = fastai.layers.swish,
silu_native = torch.nn.functional.silu,
mish_naive = mish_pt,
mish_jit = fastai.layers.mish,
mish_native = torch.nn.functional.mish)
        if device=='cuda': funcs['mish_cuda'] = MishCudaFunction.apply  # assumes MishCudaFunction is imported from the external mish-cuda package
max_name = max(map(len, funcs.keys())) + 6
for (name,func) in funcs.items():
if device=='cuda':
                if (name=='mish_cuda') and (dtype==torch.bfloat16):
                    continue  # mish_cuda doesn't cover bfloat16; skip rather than print stale timings
                else:
fwd_times,bwd_times = profile_cuda(func, inp, n_repeat, warmup)
torch.cuda.empty_cache()
if device=='cpu':
fwd_times,bwd_times = profile_cpu(func, inp, n_repeat, warmup)
gc.collect()
print(ind+(name+'_fwd:').ljust(max_name) + display_times(fwd_times))
print(ind+(name+'_bwd:').ljust(max_name) + display_times(bwd_times))
```
# Haswell Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
# Broadwell Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
# Skylake Benchmark
```
!cat /proc/cpuinfo
profile('cpu', types='float32')
profile('cpu', size='(64,10,256,256)', types='float32')
```
|
github_jupyter
|
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
%matplotlib inline
class NNModel:
def __init__(self,learning_rate, n_iter, args):
self.learning_rate = learning_rate
self.args = args
self.n_iter = n_iter
def z_score(self,w,x,b):
return np.dot(w,x)+b
def init_params(self,n_x):
parameters={}
n_a = n_x
for i in range(1,len(self.args)+1):
n_h = self.args[i-1][0]
parameters['w'+str(i)] = np.random.rand(n_h,n_a)*np.sqrt(1/n_x)
parameters['b'+str(i)] = np.random.randn(n_h,1)
n_a = n_h
return parameters
def activation(self,z,fn = 'linear'):
act_fn ={'linear':z,
'relu':np.maximum(z,0),
'tanh':np.tanh(z),
'sigmoid':1/(1+np.exp(-z)),
'softmax':np.exp(z)/np.sum(np.exp(z))}
return act_fn[fn]
def forward_prop(self,x, parameters):
        L = len(self.args)
z_scores = {}
activations = {'a0':x}
for i in range(1,L+1):
z_scores['z'+str(i)] = self.z_score(parameters['w'+str(i)],activations['a'+str(i-1)],parameters['b'+str(i)])
z = z_scores['z'+str(i)]
activations['a'+str(i)] = self.activation(z,fn=self.args[i-1][1])
return z_scores, activations
def compute_cost(self,y,y_hat):
m = y.shape[0]
cost = (-1/m)*(np.dot(y, np.log(y_hat.T+0.0000001)) + np.dot(1-y, np.log(1-y_hat.T+0.0000001)))
return np.squeeze(cost)
def backprop(self,y, parameters, z_scores, activations):
gradients = {}
L = len(self.args)
m = y.shape[0]
for i in range(L,0,-1):
if i==L:
gradients['dz'+str(i)]=activations['a'+str(i)]-y
else:
gradients['dz'+str(i)] = np.multiply(np.dot(parameters['w'+str(i+1)].T, gradients['dz'+str(i+1)]), 1*(z_scores['z'+str(i)]>=0))
dz = gradients['dz'+str(i)]
gradients['dw'+str(i)] = (1/m)*np.matmul(dz,activations['a'+str(i-1)].T)
gradients['db'+str(i)] = (1/m)*np.sum(dz,axis=1,keepdims=True)
return gradients
def update_params(self,parameters, gradients):
eta = self.learning_rate
for i in range(1,len(parameters)//2+1):
parameters['w'+str(i)]-=eta*gradients['dw'+str(i)]
parameters['b'+str(i)]-=eta*gradients['db'+str(i)]
return parameters
def fit(self,x,y):
np.random.seed(5)
params = self.init_params(x.shape[0])
for i in range(self.n_iter):
z_scores,activations = self.forward_prop(x,params)
y_hat = activations['a'+str(len(self.args))]
#print(y_hat)
cost = self.compute_cost(y,y_hat)
gradients = self.backprop(y,params,z_scores,activations)
params = self.update_params(params,gradients)
if i%1000==0:
print('Iteration : {} Cost : {}'.format(i,cost))
return params
def predict(self,x_test,params):
z_scores, activations = self.forward_prop(x_test,params)
y_pred = 1*(activations['a'+str(len(params)//2)]>0.5)
return np.squeeze(y_pred)
path = '/home/mrityunjay/Downloads/sonar.csv'
df = pd.read_csv(path)
df.columns=['x'+str(i) for i in range(len(df.columns))]
X=df.drop(['x60'],axis=1)
Y=df['x60']
X_train,X_test,Y_train,Y_test = train_test_split(X,Y,test_size=0.25,random_state=1)
X_train = np.transpose(X_train.values)
X_test = np.transpose(X_test.values)
Y_train = 1*(Y_train.values=='R')
Y_test = 1*(Y_test.values=='R')
Y_train = Y_train.reshape(1,Y_train.shape[0])
Y_test = Y_test.reshape(1,Y_test.shape[0])
args=[(100,'relu'),(50,'relu'),(10,'relu'),(5,'relu'),(3,'relu'),(1,'sigmoid')]
nn = NNModel(learning_rate=0.001, n_iter = 10000, args=args)
params = nn.fit(X_train,Y_train)
Y_pred = nn.predict(X_test,params)
print(Y_pred)
print(Y_test)
acc = accuracy_score(Y_pred,np.squeeze(Y_test))
print(acc)
```
|
github_jupyter
|