repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string)
---|---|---|---|---|---|---
dataqa | dataqa-master/continuum/validation_tool/catalogue.py | from __future__ import division
from functions import axis_lim, flux_at_freq, two_freq_power_law, config2dic, SED
import os
import glob
import numpy as np
import pandas as pd
from astropy.io import fits as f
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.io.votable import parse_single_table
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
import warnings
from inspect import currentframe, getframeinfo
# from dynamic_range import source_dynamic_range, local_dynamic_range
#ignore annoying astropy warnings and set my own obvious warning output
warnings.simplefilter('ignore', category=AstropyWarning)
cf = currentframe()
WARN = '\n\033[91mWARNING: \033[0m' + getframeinfo(cf).filename
class catalogue(object):
def __init__(self, filename, name, image=None, frequency=1400, search_rad=10.0,
finder=None, ra_col='ra', dec_col='dec', ra_fmt='deg', dec_fmt='deg',
flux_col='int_flux', flux_err_col='err_int_flux', peak_col='peak_flux',
peak_err_col='err_peak_flux', rms_val='local_rms', flux_unit='Jy',
use_peak=False, island_col='island', flag_col='flags', maj_col='a',
SNR=5.0, col_suffix='', sep='\t', basename=None, autoload=True,
verbose=False):
"""Initialise a catalogue object.
Arguments:
----------
filename : string
The path to a fits, csv, xml or 'sep'-delimited catalogue.
name : string
A unique short-hand name for the catalogue (e.g. 'NVSS'). This will act as the key for
a number of dictionaries of the key catalogue fields, including the positions and fluxes.
Keyword arguments:
------------------
image : radio_image
A radio image object used to initialise certain fields.
frequency : float
The frequency of this catalogue in MHz.
search_rad : float
The search radius in arcsec to use when cross-matching this catalogue.
finder : string
The source finder that produced this catalogue (if known) ['Aegean' | 'Selavy' | 'pybdsf'].
This sets all the column names (including rms) and flux units to their defaults for that source finder.
ra_col : string
The name of the RA column.
dec_col : string
The name of the DEC column.
ra_fmt : string
The format of the RA column, input to SkyCoord (e.g. 'deg' or 'hour').
dec_fmt : string
The format of the DEC column, input to SkyCoord.
flux_col : string
The name of the integrated flux column.
flux_err_col : string
The name of the integrated flux error column. Use None if this doesn't exist, and 10% errors will be assumed.
peak_col : string
The name of the peak flux column (if any). Use None if this doesn't exist and it won't be used.
peak_err_col : string
The name of the peak flux error column. Use None if this doesn't exist, and 10% errors will be assumed.
rms_val : string or float
The name of the local rms column, or a fixed value across the whole catalogue. The units must be the same as the flux columns.
flux_unit : string
The (case-insensitive) units of all the flux columns ['Jy' | 'mJy' | 'uJy'].
use_peak : bool
Use the peak flux instead of the integrated flux.
island_col : string
The name of the island column (if any).
flag_col : string
The name of the flag column (if any).
maj_col : string
The name of the fitted major axis column (if any). This is assumed to be in arcsec.
SNR : float
The minimum signal-to-noise ratio of the input catalogue.
col_suffix : string
A suffix to add to the end of all column names (e.g. '_deep' for GLEAM).
sep : string
When reading in a delimited file, use this delimiter (not needed for csv file).
basename : string
The base of the name to use for all output catalogues. Use None to use the same as parameter 'name'.
autoload : bool
Look for files already processed and load these if they exist.
verbose : bool
Verbose output.
See Also
--------
astropy.coordinates.SkyCoord
pandas.DataFrame"""
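# A minimal usage sketch (the filename 'selavy_cat.xml' and name 'ASKAP' are hypothetical):
# construct a catalogue with the Selavy column defaults filled in automatically.
#
#   cat = catalogue('selavy_cat.xml', 'ASKAP', finder='selavy',
#                   frequency=1368, search_rad=10.0, verbose=True)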
print "--------------------------"
print "| Reading {0} catalogue |".format(name)
print "--------------------------"
self.verbose = verbose
self.name = name
self.filename = filename.split('/')[-1]
self.image = image
self.SNR = SNR
#set basename
self.basename = basename
if self.basename is None:
self.basename = name
# finder
if finder is None:
self.finder = None
else:
self.finder = finder.lower()
self.knownFinder = (self.finder in ['aegean', 'selavy', 'pybdsf'])
#set names of all output catalogues
self.cutout_name = "{0}_cutout.csv".format(self.basename)
self.filtered_name = "{0}_filtered.csv".format(self.basename)
self.si_name = ''
si_files = glob.glob("{0}*_si.csv".format(self.basename))
if len(si_files) > 0:
self.si_name = si_files[0] #this is a guess, but is updated later if it doesn't exist
#look for files already processed in order of preference
fileFound = False
if autoload:
fileFound = True
if os.path.exists(self.si_name):
filename = self.si_name
print "Spectral index catalogue already exists. Using file '{0}'".format(filename)
elif os.path.exists(self.filtered_name):
filename = self.filtered_name
print "Filtered catalogue already exists. Using file '{0}'".format(filename)
elif os.path.exists(self.cutout_name):
filename = self.cutout_name
print "Cutout catalogue already exists. Using file '{0}'".format(filename)
else:
fileFound = False
#Convert file to pandas data frame
self.df = self.cat2df(filename, sep, verbose=True)
#Read frequency and search radius from image object if it exists, otherwise from input parameters
if self.image is not None:
self.frequency=self.image.freq
self.search_rad=3*self.image.posErr
else:
self.frequency = frequency
self.search_rad = search_rad
#To ensure unique column names, append catalogue name to beginning of column names when not already done
if not fileFound:
self.df.columns = '{0}_'.format(self.name) + self.df.columns
self.col_suffix = col_suffix
if self.finder is not None and not self.knownFinder:
warnings.warn_explicit("Unrecognised source finder: {0}. Use 'Aegean', 'Selavy' or 'pybdsf'\n".format(finder),UserWarning,WARN,cf.f_lineno)
if self.finder is not None and self.finder == 'selavy':
if self.verbose:
print 'Using default configuration for Selavy.'
#set default column names for Selavy, appending catalogue name to beginning
self.flux_col=self.unique_col_name('flux_int')
self.flux_err_col=self.unique_col_name('flux_int_err')
self.peak_col=self.unique_col_name('flux_peak')
self.peak_err_col=self.unique_col_name('flux_peak_err')
self.rms_val=self.unique_col_name('rms_image')
self.island_col=self.unique_col_name('island_id')
self.flag_col=self.unique_col_name('fit_is_estimate')
self.maj_col=self.unique_col_name('maj_axis')
self.ra_col=self.unique_col_name('ra_deg_cont')
self.dec_col=self.unique_col_name('dec_deg_cont')
self.ra_fmt='deg'
self.dec_fmt='deg'
self.flux_unit='mjy'
self.si_col=self.unique_col_name('spectral_index')
elif self.finder is not None and self.finder == 'pybdsf':
if self.verbose:
print 'Using default configuration for pybdsf.'
#set default column names for pybdsf, appending catalogue name to beginning
self.flux_col=self.unique_col_name('Total_flux')
self.flux_err_col=self.unique_col_name('E_Total_flux')
self.peak_col=self.unique_col_name('Peak_flux')
self.peak_err_col=self.unique_col_name('E_Peak_flux')
self.rms_val=self.unique_col_name('Isl_rms')
self.island_col=self.unique_col_name('Isl_id')
self.flag_col=None
self.maj_col=self.unique_col_name('DC_Maj')
self.ra_col=self.unique_col_name('RA')
self.dec_col=self.unique_col_name('DEC')
self.ra_fmt='deg'
self.dec_fmt='deg'
self.flux_unit='jy'
self.si_col=None
else:
if self.finder is not None and self.finder == 'aegean':
if self.verbose:
print 'Using default configuration for Aegean.'
#append catalogue name to beginning of all columns
self.flux_col=self.unique_col_name(flux_col)
self.flux_err_col=self.unique_col_name(flux_err_col)
self.peak_col=self.unique_col_name(peak_col)
self.peak_err_col=self.unique_col_name(peak_err_col)
self.island_col=self.unique_col_name(island_col)
self.flag_col=self.unique_col_name(flag_col)
self.maj_col=self.unique_col_name(maj_col)
if type(rms_val) is str:
self.rms_val=self.unique_col_name(rms_val)
else:
self.rms_val = rms_val
#specific fix for GLEAM catalogue
if finder is not None and finder.lower() == 'aegean':
self.col_suffix = ''
self.flux_unit = flux_unit
self.ra_col=self.unique_col_name(ra_col)
self.dec_col=self.unique_col_name(dec_col)
self.ra_fmt = ra_fmt
self.dec_fmt = dec_fmt
self.si_col=None
self.use_peak = use_peak
self.flux_unit = self.flux_unit.lower()
if self.flux_unit not in ('jy','mjy','ujy'):
warnings.warn_explicit("Unrecognised flux unit '{0}'. Use 'Jy', 'mJy' or 'uJy'. Assuming 'Jy'\n".format(flux_unit),UserWarning,WARN,cf.f_lineno)
self.flux_unit = 'jy'
#keep a running list of key values for all sources, as a dictionary with key 'name'
self.cat_list = [self.name]
self.freq = {self.name : self.frequency}
self.radius = {self.name : self.search_rad}
self.count = {self.name : len(self.df)}
self.coords = {}
self.ra = {}
self.dec = {}
self.flux = {}
self.flux_err = {}
self.rms = {}
self.sep = {}
self.dRAsec = {}
self.dRA = {}
self.dDEC = {}
self.si = {}
#Initialise all the key fields, including positions and fluxes.
self.set_key_fields(set_coords=False)
def unique_col_name(self, col):
"""Return a unique column name by appending the catalogue name to the beginning. If column is None, return None.
Arguments:
----------
col : string
Column name.
Returns:
--------
col : string
Unique column name or None."""
if col is not None:
col = '{0}_{1}{2}'.format(self.name, col, self.col_suffix)
return col
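# For example, with self.name == 'NVSS' and self.col_suffix == '', a call like
# self.unique_col_name('int_flux') is expected to return 'NVSS_int_flux'.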
def cat2df(self, filepath, sep, verbose=False):
"""Return a pandas dataframe of the provided catalogue.
If the file isn't a '.fits', '.xml', '.csv' or '.cat' file, a file delimited by 'sep' will be assumed.
Arguments:
----------
filepath : string
The absolute path to the catalogue.
Keyword arguments:
------------------
sep : string
Delimiter for delimited files.
verbose : bool
Verbose output.
Returns:
--------
df : pandas.DataFrame
A pandas dataframe of the catalogue.
See Also
--------
pandas.DataFrame"""
if verbose:
print "Loading '{0}' catalogue into pandas.".format(filepath.split('/')[-1])
#convert from fits or xml to dataframe
extn = filepath.split('.')[-1].lower()
if extn == 'fits' or extn == 'gz':
table = f.open(filepath)[1]
df = Table(data=table.data).to_pandas()
elif extn == 'xml': #assumed to come from Selavy
table = parse_single_table(filepath).to_table(use_names_over_ids=True)
df = table.to_pandas()
#otherwise, load straight into pandas as csv or 'sep' delimited file
elif extn in ['csv', 'cat']:
if self.finder == 'pybdsf':
df = pd.read_csv(filepath, skip_blank_lines=True, skiprows=5, skipinitialspace=True)
else:
df = pd.read_csv(filepath)
else:
df = pd.read_table(filepath,sep=sep)
if verbose:
print "Catalogue contains {0} sources.".format(len(df))
return df
def set_specs(self,img):
"""Set the key fields of this catalogue using an input image. This must be done before the catalogue is filtered.
img : radio_image
A radio_image object corresponding to this catalogue, which is used to calculate various quantities."""
if img is not None:
#Get area of non-nan pixels of image
self.area = img.area
self.ra_bounds = img.ra_bounds
self.dec_bounds = img.dec_bounds
#Get dynamic range between image and rms map and sum of image flux vs. total catalogue flux
rms_map = f.open(img.rms_map)[0]
img_data = img.fits.data
if self.finder == 'aegean':# or self.finder == 'pybdsf':
img_data = img_data[0][0]
self.img_peak = np.max(img_data[~np.isnan(img_data)])
self.rms_bounds = rms_map.data > 0
self.img_rms = int(np.median(rms_map.data[self.rms_bounds])*1e6) #uJy
self.img_peak_bounds = np.max(img_data[self.rms_bounds])
self.img_peak_pos = np.where(img_data == self.img_peak_bounds)
self.img_peak_rms = rms_map.data[self.img_peak_pos][0]
self.dynamic_range = self.img_peak_bounds/self.img_peak_rms
self.img_flux = np.sum(img_data[~np.isnan(img_data)]) / (1.133*((img.bmaj * img.bmin) / (img.raPS * img.decPS))) #divide by beam area
# ...
self.img_center = img.center
if os.path.exists(img.residual):
self.source_dynrange = self.source_dynamic_range()
self.local_dynrange = self.local_dynamic_range()
else:
self.source_dynrange = None
self.local_dynrange = None
#Get the approximate area from catalogue
else:
if self.name not in self.coords.keys():
self.set_key_fields()
self.ra_bounds = (max(self.ra[self.name]),min(self.ra[self.name]))
dRA = (self.ra_bounds[0] - self.ra_bounds[1])*np.cos(np.deg2rad(np.mean(self.dec[self.name])))
self.dec_bounds = (max(self.dec[self.name]),min(self.dec[self.name]))
dDEC = abs(self.dec_bounds[0] - self.dec_bounds[1])
self.area = dRA*dDEC
self.img_peak = np.max(self.flux[self.name])
self.img_rms = int(np.median(self.rms[self.name])*1e6) #uJy
self.dynamic_range = self.img_peak/(self.img_rms*1e-6) #img_rms is in uJy, img_peak in Jy
self.img_flux = np.nan
self.source_dynrange = None
self.local_dynrange = None
self.blends = len(np.where(self.df[self.island_col].value_counts() > 1)[0])
self.cat_flux = np.sum(self.flux[self.name])
#get median spectral index if column exists
if self.name in self.si.keys():
self.med_si = np.median(self.si[self.name])
else:
self.med_si = -99
if self.verbose:
print "Sum of flux in image is {0:.3f} Jy and sum of all fitted gaussians is {1:.3f} Jy.".format(self.img_flux,self.cat_flux)
print "Image peak is {0:.2f} Jy.".format(self.img_peak)
print "Dynamic range is {0:.0E}.".format(self.dynamic_range)
print "Number of multi-component islands is {0}.".format(self.blends)
if self.med_si != -99:
print "Median spectral index is {0:.2f}.".format(self.med_si)
#Store the initial length of the catalogue
self.initial_count = self.count[self.name]
#Derive the fraction of resolved sources as the fraction of sources with int flux > 3-sigma away from peak flux
if self.name in self.flux.keys():
#Josh's method
self.uncertainty = np.sqrt(self.df[self.flux_err_col]**2 + self.df[self.peak_err_col]**2)
if self.finder == 'selavy':
self.uncertainty += np.sqrt(self.df[self.rms_val]**2)
self.sigma = (self.df[self.flux_col] - self.df[self.peak_col]) / self.uncertainty
self.resolved = np.where(self.sigma > 3)[0]
self.resolved_frac = len(self.resolved) / len(self.df)
#Tom's method
# self.R = np.log(self.df[self.flux_col] / self.df[self.peak_col])
# self.uncertainty = np.sqrt((self.df[self.flux_err_col]/self.df[self.flux_col])**2 + (self.df[self.peak_err_col]/self.df[self.peak_col])**2)
# if self.finder == 'selavy':
# self.uncertainty += np.sqrt(self.df[self.rms_val]**2)
# self.resolved = np.where(self.R > 2*self.uncertainty)[0]
# self.resolved_frac = len(self.resolved) / len(self.df)
else:
self.resolved_frac = -1
def set_key_fields(self, indices=None, cat=None, set_coords=True):
"""Set the key fields, including the positions, frequency and fluxes. This must be run
each time the dataframe is updated. Each field is a dictionary with key catalogue.name.
Keyword Arguments
-----------------
indices : list
A list of indices from this instance to subselect after removing rows. If indices is None, all indices of this instance will be used.
cat : catalogue
Another catalogue object used to initialise the fields. If None is provided, this instance will be used.
set_coords : bool
Create a list of SkyCoord objects for every source. As this is a time-consuming task, it is
only recommended after a cutout has been applied. This only applies when cat is None.
See Also
--------
astropy.coordinates.SkyCoord
pandas.Series"""
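# A rough sketch of the resulting dictionary layout (catalogue names 'ASKAP' and 'NVSS'
# are hypothetical; the 'NVSS' entries assume a cross-match has been performed):
#
#   self.freq == {'ASKAP': 1368.0, 'NVSS': 1400.0}                      # MHz
#   self.flux == {'ASKAP': <pandas.Series>, 'NVSS': <pandas.Series>}    # forced to Jy
#   self.sep  == {'NVSS': <pandas.Series>}                              # separations in arcsec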
coords_set = False
#after cross-match, add new coordinates, positions and fluxes, etc to dictionaries
if cat is not None:
#set easy names for columns
prefix = '{0}_{1}_'.format(cat.name,self.name)
sep = prefix + 'sep'
dRAsec = prefix + 'dRAsec' #RA offset in arcsec, without cos(DEC) correction
dRA = prefix + 'dRA' #RA offset in arcsec on the sky
dDEC = prefix + 'dDEC'
self.cat_list.append(cat.name)
self.count[cat.name] = len(np.where(~np.isnan(cat.df[sep]))[0]) #length of indices that aren't nan
self.freq[cat.name] = cat.freq[cat.name]
self.radius[cat.name] = cat.radius[cat.name]
self.ra[cat.name] = cat.ra[cat.name]
self.dec[cat.name] = cat.dec[cat.name]
#compute the positional offsets
self.df[dRAsec] = (self.ra[self.name] - self.ra[cat.name])*3600 #RA offset in arcsec, without cos(DEC) correction
self.df[dRA] = self.df[dRAsec]*np.cos(np.deg2rad((self.dec[self.name] + self.dec[cat.name])/2)) #RA offset in arcsec on the sky
self.df[dDEC] = (self.dec[self.name] - self.dec[cat.name])*3600
#store in dictionaries
self.sep[cat.name] = cat.df[sep]
self.dRAsec[cat.name] = self.df[dRAsec]
self.dRA[cat.name] = self.df[dRA]
self.dDEC[cat.name] = self.df[dDEC]
if cat.name in cat.flux.keys():
self.flux[cat.name] = cat.flux[cat.name]
self.flux_err[cat.name] = cat.flux_err[cat.name]
self.rms[cat.name] = cat.rms[cat.name]
#write flux ratio if frequencies within 1%
if self.name in self.flux.keys() and np.abs(cat.freq[cat.name]/self.freq[self.name]-1) < 0.01:
self.df[prefix + 'flux_ratio'] = self.flux[self.name]/self.flux[cat.name]
if cat.si_col != None:
self.si[cat.name] = cat.si[cat.name]
#otherwise initialise or update dictionary for this instance
else:
if set_coords or (indices is not None and len(self.coords) == 0):
#initialise SkyCoord object for all sky positions and create numpy array for RA and DEC in degrees
self.coords[self.name] = SkyCoord(ra = self.df[self.ra_col], dec = self.df[self.dec_col],unit='{0},{1}'.format(self.ra_fmt,self.dec_fmt))
self.ra[self.name] = self.coords[self.name].ra.deg
self.dec[self.name] = self.coords[self.name].dec.deg
coords_set = True
#initialise fluxes
if indices is None and not (self.flux_col is None and self.peak_col is None):
#Create pandas series for peak or integrated flux and errors
if self.use_peak and self.peak_col is None:
warnings.warn_explicit("Can't use peak flux since peak column name not specified. Using integrated flux.\n",UserWarning,WARN,cf.f_lineno)
if self.use_peak and self.peak_col is not None:
self.flux[self.name] = self.df[self.peak_col].copy()
if self.peak_err_col != None:
self.flux_err[self.name] = self.df[self.peak_err_col].copy()
else:
self.flux_err[self.name] = self.flux[self.name]*0.1 #10% error
else:
self.flux[self.name] = self.df[self.flux_col].copy()
if self.flux_err_col != None:
self.flux_err[self.name] = self.df[self.flux_err_col].copy()
else:
self.flux_err[self.name] = self.flux[self.name]*0.1 #10% error
#set 10% errors where error is <=0
if np.any(self.flux_err[self.name] <= 0):
i = np.where(self.flux_err[self.name] <= 0)[0]
self.flux_err[self.name][i] = self.flux[self.name][i]*0.1 #10% error
#Set rms as pandas series or single value
if type(self.rms_val) is str:
self.rms[self.name] = self.df[self.rms_val].copy()
else:
self.rms[self.name] = self.rms_val
#Force Jy units
factor = 1
if self.flux_unit == 'mjy':
factor = 1e-3
elif self.flux_unit == 'ujy':
factor = 1e-6
self.flux[self.name] *= factor
self.flux_err[self.name] *= factor
self.rms[self.name] *= factor
if self.si_col != None:
self.si[self.name] = self.df[self.si_col]
#otherwise just update this instance
else:
if not coords_set:
self.coords[self.name] = self.coords[self.name][indices]
self.ra[self.name] = self.ra[self.name][indices]
self.dec[self.name] = self.dec[self.name][indices]
#reset indices of pandas series
if self.name in self.flux.keys():
self.flux[self.name] = self.flux[self.name][indices].reset_index(drop=True)
self.flux_err[self.name] = self.flux_err[self.name][indices].reset_index(drop=True)
if type(self.rms_val) is str:
self.rms[self.name] = self.rms[self.name][indices].reset_index(drop=True)
#only update these for cross-matched catalogues
if self.name in self.sep.keys():
self.sep[self.name] = self.sep[self.name][indices].reset_index(drop=True)
self.dRAsec[self.name] = self.dRAsec[self.name][indices].reset_index(drop=True)
self.dRA[self.name] = self.dRA[self.name][indices].reset_index(drop=True)
self.dDEC[self.name] = self.dDEC[self.name][indices].reset_index(drop=True)
if self.name in self.si.keys():
self.si[self.name] = self.si[self.name][indices].reset_index(drop=True)
#reset indices and catalogue length after change has been made
self.df = self.df.reset_index(drop=True)
self.count[self.name] = len(self.df)
def overwrite_df(self,catalogue,step='this',set_coords=True,verbose=True):
"""
Overwrite self.df with another file or DataFrame.
All other fields are assumed to stay the same.
Arguments:
----------
catalogue : string or pandas.DataFrame
The filename of the catalogue (must be csv file), or a pandas data frame object.
If a dataframe is input, it is assumed to have come from a catalogue object.
Keyword arguments:
------------------
step : string
The name of the processing step (used for output messages only).
set_coords : bool
Set the coordinate fields when resetting the key fields.
verbose : bool
Verbose output.
See Also:
---------
pandas.DataFrame
"""
#read from file if filename is provided
if type(catalogue) is str:
self.df = pd.read_csv(catalogue)
if verbose:
print "'{0}' already exists. Skipping {1} step and setting catalogue to this file.".format(catalogue,step)
print "{0} catalogue now contains {1} sources.".format(self.name,len(self.df))
else:
self.df = catalogue
#reset the key fields
self.set_key_fields(set_coords=set_coords)
def write_df(self,write,filename,verbose=True):
"""Write data frame to file.
Arguments:
----------
write : bool
Write the file.
filename : string
The file name.
Keyword Arguments:
------------------
verbose : bool
Verbose output."""
if write:
if verbose:
print "Writing to '{0}'.".format(filename)
self.df.to_csv(filename,index=False)
def cutout_box(self, ra, dec, fov=12, redo=False, write=True, verbose=True):
"""Cut out a box of the catalogue, updating the catalogue to only contain sources within this box.
Input either a central RA and DEC with a FOV, or tuples of RA and DEC boundaries.
Arguments:
----------
ra : float or tuple
The RA centre in degrees, or a tuple of two RA boundaries.
dec : float
The DEC centre in degrees, or a tuple of two DEC boundaries.
Keyword arguments:
------------------
fov : float
The field of view in degrees (i.e. fov*fov degrees). Only used when RA & DEC are single values.
redo : bool
Cut out a box, even if a cutout file exists.
write : bool
Write the cutout catalogue to file.
verbose : bool
Verbose output."""
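# An illustrative call (coordinates and fov are example values only): cut a 6x6 degree
# box around RA=180 deg, DEC=-30 deg, or pass boundary tuples instead of a centre.
#
#   cat.cutout_box(180.0, -30.0, fov=6)
#   cat.cutout_box((177.0, 183.0), (-33.0, -27.0))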
filename = self.cutout_name
#if column units aren't degrees, set sky coords and get RA/DEC in degrees
if self.ra_fmt != 'deg' or self.dec_fmt != 'deg':
if len(self.coords) == 0:
self.set_key_fields(set_coords=True)
RA = self.ra[self.name]
DEC = self.dec[self.name]
#otherwise just use column values
else:
RA = self.df[self.ra_col]
DEC = self.df[self.dec_col]
if (type(ra) is tuple and (type(dec) is not tuple or len(ra) != 2)) or \
(type(dec) is tuple and (type(ra) is not tuple or len(dec) != 2)):
warnings.warn_explicit('RA and DEC must both be single value or a tuple with two indices. Cutout not applied.\n',UserWarning,WARN,cf.f_lineno)
elif redo or not os.path.exists(filename):
if verbose:
if redo:
print "Re-doing cutout step."
if write:
print "Overwriting '{0}'.".format(filename)
print "Cutting out sources from {0}.".format(self.name)
#cut out all rows outside RA and DEC boundaries
if type(ra) is tuple:
ra_min, ra_max, dec_min, dec_max = axis_lim(ra, min), axis_lim(ra, max),\
axis_lim(dec, min), axis_lim(dec, max)
if verbose:
print "Selecting sources with {0} <= RA <= {1} and {2} <= DEC <= {3}.".format(ra_min,ra_max,dec_min,dec_max)
self.df = self.df[(RA <= ra_max) & (RA >= ra_min) & (DEC <= dec_max) & (DEC >= dec_min)]
#cut out all rows outside the FOV
else:
if verbose:
print "Using a {0}x{0} degree box centred at {1} deg, {2} deg.".format(fov,ra,dec)
self.df = self.df[(DEC <= dec + fov/2) & (DEC >= dec - fov/2) &
(RA <= ra + fov/2/np.cos(np.deg2rad(DEC))) &
(RA >= ra - fov/2/np.cos(np.deg2rad(DEC)))]
if verbose:
print '{0} {1} sources within this region.'.format(len(self.df),self.name)
#Drop the rejected rows, reset the key fields and write to file
self.set_key_fields(indices=self.df.index.tolist())
self.write_df(write,filename)
#if file exists, simply read in catalogue
else:
self.overwrite_df(filename,step='cutout',set_coords=False)
def filter_sources(self,flux_lim=0,SNR=0,ratio_frac=0,ratio_sigma=0,reject_blends=False,psf_tol=0,
resid_tol=0,flags=False,file_suffix='',redo=False,write=True,verbose=False):
"""Reject problematic sources according to several criteria. This will overwrite self.df.
Keyword arguments:
------------------
flux_lim : float
The flux density limit in Jy, below which sources will be rejected.
SNR : float
The S/N ratio limit (where N is the rms), below which sources will be rejected. Use 0 to skip this step.
ratio_frac : float
The ratio of integrated flux to peak flux above which sources will be rejected. Use 0 to skip this step.
ratio_sigma : float
Reject sources as resolved when their integrated minus peak flux exceeds this many times the combined uncertainty. Use 0 to skip this step.
reject_blends : bool
For Aegean, Selavy and pybdsf only. Reject multi-component sources.
psf_tol : float
For Aegean, Selavy and pybdsf only. Reject sources with a fitted major axis larger than this many times the psf major axis. Use 0 to skip this step.
resid_tol : float
For Aegean only. Reject sources with a std in the residual this many times the rms. Use 0 to skip this step.
flags : bool
Reject sources with flags != 0. Only performed if flag_col set in constructor of this instance.
file_suffix : string
Suffix to add to filename, for when doing several steps of filtering.
redo : bool
Perform the filtering, even if filtered file exists.
write : bool
Write the filtered catalogue to file.
verbose : bool
Verbose output."""
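# An illustrative call (threshold values are arbitrary examples, not recommendations):
# keep sources above 5x the local rms, reject multi-component islands, anything with a
# fitted major axis more than 1.5x the psf major axis, and anything flagged as bad.
#
#   cat.filter_sources(SNR=5.0, reject_blends=True, psf_tol=1.5, flags=True)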
if file_suffix != '':
self.basename += file_suffix
filename = '{0}.csv'.format(self.basename)
else:
filename = self.filtered_name
if redo or not os.path.exists(filename):
if verbose:
if redo:
print "Re-doing filtering."
if write:
print "Overwriting '{0}'.".format(filename)
print "Filtering sources in '{0}'...".format(filename)
print "Initial number of sources: {0}.".format(len(self.df))
#reject faint sources
if flux_lim != 0:
if self.flux_col != None:
self.df = self.df[self.flux[self.name] > flux_lim]
if verbose:
print "Rejected (faint) sources below {0} Jy.".format(flux_lim)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("No int flux column given. Can't reject resolved sources based on flux.\n",UserWarning,WARN,cf.f_lineno)
#reject low S/N sources
if SNR != 0:
if self.flux_col != None and self.rms_val != None:
#reindex key fields before comparison
self.set_key_fields(indices=self.df.index.tolist())
self.df = self.df[self.flux[self.name] > SNR*self.rms[self.name]]
if verbose:
print "Rejected (low-S/N) sources < {0} x r.m.s.".format(SNR)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("rms or int flux column not given. Can't reject resolved sources based on SNR.\n",UserWarning,WARN,cf.f_lineno)
#reject resolved sources based on flux ratio
if ratio_frac != 0:
if self.peak_col != None:
self.df = self.df[self.df[self.flux_col]/self.df[self.peak_col] <= ratio_frac]
if verbose:
print "Rejected (resolved) sources with total flux > {0} times the peak flux.".format(ratio_frac)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("No peak flux column given. Can't reject resolved sources using flux ratio.\n",UserWarning,WARN,cf.f_lineno)
#reject resolved sources based on flux ratio metric
if ratio_sigma != 0:
if self.peak_col != None:
uncertainty = np.sqrt(self.df[self.flux_err_col]**2 + self.df[self.peak_err_col]**2)
if self.finder == 'selavy':
uncertainty += np.sqrt(self.df[self.rms_val]**2)
resolved = self.df[self.flux_col] - self.df[self.peak_col] > ratio_sigma * uncertainty
self.df = self.df[~resolved]
if verbose:
print "Rejected (resolved) sources according to int/peak metric, above {0} sigma.".format(ratio_sigma)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("No peak flux column given. Can't reject resolved sources using flux ratio.\n",UserWarning,WARN,cf.f_lineno)
#reject multi-component islands
if reject_blends:
if self.knownFinder:
island_counts = self.df[self.island_col].value_counts()
point_islands = island_counts[island_counts == 1].index
self.df = self.df[self.df[self.island_col].isin(point_islands)]
if verbose:
print "Rejected (resolved) sources belonging to a multi-component island."
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("Can't reject blends since finder isn't Aegean or Selavy or pybdsf.\n",UserWarning,WARN,cf.f_lineno)
#reject extended components based on component size
if psf_tol != 0:
if self.knownFinder:
if self.image is not None:
self.df = self.df[self.df[self.maj_col] <= psf_tol*self.image.bmaj]
elif self.finder == 'aegean': #finder is stored in lower case
self.df = self.df[self.df['{0}_a'.format(self.name)] <= psf_tol*self.df['{0}_psf_a'.format(self.name)]]
else:
warnings.warn_explicit("Can't rejected resolved sources based on psf tolerance without\
inputting radio_image object to read psf.\n",UserWarning,WARN,cf.f_lineno)
if verbose:
print "Rejected (resolved) sources with fitted major axis > {0} times the psf major axis.".format(psf_tol)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("Can't reject sources based on PSF since finder isn't Aegean or Selavy or pybdsf.\n",UserWarning,WARN,cf.f_lineno)
#reject sources with poor fit
if resid_tol != 0:
if self.finder == 'aegean':
#reindex key fields before comparison
self.set_key_fields(indices=self.df.index.tolist())
self.df = self.df[self.df['{0}_residual_std'.format(self.name)] <= resid_tol*self.rms[self.name]]
if verbose:
print "Rejected (poorly fit) sources with standard deviation in residual > {0} times the rms.".format(resid_tol)
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("Can't reject resolved sources based on residual since finder isn't Aegean.\n",UserWarning,WARN,cf.f_lineno)
#reject sources with any flags
if flags:
if self.flag_col != None:
self.df = self.df[self.df[self.flag_col] == 0]
if verbose:
print "Rejecting (problematic) sources flagged as bad."
print "Number remaining: {0}.".format(len(self.df))
else:
warnings.warn_explicit("Can't reject resolved sources based on flag since flag column not set.\n",UserWarning,WARN,cf.f_lineno)
#Drop the rejected rows, reset the key fields and write to file
self.set_key_fields(indices=self.df.index.tolist(), set_coords=False)
self.write_df(write, filename)
#if file exists, simply read in catalogue
else:
self.overwrite_df(filename, step='filtering', verbose=verbose)
def cross_match(self, cat, radius='largest', join_type='1', redo=False, write=True):
"""Perform a nearest neighbour cross-match between this catalogue object and another catalogue object.
This will update this object's catalogue to the matched catalogue and add the key fields to the key field dictionaries.
Arguments:
----------
cat : catalogue
A catalogue object to cross-match this instance to.
Keyword arguments:
------------------
radius : string or float
The search radius in arcsec. Use 'largest' to use the larger of the two default radii.
join_type : string
The join type of the two catalogues. '1' to keep all rows from this instance, or '1and2' to keep only matched rows.
redo : bool
Perform the cross-matching, even if cross-matched file exists.
write : bool
Write the cross-matched catalogue to file."""
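# Illustrative usage (the instance names 'askap_cat' and 'nvss' are hypothetical):
# match within the larger of the two default search radii and keep all rows of this instance.
#
#   askap_cat.cross_match(nvss, radius='largest', join_type='1')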
if cat.name in self.cat_list:
warnings.warn_explicit("You've already cross-matched to {0}. Catalogue unchanged.\n".format(cat.name),UserWarning,WARN,cf.f_lineno)
return
filename = '{0}_{1}.csv'.format(self.basename,cat.name)
#Cross-match and create file if it doesn't exist, otherwise open existing file
if redo or not os.path.exists(filename):
if len(self.df) == 0 or len(cat.df) == 0:
if self.verbose:
print 'No {0} sources to match. Catalogue unchanged.'.format(cat.name)
return
print "---------------------------------"
print "| Cross-matching {0} and {1} |".format(self.name,cat.name)
print "---------------------------------"
if redo:
print "Re-doing cross-match."
if write:
print "Overwriting '{0}'.".format(filename)
print "Cross-matching {0} {1} sources with {2} {3} sources.".format(len(self.df),self.name,len(cat.df),cat.name)
#force coordinates to be set
if len(self.coords) == 0:
self.set_key_fields()
if len(cat.coords) == 0:
cat.set_key_fields()
#get sky coordinates and find nearest match of every source
#from this instance, independent of the search radius
c1 = self.coords[self.name]
c2 = cat.coords[cat.name]
# n = len(c2) - len(c1)
# if n < 0:
# warnings.warn_explicit("The second catalogue is shorter. Adding fake coords.",UserWarning,WARN,cf.f_lineno)
# c2 = SkyCoord(ra=list(c2.ra.value) + (-n+10)*[0.0],
# dec=list(c2.dec.value) + (-n+10)*[0.0], unit='deg')
indices,sep,sep3d = c1.match_to_catalog_sky(c2)
#take the maximum radius from the two
if radius == 'largest':
radius = max(self.search_rad, cat.search_rad)
if self.verbose:
print 'Using the largest of the two search radii of {0} arcsec.'.format(radius)
#only take matched rows from cat, so self.df and cat.df are parallel
cat.df = cat.df.iloc[indices].reset_index(drop=True)
#create pandas dataframe of the separations in arcsec
sep_col = '{0}_{1}_sep'.format(cat.name,self.name)
sepdf = pd.DataFrame(data = {sep_col : sep.arcsec})
#only take matches within search radius
indices = np.where(sep.arcsec < radius)[0]
#only add cross-matched table when at least 1 match
if len(indices) >= 1:
print "Found {0} matches within {1} arcsec.".format(len(indices),radius)
#don't reset indices so cat.df stays parallel with self.df
cat.df = cat.df.iloc[indices]
sepdf = sepdf.iloc[indices]
#concatenate tables together according to match type
if join_type == '1and2':
matched_df = pd.concat([self.df,cat.df,sepdf], axis=1, join='inner')
elif join_type == '1':
matched_df = pd.concat([self.df,cat.df,sepdf], axis=1, join_axes=[self.df.index])
matched_only_df = pd.concat([self.df, cat.df, sepdf], axis=1, join='inner')
self.matched_df = matched_only_df
#reset indices and overwrite data frame with matched one
matched_df = matched_df.reset_index(drop=True)
#set catalogue to matched df and write to file
self.overwrite_df(matched_df)
self.write_df(write,filename)
else:
print '{0} cross-matches between {1} and {2}. Catalogue unchanged.'.format(len(indices),self.name,cat.name)
return
#if file exists, simply read in catalogue
else:
print "'{0}' already exists. Skipping cross-matching step.".format(filename)
print 'Setting catalogue to this file.'
matched_df = pd.read_csv(filename)
self.matched_df = matched_df
#update basename to this cross-matched catalogue
self.basename = filename[:-4]
#overwrite the df so unmatched rows (set to nan) are included
cat.overwrite_df(matched_df)
self.overwrite_df(matched_df)
#add key fields from matched catalogue
self.set_key_fields(cat=cat)
def fit_spectra(self, cat_name=None, match_perc=0, models=['pow'], fig_extn=None,
GLEAM_subbands=None, GLEAM_nchans=None, fit_flux=False, redo=False,
write=True):
"""Derive radio spectra for this catalogue, using the input SED models. This will add new columns to the table, including the spectral index
and error, and optionally, the fitted flux at the frequency of this instance and the ratio between this and the measured flux.
Keyword arguments:
------------------
cat_name : string
Derive spectral indices between the catalogue given by this dictionary key and the main catalogue from this instance. If None is input, all data
except the catalogue from this instance will be used, and the flux at its frequency will be derived. If 'all' is input, all data will be used.
match_perc : float
Don't use the fluxes from a catalogue if the number of sources is less than this percentage of the main catalogue.
Only used when cat_name is None. Use 0 to accept all catalogues.
models : list
A list of strings corresponding to SED models to attempt to fit to the radio spectra.
fig_extn : string
Write figures of the SEDs with this extension. Use None to not write figures.
GLEAM_subbands : string
Use all GLEAM sub-band measurements, if GLEAM cross-matched. Input 'int' for integrated fluxes,
'peak' for peak fluxes, and None to use none.
GLEAM_nchans : int
Average together this many 8 MHz GLEAM sub-bands. Use None for no averaging.
fit_flux : bool
Use all cross-matched catalogues to derive a fitted flux. If False, a typical spectral index of -0.8 will be assumed.
redo : bool
Derive spectral indices, even if the file exists and the spectral indices have been derived from these frequencies.
write : bool
Write the spectral index catalogue to file."""
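# Illustrative usage (the model list and figure extension are example values only):
# fit a simple power law to all cross-matched fluxes and write SED figures as png files.
#
#   cat.fit_spectra(models=['pow'], fig_extn='png', fit_flux=True)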
#update filename (usually after cross-match)
if not self.basename.endswith('si') and cat_name is None:
self.basename += '_si'
self.si_name = "{0}.csv".format(self.basename)
filename = self.si_name
#if file exists, simply read in catalogue
if not redo and self.basename.endswith('si') and os.path.exists(filename):
self.overwrite_df(filename,step='spectral index')
#when no cat name is given, use all available fluxes
if cat_name in [None,'all']:
print "-----------------------------"
print "| Deriving spectral indices |"
print "-----------------------------"
#derive the number of frequencies used to measure spectral
#indices, and store these for column names and for output
num_cats,max_count = 0,0
used_cats,max_cat = '',''
#derive column names based on main catalogue
freq = int(round(self.frequency))
suffix = '_{0}MHz'.format(freq)
fitted_flux_suffix = '_fitted{0}_flux'.format(suffix)
fitted_ratio_suffix = '_fitted{0}_{1}_flux_ratio'.format(suffix,self.name)
best_fitted_flux = 'best' + fitted_flux_suffix
best_fitted_ratio = 'best' + fitted_ratio_suffix
for cat in self.cat_list:
count = self.count[cat]
#Catalogue not considered useful if less than input % of sources have matches
if cat != self.name and count > (match_perc/100)*self.count[self.name]:
num_cats += 1
used_cats += "{0}, ".format(cat)
#store the largest used catalogue
if count > max_count:
max_cat = cat
max_count = count
#derive extrapolated flux for each catalogue
fitted_flux = '{0}_extrapolated{1}_flux'.format(cat,suffix)
fitted_ratio = '{0}_extrapolated{1}_{2}_flux_ratio'.format(cat,suffix,self.name)
self.est_fitted_flux(fitted_flux,fitted_ratio,freq,cat)
#don't derive spectral indices if there aren't 2+ catalogues to use,
#but just derive flux at given frequency from a typical spectral index
if (num_cats <= 1 or not fit_flux) and self.name in self.flux.keys() and (redo or best_fitted_flux not in self.df.columns):
self.est_fitted_flux(best_fitted_flux,best_fitted_ratio,freq,max_cat)
#otherwise, derive the spectral index and fitted flux using all available frequencies
elif num_cats > 1 and (redo or best_fitted_flux not in self.df.columns):
self.n_point_spectra(fitted_flux_suffix,fitted_ratio_suffix,best_fitted_flux,best_fitted_ratio,used_cats,freq,
models=models,fig_extn=fig_extn,GLEAM_subbands=GLEAM_subbands,GLEAM_nchans=GLEAM_nchans,redo=redo)
#otherwise derive the spectral index between this instance
#and the given catalogue, if any cross-matches were found
elif cat_name in self.cat_list:
self.two_point_spectra(cat_name,redo)
#write catalogue to file
self.write_df(write,filename)
def est_fitted_flux(self,fitted_flux_col,fitted_ratio_col,freq,cat,spec_index=-0.8):
"""Using a typical spectral index, derive the flux at the frequency of this
catalogue instance, and the ratio between this and its measured value.
Arguments:
----------
fitted_flux_col : string
The name of the fitted flux column to which to append the estimated flux.
fitted_ratio_col : string
The name of the fitted ratio column to which to append the estimated ratio.
freq : int or float
The frequency (in MHz) at which the flux is derived.
cat : string
The catalogue to use for extrapolating the spectral index.
Keyword arguments:
------------------
spec_index : float
The assumed spectral index."""
if self.verbose:
print "Deriving the flux at {0} MHz assuming a typical spectral index of {1}.".format(freq,spec_index)
self.df[fitted_flux_col] = flux_at_freq(self.freq[self.name],self.freq[cat],self.flux[cat],spec_index)
self.df[fitted_ratio_col] = self.flux[self.name] / self.df[fitted_flux_col]
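# The extrapolation presumably follows a power law S(nu) = S_cat * (nu/nu_cat)**alpha with
# alpha = -0.8 by default. A rough worked example (illustrative numbers only): a 100 mJy
# source at 1400 MHz extrapolated to 700 MHz gives
#   S(700) ~= 0.1 * (700/1400)**(-0.8) ~= 0.174 Jy.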
def two_point_spectra(self,cat_name,redo=False):
"""Derive the spectral index, uncertainty and fitted flux between two frequencies.
Arguments:
----------
cat_name : string
Derive spectral indices between the catalogue given by this dictionary key and the catalogue from this instance.
Keyword arguments:
------------------
redo : bool
Derive spectral indices, even if the file exists and the spectral indices have been derived from these frequencies."""
alpha_col = '{0}_{1}_alpha'.format(self.name,cat_name)
alpha_err_col = '{0}_err'.format(alpha_col)
if redo or alpha_col not in self.df.columns:
#don't derive spectral indices if frequencies are <10% apart
if np.abs(self.freq[cat_name]/self.freq[self.name]-1) <= 0.1:
print "{0} and {1} are too close to derive spectral indices.".format(self.name,cat_name)
else:
print "Deriving spectral indices between {0} and {1}.".format(self.name,cat_name)
self.df[alpha_col],self.df[alpha_err_col],flux = two_freq_power_law(self.freq[self.name],
[self.freq[self.name],self.freq[cat_name]],
[self.flux[self.name],self.flux[cat_name]],
[self.flux_err[self.name],self.flux_err[cat_name]])
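# For two flux measurements S1 at nu1 and S2 at nu2, the two-point spectral index reduces to
# alpha = ln(S1/S2) / ln(nu1/nu2), which is presumably what two_freq_power_law computes.
# E.g. S1 = 1 Jy at 1400 MHz and S2 = 2 Jy at 700 MHz give alpha = ln(0.5)/ln(2) = -1.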
def n_point_spectra(self,fitted_flux_suffix,fitted_ratio_suffix,best_fitted_flux,best_fitted_ratio,used_cats,freq,
cat_name=None,models=['pow'],fig_extn=None,GLEAM_subbands=None,GLEAM_nchans=None,redo=False):
"""Derive the radio spectra from the input SED models, using the specified data, presumed to be >2 frequency measurements.
Arguments:
----------
fitted_flux_suffix : string
The suffix for the column name to store the fitted flux for each model.
fitted_ratio_suffix : string
The suffix for the column name to store the fitted flux ratio for each model.
best_fitted_flux : string
The name of the column to store the best fitted flux.
best_fitted_ratio : string
The name of the column to store the best fitted flux ratio.
used_cats : string
The catalogues used to calculate the SEDs (for output).
freq : int or float
The frequency (in MHz) at which the flux is derived.
Keyword arguments:
------------------
cat_name : string
If None is input, all data except the catalogue from this instance will be used, and the flux at the frequency
given by this instance will be derived from these spectral indices. If 'all' is input, all data will be used.
models : list
A list of strings corresponding to SED models to fit to the radio spectra.
fig_extn : string
Write figures of the SEDs with this extension. Use None to not write figures.
GLEAM_subbands : string
Use all GLEAM sub-band measurements, if GLEAM cross-matched. Input 'int' for integrated fluxes,
'peak' for peak fluxes, and None to use none.
GLEAM_nchans : int
Average together this many 8 MHz GLEAM sub-bands. Use None for no averaging.
redo : bool
Derive spectral indices, even if the file exists and the spectral indices have been derived from these frequencies."""
print "Deriving SEDs using following catalogues: {0}.".format(used_cats[:-2])
print "Deriving the flux for each model at {0} MHz.".format(freq)
for col in [best_fitted_flux,best_fitted_ratio]:
self.df[col] = np.full(len(self.df),np.nan)
if fig_extn is not None and self.verbose:
print "Writting SED plots to 'SEDs/'"
#iterate through all sources and derive SED model where possible
for i in range(len(self.df)):
fluxes,errs,freqs = np.array([]),np.array([]),np.array([])
#iterate through all catalogues and only take fluxes
#that aren't nan and optionally don't include main catalogue
for cat in self.flux.keys():
flux = self.flux[cat].iloc[i]
if not np.isnan(flux) and (cat != self.name or cat_name == 'all'):
fluxes=np.append(fluxes,flux)
errs=np.append(errs,self.flux_err[cat].iloc[i])
freqs=np.append(freqs,self.freq[cat])
#append GLEAM sub-band measurements according to input type (int or peak)
if GLEAM_subbands is not None and 'GLEAM' in self.cat_list:
GLEAM_freqs,GLEAM_fluxes,GLEAM_errs = np.array([]),np.array([]),np.array([])
for col in self.df.columns:
if col.startswith('GLEAM_{0}_flux_'.format(GLEAM_subbands)) and 'fit' not in col and 'wide' not in col and not np.isnan(self.df.loc[i,col]):
GLEAM_freq = col.split('_')[-1]
GLEAM_freqs = np.append(GLEAM_freqs,float(GLEAM_freq))
GLEAM_fluxes = np.append(GLEAM_fluxes,self.df.loc[i,'GLEAM_{0}_flux_{1}'.format(GLEAM_subbands,GLEAM_freq)])
GLEAM_errs = np.append(GLEAM_errs,self.df.loc[i,'GLEAM_err_{0}_flux_{1}'.format(GLEAM_subbands,GLEAM_freq)])
#optionally average sub-bands together
if GLEAM_nchans is not None:
index=0
used_index=0
while index + GLEAM_nchans < len(GLEAM_freqs) and GLEAM_freqs[index+GLEAM_nchans] <= 174: #guard against indexing past the last sub-band
GLEAM_freqs[used_index] = GLEAM_freqs[index:index+GLEAM_nchans].mean()
GLEAM_fluxes[used_index] = GLEAM_fluxes[index:index+GLEAM_nchans].mean()
GLEAM_errs[used_index] = np.sqrt(np.sum(GLEAM_errs[index:index+GLEAM_nchans]**2)) / GLEAM_nchans
index += GLEAM_nchans
used_index += 1
GLEAM_freqs=GLEAM_freqs[:used_index]
GLEAM_fluxes=GLEAM_fluxes[:used_index]
GLEAM_errs=GLEAM_errs[:used_index]
#append GLEAM measurements
freqs = np.append(freqs,GLEAM_freqs)
fluxes = np.append(fluxes,GLEAM_fluxes)
errs = np.append(errs,GLEAM_errs)
#attempt to fit models if more than one frequency
if len(freqs) > 1:
figname = fig_extn
#use island ID or otherwise row index for figure name
if figname is not None:
if self.island_col is not None:
name = self.df.loc[i,self.island_col]
else:
name = i
figname = '{0}.{1}'.format(name,figname)
#fit SED models
mods,names,params,errors,fluxes,rcs,BICs = SED(self.freq[self.name],freqs,fluxes,errs,models,figname=figname)
#append best fitted flux and ratio
if len(mods) > 0:
best_flux = fluxes[np.where(BICs == min(BICs))[0][0]]
self.df.loc[i,best_fitted_flux] = best_flux
if self.name in self.flux.keys():
self.df.loc[i,best_fitted_ratio] = best_flux / self.flux[self.name][i]
#iterate through each model and append fitted parameters
for j,model in enumerate(mods):
fitted_flux_col = model + fitted_flux_suffix
fitted_ratio_col = model + fitted_ratio_suffix
rcs_col = model + '_rcs'
BIC_col = model + '_BIC'
for col in [fitted_flux_col,fitted_ratio_col,rcs_col,BIC_col]:
if col not in self.df.columns:
self.df[col] = np.full(len(self.df),np.nan)
self.df.loc[i,fitted_flux_col] = fluxes[j]
if self.name in self.flux.keys():
self.df.loc[i,fitted_ratio_col] = fluxes[j] / self.flux[self.name][i]
self.df.loc[i,rcs_col] = rcs[j]
self.df.loc[i,BIC_col] = BICs[j]
for k,name in enumerate(names[j]):
#derive column name for each parameter
para_col = '{0}_{1}'.format(model,name)
para_err_col = '{0}_err'.format(para_col)
if para_col not in self.df.columns:
#add new columns for each parameter and uncertainty, the fitted
#flux and the ratio between this and the measured flux
self.df[para_col] = np.full(len(self.df),np.nan)
self.df[para_err_col] = np.full(len(self.df),np.nan)
#store parameter value, error, fitted flux and ratio
self.df.loc[i,para_col] = params[j][k]
self.df.loc[i,para_err_col] = errors[j][k]
def process_config_file(self, config_file, main_dir, redo=False, write_all=True,
write_any=True, verbose=False):
"""For a given catalogue config file, read the paramaters into a dictionary, pass it into a
new catalogue object, cut out a box, cross-match to this instance, and derive the spectral index.
Arguments:
----------
config_file : string
The filepath to a configuration file for a catalogue.
main_dir : string
Main directory that contains all the necessary files.
Keyword arguments:
------------------
redo : bool
Re-do all processing, even if output files produced.
write_all : bool
Write all files during processing. If False, cutout file will still be written.
write_any : bool
Write any files at all. If False, not even the cutout file will be written.
verbose : bool
Verbose output."""
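# A minimal sketch of the config-driven workflow (the config filename is hypothetical;
# the keys it provides are whatever config2dic produces for the catalogue constructor):
#
#   askap_cat.process_config_file('NVSS_config.txt', main_dir, redo=False,
#                                 write_all=True, write_any=True, verbose=True)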
#create dictionary of arguments, append verbose and create new catalogue instance
config_dic = config2dic(config_file, main_dir, verbose=verbose)
config_dic.update({'verbose': verbose})
if redo:
config_dic['autoload'] = False
cat = catalogue(**config_dic)
#Cut out a box in catalogues within boundaries of image, cross-match and derive spectral indices
cat.cutout_box(self.ra_bounds, self.dec_bounds, redo=redo,
verbose=verbose, write=write_any)
self.cross_match(cat, redo=redo, write=write_all)
if cat.name in self.cat_list and self.name in self.flux.keys():
self.fit_spectra(cat_name=cat.name, redo=redo, write=write_all)
def sources_within_radius(self, radius=30):
"""
The DataFarame of sources within the given radius (arcmin) from the image center
based on pybdsf catalog
"""
a = self.df
seps = self.img_center.separation(self.coords[self.name]).to('arcmin').value
a['Center_sep'] = seps
res = a.query('Center_sep < @radius')
return res
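# E.g. nearby = cat.sources_within_radius(radius=10) returns the rows of self.df whose
# 'Center_sep' (separation from the image centre in arcmin) is below 10.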
def source_dynamic_range(self, radius=30):
"""
Get the highest dynamic range for sources within 1/4 beam radius
based on pybdsf catalog (Peak_flux / Resid_Isl_rms)
"""
d = self.sources_within_radius(radius=radius)
# take 5 brightest sources:
d = d.sort_values(self.peak_col, ascending=False)[:5]
dr = d[self.peak_col]/d[self.rms_val]
return dr.min(), dr.max()
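# E.g. (min_dr, max_dr) = cat.source_dynamic_range(radius=30) gives the lowest and highest
# peak-flux-to-rms ratio among the five brightest sources within 30 arcmin of the image
# centre (the values depend on the loaded pybdsf catalogue).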
def local_dynamic_range(self, radius=30, box=50):
"""
Get the highest peak-to-artefact ratio in box of +-@box pixels
for sources within the @radius of the center
based on pybdsf catalog and residual image
"""
d = self.sources_within_radius(radius=radius)
# take 5 brightest sources:
d = d.sort_values(self.peak_col, ascending=False)[:5]
fts = f.open(self.image.residual)[0]
data = fts.data
wcs = WCS(fts.header).celestial
# print data.shape
if len(data.shape) == 4:
data = data[0,0,:,:]
res = []
for ra, dec, peak in zip(d[self.ra_col], d[self.dec_col], d[self.peak_col]):
pxra, pxdec = wcs.wcs_world2pix([[ra, dec]], 1)[0]
boxdata = data[int(pxdec-box):int(pxdec+box), int(pxra-box):int(pxra+box)]
res.append(peak/np.max(abs(boxdata)))
return min(res), max(res)
| 63,782 | 45.286647 | 160 | py |
dataqa | dataqa-master/continuum/validation_tool/radio_image.py | from __future__ import division
from functions import remove_extn, get_pixel_area
import os
import numpy as np
from astropy.io import fits as f
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord
from astropy.utils.exceptions import AstropyWarning
import warnings
from inspect import currentframe, getframeinfo
from scipy.stats import normaltest
import logging
#ignore annoying astropy warnings and set my own obvious warning output
warnings.simplefilter('ignore', category=AstropyWarning)
cf = currentframe()
WARN = '\n\033[91mWARNING: \033[0m' + getframeinfo(cf).filename
class radio_image(object):
def __init__(self, filepath, finder='aegean', extn='fits',
rms_map=None, SNR=5, verbose=False):
"""Initialise a radio image object.
Arguments:
----------
filepath : string
The absolute path to a fits image (must have '.fits' extension).
Keyword arguments:
------------------
finder : string
The source finder used to derive the catalogue and output file names ['aegean' | 'pybdsf'].
extn : string
The extension of the source-finder catalogue if source finding is performed.
rms_map : string
The filepath of a fits image of the local rms. If None is provided, a BANE map is used.
SNR : float
The signal-to-noise ratio, used to derive a search radius when cross-matching the catalogue of this image.
verbose : bool
Verbose output."""
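# A minimal usage sketch (the filename 'image.fits' is hypothetical): read an image and its
# header specs; when this object is passed to a catalogue, the search radius becomes 3*posErr.
#
#   img = radio_image('image.fits', finder='pybdsf', SNR=5, verbose=True)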
self.verbose = verbose
if verbose:
print "----------------------"
print "| Reading fits image |"
print "----------------------"
if verbose:
print "Initialising radio_image object using file '{0}'.".format(filepath.split('/')[-1])
self.filepath = filepath
self.name = filepath.split('/')[-1]
if finder == 'aegean':
suffix = '_aegean'
elif finder == 'pybdsf':
suffix = '_pybdsf'
#Aegean format
self.basename = remove_extn(self.name) + suffix
self.bkg = '../{0}_bkg.fits'.format(self.basename)
self.cat_name = '../{0}.{1}'.format(self.basename, extn)
if finder == 'pybdsf':
self.cat_comp = '../{0}_comp.csv'.format(self.basename)
else:
self.cat_comp = '../{0}_comp.{1}'.format(self.basename, extn)
self.residual = '../{0}_gaus_resid.fits'.format(self.basename)
self.model = '../{0}_model.fits'.format(self.basename)
# if finder == 'pybdsf':
self.rms_map = '../{}_rms.fits'.format(self.basename)
# else:
# self.rms_map = rms_map
#open fits image and store header specs
self.fits = f.open(filepath)[0] #HDU axis 0
self.header_specs(self.fits, verbose=verbose)
#expected positional error, given by FWHM/SNR (Condon, 1997)
self.posErr = int(round(self.bmaj/SNR))
self.SNR = SNR
def header_key(self, header, key, floatify=False):
"""Return the value of the key from a fits header. If key doesn't exist, '' will be returned.
Arguments:
----------
header : astropy.io.fits.header.Header
A fits header object.
key : string
A key from the header.
Keyword Arguments:
------------------
floatify : bool
Convert value to float.
Returns:
--------
value : string
'' if key doesn't exist, otherwise, the value of the key.
See Also:
---------
astropy.io.fits.header.Header"""
if key not in header.keys():
return ''
elif floatify:
return float(header[key])
else:
return header[key]
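# E.g. self.header_key(head, 'DURATION', floatify=True) returns the scan duration in seconds
# as a float, or '' if the DURATION keyword is absent from the header.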
def header_specs(self,fits,verbose=False):
"""Read the header of a fits file and set several fields for this instance, including the RA, DEC, BMIN, BMAJ, BPA, frequency, etc.
Arguments:
----------
fits : astropy.io.fits
The primary axis of a fits image.
Keyword arguments:
------------------
verbose : bool
Verbose output."""
if verbose:
print "Reading Bmaj, RA/DEC centre, frequency, etc from fits header."
head = fits.header
w = WCS(head)
#Assume these keys exist
self.bmaj = head['BMAJ']*3600
self.bmin = head['BMIN']*3600
self.bpa = head['BPA']
#Set these to '' if they don't exist
self.project = self.header_key(head,'PROJECT')
self.sbid = self.header_key(head,'SBID')
self.date = self.header_key(head,'DATE-OBS')
self.duration = self.header_key(head,'DURATION',floatify=True) #seconds
#get ASKAP soft version from history in header if it exists
# self.soft_version = ''
# self.pipeline_version = ''
# if 'HISTORY' in head.keys():
# for val in head['HISTORY']:
# if 'ASKAPsoft version' in val:
# self.soft_version = val.split('/')[-1].split()[-1].replace(',','')
# if 'ASKAP pipeline version' in val:
# self.pipeline_version = val.split()[-1].replace(',','')
#derive duration in hours
if self.duration != '':
self.duration = '{0:.2f}'.format(self.duration/3600)
#iterate through axes in header to find RA,DEC and frequency
axis = 0
while axis < w.naxis:
chanType = w.wcs.ctype[axis]
if(chanType.startswith('RA')):
self.refRA = w.wcs.crval[axis]
self.raPS = np.abs(w.wcs.cdelt[axis])*3600 #pixel size in arcsec
self.ra_axis = axis
elif(chanType.startswith('DEC')):
self.refDEC = w.wcs.crval[axis]
self.decPS = np.abs(w.wcs.cdelt[axis])*3600 #pixel size in arcsec
self.dec_axis = axis
elif(chanType.startswith('FREQ')):
self.freq = w.wcs.crval[axis]/1e6 #freq in MHz
w = w.dropaxis(axis)
axis -= 1
#drop all other axes from wcs object so only RA/DEC left
else:
w = w.dropaxis(axis)
axis -= 1
axis += 1
#Get the area and solid angle from all non-nan pixels in this image
self.area, self.solid_ang = get_pixel_area(fits, nans=True,
ra_axis=self.ra_axis,
dec_axis=self.dec_axis, w=w)
#store the RA/DEC of the image as centre pixel and store image vertices
naxis1 = int(head['NAXIS1'])
naxis2 = int(head['NAXIS2'])
pixcrd = np.array([[naxis1/2, naxis2/2]])
centre = w.all_pix2world(pixcrd,1)
self.ra = centre[0][0]
self.dec = centre[0][1]
self.center = SkyCoord(ra=self.ra, dec=self.dec, unit="deg,deg")
self.centre = self.center.to_string(style='hmsdms',sep=':')
self.vertices = w.calc_footprint()
self.ra_bounds = min(self.vertices[:,:1])[0],max(self.vertices[:,:1])[0]
self.dec_bounds = min(self.vertices[:,1:])[0],max(self.vertices[:,1:])[0]
if verbose:
print "Found psf axes {0:.2f} x {1:.2f} arcsec at PA {2}.".format(self.bmaj,self.bmin,self.bpa)
print "Found a frequency of {0} MHz.".format(self.freq)
print "Found a field centre of {0}.".format(self.centre)
self.imsizestr = "{}x{}".format(head['NAXIS1'], head['NAXIS2'])
self.pixsizestr = "{:.1f}".format(self.decPS)
# Check gaussianity:
if w.naxis == 4:
img = fits.data[0][0]
elif w.naxis == 3:
img = fits.data[0]
else:
img = fits.data
# determine gaussianity
try:
k2, p = normaltest(img, nan_policy='omit', axis=None)
if p < 1e-2:
self.gaussianity = "Passed"
else:
self.gaussianity = "Failed"
except:
self.gaussianity = None
def run_BANE(self, ncores=8, redo=False):
"""Produce a noise and background map using BANE.
Keyword arguments:
------------------
ncores : int
The number of cores to use (per node) when running BANE.
redo : bool
Reproduce the maps, even if they exist."""
#Overwrite rms map input by user
self.rms_map = '../{0}_rms.fits'.format(self.basename)
if redo:
print "Re-running BANE and overwriting background and rms maps."
#Run BANE to create a map of the local rms
if not os.path.exists(self.rms_map) or redo:
print "----------------------------"
print "| Running BANE for rms map |"
print "----------------------------"
command = "BANE --cores={0} --out=../{1} {2}".format(ncores,self.basename,self.filepath)
print "Running BANE using following command:"
print command
os.system(command)
else:
print "'{0}' already exists. Skipping BANE.".format(self.rms_map)
def run_Aegean(self, params='', ncores=8, write=True, redo=False):
"""Perform source finding on image using Aegean, producing just a component catalogue by default.
Keyword arguments:
------------------
params : string
Any extra parameters to pass into Aegean (apart from cores, noise, background and table).
ncores : int
The number of cores to use (per node) when running BANE and Aegean.
write : bool
Write the fitted model and residual images.
redo : bool
Perform source finding, even if output catalogue(s) exist."""
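        # For reference, the call assembled below has the form
        #   aegean --cores=<ncores> --table=<cat_name>[,<cat_name>.reg] <params> <filepath>
        # where the ds9 region table is only added in verbose mode.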
if redo:
print "Re-doing source finding. Overwriting all Aegean and AeRes files."
if not os.path.exists(self.cat_comp) or redo:
print "--------------------------------"
print "| Running Aegean for catalogue |"
print "--------------------------------"
#Run Aegean source finder to produce catalogue of image
command = 'aegean --cores={0} --table={1}'.\
format(ncores, self.cat_name)
#Also write ds9 region file and island fits file when user wants verbose output
if self.verbose:
command += ',{0}.reg'.format(remove_extn(self.cat_name))
            #Add any parameters the user has given as input, plus the image file name
command += " {0} {1}".format(params, self.filepath)
print "Running Aegean with following command:"
print command
os.system(command)
#Print error message when no sources are found and catalogue not created.
if not os.path.exists(self.cat_comp):
warnings.warn_explicit('Aegean catalogue not created. Check output from Aegean.\n',UserWarning,WARN,cf.f_lineno)
else:
print "'{0}' already exists. Skipping Aegean.".format(self.cat_comp)
#Run AeRes when Aegean catalogue exists to produce fitted model and residual
if write:
if (not os.path.exists(self.residual) and os.path.exists(self.cat_comp)) or redo:
print "----------------------------------------"
print "| Running AeRes for model and residual |"
print "----------------------------------------"
command = 'AeRes -f {0} -c {1} -r {2} -m {3}'.format(self.filepath, self.cat_comp,
self.residual, self.model)
print "Running AeRes for residual and model images with following command:"
print command
os.system(command)
else:
print "'{0}' already exists. Skipping AeRes.".format(self.residual)
# TODO:
def run_PyBDSF(self, pybdsf_params=dict(), ncores=8, write=True, redo=False):
"""
Perform source finding on image using PyBDSF, producing just a component catalogue by default.
Keyword arguments:
------------------
        pybdsf_params : dict
            Any extra parameters to pass into bdsf.process_image (apart from quiet and ncores).
        ncores : int
            The number of cores to use (per node) when running PyBDSF.
write : bool
Write the fitted model and residual images.
redo : bool
Perform source finding, even if output catalogue(s) exist."""
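        # A note on usage: pybdsf_params is expanded directly into
        # bdsf.process_image below, e.g. pybdsf_params={'thresh': 'hard',
        # 'thresh_pix': 5.0} (cf. the commented-out SNR handling further down).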
try:
import bdsf
except:
logging.error("Can not import bdsf module")
return 1
if redo:
print "Re-doing source finding. Overwriting all PyBDSF files."
if not os.path.exists(self.cat_comp) or redo:
print "--------------------------------"
print "| Running PyBDSF for catalogue |"
print "--------------------------------"
#Run PyBDSF source finder to produce catalogue of image
# if self.SNR is not None:
# pybdsf_params.update({'thresh':'hard', 'thresh_pix':self.SNR})
img = bdsf.process_image(self.filepath, quiet=True, ncores=ncores,
**pybdsf_params)
plot_type_list = ['rms', 'mean',
'gaus_model', 'gaus_resid', 'island_mask']
fits_names = ["../{}_{}.fits".format(self.basename, _) for _ in plot_type_list]
# number of plots
n_plots = len(plot_type_list)
for k in range(n_plots):
img.export_image(outfile=fits_names[k],
clobber=True, img_type=plot_type_list[k])
img.write_catalog(outfile=self.cat_comp, format="csv",
clobber=True, catalog_type="srl")
#Print error message when no sources are found and catalogue not created.
if not os.path.exists(self.cat_comp):
warnings.warn_explicit('Catalogue not created. Check output from PyBDSF.\n',UserWarning,WARN,cf.f_lineno)
else:
print "'{0}' already exists. Skipping PyBDSF.".format(self.cat_comp)
def correct_img(self,dRA,dDEC,flux_factor=1.0):
"""Correct the header of the fits image from this instance, by shifting the reference positions,
and optionally multiplying the pixels by a factor. Write the image to 'name_corrected.fits'.
Arguments:
----------
dRA : float
The RA offset in SECONDS. NOTE: This is not in arcsec.
dDEC : float
The DEC offset in arcsec.
Keyword Arguments:
------------------
flux_factor : float
            The factor by which to multiply all pixels."""
filename = '{0}_corrected.fits'.format(self.basename)
print "Correcting header of fits image and writing to '{0}'".format(filename)
print "Shifting RA by {0} seconds and DEC by {1} arcsec".format(dRA,dDEC)
if flux_factor != 1.0:
print "Multiplying image by {0}".format(flux_factor)
#Shift the central RA/DEC in degrees, and multiply the image by the flux factor (x1 by default)
#WCS axes start at 0 but fits header axes start at 1
self.fits.header['CRVAL' + str(self.ra_axis+1)] += dRA/3600
self.fits.header['CRVAL' + str(self.dec_axis+1)] += dDEC/3600
self.fits.data[0][0] *= flux_factor
self.fits.writeto(filename,clobber=True)
| 15,725 | 36.265403 | 139 | py |
dataqa | dataqa-master/crosscal/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/crosscal/dish_delay_plot.py | """
Simple function to plot the dish delay.
This needs to run separately from the other crosscal plots because
it requires all 40 beams to be accessible
"""
from crosscal_plots import GDSols
def get_dish_delay_plots(obs_id, fluxcal, basedir=None):
GD = GDSols(obs_id, fluxcal, False, basedir=basedir)
GD.get_data()
GD.plot_dish_delay()
| 352 | 22.533333 | 66 | py |
dataqa | dataqa-master/crosscal/crosscal_plots.py | #python "module" for QA plots for cross-cal
#Will want to plot calibration solutions
#also potential for raw and corrected data
from __future__ import print_function
#load necessary packages
import os
import numpy as np
from astropy.io import ascii
import apercal
import casacore.tables as pt
import logging
import matplotlib
import time
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from scandata import ScanData
from apercal.subs import misc
logger = logging.getLogger(__name__)
def make_all_ccal_plots(scan, fluxcal, polcal, output_path=None, basedir=None, trigger_mode=False):
"""
Create crosscal QA plots
Args:
scan (int): Task id of target, e.g. 190311152
fluxcal (str): Name of fluxcal, e.g. "3C147"
polcal(str): Name of the polcal, e.g. "3C286"
output_path (str): Output path, None for default
trigger_mode (bool): To run automatically after Apercal
"""
# Get autocorrelation plots
logger.info("Autocorrelation plots")
start_time_autocorr = time.time()
AC = AutocorrData(scan, fluxcal, trigger_mode, basedir=basedir)
AC.get_data()
AC.plot_autocorr_per_antenna(imagepath=output_path)
AC.plot_autocorr_per_beam(imagepath=output_path)
logger.info('Done with autocorrelation plots ({0:.0f}s)'.format(
time.time() - start_time_autocorr))
# Get BP plots
logger.info("Bandpass plots")
start_time_bp = time.time()
BP = BPSols(scan, fluxcal, trigger_mode)
BP.get_data()
BP.plot_amp(imagepath=output_path)
BP.plot_phase(imagepath=output_path)
logger.info('Done with bandpass plots ({0:.0f}s)'.format(time.time() - start_time_bp))
# Get Gain plots
logger.info("Gain plots")
start_time_gain = time.time()
Gain = GainSols(scan, fluxcal, trigger_mode)
Gain.get_data()
Gain.plot_amp(imagepath=output_path)
Gain.plot_phase(imagepath=output_path)
logger.info('Done with gainplots ({0:.0f}s)'.format(time.time() - start_time_gain))
# Get Global Delay plots
logger.info("Global delay plots")
start_time_gdelay = time.time()
GD = GDSols(scan, fluxcal, trigger_mode)
GD.get_data()
GD.plot_delay(imagepath=output_path)
logger.info('Done with global delay plots ({0:.0f}s)'.format(time.time() - start_time_gdelay))
# Get polarisation leakage plots
logger.info("Leakage plots")
start_time_leak = time.time()
Leak = LeakSols(scan, fluxcal, trigger_mode)
Leak.get_data()
Leak.plot_amp(imagepath=output_path)
Leak.plot_phase(imagepath=output_path)
logger.info('Done with leakage plots ({0:.0f}s)'.format(time.time() - start_time_leak))
# Get cross hand delay solutions
logger.info("Cross-hand delay plots")
start_time_kcross = time.time()
KCross = KCrossSols(scan, polcal, trigger_mode)
KCross.get_data()
KCross.plot_delay(imagepath=output_path)
logger.info('Done with cross hand delay plots ({0:.0f}s)'.format(time.time() - start_time_kcross))
# Get polarisation angle plots
logger.info("Polarisation angle plots")
start_time_polangle = time.time()
Polangle = PolangleSols(scan, polcal, trigger_mode)
Polangle.get_data()
Polangle.plot_amp(imagepath=output_path)
Polangle.plot_phase(imagepath=output_path)
logger.info('Done with polarisation angle correction plots ({0:.0f}s)'.format(time.time() - start_time_polangle))
# Get Raw data
logger.info("Raw data plots")
start_time_raw = time.time()
Raw = RawData(scan, fluxcal, trigger_mode)
Raw.get_data()
Raw.plot_amp(imagepath=output_path)
Raw.plot_phase(imagepath=output_path)
logger.info('Done with plotting raw data ({0:.0f}s)'.format(
time.time() - start_time_raw))
# Get model data
logger.info("Model data plots")
start_time_model = time.time()
Model = ModelData(scan, fluxcal, trigger_mode)
Model.get_data()
Model.plot_amp(imagepath=output_path)
Model.plot_phase(imagepath=output_path)
logger.info('Done with plotting model data ({0:.0f}s)'.format(
time.time() - start_time_model))
# Get corrected data
logger.info("Corrected data plots")
start_time_corrected = time.time()
Corrected = CorrectedData(scan, fluxcal, trigger_mode)
Corrected.get_data()
Corrected.plot_amp(imagepath=output_path)
Corrected.plot_phase(imagepath=output_path)
logger.info('Done with plotting corrected data ({0:.0f}s)'.format(
time.time() - start_time_corrected))
class BPSols(ScanData):
def __init__(self,scan,fluxcal,trigger_mode,basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.time = np.empty(len(self.dirlist),dtype=np.ndarray)
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
self.flags = np.empty(len(self.dirlist),dtype=np.ndarray)
self.amps_norm = np.empty(len(self.dirlist),dtype=np.ndarray)
self.phases_norm = np.empty(len(self.dirlist),dtype=np.ndarray)
def get_data(self):
#get the data
for i, (path,beam) in enumerate(zip(self.dirlist,self.beamlist)):
bptable = "{0}/raw/{1}.Bscan".format(path,self.sourcename)
#print(bptable)
if os.path.isdir(bptable):
taql_command = ("SELECT TIME,abs(CPARAM) AS amp, arg(CPARAM) AS phase, "
"FLAG FROM {0}").format(bptable)
t=pt.taql(taql_command)
times = t.getcol('TIME')
amp_sols=t.getcol('amp')
phase_sols = t.getcol('phase')
flags = t.getcol('FLAG')
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(bptable)
t= pt.taql(taql_antnames)
ant_names=t.getcol("NAME")
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(bptable)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')
#check for flags and mask
amp_sols[flags] = np.nan
phase_sols[flags] = np.nan
self.ants[i] = ant_names
self.time[i] = times
self.phase[i] = phase_sols *180./np.pi #put into degrees
self.amp[i] = amp_sols
self.flags[i] = flags
self.freq[i] = freqs
else:
logger.info('Filling with NaNs. BP table not present for B{}'.format(beam))
self.ants[i] = ['RT2','RT3','RT4','RT5','RT6','RT7','RT8','RT9','RTA','RTB','RTC','RTD']
self.time[i] = np.array(np.nan)
self.phase[i] = np.full((12,2,2),np.nan)
self.amp[i] = np.full((12,2,2),np.nan)
self.freq[i] = np.full((2,2),np.nan)
def plot_amp(self, imagepath=None):
"""Plot amplitude, one plot per antenna"""
        logger.info("Creating plots for bandpass amplitude")
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Bandpass amplitude for Antenna {0}'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
#print(beamnum)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n][0,:],self.amp[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n][0,:],self.amp[n][a,:,1],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
plt.ylim(0,1.8)
plt.legend(markerscale=3,fontsize=14)
plt.savefig('{imagepath}/BP_amp_{ant}_{scan}.png'.format(ant=ant, scan=self.scan, imagepath=imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_phase(self, imagepath=None):
"""Plot phase, one plot per antenna"""
logger.info("Creating plots for bandpass phase")
imagepath = self.create_imagepath(imagepath)
ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Bandpass phases for Antenna {0}'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n][0,:],self.phase[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n][0,:],self.phase[n][a,:,1],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
plt.ylim(-180,180)
plt.legend(markerscale=3,fontsize=14)
plt.savefig('{imagepath}/BP_phase_{ant}_{scan}.png'.format(ant=ant,scan=self.scan,imagepath=imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
class GainSols(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.time = np.empty(len(self.dirlist),dtype=np.ndarray)
self.flags = np.empty(len(self.dirlist),dtype=np.ndarray)
self.amps_norm = np.empty(len(self.dirlist),dtype=np.ndarray)
self.phases_norm = np.empty(len(self.dirlist),dtype=np.ndarray)
def get_data(self):
for i, (path,beam) in enumerate(zip(self.dirlist,self.beamlist)):
gaintable = "{0}/raw/{1}.G1ap".format(path,self.sourcename)
#check if table exists
#otherwise, place NaNs in place for everything
if os.path.isdir(gaintable):
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(gaintable)
t= pt.taql(taql_antnames)
ant_names=t.getcol("NAME")
#then get number of times
#need this for setting shape
taql_time = "select TIME from {0} orderby unique TIME".format(gaintable)
t= pt.taql(taql_time)
times = t.getcol('TIME')
#then iterate over antenna
                #set array shape to be [n_ant,n_time,n_stokes]
#how can I get n_stokes? Could be 2 or 4, want to find from data
#get 1 data entry
taql_stokes = "SELECT abs(CPARAM) AS amp from {0} limit 1" .format(gaintable)
t_pol = pt.taql(taql_stokes)
pol_array = t_pol.getcol('amp')
n_stokes = pol_array.shape[2] #shape is time, one, nstokes
amp_ant_array = np.empty((len(ant_names),len(times),n_stokes),dtype=object)
phase_ant_array = np.empty((len(ant_names),len(times),n_stokes),dtype=object)
flags_ant_array = np.empty((len(ant_names),len(times),n_stokes),dtype=bool)
for ant in xrange(len(ant_names)):
taql_command = ("SELECT abs(CPARAM) AS amp, arg(CPARAM) AS phase, FLAG FROM {0} "
"WHERE ANTENNA1={1}").format(gaintable,ant)
t = pt.taql(taql_command)
amp_ant_array[ant,:,:] = t.getcol('amp')[:,0,:]
phase_ant_array[ant,:,:] = t.getcol('phase')[:,0,:]
flags_ant_array[ant,:,:] = t.getcol('FLAG')[:,0,:]
#check for flags and mask
amp_ant_array[flags_ant_array] = np.nan
phase_ant_array[flags_ant_array] = np.nan
self.amp[i] = amp_ant_array
self.phase[i] = phase_ant_array * 180./np.pi #put into degrees
self.ants[i] = ant_names
self.time[i] = times
self.flags[i] = flags_ant_array
else:
logger.info('Filling with NaNs. Gain table not present for B{}'.format(beam))
self.amp[i] = np.full((12,2,2),np.nan)
self.phase[i] = np.full((12,2,2),np.nan)
self.ants[i] = ['RT2','RT3','RT4','RT5','RT6','RT7','RT8','RT9','RTA','RTB','RTC','RTD']
self.time[i] = np.full((2),np.nan)
self.flags[i] = np.full((12,2,2),np.nan)
def plot_amp(self, imagepath=None):
"""Plot amplitude, one plot per antenna"""
logger.info("Creating plots for gain amplitude")
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Gain amplitude for Antenna {0}'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.time[n],self.amp[n][a,:,0],
label='XX',
marker=',',s=5)
plt.scatter(self.time[n],self.amp[n][a,:,1],
label='YY',
marker=',',s=5)
plt.title('Beam {0}'.format(beam))
plt.ylim(10,30)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{imagepath}/Gain_amp_{ant}_{scan}.png'.format(ant=ant,scan=self.scan,imagepath=imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_phase(self,imagepath=None):
"""Plot phase, one plot per antenna"""
logger.info("Creating plots for gain phase")
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Gain phase for Antenna {0}'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.time[n],self.phase[n][a,:,0],
label='XX',marker=',',s=5)
plt.scatter(self.time[n],self.phase[n][a,:,1],
label='YY',marker=',',s=5)
plt.title('Beam {0}'.format(beam))
plt.ylim(-180,180)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{2}/Gain_phase_{0}_{1}.png'.format(ant,self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
class GDSols(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.delays = np.empty(len(self.dirlist),dtype=np.ndarray)
def get_data(self):
# get the data
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
gdtable = "{0}/raw/{1}.K".format(path, self.sourcename)
if os.path.isdir(gdtable):
taql_command = ("SELECT FPARAM FROM {0} ").format(gdtable)
t = pt.taql(taql_command)
delays = t.getcol('FPARAM')
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(gdtable)
t = pt.taql(taql_antnames)
ant_names = t.getcol("NAME")
self.ants[i] = ant_names
self.delays[i] = delays[:,0,:]
else:
logger.info('Filling with NaNs. Global delay table not present for B{}'.format(beam))
self.ants[i] = misc.create_antnames()
self.delays[i] = np.full((12, 2), np.nan)
def plot_delay(self, imagepath=None):
"""Plot amplitude, one plot per antenna"""
logger.info("Creating plots for global delay")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
# figlist = ['fig_'+str(i) for i in range(len(ant_names))]
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Global delay', size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.ants[n], self.delays[n][:,0], label='X', marker='o', s=5)
plt.scatter(self.ants[n], self.delays[n][:,1], label='Y', marker='o', s=5)
plt.title('Beam {0}'.format(beam))
plt.legend(markerscale=3, fontsize=14)
        plt.savefig('{imagepath}/K_{scan}.png'.format(scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
def plot_dish_delay(self, imagepath=None):
"""Plot global delays, dish-based views"""
logger.info("Creating dish-based plots for global delay")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
# figlist = ['fig_'+str(i) for i in range(len(ant_names))]
# set up for 4x3 plots (12 dishes)
nx = 4
ny = 3
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Global dish-based delay', size=30)
#reshape array
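        # the per-beam (n_ant, 2) delay arrays are stacked into shape
        # (n_ant, n_beam, n_pol) = (12, 40, 2) so that each dish can be
        # plotted against beam number below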
delays = np.hstack(self.delays).reshape((12,40,2))
beamarray = np.arange(len(self.beamlist))
for n, ant in enumerate(ant_names):
plt.subplot(ny, nx, n + 1)
plt.scatter(beamarray, delays[n,:,0], label='X', marker='o', s=5)
plt.scatter(beamarray, delays[n,:,1], label='Y', marker='o', s=5)
plt.title('Dish {0}'.format(ant))
plt.legend(markerscale=3, fontsize=14)
plt.xlabel('beam number')
plt.ylabel('Delay, nanoseconds')
        plt.savefig('{imagepath}/K_dish_{scan}.png'.format(scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
class LeakSols(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
self.flags = np.empty(len(self.dirlist),dtype=np.ndarray)
self.leakage = np.empty((len(self.dirlist)),dtype=np.ndarray)
def get_data(self):
# get the data
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
leaktable = "{0}/raw/{1}.Df".format(path, self.sourcename)
if os.path.isdir(leaktable):
taql_command = ("SELECT abs(CPARAM) AS amp, arg(CPARAM) AS phase, FLAG FROM {0}").format(leaktable)
t = pt.taql(taql_command)
ampleak_sols=t.getcol('amp')
phaseleak_sols = t.getcol('phase')
flags = t.getcol('FLAG')
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(leaktable)
t = pt.taql(taql_antnames)
ant_names = t.getcol("NAME")
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(leaktable)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')
# check for flags and mask
ampleak_sols[flags] = np.nan
phaseleak_sols[flags] = np.nan
self.ants[i] = ant_names
self.phase[i] = phaseleak_sols *180./np.pi #put into degrees
self.amp[i] = ampleak_sols
self.flags[i] = flags
self.freq[i] = freqs
else:
logger.info('Filling with NaNs. Polarisation leakage table not present for B{}'.format(beam))
self.ants[i] = misc.create_antnames()
self.phase[i] = np.full((12, 2, 2),np.nan)
self.amp[i] = np.full((12, 2, 2), np.nan)
self.freq[i] = np.full((2, 2), np.nan)
def plot_amp(self, imagepath=None):
"""Plot leakage, one plot per antenna"""
logger.info("Creating plots for amplitude leakage")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Amplitude polarisation leakage for Antenna {0}'.format(ant), size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.freq[n][0, :], self.amp[n][a, :, 0], label='X', marker=',', s=1)
plt.scatter(self.freq[n][0, :], self.amp[n][a, :, 1], label='Y', marker=',', s=1)
plt.title('Beam {0}'.format(beam))
plt.legend(markerscale=3, fontsize=14)
plt.savefig('{imagepath}/Df_amp_{ant}_{scan}.png'.format(ant=ant, scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
def plot_phase(self, imagepath=None):
"""Plot leakage, one plot per antenna"""
logger.info("Creating plots for phase leakage")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Phase polarisation leakage for Antenna {0}'.format(ant), size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.freq[n][0, :], self.phase[n][a, :, 0], label='X', marker=',', s=1)
plt.scatter(self.freq[n][0, :], self.phase[n][a, :, 1], label='Y', marker=',', s=1)
plt.title('Beam {0}'.format(beam))
plt.legend(markerscale=3, fontsize=14)
plt.savefig('{imagepath}/Df_phase_{ant}_{scan}.png'.format(ant=ant, scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
class KCrossSols(ScanData):
def __init__(self, scan, polcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, polcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.delays = np.empty(len(self.dirlist),dtype=np.ndarray)
def get_data(self):
# get the data
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
gdtable = "{0}/raw/{1}.Kcross".format(path, self.sourcename)
if os.path.isdir(gdtable):
taql_command = ("SELECT FPARAM FROM {0} ").format(gdtable)
t = pt.taql(taql_command)
delays = t.getcol('FPARAM')
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(gdtable)
t = pt.taql(taql_antnames)
ant_names = t.getcol("NAME")
self.ants[i] = ant_names
self.delays[i] = delays[:,0,:]
else:
logger.info('Filling with NaNs. Cross hand delay table not present for B{}'.format(beam))
self.ants[i] = misc.create_antnames()
self.delays[i] = np.full((12, 2), np.nan)
def plot_delay(self, imagepath=None):
"""Plot amplitude, one plot per antenna"""
logger.info("Creating plots for cross hand delay")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
# figlist = ['fig_'+str(i) for i in range(len(ant_names))]
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Cross hand delay', size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.ants[n], self.delays[n][:,0], label='X', marker='o', s=5)
plt.scatter(self.ants[n], self.delays[n][:,1], label='Y', marker='o', s=5)
plt.title('Beam {0}'.format(beam))
plt.legend(markerscale=3, fontsize=14)
        plt.savefig('{imagepath}/Kcross_{scan}.png'.format(scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
class PolangleSols(ScanData):
def __init__(self, scan, polcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, polcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.ants = np.empty(len(self.dirlist),dtype=np.object)
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
self.flags = np.empty(len(self.dirlist),dtype=np.ndarray)
self.polangle = np.empty((len(self.dirlist)),dtype=np.ndarray)
def get_data(self):
# get the data
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
polangletable = "{0}/raw/{1}.Xf".format(path, self.sourcename)
if os.path.isdir(polangletable):
taql_command = ("SELECT abs(CPARAM) AS amp, arg(CPARAM) AS phase, FLAG FROM {0}").format(polangletable)
t = pt.taql(taql_command)
amppolangle_sols=t.getcol('amp')
phasepolangle_sols = t.getcol('phase')
flags = t.getcol('FLAG')
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(polangletable)
t = pt.taql(taql_antnames)
ant_names = t.getcol("NAME")
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(polangletable)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')
# check for flags and mask
amppolangle_sols[flags] = np.nan
phasepolangle_sols[flags] = np.nan
self.ants[i] = ant_names
self.phase[i] = phasepolangle_sols *180./np.pi #put into degrees
self.amp[i] = amppolangle_sols
self.flags[i] = flags
self.freq[i] = freqs
else:
logger.info('Filling with NaNs. Polarisation angle table not present for B{}'.format(beam))
self.ants[i] = misc.create_antnames()
self.phase[i] = np.full((12, 2, 2),np.nan)
self.amp[i] = np.full((12, 2, 2), np.nan)
self.freq[i] = np.full((2, 2), np.nan)
def plot_amp(self, imagepath=None):
"""Plot leakage, one plot per antenna"""
logger.info("Creating plots for amplitude polarisation angle corrections")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Amplitude polarisation angle for Antenna {0}'.format(ant), size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.freq[n][0, :], self.amp[n][a, :, 0], marker=',', s=1)
plt.title('Beam {0}'.format(beam))
plt.savefig('{imagepath}/Xf_amp_{ant}_{scan}.png'.format(ant=ant, scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
def plot_phase(self, imagepath=None):
"""Plot leakage, one plot per antenna"""
logger.info("Creating plots for phase polarisation angle corrections")
imagepath = self.create_imagepath(imagepath)
# put plots in default place w/ default name
ant_names = self.ants[0]
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Phase polarisation angle for Antenna {0}'.format(ant), size=30)
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
plt.scatter(self.freq[n][0, :], self.phase[n][a, :, 0], marker=',', s=1)
plt.title('Beam {0}'.format(beam))
plt.savefig('{imagepath}/Xf_phase_{ant}_{scan}.png'.format(ant=ant, scan=self.scan, imagepath=imagepath))
# to really close the plot, this will do
plt.close('all')
class ModelData(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
def get_data(self):
for i, (path,beam) in enumerate(zip(self.dirlist,self.beamlist)):
msfile = "{0}/raw/{1}.MS".format(path,self.sourcename)
if os.path.isdir(msfile):
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(msfile)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')[0,:]
try:
taql_command = "SELECT abs(gmeans(MODEL_DATA)) AS amp, arg(gmeans(MODEL_DATA)) AS phase FROM {0}".format(msfile)
t = pt.taql(taql_command)
amp = t.getcol('amp')[0,:,:]
phase = t.getcol('phase')[0,:,:]
except:
amp = np.full((len(freqs),4),np.nan)
phase = np.full((len(freqs),4),np.nan)
self.amp[i] = amp
self.phase[i] = phase
self.freq[i] = freqs
def plot_amp(self,imagepath=None):
"""Plot amplitude, one subplot per beam"""
logger.info("Creating plots for model amplitude")
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Model amplitude')
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.plot(self.freq[n],self.amp[n][:,0],
label='XX')
plt.plot(self.freq[n],self.amp[n][:,3],
label='YY')
plt.title('Beam {0}'.format(beam))
#plt.ylim(10,30)
plt.legend(markerscale=3,fontsize=14)
        plt.savefig('{1}/Model_amp_{0}.png'.format(self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_phase(self,imagepath=None):
"""Plot amplitude, one subplot per beam"""
logger.info("Creating plots for model phase")
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Model phase',size=30)
for n,beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.plot(self.freq[n],self.phase[n][:,0],
label='XX')
plt.plot(self.freq[n],self.phase[n][:,3],
label='YY')
plt.title('Beam {0}'.format(beam))
#plt.ylim(10,30)
plt.legend(markerscale=3,fontsize=14)
        plt.savefig('{1}/Model_phase_{0}.png'.format(self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
class AutocorrData(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal, trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.freq = np.empty(len(self.dirlist), dtype=np.ndarray)
self.ants = np.empty(len(self.dirlist), dtype=np.object)
def get_data(self):
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
msfile = "{0}/raw/{1}.MS".format(path, self.sourcename)
if os.path.isdir(msfile):
logger.info("Processing {}".format(msfile))
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(msfile)
t = pt.taql(taql_antnames)
ant_names = t.getcol("NAME")
if ant_names is None:
logger.warning("Something wrong. No antenna names. Continue with next beam")
continue
#then get frequencies:
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(
msfile)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')[0, :]
#and number of stokes params
taql_stokes = "SELECT abs(DATA) AS amp from {0} limit 1" .format(
msfile)
t_pol = pt.taql(taql_stokes)
pol_array = t_pol.getcol('amp')
if pol_array is None:
logger.warning("Something wrong. No polarisation information. Continue with next beam")
continue
n_stokes = pol_array.shape[2] # shape is time, one, nstokes
#take MS file and get calibrated data
amp_ant_array = np.empty(
(len(ant_names), len(freqs), n_stokes), dtype=object)
# phase_ant_array = np.empty(
# (len(ant_names), len(freqs), n_stokes), dtype=object)
for ant in xrange(len(ant_names)):
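                    # average this antenna's autocorrelation spectrum over time
                    # with the TaQL aggregate gmeans; CORRECTED_DATA[FLAG] applies
                    # the FLAG column as a mask so flagged samples are excluded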
try:
taql_command = ("SELECT abs(gmeans(CORRECTED_DATA[FLAG])) AS amp "
"FROM {0} "
"WHERE ANTENNA1==ANTENNA2 && (ANTENNA1={1} || ANTENNA2={1})").format(msfile, ant)
t = pt.taql(taql_command)
test = t.getcol('amp')
amp_ant_array[ant, :, :] = t.getcol('amp')[0, :, :]
#phase_ant_array[ant, :, :] = t.getcol('phase')[0, :, :]
except Exception as e:
amp_ant_array[ant, :, :] = np.full(
(len(freqs), n_stokes), np.nan)
# phase_ant_array[ant, :, :] = np.full(
# (len(freqs), n_stokes), np.nan)
logger.exception(e)
#self.phase[i] = phase_ant_array
self.amp[i] = amp_ant_array
self.freq[i] = freqs
self.ants[i] = ant_names
else:
logger.warning("Could not find {}".format(msfile))
def plot_autocorr_per_antenna(self, imagepath=None):
"""
Plot the autocorrelation for each antenna for all beams
"""
logger.info("Creating plots for autocorrelation plots per antenna")
y_min = 300
y_max = 1600
#first define imagepath if not given by user
imagepath = self.create_imagepath(imagepath)
#plot amplitude, one plot per antenna
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a, ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize, ysize))
plt.suptitle(
'Autocorrelation of Antenna {0}'.format(ant), size=30)
for n, beam in enumerate(self.beamlist):
freq = self.freq[n]
if self.amp[n] is None:
continue
amp_xx = self.amp[n][a, :, 0]
amp_yy = self.amp[n][a, :, 3]
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(freq[np.where(amp_xx != 0.)[0]], amp_xx[np.where(amp_xx != 0.)[0]],
label='XX',
marker=',', s=1, color='C0')
plt.scatter(freq[np.where(amp_yy != 0.)[0]], amp_yy[np.where(amp_yy != 0)[0]],
label='YY',
marker=',', s=1, color='C1')
# plt.scatter(self.freq[n][np.where(self.amp[n][a, :, 0] != 0)[0]], self.amp[n][a, :, 0][np.where(self.amp[n][a, :, 0] != 0)[0]],
# label='XX',
# marker=',', s=1)
# plt.scatter(self.freq[n][np.where(self.amp[n][a, :, 0] != 0)[0]], self.amp[n][a, :, 3][np.where(self.amp[n][a, :, 0] != 0)[0]],
# label='YY',
# marker=',', s=1)
# values above plot maximum
high_xx_values = np.where(amp_xx > y_max)[0]
high_yy_values = np.where(amp_yy > y_max)[0]
if len(high_xx_values) != 0:
plt.scatter(freq[high_xx_values], np.full(len(high_xx_values), y_max - 20),
marker=10, s=1, label="XX>{0}".format(y_max), color='C9')
if len(high_yy_values) != 0:
plt.scatter(freq[high_yy_values], np.full(len(high_yy_values), y_max - 30),
marker=10, s=1, label="YY>{0}".format(y_max), color='C3')
plt.title('Beam {0}'.format(beam))
plt.ylim(y_min, y_max)
plt.legend(markerscale=3, fontsize=14)
            plt.savefig('{2}/Autocorrelation_Antenna_{0}_{1}.png'.format(ant, self.scan, imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_autocorr_per_beam(self, imagepath=None):
"""
Plot the autocorrelation for each beam with all antennas
"""
logger.info("Creating plots for autocorrelation plots per beam")
y_min = 200
y_max = 2000
#first define imagepath if not given by user
imagepath = self.create_imagepath(imagepath)
#plot amplitude, one plot per antenna
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
            #iterate through beams
            #set up for 4x3 plots (12 antennas)
nx = 4
ny = 3
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize, ysize))
plt.suptitle(
'Autocorrelation of Beam {0:02d}'.format(beamnum), size=30)
for a, ant in enumerate(ant_names):
freq = self.freq[n]
if self.amp[n] is None:
continue
amp_xx = self.amp[n][a, :, 0]
amp_yy = self.amp[n][a, :, 3]
plt.subplot(ny, nx, a+1)
plt.scatter(freq[np.where(amp_xx != 0.)[0]], amp_xx[np.where(amp_xx != 0.)[0]],
label='XX',
marker=',', s=1, color='C0')
plt.scatter(freq[np.where(amp_yy != 0.)[0]], amp_yy[np.where(amp_yy != 0.)[0]],
label='YY',
marker=',', s=1, color='C1')
# values above plot maximum
high_xx_values = np.where(amp_xx > y_max)[0]
high_yy_values = np.where(amp_yy > y_max)[0]
if len(high_xx_values) != 0:
plt.scatter(freq[high_xx_values], np.full(len(high_xx_values),y_max - 20),
marker = 10, s = 1, label="XX>{0}".format(y_max), color='C9')
if len(high_yy_values) != 0:
plt.scatter(freq[high_yy_values], np.full(len(high_yy_values),y_max - 30),
marker=10, s=1, label="YY>{0}".format(y_max), color='C3')
plt.title('Antenna {0}'.format(ant))
plt.ylim(y_min, y_max)
plt.legend(markerscale=3, fontsize=14)
            plt.savefig('{2}/Autocorrelation_Beam_{0:02d}_{1}.png'.format(beamnum, self.scan, imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
class CorrectedData(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
self.ants = np.empty(len(self.dirlist),dtype=np.object)
def get_data(self):
for i, (path,beam) in enumerate(zip(self.dirlist,self.beamlist)):
msfile = "{0}/raw/{1}.MS".format(path,self.sourcename)
if os.path.isdir(msfile):
logger.info("Processing {}".format(msfile))
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(msfile)
t= pt.taql(taql_antnames)
ant_names=t.getcol("NAME")
if ant_names is None:
logger.warning(
"Something wrong. No antenna names. Continue with next beam")
continue
#then get frequencies:
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(msfile)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')[0,:]
#and number of stokes params
taql_stokes = "SELECT abs(DATA) AS amp from {0} limit 1" .format(msfile)
t_pol = pt.taql(taql_stokes)
pol_array = t_pol.getcol('amp')
if pol_array is None:
logger.warning(
"Something wrong. No polarisation information. Continue with next beam")
continue
n_stokes = pol_array.shape[2] #shape is time, one, nstokes
#take MS file and get calibrated data
amp_ant_array = np.empty((len(ant_names),len(freqs),n_stokes),dtype=object)
phase_ant_array = np.empty((len(ant_names),len(freqs),n_stokes),dtype=object)
for ant in xrange(len(ant_names)):
try:
taql_command = ("SELECT abs(gmeans(CORRECTED_DATA[FLAG])) AS amp, "
"arg(gmeans(CORRECTED_DATA[FLAG])) AS phase FROM {0} "
"WHERE ANTENNA1!=ANTENNA2 && "
"(ANTENNA1={1} || ANTENNA2={1})").format(msfile,ant)
t = pt.taql(taql_command)
test=t.getcol('amp')
amp_ant_array[ant,:,:] = t.getcol('amp')[0,:,:]
phase_ant_array[ant,:,:] = t.getcol('phase')[0,:,:]
except:
amp_ant_array[ant,:,:] = np.full((len(freqs),n_stokes),np.nan)
phase_ant_array[ant,:,:] = np.full((len(freqs),n_stokes),np.nan)
self.phase[i] = phase_ant_array
self.amp[i] = amp_ant_array
self.freq[i] = freqs
self.ants[i] = ant_names
else:
logger.warning("Could not find {}".format(msfile))
def plot_amp(self,imagepath=None):
logger.info("Creating plots for corrected amplitude")
#first define imagepath if not given by user
imagepath = self.create_imagepath(imagepath)
#plot amplitude, one plot per antenna
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Corrected amplitude for Antenna {0} (baselines averaged)'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
if self.amp[n] is None:
continue
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n],self.amp[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n],self.amp[n][a,:,3],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
plt.ylim(0,30)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{2}/Corrected_amp_{0}_{1}.png'.format(ant,self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_phase(self,imagepath=None):
logger.info("Creating plots for corrected phase")
#plot amplitude, one plot per antenna
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Corrected phase for Antenna {0} (baselines averaged)'.format(ant))
for n,beam in enumerate(self.beamlist):
if self.amp[n] is None:
continue
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n],self.phase[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n],self.phase[n][a,:,3],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
plt.ylim(-3,3)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{2}/Corrected_phase_{0}_{1}.png'.format(ant,self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
class RawData(ScanData):
def __init__(self, scan, fluxcal, trigger_mode, basedir=None):
ScanData.__init__(self, scan, fluxcal,
trigger_mode=trigger_mode, basedir=basedir)
self.imagepathsuffix = "crosscal"
self.freq = np.empty(len(self.dirlist),dtype=np.ndarray)
self.ants = np.empty(len(self.dirlist),dtype=np.object)
def get_data(self):
for i, (path,beam) in enumerate(zip(self.dirlist,self.beamlist)):
msfile = "{0}/raw/{1}.MS".format(path,self.sourcename)
if os.path.isdir(msfile):
logger.info("Processing {}".format(msfile))
taql_antnames = "SELECT NAME FROM {0}::ANTENNA".format(msfile)
t= pt.taql(taql_antnames)
ant_names=t.getcol("NAME")
if ant_names is None:
logger.warning(
"Something wrong. No antenna names. Continue with next beam")
continue
#then get frequencies:
taql_freq = "SELECT CHAN_FREQ FROM {0}::SPECTRAL_WINDOW".format(msfile)
t = pt.taql(taql_freq)
freqs = t.getcol('CHAN_FREQ')[0,:]
#and number of stokes params
taql_stokes = "SELECT abs(DATA) AS amp from {0} limit 1" .format(msfile)
t_pol = pt.taql(taql_stokes)
pol_array = t_pol.getcol('amp')
if pol_array is None:
logger.warning(
"Something wrong. No polarisation information. Continue with next beam")
continue
n_stokes = pol_array.shape[2] #shape is time, one, nstokes
#take MS file and get calibrated data
amp_ant_array = np.empty((len(ant_names),len(freqs),n_stokes),dtype=object)
phase_ant_array = np.empty((len(ant_names),len(freqs),n_stokes),dtype=object)
for ant in xrange(len(ant_names)):
try:
taql_command = ("SELECT abs(gmeans(DATA[FLAG])) AS amp, "
"arg(gmeans(DATA[FLAG])) AS phase FROM {0} "
"WHERE ANTENNA1!=ANTENNA2 && "
"(ANTENNA1={1} || ANTENNA2={1})").format(msfile,ant)
t = pt.taql(taql_command)
test=t.getcol('amp')
amp_ant_array[ant,:,:] = t.getcol('amp')[0,:,:]
phase_ant_array[ant,:,:] = t.getcol('phase')[0,:,:]
except:
amp_ant_array[ant,:,:] = np.full((len(freqs),n_stokes),np.nan) #t.getcol('amp')[0,:,:]
phase_ant_array[ant,:,:] = np.full((len(freqs),n_stokes),np.nan) #t.getcol('phase')[0,:,:]
self.phase[i] = phase_ant_array
self.amp[i] = amp_ant_array
self.freq[i] = freqs
self.ants[i] = ant_names
else:
logger.warning("Could not find {}".format(msfile))
def plot_amp(self,imagepath=None):
logger.info("Creating plots for raw amplitude")
#plot amplitude, one plot per antenna
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Raw amplitude for Antenna {0} (baselines averaged)'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
if self.amp[n] is None:
continue
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n],self.amp[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n],self.amp[n][a,:,3],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
#plt.ylim(10,30)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{2}/Raw_amp_{0}_{1}.png'.format(ant,self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
def plot_phase(self,imagepath=None):
logger.info("Creating plots for raw phase")
#plot amplitude, one plot per antenna
imagepath = self.create_imagepath(imagepath)
#put plots in default place w/ default name
for antennas in self.ants:
            if antennas is not None:
ant_names = antennas
break
#ant_names = self.ants[0]
#figlist = ['fig_'+str(i) for i in range(len(ant_names))]
for a,ant in enumerate(ant_names):
#iterate through antennas
#set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx*4
ysize = ny*4
plt.figure(figsize=(xsize,ysize))
plt.suptitle('Raw phase for Antenna {0} (baselines averaged)'.format(ant),size=30)
for n,beam in enumerate(self.beamlist):
if self.amp[n] is None:
continue
beamnum = int(beam)
plt.subplot(ny, nx, beamnum+1)
plt.scatter(self.freq[n],self.phase[n][a,:,0],
label='XX',
marker=',',s=1)
plt.scatter(self.freq[n],self.phase[n][a,:,3],
label='YY',
marker=',',s=1)
plt.title('Beam {0}'.format(beam))
plt.ylim(-180,180)
plt.legend(markerscale=3,fontsize=14)
            plt.savefig('{2}/Raw_phase_{0}_{1}.png'.format(ant,self.scan,imagepath))
#plt.clf()
# to really close the plot, this will do
plt.close('all')
| 57,674 | 42.825988 | 145 | py |
dataqa | dataqa-master/inspection_plots/__init__.py | 0 | 0 | 0 | py |
|
dataqa | dataqa-master/inspection_plots/inspection_plots.py | # Module with functionality to get the inspection plots for an Apertif observation
import numpy as np
import os
import glob
import logging
import subprocess
logger = logging.getLogger(__name__)
FNULL = open(os.devnull, 'w')
def get_inspection_plot_list(is_calibrator=False):
"""
Function to return a list of inspection plot
This list only contains the type of inspection plot
to be copied from ALTA.
Args:
        is_calibrator (bool): If True, return only the calibrator plot types
Return:
(List(str)): List of inspection plots
"""
if is_calibrator:
plot_type_list = ['_beams_xx',
'_beams_yy']
else:
plot_type_list = ['_beams_ampvstime_XX',
'_beams_ampvschan_XX',
'_beams_phavstime_XX',
'_beams_phavschan_XX',
'_beams_waterfall_amplitude_autoscale_XX',
'_beams_waterfall_amplitude_noscale_XX',
'_beams_waterfall_phase_autoscale_XX',
'_beams_waterfall_phase_noscale_XX',
'_beams_xx',
'_beams_yy']
return plot_type_list
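# On ALTA each entry above maps to a plot file name of the form
# "WSRTA<obs_id><plot_type>.png", e.g. "WSRTA190311152_beams_xx.png"
# (see get_inspection_plot_from_alta below).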
def get_inspection_plot_from_alta(qa_plot_dir, obs_id, plot_type_list):
"""
Function to get a specific inspection plot from ALTA
Args:
qa_plot_dir (str): Directory where plots should be stored
obs_id (int): ID of the observation (scan number or task_id)
        plot_type_list (List(str)): Types of inspection plots to retrieve
    Return:
        plot_file_name_list (List(str)): Names of the plots (without the ALTA path)
"""
# Main ALTA path
default_alta_path = "/altaZone/archive/apertif_main/visibilities_default/"
# Path of inspection plots on ALTA for a given obs id
alta_plot_path = os.path.join(default_alta_path, "{}_INSP".format(obs_id))
plot_file_name_list = ["WSRTA{0}{1}.png".format(
obs_id, plot_type) for plot_type in plot_type_list]
alta_plot_file_list = [os.path.join(
alta_plot_path, plot_file_name) for plot_file_name in plot_file_name_list]
alta_files = " ".join(alta_plot_file_list)
# set the irod files location
irods_status_file = os.path.join(
qa_plot_dir, "{}_insp-icat.irods-status".format(obs_id))
irods_status_lf_file = os.path.join(
qa_plot_dir, "{}_insp-icat.lf-irods-status".format(obs_id))
# iget command to get the plot
try:
cmd = "iget -rfPIT -X {0} --lfrestart {1} --retries 5 {2} {3}".format(
irods_status_file, irods_status_lf_file, alta_files, qa_plot_dir)
logger.debug(cmd)
subprocess.check_call(cmd, shell=True, stdout=FNULL, stderr=FNULL)
except Exception as e:
        logger.warning(
            "Failed retrieving inspection plots for {}".format(obs_id))
logger.exception(e)
else:
        logger.info(
            "Successfully retrieved inspection plots for {}".format(obs_id))
# iget -rfPIT -X /data/schulz/transfer_files/test-icat.irods-status --lfrestart /data/schulz/transfer_files/test-icat.lf-irods-status --retries 5 /altaZone/home/apertif_main/early_results/temp_storage/190725042/17/M0155+3130.UVFITS /data/schulz
# return full path
# plot_file_name_with_path = os.path.join(qa_plot_dir, plot_file_name)
return plot_file_name_list
def rename_inspection_plots(qa_plot_dir, plot_file_name_list):
"""
Function to rename the inspection plots from the target so that
they stay in order.
"""
plot_counter = 1
for plot_file_name in plot_file_name_list:
# rename it to keep the order
plot_file_name_new = os.path.join(qa_plot_dir, "{0:02d}_{1:s}".format(
plot_counter, plot_file_name))
# have to add the path to the original plot name now
plot_file_name = os.path.join(qa_plot_dir, plot_file_name)
try:
os.rename(plot_file_name, plot_file_name_new)
except Exception as e:
logger.warning("Renaming {} failed".format(plot_file_name))
logger.exception(e)
else:
logger.info("Inspection plot saved as {0:s}".format(
os.path.basename(plot_file_name_new)))
plot_counter += 1
def get_inspection_plots(obs_id, qa_plot_dir, is_calibrator=False, cal_id=None):
"""
Function to get all inspection plots from ALTA useful for the QA
Args:
qa_plot_dir (str) Directory where plots should be stored
obs_id (int) ID of observation (scan number or task id)
"""
# list of types of inspection plots
plot_type_list = get_inspection_plot_list(is_calibrator=is_calibrator)
# get inspection plot
if cal_id is None:
plot_file_name_list = get_inspection_plot_from_alta(
qa_plot_dir, obs_id, plot_type_list)
rename_inspection_plots(qa_plot_dir, plot_file_name_list)
else:
plot_file_name_list = get_inspection_plot_from_alta(
qa_plot_dir, cal_id, plot_type_list)
| 5,046 | 32.423841 | 248 | py |
dataqa | dataqa-master/osa_overview/create_report_nb.py | # version of ../create_report to run as part of the pipeline
import os
import sys
from astropy.table import Table
import logging
import glob
import time
import argparse
import socket
from apercal.libs import lib
from dataqa.report import html_report as hp
from dataqa.report import html_report_dir as hpd
from dataqa.report.pipeline_run_time import get_pipeline_run_time
from dataqa.scandata import get_default_imagepath
def run():
"""Function to create the web report by the OSA.
It is very similar to create_report.py with the important
difference that it does not require any arguments from the OSA.
This also means that this function does not work unless create_report.py
was run as it requires the observation information file in the directory of the obs
"""
start_time = time.time()
# get the file name as seen from the directory of where the script will be run
obs_file = glob.glob(
"../[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]_obs.csv")
if len(obs_file) == 0:
print("ERROR: No observing file found. Please make sure that the observation was processed by the QA or ask for help. Abort")
return -1
else:
obs_info = Table.read(obs_file[0])
obs_id = obs_info['Obs_ID'][0]
# directory where the QA is
qa_dir = get_default_imagepath(obs_id)
# check the mode to run the validation
qa_report_dir = "{0:s}report".format(
qa_dir)
# check that this directory exists (just in case)
if not os.path.exists(qa_report_dir):
print("Directory {0:s} does not exist and will be created".format(
qa_report_dir))
os.makedirs(qa_report_dir)
lib.setup_logger(
'debug', logfile='{0:s}/create_report.log'.format(qa_report_dir))
logger = logging.getLogger(__name__)
# check if a report is available
add_osa_report = False
#osa_report = ''
# # if osa report should be added, check it is available
# if add_osa_report:
# # name of the osa report for this observation
# osa_report = os.path.join(
# qa_report_dir, "OSA_Report/{}_OSA_report.ecsv")
# # check that the file is actually there
# if not os.path.exists(osa_report):
# logger.error("No OSA report found. Abort")
# return -1
# else:
# osa_report = ''
# check on which happili we are:
host_name = socket.gethostname()
trigger_mode = False
if host_name != "happili-01":
logger.error(
"You are not working on happili-01. This script will not work here. Abort")
return -1
# logging.basicConfig(filename='{0:s}/create_report.log'.format(qa_dir), level=logging.DEBUG,
# format='%(asctime)s - %(levelname)s: %(message)s')
# getting timing measurment for apercal
# this could be removed and taken care of by linking to the other happili node files
# the information is already there.
if not add_osa_report:
try:
get_pipeline_run_time(obs_id, trigger_mode=trigger_mode)
except Exception as e:
logger.error(e)
# the subpages to be created
subpages = ['observing_log', 'summary', 'inspection_plots', 'preflag', 'crosscal',
'selfcal', 'continuum', 'line', 'mosaic', 'apercal_log']
logger.info("#### Create report directory structure")
# copy the js and css files
js_file_name = "{0:s}/report_fct.js".format(
hp.__file__.split("/html_report.py")[0])
css_file_name = "{0:s}/report_style.css".format(
hp.__file__.split("/html_report.py")[0])
# Check that qa_dir and the other directories exists
if not os.path.exists(qa_dir):
logger.error(
"Directory {0:s} does not exists. Abort".format(qa_report_dir))
sys.exit(-1)
else:
# Create directory structure for the report
if not add_osa_report:
try:
hpd.create_report_dirs(
obs_id, qa_dir, subpages, trigger_mode=trigger_mode, obs_info=obs_info)
except Exception as e:
logger.error(e)
logger.info("#### Creating report")
try:
hp.create_main_html(qa_report_dir, obs_id, subpages,
css_file=css_file_name, js_file=js_file_name, obs_info=obs_info, add_osa_report=add_osa_report)
except Exception as e:
logger.error(e)
logger.info("#### Report. Done ({0:.0f}s)".format(
time.time()-start_time))
| 4,528 | 33.052632 | 133 | py |
dataqa | dataqa-master/selfcal/selfcal_maps.py | """
This script contains functionality to plot the selfcal images
"""
import os
from apercal.libs import lib
import glob
import socket
import logging
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.colors as mc
logger = logging.getLogger(__name__)
def plot_selfcal_maps(fits_name, qa_selfcal_beam_dir, plot_residuals=False):
"""This function plots the selfcal maps
"""
fits_hdulist = fits.open(fits_name)
# get WCS header of cube
wcs = WCS(fits_hdulist[0].header)
# remove unnecessary axis
if wcs.naxis == 4:
wcs = wcs.dropaxis(3)
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0][0]
elif wcs.naxis == 3:
wcs = wcs.dropaxis(2)
img = fits_hdulist[0].data[0]
else:
img = fits_hdulist[0].data
# set up plot
ax = plt.subplot(projection=wcs)
# create image
if plot_residuals:
fig = ax.imshow(img * 1.e3, norm=mc.Normalize(vmin=-
.05, vmax=.05), origin='lower', cmap="hot")
# fig = ax.imshow(img * 1.e3, norm=mc.SymLogNorm(1.e-3,
# vmin=-1, vmax=1.), origin='lower')
else:
fig = ax.imshow(img * 1.e3, norm=mc.SymLogNorm(1.e-9,
vmin=0.02, vmax=1.), origin='lower', cmap="hot")
cbar = plt.colorbar(fig)
cbar.set_label('Flux Density [mJy/beam]')
ax.coords[0].set_axislabel('Right Ascension')
ax.coords[1].set_axislabel('Declination')
ax.coords[0].set_major_formatter('hh:mm')
ax.set_title("Selfcal {0:s}".format(fits_name))
output = "{0:s}/{1:s}".format(qa_selfcal_beam_dir,
fits_name).replace(".fits", ".png")
plt.savefig(output, overwrite=True, bbox_inches='tight', dpi=200)
# if plot_format == "pdf":
# plt.savefig(output.replace(".png", ".pdf"),
# overwrite=True, bbox_inches='tight')
# else:
# plt.savefig(output, overwrite=True, bbox_inches='tight', dpi=300)
plt.close("all")
def convert_mir2fits(mir_name, fits_name):
"""This function converts a miriad image to fits
"""
fits = lib.miriad('fits')
fits.in_ = mir_name
fits.out = fits_name
fits.op = 'xyout'
try:
fits.go()
except Exception as e:
logger.error(e)
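# Hedged usage sketch (not called by the pipeline itself): convert a single
# miriad image and then plot it. The file names and the QA directory below are
# placeholders, not files shipped with this repository.
#
#   convert_mir2fits("image_02", "phase_01_02_image.fits")
#   plot_selfcal_maps("phase_01_02_image.fits", "/path/to/qa/selfcal/01")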
def create_selfcal_maps(mir_image_list, qa_selfcal_beam_dir, plot_residuals=False, selfcal_type="phase"):
"""
This function creates plots for the selfcal maps.
"""
# go through the list of images
for mir_image in mir_image_list:
# create link to the miriad image
link_name = os.path.basename(mir_image)
if not os.path.exists(link_name):
os.symlink(mir_image, link_name)
else:
os.unlink(link_name)
os.symlink(mir_image, link_name)
# get the major cycle
major_cycle = mir_image.split("/")[-2]
minor_cycle = os.path.basename(mir_image).split("_")[-1]
fits_name = "{0:s}_{1:s}_{2:s}_{3:s}.fits".format(
selfcal_type, major_cycle, minor_cycle, link_name.split("_")[0])
try:
convert_mir2fits(link_name, fits_name)
except Exception as e:
logger.error(e)
logger.error("Converting {0:s} failed".format(mir_image))
# plot image if it exists
if os.path.exists(fits_name):
logger.info("Plotting {0:s}".format(mir_image))
plot_selfcal_maps(fits_name, qa_selfcal_beam_dir,
plot_residuals=plot_residuals)
# remove the fits file
try:
os.remove(fits_name)
except Exception as e:
logger.error(e)
logger.error("Could not remove {0:s}".format(fits_name))
def get_selfcal_maps(obs_id, qa_selfcal_dir, trigger_mode=False):
"""
    This function goes through the available images and plots them.
    It will temporarily convert the miriad images into fits. The fits files
    will be deleted afterwards.
    At the moment only the image and the residual are taken into account.
"""
# creating a temporary directory for conversion
tmp_convert_dir = "{0:s}tmp_conv/".format(qa_selfcal_dir)
if not os.path.exists(tmp_convert_dir):
os.mkdir(tmp_convert_dir)
# get current working directory to go back to at the end of this function
cwd = os.getcwd()
# change to this directory for shorter path lengths for miriad
os.chdir(tmp_convert_dir)
# check host name
host_name = socket.gethostname()
if trigger_mode:
logger.info(
"--> Running selfcal QA in trigger mode. Looking only for data processed by Apercal on {0:s} <--".format(host_name))
if host_name != "happili-01" and not trigger_mode:
logger.warning("You are not working on happili-01.")
logger.warning("The script will not process all beams")
logger.warning("Please switch to happili-01")
# get a list of data beam directories
if "/data" in qa_selfcal_dir:
if trigger_mode:
data_beam_dir_list = glob.glob(
"/data/apertif/{}/[0-3][0-9]".format(obs_id))
elif host_name != "happili-01" and not trigger_mode:
data_beam_dir_list = glob.glob(
"/data/apertif/{}/[0-3][0-9]".format(obs_id))
else:
data_beam_dir_list = glob.glob(
"/data*/apertif/{}/[0-3][0-9]".format(obs_id))
else:
if trigger_mode:
data_beam_dir_list = glob.glob(
"/tank/apertif/{}/[0-3][0-9]".format(obs_id))
elif host_name != "happili-01" and not trigger_mode:
data_beam_dir_list = glob.glob(
"/tank/apertif/{}/[0-3][0-9]".format(obs_id))
else:
data_beam_dir_list = glob.glob(
"/tank/apertif/{}/[0-3][0-9]".format(obs_id)) + glob.glob(
"/tank2/apertif/{}/[0-3][0-9]".format(obs_id)) + glob.glob(
"/tank3/apertif/{}/[0-3][0-9]".format(obs_id)) + glob.glob(
"/tank4/apertif/{}/[0-3][0-9]".format(obs_id))
if len(data_beam_dir_list) != 0:
data_beam_dir_list.sort()
# go through the beam directories found
for data_beam_dir in data_beam_dir_list:
logger.info("## Going through {0:s}".format(data_beam_dir))
beam = data_beam_dir.split("/")[-1]
# create beam directory in selfcal QA dir
qa_selfcal_beam_dir = "{0:s}{1:s}".format(qa_selfcal_dir, beam)
if not os.path.exists(qa_selfcal_beam_dir):
os.mkdir(qa_selfcal_beam_dir)
# Phase selfcal
# =============
# get major cycles
major_cycle_dir_list = glob.glob(
"{0:s}/selfcal/[0-9][0-9]".format(data_beam_dir))
if len(major_cycle_dir_list) != 0:
# go through the major cycle directories:
for major_cycle_dir in major_cycle_dir_list:
# get all images for this major cycle:
mir_image_list = glob.glob(
"{0:s}/image*".format(major_cycle_dir))
if len(mir_image_list) != 0:
# create plots for miriad selfcal images
create_selfcal_maps(
mir_image_list, qa_selfcal_beam_dir)
else:
logger.warning(
"No images found in {0:s}".format(major_cycle_dir))
# get all residuals for this major cycle:
mir_image_list = glob.glob(
"{0:s}/residual*".format(major_cycle_dir))
if len(mir_image_list) != 0:
# create plots for miriad selfcal residuals
create_selfcal_maps(
mir_image_list, qa_selfcal_beam_dir, plot_residuals=True)
else:
logger.warning(
"No residual found in {0:s}".format(major_cycle_dir))
else:
logger.warning(
"No major selfcal cycles found for {0:s}/selfcal/".format(data_beam_dir))
# Amplitude selfcal
# =================
# amplitude selfcal directory
data_beam_dir_amp = os.path.join(data_beam_dir, "selfcal/amp")
# create images only if directory exists (thus amplitude selfcal ran)
if os.path.exists(data_beam_dir_amp):
# get all images for this major cycle:
mir_image_list = glob.glob(
os.path.join(data_beam_dir_amp, "image*"))
if len(mir_image_list) != 0:
# create plots for miriad selfcal images
create_selfcal_maps(
mir_image_list, qa_selfcal_beam_dir, selfcal_type="amplitude")
else:
                    logger.warning(
                        "No images found in {0:s}".format(data_beam_dir_amp))
# get all residuals for this major cycle:
mir_image_list = glob.glob(
os.path.join(data_beam_dir_amp, "residual*"))
if len(mir_image_list) != 0:
# create plots for miriad selfcal residuals
create_selfcal_maps(
mir_image_list, qa_selfcal_beam_dir, selfcal_type="amplitude", plot_residuals=True)
else:
                    logger.warning(
                        "No residual found in {0:s}".format(data_beam_dir_amp))
else:
logger.warning(
"No amplitude selfcal directory found in {0:s}/selfcal/".format(data_beam_dir))
else:
logger.error("Could not find any beams for selfcal QA")
# switch back to working directory
os.chdir(cwd)
| 10,194 | 32.983333 | 128 | py |
dataqa | dataqa-master/selfcal/selfcal_plots.py | # python "module" for QA plots for cross-cal
# Will want to plot calibration solutions
# also potential for raw and corrected data
# load necessary packages
import os
import numpy as np
import datetime
from apercal.subs import readmirlog
from apercal.subs import misc
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from scandata import ScanData
class PHSols(ScanData):
def __init__(self, scan, target, trigger_mode=False, basedir=None):
ScanData.__init__(self, scan, target,
trigger_mode=trigger_mode, basedir=basedir)
self.phants = np.empty(len(self.dirlist), dtype=np.object)
self.phtimes = np.empty(len(self.dirlist), dtype=np.object)
self.phases = np.empty(len(self.dirlist), dtype=np.ndarray)
self.phnants = np.empty(len(self.dirlist), dtype=np.ndarray)
self.phnbins = np.empty(len(self.dirlist), dtype=np.ndarray)
self.phnsols = np.empty(len(self.dirlist), dtype=np.ndarray)
def get_data(self):
# get the data
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
phdata = "{0}/selfcal/{1}.mir".format(path, self.sourcename)
if os.path.isdir(phdata):
try:
phgains, times = readmirlog.get_phases(phdata)
self.phants[i] = misc.create_antnames()
self.phtimes[i] = times
self.phases[i] = phgains
self.phnants[i], self.phnbins[i], self.phnsols[i] = readmirlog.get_ndims(
phdata)
except:
                    print('Filling with NaNs. Phase self-calibration not present for B{}'.format(beam))
self.phants[i] = misc.create_antnames()
self.phtimes[i] = np.array(np.nan)
self.phases[i] = np.array(np.nan)
self.phnbins[i] = np.array(np.nan)
self.phnants[i], self.phnbins[i], self.phnsols[i] = np.array(
np.nan), np.array(np.nan), np.array(np.nan)
else:
                print('Filling with NaNs. Phase self-calibration not present for B{}'.format(beam))
self.phants[i] = misc.create_antnames()
self.phtimes[i] = np.array(np.nan)
self.phases[i] = np.array(np.nan)
self.phnbins[i] = np.array(np.nan)
self.phnants[i], self.phnbins[i], self.phnsols[i] = np.array(
np.nan), np.array(np.nan), np.array(np.nan)
def plot_phase(self, imagepath=None):
"""Plot phase, one plot per antenna"""
imagepath = self.create_imagepath(imagepath)
ant_names = misc.create_antnames()
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Selfcal phases for Antenna {0}'.format(ant))
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
if np.isnan(self.phnbins[n]):
continue
else:
color = cm.rainbow(np.linspace(0, 1, self.phnbins[n]))
for f in range(self.phnbins[n]):
plt.scatter(range(len(
self.phtimes[n])), self.phases[n][a, f, :], label='F'+str(f), marker=',', s=1, c=color[f])
if n >= 32:
plt.xlabel('Time [solint]')
if n % nx == 0:
plt.ylabel('Phase [deg]')
plt.title('Beam {0}'.format(beam))
plt.legend(prop={'size': 5})
plt.savefig(
'{2}SCAL_phase_{0}_{1}.png'.format(ant, self.scan, imagepath))
plt.close("all")
class AMPSols(ScanData):
def __init__(self, scan, target, trigger_mode=False, basedir=None):
ScanData.__init__(self, scan, target,
trigger_mode=trigger_mode, basedir=basedir)
self.ampants = np.empty(len(self.dirlist), dtype=np.object)
self.amptimes = np.empty(len(self.dirlist), dtype=np.object)
self.amps = np.empty(len(self.dirlist), dtype=np.ndarray)
self.ampnants = np.empty(len(self.dirlist), dtype=np.ndarray)
self.ampnbins = np.empty(len(self.dirlist), dtype=np.ndarray)
self.ampnsols = np.empty(len(self.dirlist), dtype=np.ndarray)
def get_data(self):
for i, (path, beam) in enumerate(zip(self.dirlist, self.beamlist)):
ampdata = "{0}/selfcal/{1}_amp.mir".format(path, self.sourcename)
if os.path.isdir(ampdata):
try:
ampgains, times = readmirlog.get_amps(ampdata)
self.ampants[i] = misc.create_antnames()
self.amptimes[i] = times
self.amps[i] = ampgains
self.ampnants[i], self.ampnbins[i], self.ampnsols[i] = readmirlog.get_ndims(
ampdata)
except:
                    print('Filling with NaNs. Amplitude self-calibration not present for B{}'.format(beam))
self.ampants[i] = misc.create_antnames()
self.amptimes[i] = np.array(np.nan)
self.amps[i] = np.array(np.nan)
self.ampnbins[i] = np.array(np.nan)
self.ampnants[i], self.ampnbins[i], self.ampnsols[i] = np.array(
np.nan), np.array(np.nan), np.array(np.nan)
else:
                print('Filling with NaNs. Amplitude self-calibration not present for B{}'.format(beam))
self.ampants[i] = misc.create_antnames()
self.amptimes[i] = np.array(np.nan)
self.amps[i] = np.array(np.nan)
self.ampnbins[i] = np.array(np.nan)
self.ampnants[i], self.ampnbins[i], self.ampnsols[i] = np.array(
np.nan), np.array(np.nan), np.array(np.nan)
def plot_amp(self, imagepath=None):
"""Plot amplitudes, one plot per antenna"""
imagepath = self.create_imagepath(imagepath)
ant_names = misc.create_antnames()
for a, ant in enumerate(ant_names):
# iterate through antennas
# set up for 8x5 plots (40 beams)
nx = 8
ny = 5
xsize = nx * 4
ysize = ny * 4
plt.figure(figsize=(xsize, ysize))
plt.suptitle('Selfcal amplitudes for Antenna {0}'.format(ant))
for n, beam in enumerate(self.beamlist):
beamnum = int(beam)
plt.subplot(ny, nx, beamnum + 1)
if np.isnan(self.ampnbins[n]):
continue
else:
color = cm.rainbow(np.linspace(0, 1, self.ampnbins[n]))
for f in range(self.ampnbins[n]):
plt.scatter(range(len(
self.amptimes[n])), self.amps[n][a, f, :], label='F' + str(f), marker=',', s=1, c=color[f])
if n >= 32:
plt.xlabel('Time [solint]')
if n % nx == 0:
plt.ylabel('Amp')
plt.title('Beam {0}'.format(beam))
plt.legend()
plt.savefig('{2}SCAL_amp_{0}_{1}.png'.format(
ant, self.scan, imagepath))
plt.close("all")
| 7,673 | 44.952096 | 119 | py |
dataqa | dataqa-master/selfcal/__init__.py | 0 | 0 | 0 | py |
|
robust-nli | robust-nli-master/src/losses.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def convert_2d_prob_to_3d(prob_dist):
prob_dist = torch.cat([(prob_dist[:, 0] / 2.0).view(-1, 1),
prob_dist[:, 1].view(-1, 1),
(prob_dist[:, 0] / 2.0).view(-1, 1)], dim=1)
return prob_dist
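# Hedged note: this helper maps a two-class distribution [p_non_entailment,
# p_entailment] onto the three NLI classes by splitting the non-entailment mass
# evenly over the first and last class, e.g.
#   convert_2d_prob_to_3d(torch.tensor([[0.6, 0.4]]))  ->  [[0.3, 0.4, 0.3]]
# The assumed label order (contradiction, entailment, neutral) matches the label
# lists used by the processors in BERT/utils_glue.py.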
# Focal loss's implementation is adapted from
# https://github.com/zhoudaxia233/focal_loss_pytorch/blob/master/multi_class_focal_loss.py
class FocalLoss(nn.Module):
def __init__(self, alpha=1.0, gamma=2.0, size_average=True, ensemble_training=False, aggregate_ensemble="mean"):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.size_average = size_average
self.ensemble_training = ensemble_training
self.aggregate_ensemble=aggregate_ensemble
def compute_probs(self, inputs, targets):
prob_dist = F.softmax(inputs, dim=1)
pt = prob_dist.gather(1, targets)
return pt
def aggregate(self, p1, p2, operation):
if self.aggregate_ensemble == "mean":
result = (p1+p2)/2
return result
elif self.aggregate_ensemble == "multiply":
result = p1*p2
return result
else:
            raise NotImplementedError("Operation {} is not implemented.".format(operation))
def forward(self, inputs, targets, inputs_adv=None, second_inputs_adv=None):
targets = targets.view(-1, 1)
norm = 0.0
pt = self.compute_probs(inputs, targets)
pt_scale = self.compute_probs(inputs if inputs_adv is None else inputs_adv, targets)
if self.ensemble_training:
pt_scale_second = self.compute_probs(second_inputs_adv, targets)
if self.aggregate_ensemble in ["mean", "multiply"]:
pt_scale_total = self.aggregate(pt_scale, pt_scale_second, "mean")
batch_loss = -self.alpha * (torch.pow((1 - pt_scale_total), self.gamma)) * torch.log(pt)
else:
batch_loss = -self.alpha * (torch.pow((1 - pt_scale), self.gamma)) * torch.log(pt)
norm += self.alpha * (torch.pow((1 - pt_scale), self.gamma))
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
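# Minimal usage sketch (illustration only; shapes are assumptions): the main
# model's loss is down-weighted on examples that the bias-only model already
# classifies confidently.
#
#   focal = FocalLoss(gamma=2.0)
#   logits = torch.randn(8, 3)        # main model
#   bias_logits = torch.randn(8, 3)   # bias-only (e.g. hypothesis-only) model
#   labels = torch.randint(0, 3, (8,))
#   loss = focal(logits, labels, inputs_adv=bias_logits)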
class POELoss(nn.Module):
"""Implements the product of expert loss."""
def __init__(self, size_average=True, ensemble_training=False, poe_alpha=1):
super().__init__()
self.size_average = size_average
self.ensemble_training=ensemble_training
self.poe_alpha = poe_alpha
def compute_probs(self, inputs):
prob_dist = F.softmax(inputs, dim=1)
return prob_dist
def forward(self, inputs, targets, inputs_adv, second_inputs_adv=None):
targets = targets.view(-1, 1)
pt = self.compute_probs(inputs)
pt_adv = self.compute_probs(inputs_adv)
if self.ensemble_training:
pt_adv_second = self.compute_probs(second_inputs_adv)
joint_pt = F.softmax((torch.log(pt) + torch.log(pt_adv) + torch.log(pt_adv_second)), dim=1)
else:
joint_pt = F.softmax((torch.log(pt) + self.poe_alpha*torch.log(pt_adv)), dim=1)
joint_p = joint_pt.gather(1, targets)
batch_loss = -torch.log(joint_p)
if self.size_average:
loss = batch_loss.mean()
else:
loss = batch_loss.sum()
return loss
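# Minimal usage sketch (illustration only, same tensors as the FocalLoss example):
#
#   poe = POELoss(poe_alpha=1.0)
#   loss = poe(logits, labels, inputs_adv=bias_logits)
#
# i.e. the cross-entropy is taken on softmax(log p_main + poe_alpha * log p_bias).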
class RUBILoss(nn.Module):
# Implements the RUBI loss.
def __init__(self, num_labels, size_average=True):
super().__init__()
self.size_average = size_average
self.num_labels = num_labels
self.loss_fct = torch.nn.CrossEntropyLoss()
def compute_probs(self, inputs):
prob_dist = F.softmax(inputs, dim=1)
return prob_dist
def forward(self, inputs, targets, inputs_adv):
inputs = inputs.view(-1, self.num_labels)
inputs_adv = inputs_adv.view(-1, self.num_labels)
targets = targets.view(-1)
logits = inputs*torch.sigmoid(inputs_adv)
logits = logits.view(-1, self.num_labels)
loss = self.loss_fct(logits, targets)
return loss
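# Hedged note: RUBI rescales the main logits with sigmoid(bias logits) before the
# cross-entropy; a call mirrors the losses above, e.g.
#   RUBILoss(num_labels=3)(logits, labels, bias_logits)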
epsilon = 1e-8
def log(x):
"""
    We assume the given input is a probability; it is clamped away from 0 and 1
    to keep the logarithm numerically stable.
"""
return torch.log(torch.clamp(x, min=epsilon, max=1-epsilon))
| 4,401 | 35.081967 | 116 | py |
robust-nli | robust-nli-master/src/BERT/utils_glue.py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" BERT classification fine-tuning: utilities to work with GLUE tasks """
from __future__ import absolute_import, division, print_function
import csv
import logging
import os
import sys
import jsonlines
from io import open
import numpy as np
import torch
import torch.nn.functional as f
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score
from os.path import join
from heuristics_utils import have_lexical_overlap, is_subsequence, parse_phrase_list, is_constituent
logger = logging.getLogger(__name__)
def dot_product_matrix_attention(matrix_1, matrix_2):
return matrix_1.bmm(matrix_2.transpose(2, 1))
def get_emb(tokens, word_vec):
matrix = np.zeros((len(tokens), 300))
for i, p in enumerate(tokens):
matrix[i, :] = word_vec[p]
return matrix
def get_word_similarity_new(prem_matrix, hyp_matrix, scores, h_mask, p_mask):
# normalize the token embeddings.
# [8, 64, 768]
prem_matrix = f.normalize(prem_matrix, p=2, dim=2)
hyp_matrix = f.normalize(hyp_matrix, p=2, dim=2)
prem_matrix = prem_matrix*p_mask.view(prem_matrix.shape[0], prem_matrix.shape[1], 1).float()
hyp_matrix = hyp_matrix*h_mask.view(hyp_matrix.shape[0], hyp_matrix.shape[1], 1).float()
similarity_matrix = hyp_matrix.bmm(prem_matrix.transpose(2, 1)) #batch_size*seqlen(h)*seqlen(p)
similarity = torch.max(similarity_matrix, 2)[0] #batch_size*seqlen => hsize
sim_score = []
if "min" in scores or "second_min" in scores:
# compute the min and second min in the similarities.
similarity_replace = similarity.clone()
# all the similarity values are smaller than 1 so 10 is a good number
# so that the masked elements are not selected during the top minimum computations.
similarity_replace[h_mask == 0] = 10
y, i = torch.topk(similarity_replace, k=2, dim=1, largest=False, sorted=True)
if "min" in scores:
sim_score.append(y[:, 0].view(-1, 1))
if "second_min" in scores:
sim_score.append(y[:, 1].view(-1, 1))
if "mean" in scores:
h_lens = torch.sum(h_mask, 1)
        # note: because masked positions contribute zeros, divide by the true
        # hypothesis length instead of taking the mean over all positions.
sum_similarity = torch.sum(similarity, 1)
mean_similarity = sum_similarity/h_lens.float()
sim_score.append(mean_similarity.view(-1, 1))
if "max" in scores:
max_similarity = torch.max(similarity, 1)[0]
sim_score.append(max_similarity.view(-1, 1))
similarity_score = torch.cat(sim_score, 1)
return similarity_score
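# Hedged shape sketch (numbers are assumptions for illustration): with a batch of
# 8, sequence length 64 and hidden size 768,
#   prem_matrix, hyp_matrix : (8, 64, 768) token embeddings
#   p_mask, h_mask          : (8, 64) 0/1 masks
#   get_word_similarity_new(prem_matrix, hyp_matrix, ["mean", "max"], h_mask, p_mask)
# returns an (8, 2) tensor, one column per requested score, in the order
# min, second_min, mean, max.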
def get_length_features(p_mask, h_mask, length_features):
features = []
p_lengths = torch.sum(p_mask, dim=1)
h_lengths = torch.sum(h_mask, dim=1)
if "log-len-diff" in length_features:
features.append((torch.log(torch.max((p_lengths-h_lengths), torch.ones_like(p_lengths)).float())).view(-1, 1))
if "len-diff" in length_features:
features.append((p_lengths-h_lengths).float().view(-1, 1))
return torch.cat(features, 1)
def get_hans_features(premise, hypothesis, parse):
constituent = is_constituent(premise, hypothesis, parse)
subsequence = is_subsequence(premise, hypothesis)
lexical_overlap, overlap_rate = have_lexical_overlap(premise, hypothesis)
return constituent, subsequence, lexical_overlap, overlap_rate
def get_hans_features_new(premise, hypothesis, parse, tokenizer):
premise_tokens = tokenizer.tokenize(premise)
hyp_tokens = tokenizer.tokenize(hypothesis)
premise_tokens = [p.lower() for p in premise_tokens]
hyp_tokens = [h.lower() for h in hyp_tokens]
premise_tokens = " ".join(premise_tokens)
hyp_tokens = " ".join(hyp_tokens)
constituent = is_constituent(premise_tokens, hyp_tokens, parse)
subsequence = is_subsequence(premise_tokens, hyp_tokens)
lexical_overlap, overlap_rate = have_lexical_overlap(premise_tokens, hyp_tokens, get_hans_new_features=True)
return constituent, subsequence, lexical_overlap, overlap_rate
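# Hedged illustration: for premise "the doctor near the actor danced" and
# hypothesis "the actor danced", every hypothesis word occurs in the premise
# (lexical_overlap=True, overlap_rate=1.0) and the hypothesis is a contiguous
# subsequence of the premise; whether it also counts as a constituent depends on
# the parse string supplied with the example.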
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None, parse=None, binary_label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
self.parse = parse
self.binary_label = binary_label
class RUBIInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, h_ids, input_mask_h):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.h_ids = h_ids
self.input_mask_h = input_mask_h
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
class HansInputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_id, h_ids, input_mask_h,
p_ids, input_mask_p, have_overlap, overlap_rate, subsequence, constituent, binary_label=None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.h_ids = h_ids
self.input_mask_h = input_mask_h
self.p_ids = p_ids
self.input_mask_p = input_mask_p
self.have_overlap = have_overlap
self.overlap_rate = overlap_rate
self.subsequence = subsequence
self.constituent = constituent
self.binary_label = binary_label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_jsonl(cls, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def __init__(self, hans=False):
        # MNLI uses the three standard NLI labels.
self.num_classes = 3
self.hans = hans
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "dev_matched.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
if self.hans:
parse = line[6]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, parse=parse))
else:
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_matched")
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
class SnliProcessor(DataProcessor):
"""Processor for the SNLI data set (GLUE version)."""
def __init__(self):
self.num_classes = 3
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_validation_dev_examples(self, data_dir):
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def get_dev_labels(self, data_dir):
lines = self._read_tsv(os.path.join(data_dir, "test.tsv"))
labels = []
for (i, line) in enumerate(lines):
if i == 0:
continue
label = line[-1]
labels.append(label)
return np.array(labels)
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class NliProcessor(DataProcessor):
"""Processor for the dataset of the format of SNLI
(InferSent version), could be 2 or 3 classes."""
# We use get_labels() class to convert the labels to indices,
# later during the transfer it will be problematic if the labels
# are not the same order as the SNLI/MNLI so we return the whole
# 3 labels, but for getting the actual number of classes, we use
# self.num_classes.
def __init__(self, data_dir):
# We assume there is a training file there and we read labels from there.
labels = [line.rstrip() for line in open(join(data_dir, 'labels.train'))]
self.labels = list(set(labels))
labels = ["contradiction", "entailment", "neutral"]
ordered_labels = []
for l in labels:
if l in self.labels:
ordered_labels.append(l)
self.labels = ordered_labels
self.num_classes = len(self.labels)
def get_dev_labels(self, data_dir):
labels = [line.rstrip() for line in open(join(data_dir, 'labels.test'))]
return np.array(labels)
def get_validation_dev_examples(self, data_dir):
return self._create_examples(data_dir, "dev")
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(data_dir, "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(data_dir, "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, data_dir, set_type):
"""Creates examples for the training and dev sets."""
s1s = [line.rstrip() for line in open(join(data_dir, 's1.'+set_type))]
s2s = [line.rstrip() for line in open(join(data_dir, 's2.'+set_type))]
labels = [line.rstrip() for line in open(join(data_dir, 'labels.'+set_type))]
examples = []
for (i, line) in enumerate(s1s):
guid = "%s-%s" % (set_type, i)
text_a = s1s[i]
text_b = s2s[i]
label = labels[i]
# In case of hidden labels, changes it with entailment.
if label == "hidden":
label = "entailment"
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class FEVERProcessor(DataProcessor):
"""Processor for the processed FEVER dataset."""
def __init__(self):
self.num_classes = 3
def read_jsonl(self, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self.read_jsonl(join(data_dir, "nli.train.jsonl")),\
"train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self.read_jsonl(join(data_dir, "nli.dev.jsonl")),\
"dev")
def get_labels(self):
"""See base class."""
return ["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]
def _create_examples(self, items, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, item) in enumerate(items):
guid = "%s-%s" % (set_type, i)
# Claim has artifacts so this needs to be text_b.
text_a = items[i]["claim"]
text_b = items[i]["evidence"] if "evidence" in items[i] else items[i]["evidence_sentence"]
label = items[i]["gold_label"] if "gold_label" in items[i] else items[i]["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class HansProcessor(DataProcessor):
"""Processor for the processed Hans dataset."""
def __init__(self, hans=False):
self.num_classes = 2
self.hans = hans # this is added only to test hans-only classifier on HANS dataset.
def read_jsonl(self, filepath):
""" Reads the jsonl file path. """
lines = []
with jsonlines.open(filepath) as f:
for line in f:
lines.append(line)
return lines
def get_train_examples(self, data_dir):
"""See base class."""
pass
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(\
self._read_tsv(join(data_dir, "heuristics_evaluation_set.txt")), \
"dev")
def get_dev_labels(self, data_dir):
items = self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt"))
labels = []
for (i, item) in enumerate(items):
if i == 0:
continue
label = items[i][0]
labels.append(label)
return np.array(labels)
def get_labels(self):
"""See base class."""
return ["non-entailment", "entailment"]
def _create_examples(self, items, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, item) in enumerate(items):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
# Claim has artifacts so this needs to be text_b.
text_a = items[i][5]
text_b = items[i][6]
label = items[i][0]
if self.hans:
parse = items[i][3]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, parse=parse))
else:
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer, output_mode,
cls_token_at_end=False, pad_on_left=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=1, pad_token_segment_id=0,
mask_padding_with_zero=True, rubi=False, rubi_text="b",
hans=False, hans_features=False):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
rubi: In case of having this option, it also adds on the hypothesis only examples
to the dataset created.
"""
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
if rubi:
tokens_h = tokenizer.tokenize(example.text_b if rubi_text == "b" else example.text_a)
half_max_seq_length = int(max_seq_length/2)
if len(tokens_h) > (half_max_seq_length-2):
tokens_h = tokens_h[:(half_max_seq_length-2)]
tokens_h = ["[CLS]"]+tokens_h+["[SEP]"]
h_ids = tokenizer.convert_tokens_to_ids(tokens_h)
input_mask_h = [1]*len(h_ids)
padding_h = [0]*(half_max_seq_length-len(h_ids))
h_ids += padding_h
input_mask_h += padding_h
assert len(h_ids) == half_max_seq_length
assert len(input_mask_h) == half_max_seq_length
if hans: # this is only for rubi, so only compute this for p
def get_ids_mask(text, max_seq_length):
tokens_h = tokenizer.tokenize(text)
half_max_seq_length = int(max_seq_length / 2)
if len(tokens_h) > (half_max_seq_length - 2):
tokens_h = tokens_h[:(half_max_seq_length - 2)]
tokens_h = ["[CLS]"] + tokens_h + ["[SEP]"]
h_ids = tokenizer.convert_tokens_to_ids(tokens_h)
input_mask_h = [1] * len(h_ids)
padding_h = [0] * (half_max_seq_length - len(h_ids))
h_ids += padding_h
input_mask_h += padding_h
assert len(h_ids) == half_max_seq_length
assert len(input_mask_h) == half_max_seq_length
return h_ids, input_mask_h
p_ids, input_mask_p = get_ids_mask(example.text_a if rubi_text == "b" else example.text_b, max_seq_length)
if hans_features:
have_overlap, constituent, subsequence, overlap_rate = get_hans_features_new(example.text_a, example.text_b, example.parse, tokenizer)
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = tokens_a + [sep_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
if tokens_b:
tokens += tokens_b + [sep_token]
segment_ids += [sequence_b_segment_id] * (len(tokens_b) + 1)
if cls_token_at_end:
tokens = tokens + [cls_token]
segment_ids = segment_ids + [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
if rubi:
if hans:
features.append(
HansInputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
h_ids=h_ids,
input_mask_h=input_mask_h,
p_ids=p_ids,
input_mask_p=input_mask_p,
have_overlap=have_overlap,
overlap_rate=overlap_rate,
subsequence=subsequence,
constituent=constituent,
))
else:
features.append(
RUBIInputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
h_ids = h_ids,
input_mask_h=input_mask_h))
else:
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
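# Hedged example: with max_length=6, the pair (["a","b","c","d","e"], ["x","y"])
# is truncated in place to (["a","b","c","d"], ["x","y"]) -- tokens are always
# removed from the end of the longer sequence.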
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def per_class_accuracy(preds, labels):
unique_labels = np.unique(labels)
results = {}
for l in unique_labels:
indices = (l == labels)
acc = (preds[indices] == labels[indices]).mean()
results["acc_"+str(int(l))] = acc
acc = (preds == labels).mean()
results["acc"] = acc
return results
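# Hedged example: per_class_accuracy(np.array([1, 0, 1]), np.array([1, 1, 1]))
# returns {"acc_1": 0.667, "acc": 0.667} (rounded) -- one entry per label present
# plus the overall accuracy; this is how the HANS entailment/non-entailment
# accuracies are reported by compute_metrics.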
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def compute_metrics(task_name, preds, labels):
assert len(preds) == len(labels)
if task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "snli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "nli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "fever":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "hans":
return per_class_accuracy(preds, labels)
else:
raise KeyError(task_name)
processors = {
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"snli": SnliProcessor,
"nli": NliProcessor,
"fever": FEVERProcessor,
"hans": HansProcessor,
}
output_modes = {
"mnli": "classification",
"mnli-mm": "classification",
"snli": "classification",
"nli": "classification",
"fever": "classification",
"hans": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"mnli": 3,
"mnli-mm": 3,
"snli": 3,
"fever": 3,
"hans": 2,
}
| 29,876 | 37.550968 | 154 | py |
robust-nli | robust-nli-master/src/BERT/run_glue.py | """ Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet)."""
from __future__ import absolute_import, division, print_function
import logging
import os
import random
from utils_glue import GLUE_TASKS_NUM_LABELS
from eval_utils import load_and_cache_examples, evaluate, get_parser
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler)
from tqdm import tqdm, trange
from mutils import write_to_csv
from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_bert import BertDebiasForSequenceClassification
from pytorch_transformers import AdamW, WarmupLinearSchedule
from utils_glue import (compute_metrics, convert_examples_to_features,
processors)
from eval_utils import task_to_data_dir, nli_task_names, actual_task_names, ALL_MODELS
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.CRITICAL)
from eval_utils import MODEL_CLASSES
from eval_utils import do_evaluate
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
def save_model(args, global_step, model, logger):
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
args.train_batch_size = args.per_gpu_train_batch_size
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=False)
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
if args.hans:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5],
'p_ids': batch[6],
'p_attention_mask': batch[7],
'have_overlap': batch[8],
'overlap_rate': batch[9],
'subsequence': batch[10],
'constituent': batch[11]
}
elif args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None, # XLM don't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)
            loss = outputs["bert"][0]  # outputs["bert"] is the usual pytorch-transformers tuple; index 0 is the loss
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
scheduler.step() # Update learning rate schedule
optimizer.step()
model.zero_grad()
global_step += 1
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
logging_loss = tr_loss
if args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
save_model(args, global_step, model, logger)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
#tb_writer.close()
return global_step, tr_loss / global_step
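# Hedged example of a typical invocation (assuming get_parser() in eval_utils.py
# exposes argparse flags matching the attribute names used above; paths and model
# names are placeholders):
#
#   python run_glue.py --task_name MNLI --do_train --do_eval \
#       --model_type bert --model_name_or_path bert-base-uncased \
#       --output_dir /path/to/output --rubi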
def main():
parser = get_parser()
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# add all variations of hans automatically
if "HANS" in args.eval_task_names:
hans_variations = ["HANS-const", "HANS-lex", "HANS-sub"]
for variation in hans_variations:
if variation not in args.eval_task_names:
args.eval_task_names.append(variation)
# Setup CUDA, GPU & distributed training
device = torch.device("cuda")
args.device = device
# All of these tasks use the NliProcessor
args.actual_task_names = actual_task_names
# By default we evaluate on the task itself.
if len(args.eval_task_names) == 0:
args.eval_task_names = [args.task_name]
if "all" in args.eval_task_names:
args.eval_task_names = args.eval_task_names + nli_task_names + ["snli", "mnli"]
args.eval_task_names.remove("all")
print(args.eval_task_names)
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
-1, device, 1, bool(False), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name.startswith("fever"):
processor = processors["fever"]()
elif args.task_name in nli_task_names:
processor = processors["nli"](task_to_data_dir[args.task_name])
elif args.task_name in ["mnli"]:
processor = processors["mnli"](hans=args.hans)
elif args.task_name.startswith("HANS"):
processor = processors["hans"]()
elif args.task_name in args.actual_task_names:
processor = processors[args.task_name]()
else:
raise ValueError("Task not found: %s" % (args.task_name))
label_list = processor.get_labels()
num_labels = len(label_list)
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name)
tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case)
# Adds rubi parameters here.
config.rubi = args.rubi
config.hans = args.hans
config.nonlinear_h_classifier = args.nonlinear_h_classifier
config.hypothesis_only = args.hypothesis_only
config.lambda_h = args.lambda_h
config.focal_loss = args.focal_loss
config.poe_loss = args.poe_loss
config.similarity = args.similarity
config.gamma_focal = args.gamma_focal
config.weighted_bias_only = args.weighted_bias_only
config.length_features = args.length_features
config.hans_features=args.hans_features
config.hans_only = args.hans_only
config.ensemble_training = args.ensemble_training
config.aggregate_ensemble = args.aggregate_ensemble
config.poe_alpha = args.poe_alpha
model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool('.ckpt' in args.model_name_or_path), config=config)
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset, _, _ = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
# Create output directory if needed
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
print("model is saved in ", os.path.join(args.output_dir, 'training_args.bin'))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval:
result, _ = do_evaluate(args, args.output_dir, tokenizer, model, config)
for r in result:
results.update(r)
# saves the results.
print(results)
if args.outputfile is not None:
write_to_csv(results, args, args.outputfile)
return results
if __name__ == "__main__":
main()
| 13,321 | 42.966997 | 163 | py |
robust-nli | robust-nli-master/src/BERT/heuristics_utils.py | # These functions are taken from the code for
# Right for the Wrong Reasons: Diagnosing Syntactic Heuristics in Natural Language Inference by
# Tom McCoy, Ellie Pavlick, Tal Linzen, ACL 2019
def have_lexical_overlap(premise, hypothesis, get_hans_new_features=False):
prem_words = []
hyp_words = []
for word in premise.split():
if word not in [".", "?", "!"]:
prem_words.append(word.lower())
for word in hypothesis.split():
if word not in [".", "?", "!"]:
hyp_words.append(word.lower())
all_in = True
for word in hyp_words:
if word not in prem_words:
all_in = False
break
if get_hans_new_features:
overlap_percent = len(list(set(hyp_words) & set(prem_words))) / len(set(hyp_words))
else:
overlap_percent = len(list(set(hyp_words) & set(prem_words))) / len(set(prem_words))
return all_in, overlap_percent
def is_subsequence(premise, hypothesis):
prem_words = []
hyp_words = []
for word in premise.split():
if word not in [".", "?", "!"]:
prem_words.append(word.lower())
for word in hypothesis.split():
if word not in [".", "?", "!"]:
hyp_words.append(word.lower())
prem_filtered = " ".join(prem_words)
hyp_filtered = " ".join(hyp_words)
return hyp_filtered in prem_filtered
def parse_phrase_list(parse, phrases):
if parse == "":
return phrases
phrase_list = phrases
words = parse.split()
this_phrase = []
next_level_parse = []
for index, word in enumerate(words):
if word == "(":
next_level_parse += this_phrase
this_phrase = ["("]
elif word == ")" and len(this_phrase) > 0 and this_phrase[0] == "(":
phrase_list.append(" ".join(this_phrase[1:]))
next_level_parse += this_phrase[1:]
this_phrase = []
elif word == ")":
next_level_parse += this_phrase
next_level_parse.append(")")
this_phrase = []
else:
this_phrase.append(word)
return parse_phrase_list(" ".join(next_level_parse), phrase_list)
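# Hedged illustration with a simplified binary parse:
#   parse_phrase_list("( the dog ( ran fast ) )", [])
# collects the bracketed spans bottom-up, returning ["ran fast", "the dog ran fast"];
# is_constituent() below then checks whether the hypothesis matches one of these spans.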
def is_constituent(premise, hypothesis, parse):
parse_new = []
for word in parse.split():
if word not in [".", "?", "!"]:
parse_new.append(word.lower())
all_phrases = parse_phrase_list(" ".join(parse_new), [])
prem_words = []
hyp_words = []
for word in premise.split():
if word not in [".", "?", "!"]:
prem_words.append(word.lower().replace(".", "").replace("?", "").replace("!", ""))
for word in hypothesis.split():
if word not in [".", "?", "!"]:
hyp_words.append(word.lower().replace(".", "").replace("?", "").replace("!", ""))
hyp_filtered = " ".join(hyp_words)
return hyp_filtered in all_phrases
| 2,890 | 28.20202 | 95 | py |
robust-nli | robust-nli-master/src/BERT/__init__.py | 0 | 0 | 0 | py |
|
robust-nli | robust-nli-master/src/BERT/utils_bert.py | import torch
from torch import nn
import sys
sys.path.append("../")
from torch.nn import CrossEntropyLoss, MSELoss
from pytorch_transformers.modeling_bert import BertPreTrainedModel, BertModel
from losses import FocalLoss, POELoss, RUBILoss
from utils_glue import get_word_similarity_new, get_length_features
from mutils import grad_mul_const
class BertDebiasForSequenceClassification(BertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(BertDebiasForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.apply(self.init_weights)
self.config = config
self.hypothesis_only = self.get_bool_value(config, "hypothesis_only")
self.gamma_focal = config.gamma_focal if hasattr(config, "gamma_focal") else 2
self.ensemble_training = self.get_bool_value(config, "ensemble_training")
self.poe_alpha = config.poe_alpha if hasattr(config, 'poe_alpha') else 1
# Sets the rubi parameters.
self.similarity = self.get_list_value(config, "similarity")
self.rubi = self.get_bool_value(config, 'rubi')
self.hans = self.get_bool_value(config, 'hans')
self.hans_features = self.get_bool_value(config, 'hans_features')
self.focal_loss = self.get_bool_value(config, 'focal_loss')
self.length_features = self.get_list_value(config, "length_features")
self.hans_only = self.get_bool_value(config, 'hans_only')
self.aggregate_ensemble=self.get_str_value(config, 'aggregate_ensemble')
self.poe_loss = self.get_bool_value(config, 'poe_loss')
self.weighted_bias_only = self.get_bool_value(config, "weighted_bias_only")
num_labels_bias_only = self.config.num_labels
if self.rubi or self.hypothesis_only or self.focal_loss or self.poe_loss or self.hans_only:
if self.hans:
num_features = 4 + len(self.similarity)
if self.hans_features:
num_features += len(self.length_features)
if not config.nonlinear_h_classifier:
self.h_classifier1 = nn.Linear(num_features, num_labels_bias_only)
else:
self.h_classifier1 = nn.Sequential(
nn.Linear(num_features, num_features),
nn.Tanh(),
nn.Linear(num_features, num_features),
nn.Tanh(),
nn.Linear(num_features, num_labels_bias_only))
if self.ensemble_training:
self.h_classifier1_second = self.get_classifier(config, config.nonlinear_h_classifier,
num_labels_bias_only)
else:
# Loads the classifiers from the pretrained model.
self.h_classifier1 = self.get_classifier(config, config.nonlinear_h_classifier, num_labels_bias_only)
self.lambda_h = config.lambda_h
    def get_bool_value(self, config, attribute):
        return bool(getattr(config, attribute, False))
    def get_str_value(self, config, attribute):
        return getattr(config, attribute, "")
    def get_list_value(self, config, attribute):
        return getattr(config, attribute, [])
def get_classifier(self, config, nonlinear, num_labels):
if nonlinear == "deep":
classifier = nn.Sequential(
nn.Linear(config.hidden_size, config.hidden_size),
nn.Tanh(),
nn.Linear(config.hidden_size, int(config.hidden_size/2)),
nn.Tanh(),
nn.Linear(int(config.hidden_size/2), int(config.hidden_size/4)),
nn.Tanh(),
nn.Linear(int(config.hidden_size/4), num_labels),
)
else:
classifier = nn.Linear(config.hidden_size, num_labels)
return classifier
def set_ensemble_training(self, ensemble_training):
self.ensemble_training = ensemble_training
def set_hans(self, hans):
self.hans = hans
def set_rubi(self, rubi):
self.rubi = rubi
def set_poe_loss(self, poe_loss):
self.poe_loss = poe_loss
def set_focal_loss(self, focal_loss):
self.focal_loss = focal_loss
def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None,
position_ids=None, head_mask=None, h_ids=None,
h_attention_mask=None, p_ids=None, p_attention_mask=None, have_overlap=None,
overlap_rate=None, subsequence=None, constituent=None, binary_labels=None):
if self.hypothesis_only:
outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_h = outputs[1]
pooled_h_g = self.dropout(pooled_h)
logits = self.h_classifier1(pooled_h_g)
outputs = (logits,) + outputs[2:]
elif not self.hans_only:
outputs = self.bert(input_ids, position_ids=position_ids,\
token_type_ids=token_type_ids,\
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
# add hidden states and attention if they are here
outputs = (logits,) + outputs[2:]
        if self.hans:  # computes the HANS heuristic features and the bias-only predictions.
h_outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
if self.ensemble_training: # also computes the h-only results.
pooled_h_second = h_outputs[1]
h_embd_second = grad_mul_const(pooled_h_second, 0.0)
pooled_h_g_second = self.dropout(h_embd_second)
h_logits_second = self.h_classifier1_second(pooled_h_g_second)
h_outputs_second = (h_logits_second,) + h_outputs[2:]
h_matrix = h_outputs[0]
h_matrix = grad_mul_const(h_matrix, 0.0)
h_matrix = self.dropout(h_matrix)
p_outputs = self.bert(p_ids, token_type_ids=None, attention_mask=p_attention_mask)
p_matrix = p_outputs[0]
p_matrix = grad_mul_const(p_matrix, 0.0)
p_matrix = self.dropout(p_matrix)
# compute similarity features.
if self.hans_features:
                similarity_score = get_word_similarity_new(h_matrix, p_matrix, self.similarity,\
                                                           h_attention_mask, p_attention_mask)
            # concatenate the similarity scores with the HANS heuristic features.
            hans_h_inputs = torch.cat((similarity_score,\
                have_overlap.view(-1, 1), overlap_rate.view(-1, 1), subsequence.view(-1, 1), constituent.view(-1, 1)), 1)
if self.hans_features and len(self.length_features) != 0:
length_features = get_length_features(p_attention_mask, h_attention_mask, self.length_features)
hans_h_inputs = torch.cat((hans_h_inputs, length_features), 1)
h_logits = self.h_classifier1(hans_h_inputs)
h_outputs = (h_logits,) + h_outputs[2:]
if self.hans_only:
logits = h_logits
# overwrite outputs.
outputs = h_outputs
elif self.focal_loss or self.poe_loss or self.rubi:
h_outputs = self.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_h = h_outputs[1]
h_embd = grad_mul_const(pooled_h, 0.0)
pooled_h_g = self.dropout(h_embd)
h_logits = self.h_classifier1(pooled_h_g)
h_outputs = (h_logits,) + h_outputs[2:]
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
if self.focal_loss:
loss_fct = FocalLoss(gamma=self.gamma_focal,\
ensemble_training=self.ensemble_training,
aggregate_ensemble=self.aggregate_ensemble)
elif self.poe_loss:
loss_fct = POELoss(ensemble_training=self.ensemble_training, poe_alpha=self.poe_alpha)
elif self.rubi:
loss_fct = RUBILoss(num_labels=self.num_labels)
elif self.hans_only:
if self.weighted_bias_only and self.hans:
weights = torch.tensor([0.5, 1.0, 0.5]).cuda()
loss_fct = CrossEntropyLoss(weight=weights)
else:
loss_fct = CrossEntropyLoss()
if self.rubi or self.focal_loss or self.poe_loss:
if self.ensemble_training:
model_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1), \
h_logits.view(-1, self.num_labels), h_logits_second.view(-1, self.num_labels))
else:
model_loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1),\
h_logits.view(-1, self.num_labels))
if self.weighted_bias_only and self.hans:
weights = torch.tensor([0.5, 1.0, 0.5]).cuda()
h_loss_fct = CrossEntropyLoss(weight=weights)
if self.ensemble_training:
h_loss_fct_second = CrossEntropyLoss()
else:
h_loss_fct = CrossEntropyLoss()
h_loss = h_loss_fct(h_logits.view(-1, self.num_labels), labels.view(-1))
if self.ensemble_training:
h_loss += h_loss_fct_second(h_logits_second.view(-1, self.num_labels), labels.view(-1))
loss = model_loss + self.lambda_h * h_loss
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
all_outputs = {}
all_outputs["bert"] = outputs
if self.rubi or self.focal_loss or self.poe_loss:
all_outputs["h"] = h_outputs
if self.ensemble_training:
all_outputs["h_second"] = h_outputs_second
return all_outputs # (loss), logits, (hidden_states), (attentions)
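# Hedged usage sketch (illustrative, not taken from the training scripts; BertConfig is the
# pytorch_transformers class): with the debiasing attributes patched onto the config, a
# hypothesis-only forward pass needs just the hypothesis ids/mask; `h_ids`, `h_mask` and
# `labels` are placeholder tensors.
#   config = BertConfig.from_pretrained("bert-base-uncased", num_labels=3)
#   config.hypothesis_only, config.nonlinear_h_classifier, config.lambda_h = True, None, 1.0
#   model = BertDebiasForSequenceClassification.from_pretrained("bert-base-uncased", config=config)
#   loss, logits = model(None, h_ids=h_ids, h_attention_mask=h_mask, labels=labels)["bert"][:2]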
| 12,786 | 48.949219 | 134 | py |
robust-nli | robust-nli-master/src/BERT/eval_utils.py | from torch.utils.data import (DataLoader, SequentialSampler, TensorDataset)
from os.path import join
import numpy as np
from utils_glue import (compute_metrics, convert_examples_to_features,
processors)
import argparse
import torch
import os
import glob
import logging
from tqdm import tqdm, trange
from pytorch_transformers import (WEIGHTS_NAME, BertConfig, BertTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer)
from utils_bert import BertDebiasForSequenceClassification
MODEL_CLASSES = {
'bert': (BertConfig, BertDebiasForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
}
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig)), ())
task_to_data_dir = {
"snli": "../../data/datasets/SNLI/original",
"mnli": "../../data/datasets/MNLI",
"mnli-mm": "../../data/datasets/MNLI",
"addonerte": "../../data/datasets/AddOneRTE",
"dpr": "../../data/datasets/DPR/",
"sprl": "../../data/datasets/SPRL/",
"fnplus": "../../data/datasets/FNPLUS/",
"joci": "../../data/datasets/JOCI/",
"mpe": "../../data/datasets/MPE/",
"scitail": "../../data/datasets/SciTail/",
"sick": "../../data/datasets/SICK/",
"glue": "../../data/datasets/GLUEDiagnostic/",
"QQP": "../../data/datasets/QQP/",
"snlihard": "../../data/datasets/SNLIHard/",
"MNLIMatchedHard": "../../data/datasets/MNLIMatchedHard/",
"MNLIMismatchedHard": "../../data/datasets/MNLIMismatchedHard/",
"mnlimatched": "../../data/datasets/MNLIMatched/",
"mnlimismatched": "../../data/datasets/MNLIMismatched/",
"fever": "../../data/datasets/FEVER/",
"fever-symmetric-generated": "../../data/datasets/FEVER-symmetric-generated/",
"MNLIMatchedHardWithHardTest": "../../data/datasets/MNLIMatchedHardWithHardTest/",
"MNLIMismatchedHardWithHardTest": "../../data/datasets/MNLIMismatchedHardWithHardTest/",
"MNLITrueMatched": "../../data/datasets/MNLITrueMatched",
"MNLITrueMismatched": "../../data/datasets/MNLITrueMismatched",
"HANS": "../../data/datasets/HANS",
"HANS-const": "../../data/datasets/HANS/constituent",
"HANS-lex": "../../data/datasets/HANS/lexical_overlap",
"HANS-sub": "../../data/datasets/HANS/subsequence",
}
# All of the following tasks use the NliProcessor.
nli_task_names = ["addonerte", "dpr", "sprl", "fnplus", "joci", "mpe", "scitail", "sick", "glue", "QQP",\
"snlihard", "mnlimatched", "mnlimismatched", "MNLIMatchedHardWithHardTest", \
"MNLIMismatchedHardWithHardTest", "MNLITrueMismatched", "MNLITrueMatched", "MNLIMatchedHard", "MNLIMismatchedHard"]
actual_task_names = ["snli", "mnli", "mnli-mm"]
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.CRITICAL)
def get_parser():
parser = argparse.ArgumentParser()
# RUBI parameters, this is deactivated by default.
parser.add_argument("--ensemble_training", action="store_true", help="Train the h-only and hans bias-only together\
on MNLI.")
parser.add_argument("--poe_alpha", default=1.0, type=float, help="alpha for poe method.")
parser.add_argument("--aggregate_ensemble", choices=["mean"], default="mean",
help="When using ensemble training with focal loss, one can combine the\
two bias only predictions with mean.")
parser.add_argument("--hans_only", action="store_true")
parser.add_argument("--weighted_bias_only", action="store_true", help="If specified bias-only\
model's loss is weighted. Only impacts hans.")
parser.add_argument("--gamma_focal", type=float, default=2.0)
parser.add_argument("--similarity", type=str, nargs="+", default=[], choices=["max", "mean", "min", "second_min"])
parser.add_argument("--hans", action="store_true", help="If selected trains the bias-only with hans features.")
parser.add_argument("--length_features", type=str, nargs="+", default=[], help="options are len-diff, log-len-diff")
parser.add_argument("--hans_features", action="store_true", help="If selected, computes the features for the hans experiment")
parser.add_argument("--rubi_text", choices=["a", "b"], default="b")
parser.add_argument("--poe_loss", action="store_true", help="Uses the product of the expert loss.")
parser.add_argument("--focal_loss", action="store_true", help="Uses the focal loss for classification,\
where instead of the probabilities of the objects, we use the h only probabilities")
parser.add_argument("--lambda_h", default=1.0, type=float)
parser.add_argument("--rubi", action="store_true", help="If specified use rubi network.")
parser.add_argument("--hypothesis_only", action="store_true")
parser.add_argument("--nonlinear_h_classifier", choices=["deep", None], default=None)
parser.add_argument("--save_labels_file", type=str, default=None, \
help="If specified, saves the labels.")
parser.add_argument("--output_label_format", type=str, default="kaggle", choices=["kaggle", "numpy"],
help="the format of saving the labels.")
# Bert parameters.
parser.add_argument("--outputfile", type=str, default=None, help="If specified, saves the results.")
parser.add_argument("--binerize_eval", action="store_true",
help="If specified, it binerize the dataset. During eval")
parser.add_argument("--use_cached_dataset", action="store_true", help="If specified will use the cached dataset")
parser.add_argument("--model_type", default=None, type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, #required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--eval_task_names", nargs='+', type=str, default=[], \
help="list of the tasks to evaluate on them.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=2e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
    parser.add_argument('--logging_steps', type=int, default=100000,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=100000,
                        help="Save a checkpoint every X update steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
return parser
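# Example (hypothetical) invocation of a script built on this parser; every flag below is
# defined above, but the script name, model checkpoint and paths are placeholders:
#   python run_glue.py --model_type bert --model_name_or_path bert-base-uncased \
#       --task_name mnli --do_train --do_eval --eval_task_names HANS snli \
#       --output_dir /path/to/output --poe_loss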
# writes the labels in the kaggle format.
def write_in_kaggle_format(args, label_ids, gold_labels, save_labels_file, eval_task):
    # make a dictionary mapping prediction ids to label strings.
    labels_map = dict(enumerate(gold_labels))
ids_file = join(task_to_data_dir[eval_task], "ids.test")
ids = [line.strip('\n') for line in open(ids_file)]
with open(save_labels_file, 'w') as f:
f.write("pairID,gold_label\n")
for i, l in enumerate(label_ids):
label = labels_map[l]
f.write("{0},{1}\n".format(ids[i], label))
def write_in_numpy_format(args, preds, save_labels_file):
np.save(save_labels_file, preds)
def binarize_preds(preds):
# maps the third label (neutral one) to first, which is contradiction.
preds[preds==2] = 0
return preds
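# For example, with the 3-class label ids assumed in this file (0=contradiction,
# 1=entailment, 2=neutral), the array is modified in place and returned:
#   binarize_preds(np.array([0, 1, 2, 2]))  ->  array([0, 1, 0, 0])
# i.e. entailment keeps id 1 and both non-entailment classes collapse onto id 0.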
def load_and_cache_examples(args, task, tokenizer, evaluate=False, dev_evaluate=False):
data_dir = task_to_data_dir[task]
if task.startswith("fever"):
processor = processors["fever"]()
elif task in nli_task_names:
processor = processors["nli"](data_dir)
elif task in ["mnli"]:
processor = processors["mnli"](hans=args.hans)
elif task == "mnli-mm":
processor = processors["mnli-mm"](hans=args.hans)
elif task.startswith("HANS"):
processor = processors["hans"](hans=args.hans)
else:
processor = processors[task]()
# Load data features from cache or dataset file
cached_features_file = os.path.join(data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
print("File is: ", cached_features_file)
    if os.path.exists(cached_features_file) and args.use_cached_dataset:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", data_dir)
label_list = processor.get_labels()
if dev_evaluate: # and task in nli_task_names:
examples = processor.get_validation_dev_examples(data_dir)
else:
examples = processor.get_dev_examples(data_dir) if evaluate else\
processor.get_train_examples(data_dir)
features = convert_examples_to_features(examples, label_list, args.max_seq_length, tokenizer, "classification",
cls_token_at_end=bool(args.model_type in ['xlnet']), # xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
cls_token_segment_id=2 if args.model_type in ['xlnet'] else 0,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
rubi=args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss or args.hans_only,
rubi_text=args.rubi_text, hans=(args.hans and not evaluate) or args.hans_only,\
hans_features=args.hans_features)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_id for f in features], dtype=torch.long)
if (args.hans and not evaluate) or args.hans_only:
all_h_ids = torch.tensor([f.h_ids for f in features], dtype=torch.long)
all_h_masks = torch.tensor([f.input_mask_h for f in features], dtype=torch.long)
all_p_ids = torch.tensor([f.p_ids for f in features], dtype=torch.long)
all_p_masks = torch.tensor([f.input_mask_p for f in features], dtype=torch.long)
all_have_overlap = torch.tensor([f.have_overlap for f in features], dtype=torch.float)
all_overlap_rate = torch.tensor([f.overlap_rate for f in features], dtype=torch.float)
all_subsequence = torch.tensor([f.subsequence for f in features], dtype=torch.float)
all_constituent = torch.tensor([f.constituent for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,\
all_h_ids, all_h_masks, all_p_ids, all_p_masks, all_have_overlap, all_overlap_rate,\
all_subsequence, all_constituent)
elif args.rubi or args.hypothesis_only or args.focal_loss or args.poe_loss:
# Hypothesis representations.
all_h_ids = torch.tensor([f.h_ids for f in features], dtype=torch.long)
all_h_masks = torch.tensor([f.input_mask_h for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids,\
all_h_ids, all_h_masks)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset, processor.get_labels(), processor.num_classes
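# Column-order note for the TensorDataset built above (it matches the batch unpacking in
# evaluate() and get_embeddings() below): indices 0-3 are always input_ids, input_mask,
# segment_ids and label_ids; the hans branch appends h_ids, h_masks, p_ids, p_masks and
# the four heuristic scalars, while the rubi/hypothesis-only branch appends only h_ids
# and h_masks.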
def get_batch_emebddings(model, args, input_ids, token_type_ids=None, attention_mask=None,
position_ids=None, head_mask=None, h_ids=None, h_attention_mask=None, labels=None):
if args.hypothesis_only:
outputs = model.bert(h_ids, token_type_ids=None, attention_mask=h_attention_mask)
pooled_output = outputs[1]
else:
outputs = model.bert(input_ids, position_ids=position_ids, \
token_type_ids=token_type_ids, \
attention_mask=attention_mask, head_mask=head_mask)
pooled_output = outputs[1]
return pooled_output
def get_embeddings(args, model, tokenizer):
# Loop to handle MNLI double evaluation (matched, mis-matched)
if "mnli" in args.task_name and "mnli-mm" not in args.task_name:
args.eval_task_names.append("mnli-mm")
results = {}
for eval_task in args.eval_task_names:
# eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, eval_labels, num_classes = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
print(eval_dataset)
args.eval_batch_size = args.per_gpu_eval_batch_size
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
embeddings = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3]}
embedding = get_batch_emebddings(model, args, **inputs)
embeddings.append(embedding)
results[eval_task] = torch.cat(embeddings, dim=0)
return results
def evaluate(args, model, tokenizer, prefix="", dev_evaluate=False):
# Loop to handle MNLI double evaluation (matched, mis-matched)
if "mnli" in args.task_name and "mnli-mm" not in args.task_name:
args.eval_task_names.append("mnli-mm")
results = {}
for eval_task in args.eval_task_names:
# eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset, eval_labels, num_classes = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True,\
dev_evaluate=dev_evaluate)
print("num_classes ", num_classes, "eval_labels ", eval_labels)
print(eval_dataset)
args.eval_batch_size = args.per_gpu_eval_batch_size
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
if args.hypothesis_only or args.focal_loss or args.poe_loss:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5]}
elif args.hans_only:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3],
'h_ids': batch[4],
'h_attention_mask': batch[5],
'p_ids': batch[6],
'p_attention_mask': batch[7],
'have_overlap': batch[8],
'overlap_rate': batch[9],
'subsequence': batch[10],
'constituent': batch[11]
}
else:
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,
# XLM don't use segment_ids
'labels': batch[3]}
outputs = model(**inputs)["bert"]
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
max_preds = np.argmax(preds, axis=1)
        # map the 3-class predictions onto 2 classes (neutral -> contradiction) for binary datasets.
if num_classes == 2 and args.binerize_eval:
max_preds = binarize_preds(max_preds)
out_label_ids = binarize_preds(out_label_ids)
if eval_task in nli_task_names:
eval_task_metric = "nli"
elif eval_task.startswith("fever"):
eval_task_metric = "fever"
elif eval_task.startswith("HANS"):
eval_task_metric = "hans"
else:
eval_task_metric = eval_task
result = compute_metrics(eval_task_metric, max_preds, out_label_ids)
if args.save_labels_file is not None:
save_labels_file = args.save_labels_file + "_" + eval_task
if args.output_label_format == "kaggle":
write_in_kaggle_format(args, max_preds, eval_labels, save_labels_file, eval_task)
elif args.output_label_format == "numpy":
write_in_numpy_format(args, preds, save_labels_file)
results[eval_task] = result["acc"]
if eval_task.startswith("HANS"):
results[eval_task + "_not-entailment"] = result["acc_0"]
results[eval_task + "_entailment"] = result["acc_1"]
print("results is ", result, " eval_task ", eval_task)
return results, preds
def do_evaluate(args, output_dir, tokenizer, model, config, return_embeddings=False, dev_evaluate=False):
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
tokenizer = tokenizer_class.from_pretrained(output_dir, do_lower_case=args.do_lower_case)
checkpoints = [output_dir]
results = []
preds_list = []
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.set_rubi(False)
model.set_ensemble_training(False)
if args.hans:
model.set_hans(False)
model.set_focal_loss(False)
model.set_poe_loss(False)
if args.hans_only:
model.set_hans(True)
model.to(args.device)
if return_embeddings:
result = get_embeddings(args, model, tokenizer)
else:
result, preds = evaluate(args, model, tokenizer, prefix=global_step, dev_evaluate=dev_evaluate)
preds_list.append(preds)
results.append(result)
if return_embeddings:
return results
else:
return results, preds_list
| 25,678 | 51.620902 | 135 | py |
robust-nli | robust-nli-master/src/BERT/mutils.py | import csv
import os
import torch
def write_to_csv(scores, params, outputfile):
"""
This function writes the parameters and the scores with their names in a
csv file.
"""
# creates the file if not existing.
file = open(outputfile, 'a')
# If file is empty writes the keys to the file.
params_dict = vars(params)
if os.stat(outputfile).st_size == 0:
# Writes the configuration parameters
for key in params_dict.keys():
file.write(key+";")
for i, key in enumerate(scores.keys()):
ending = ";" if i < len(scores.keys())-1 else ""
file.write(key+ending)
file.write("\n")
file.close()
# Writes the values to each corresponding column.
with open(outputfile, 'r') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
# Iterates over the header names and write the corresponding values.
with open(outputfile, 'a') as f:
for i, key in enumerate(headers):
ending = ";" if i < len(headers)-1 else ""
if key in params_dict:
f.write(str(params_dict[key])+ending)
elif key in scores:
f.write(str(scores[key])+ending)
else:
raise AssertionError("Key not found in the given dictionary")
f.write("\n")
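# Minimal usage sketch (names are illustrative): `params` can be any argparse.Namespace
# and `scores` an ordinary dict; repeated calls append rows under the header written on
# the first call.
#   write_to_csv({"acc": 0.84}, argparse.Namespace(lr=2e-5, seed=42), "results.csv")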
# Multiplies the gradient of the given parameter by a constant.
class GradMulConst(torch.autograd.Function):
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output*ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
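# Minimal self-check (illustrative sketch; runs only when this module is executed
# directly): the forward value is unchanged while the gradient reaching `x` is scaled by
# the constant, which is how the bias-only classifiers are detached from the shared
# encoder elsewhere in this project.
if __name__ == "__main__":
    x = torch.ones(2, 3, requires_grad=True)
    y = grad_mul_const(x, 0.0).sum()
    y.backward()
    assert torch.equal(x.grad, torch.zeros_like(x))
    print("grad_mul_const: forward value unchanged, gradient zeroed")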
| 1,741 | 30.107143 | 77 | py |
robust-nli | robust-nli-master/src/InferSent/data.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import torch
def get_batch(batch, word_vec, emb_dim=300):
# sent in batch in decreasing order of lengths (bsize, max_len, word_dim)
lengths = np.array([len(x) for x in batch])
max_len = np.max(lengths)
embed = np.zeros((max_len, len(batch), emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = word_vec[batch[i][j]]
return torch.from_numpy(embed).float(), lengths
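# Shape sketch (illustrative): given a word_vec dict of 300-d vectors, a call such as
#   get_batch([['<s>', 'hi', 'there', '</s>'], ['<s>', 'hello', '</s>']], word_vec)
# returns a float tensor of shape (max_len, bsize, 300) = (4, 2, 300), zero-padded for
# the shorter sentence, together with the per-sentence lengths array([4, 3]).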
def get_word_dict(sentences):
# create vocab of words
word_dict = {}
for sent in sentences:
for word in sent.split():
if word not in word_dict:
word_dict[word] = ''
word_dict['<s>'] = ''
word_dict['</s>'] = ''
word_dict['<p>'] = ''
return word_dict
def get_glove(word_dict, glove_path):
# create word_vec with glove vectors
word_vec = {}
with open(glove_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.array(list(map(float, vec.split())))
print('Found {0}(/{1}) words with glove vectors'.format(
len(word_vec), len(word_dict)))
return word_vec
def build_vocab(sentences, glove_path):
word_dict = get_word_dict(sentences)
word_vec = get_glove(word_dict, glove_path)
print('Vocab size : {0}'.format(len(word_vec)))
return word_vec
def get_nli(data_path, n_classes):
s1 = {}
s2 = {}
target = {}
if n_classes == 3:
dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 2, 'hidden':0}
else:
dico_label = {'entailment': 0, 'neutral': 1, 'contradiction': 1, 'hidden':0}
for data_type in ['train', 'dev', 'test']:
s1[data_type], s2[data_type], target[data_type] = {}, {}, {}
s1[data_type]['path'] = os.path.join(data_path, 's1.' + data_type)
s2[data_type]['path'] = os.path.join(data_path, 's2.' + data_type)
target[data_type]['path'] = os.path.join(data_path,
'labels.' + data_type)
s1[data_type]['sent'] = [line.rstrip() for line in
open(s1[data_type]['path'], 'r')]
s2[data_type]['sent'] = [line.rstrip() for line in
open(s2[data_type]['path'], 'r')]
target[data_type]['data'] = np.array([dico_label[line.rstrip('\n')]
for line in open(target[data_type]['path'], 'r')])
assert len(s1[data_type]['sent']) == len(s2[data_type]['sent']) == \
len(target[data_type]['data'])
print('** {0} DATA : Found {1} pairs of {2} sentences.'.format(
data_type.upper(), len(s1[data_type]['sent']), data_type))
train = {'s1': s1['train']['sent'], 's2': s2['train']['sent'],
'label': target['train']['data']}
dev = {'s1': s1['dev']['sent'], 's2': s2['dev']['sent'],
'label': target['dev']['data']}
test = {'s1': s1['test']['sent'], 's2': s2['test']['sent'],
'label': target['test']['data']}
return train, dev, test
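# Directory layout implied by the path handling above: data_path holds six plain-text
# files with one example per line -- s1.train / s2.train / labels.train plus the matching
# .dev and .test files -- and with n_classes=2 the neutral and contradiction labels are
# merged into a single "not entailment" class.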
| 3,317 | 33.926316 | 84 | py |
robust-nli | robust-nli-master/src/InferSent/models.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
This file contains the definition of encoders used in https://arxiv.org/pdf/1705.02364.pdf
"""
import time
import sys
sys.path.append("../")
import numpy as np
import torch
import torch.nn as nn
from mutils import grad_mul_const
from losses import FocalLoss, POELoss, RUBILoss
from torch.nn import CrossEntropyLoss
"""
BLSTM (max/mean) encoder
"""
class InferSent(nn.Module):
def __init__(self, config):
super(InferSent, self).__init__()
self.bsize = config['bsize']
self.word_emb_dim = config['word_emb_dim']
self.enc_lstm_dim = config['enc_lstm_dim']
self.pool_type = config['pool_type']
self.dpout_model = config['dpout_model']
self.version = 1 if 'version' not in config else config['version']
self.enc_lstm = nn.LSTM(self.word_emb_dim, self.enc_lstm_dim, 1,
bidirectional=True, dropout=self.dpout_model)
assert self.version in [1, 2]
if self.version == 1:
self.bos = '<s>'
self.eos = '</s>'
self.max_pad = True
self.moses_tok = False
elif self.version == 2:
self.bos = '<p>'
self.eos = '</p>'
self.max_pad = False
self.moses_tok = True
def is_cuda(self):
# either all weights are on cpu or they are on gpu
return self.enc_lstm.bias_hh_l0.data.is_cuda
def forward(self, sent_tuple, return_all_emb=False):
# sent_len: [max_len, ..., min_len] (bsize)
# sent: (seqlen x bsize x worddim)
sent, sent_len = sent_tuple
# Sort by length (keep idx)
sent_len_sorted, idx_sort = np.sort(sent_len)[::-1], np.argsort(-sent_len)
sent_len_sorted = sent_len_sorted.copy()
idx_unsort = np.argsort(idx_sort)
idx_sort = torch.from_numpy(idx_sort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_sort)
sent = sent.index_select(1, idx_sort)
# Handling padding in Recurrent Networks
sent_packed = nn.utils.rnn.pack_padded_sequence(sent, sent_len_sorted)
sent_output = self.enc_lstm(sent_packed)[0] # seqlen x batch x 2*nhid
sent_output = nn.utils.rnn.pad_packed_sequence(sent_output)[0]
# Un-sort by length
idx_unsort = torch.from_numpy(idx_unsort).cuda() if self.is_cuda() \
else torch.from_numpy(idx_unsort)
sent_output = sent_output.index_select(1, idx_unsort)
# Pooling
if self.pool_type == "mean":
sent_len = torch.FloatTensor(sent_len.copy()).unsqueeze(1).cuda()
emb = torch.sum(sent_output, 0).squeeze(0)
emb = emb / sent_len.expand_as(emb)
elif self.pool_type == "max":
if not self.max_pad:
sent_output[sent_output == 0] = -1e9
emb = torch.max(sent_output, 0)[0]
if emb.ndimension() == 3:
emb = emb.squeeze(0)
assert emb.ndimension() == 2
if return_all_emb:
all_emb = sent_output.permute(1, 0, 2)
return emb, all_emb
else:
return emb
def set_w2v_path(self, w2v_path):
self.w2v_path = w2v_path
def get_word_dict(self, sentences, tokenize=True):
# create vocab of words
word_dict = {}
sentences = [s.split() if not tokenize else self.tokenize(s) for s in sentences]
for sent in sentences:
for word in sent:
if word not in word_dict:
word_dict[word] = ''
word_dict[self.bos] = ''
word_dict[self.eos] = ''
return word_dict
def get_w2v(self, word_dict):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with w2v vectors
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if word in word_dict:
word_vec[word] = np.fromstring(vec, sep=' ')
print('Found %s(/%s) words with w2v vectors' % (len(word_vec), len(word_dict)))
return word_vec
def get_w2v_k(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
# create word_vec with k first w2v vectors
k = 0
word_vec = {}
with open(self.w2v_path) as f:
for line in f:
word, vec = line.split(' ', 1)
if k <= K:
word_vec[word] = np.fromstring(vec, sep=' ')
k += 1
if k > K:
if word in [self.bos, self.eos]:
word_vec[word] = np.fromstring(vec, sep=' ')
if k > K and all([w in word_vec for w in [self.bos, self.eos]]):
break
return word_vec
def build_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
word_dict = self.get_word_dict(sentences, tokenize)
self.word_vec = self.get_w2v(word_dict)
print('Vocab size : %s' % (len(self.word_vec)))
# build w2v vocab with k most frequent words
def build_vocab_k_words(self, K):
assert hasattr(self, 'w2v_path'), 'w2v path not set'
self.word_vec = self.get_w2v_k(K)
print('Vocab size : %s' % (K))
def update_vocab(self, sentences, tokenize=True):
assert hasattr(self, 'w2v_path'), 'warning : w2v path not set'
assert hasattr(self, 'word_vec'), 'build_vocab before updating it'
word_dict = self.get_word_dict(sentences, tokenize)
# keep only new words
for word in self.word_vec:
if word in word_dict:
del word_dict[word]
        # update vocabulary
if word_dict:
new_word_vec = self.get_w2v(word_dict)
self.word_vec.update(new_word_vec)
else:
new_word_vec = []
print('New vocab size : %s (added %s words)'% (len(self.word_vec), len(new_word_vec)))
def get_batch(self, batch):
# sent in batch in decreasing order of lengths
# batch: (bsize, max_len, word_dim)
embed = np.zeros((len(batch[0]), len(batch), self.word_emb_dim))
for i in range(len(batch)):
for j in range(len(batch[i])):
embed[j, i, :] = self.word_vec[batch[i][j]]
return torch.FloatTensor(embed)
def tokenize(self, s):
from nltk.tokenize import word_tokenize
if self.moses_tok:
s = ' '.join(word_tokenize(s))
s = s.replace(" n't ", "n 't ") # HACK to get ~MOSES tokenization
return s.split()
else:
return word_tokenize(s)
def prepare_samples(self, sentences, bsize, tokenize, verbose):
sentences = [[self.bos] + s.split() + [self.eos] if not tokenize else
[self.bos] + self.tokenize(s) + [self.eos] for s in sentences]
n_w = np.sum([len(x) for x in sentences])
# filters words without w2v vectors
for i in range(len(sentences)):
s_f = [word for word in sentences[i] if word in self.word_vec]
if not s_f:
import warnings
warnings.warn('No words in "%s" (idx=%s) have w2v vectors. \
Replacing by "</s>"..' % (sentences[i], i))
s_f = [self.eos]
sentences[i] = s_f
lengths = np.array([len(s) for s in sentences])
n_wk = np.sum(lengths)
if verbose:
print('Nb words kept : %s/%s (%.1f%s)' % (
n_wk, n_w, 100.0 * n_wk / n_w, '%'))
# sort by decreasing length
lengths, idx_sort = np.sort(lengths)[::-1], np.argsort(-lengths)
sentences = np.array(sentences)[idx_sort]
return sentences, lengths, idx_sort
def encode(self, sentences, bsize=64, tokenize=True, verbose=False):
tic = time.time()
sentences, lengths, idx_sort = self.prepare_samples(
sentences, bsize, tokenize, verbose)
embeddings = []
for stidx in range(0, len(sentences), bsize):
batch = self.get_batch(sentences[stidx:stidx + bsize])
if self.is_cuda():
batch = batch.cuda()
with torch.no_grad():
batch = self.forward((batch, lengths[stidx:stidx + bsize])).data.cpu().numpy()
embeddings.append(batch)
embeddings = np.vstack(embeddings)
# unsort
idx_unsort = np.argsort(idx_sort)
embeddings = embeddings[idx_unsort]
if verbose:
print('Speed : %.1f sentences/s (%s mode, bsize=%s)' % (
len(embeddings)/(time.time()-tic),
'gpu' if self.is_cuda() else 'cpu', bsize))
return embeddings
def visualize(self, sent, tokenize=True):
sent = sent.split() if not tokenize else self.tokenize(sent)
sent = [[self.bos] + [word for word in sent if word in self.word_vec] + [self.eos]]
if ' '.join(sent[0]) == '%s %s' % (self.bos, self.eos):
import warnings
warnings.warn('No words in "%s" have w2v vectors. Replacing \
by "%s %s"..' % (sent, self.bos, self.eos))
batch = self.get_batch(sent)
if self.is_cuda():
batch = batch.cuda()
output = self.enc_lstm(batch)[0]
output, idxs = torch.max(output, 0)
# output, idxs = output.squeeze(), idxs.squeeze()
idxs = idxs.data.cpu().numpy()
argmaxs = [np.sum((idxs == k)) for k in range(len(sent[0]))]
# visualize model
import matplotlib.pyplot as plt
x = range(len(sent[0]))
y = [100.0 * n / np.sum(argmaxs) for n in argmaxs]
plt.xticks(x, sent[0], rotation=45)
plt.bar(x, y)
plt.ylabel('%')
plt.title('Visualisation of words importance')
plt.show()
return output, idxs
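# Hedged usage sketch for the encoder on its own (the GloVe path is a placeholder):
#   params_model = {'bsize': 64, 'word_emb_dim': 300, 'enc_lstm_dim': 2048,
#                   'pool_type': 'max', 'dpout_model': 0.0, 'version': 2}
#   infersent = InferSent(params_model)
#   infersent.set_w2v_path('/path/to/glove.840B.300d.txt')
#   infersent.build_vocab_k_words(K=100000)
#   embeddings = infersent.encode(['A man is playing a guitar.'], tokenize=True)
# encode() returns a numpy array of shape (n_sentences, 2 * enc_lstm_dim).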
"""
Main module for Natural Language Inference
"""
class NLINet(nn.Module):
def __init__(self, config):
super(NLINet, self).__init__()
# classifier
self.nonlinear_fc = config['nonlinear_fc']
self.fc_dim = config['fc_dim']
self.n_classes = config['n_classes']
self.enc_lstm_dim = config['enc_lstm_dim']
self.encoder_type = config['encoder_type']
self.dpout_fc = config['dpout_fc']
self.encoder = eval(self.encoder_type)(config)
self.inputdim = 4*2*self.enc_lstm_dim
if self.nonlinear_fc:
self.classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.inputdim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
self.classifier = nn.Sequential(
nn.Linear(self.inputdim, self.fc_dim),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Linear(self.fc_dim, self.n_classes)
)
def forward(self, s1, s2):
# s1 : (s1, s1_len)
u = self.encoder(s1)
v = self.encoder(s2)
features = torch.cat((u, v, torch.abs(u-v), u*v), 1)
output = self.classifier(features)
return output
def encode(self, s1):
emb = self.encoder(s1)
return emb
"""
Main module for Debiasing.
"""
class DebiasNet(nn.Module):
""" This module wrap the NLI model and applied the debiasing technique to it."""
def __init__(self, config):
super(DebiasNet, self).__init__()
# Loss options.
self.focal_loss = config['focal_loss']
self.poe_loss = config['poe_loss']
self.rubi = config['rubi']
self.n_classes = config['n_classes']
self.gamma_focal = config['gamma_focal']
self.poe_alpha = config['poe_alpha'] if 'poe_alpha' in config else 1.0
if self.focal_loss:
self.loss_fct = FocalLoss(gamma=self.gamma_focal)
elif self.poe_loss:
self.loss_fct = POELoss(poe_alpha=self.poe_alpha)
elif self.rubi:
self.loss_fct = RUBILoss(num_labels=self.n_classes)
else:
self.loss_fct = CrossEntropyLoss()
self.ensemble = self.rubi or self.focal_loss or self.poe_loss
self.loss_fct_h = CrossEntropyLoss()
self.h_loss_weight = config['h_loss_weight']
self.nli_model = config['nli_net']
# Let figure out the dimension of the classifier here.
self.fc_dim = config['fc_dim']
self.encoder_type = config['encoder_type']
self.enc_lstm_dim = config['enc_lstm_dim']
self.inputdim = 4 * 2 * self.enc_lstm_dim
self.dpout_fc = config['dpout_fc']
self.nonlinear_fc = config['nonlinear_fc']
if self.ensemble:
self.nonlinear_h_classifier = config['nonlinear_h_classifier']
self.c1 = self.get_classifier(self.nonlinear_h_classifier)
def get_classifier(self, nonlinear_fc):
if nonlinear_fc:
classifier = nn.Sequential(
nn.Dropout(p=self.dpout_fc),
nn.Linear(int(self.inputdim / 4), self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Tanh(),
nn.Dropout(p=self.dpout_fc),
nn.Linear(self.fc_dim, self.n_classes),
)
else:
classifier = nn.Sequential(
nn.Linear(int(self.inputdim / 4), self.fc_dim),
nn.Linear(self.fc_dim, self.fc_dim),
nn.Linear(self.fc_dim, self.n_classes),
)
return classifier
def get_loss(self, output, adv_output, labels):
loss = self.loss_fct(output, labels, adv_output)
h_output = adv_output
loss += self.h_loss_weight * self.loss_fct_h(h_output, labels)
return loss
def forward(self, s1, s2, labels):
nli_output = self.nli_model(s1, s2)
h_pred = None
if self.ensemble:
# gets the embedding for the hypotheses.
h_embeddings = self.nli_model.encoder(s2)
h_embeddings = grad_mul_const(h_embeddings, 0.0) # do not backpropagate through the hypothesis encoder.
h_pred = self.c1(h_embeddings)
total_loss = self.get_loss(nli_output, h_pred, labels)
else:
total_loss = self.loss_fct(nli_output, labels)
outputs = {}
outputs['total_loss'] = total_loss
outputs['nli'] = nli_output
outputs['h'] = h_pred
return outputs
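# Note on the ensemble branches above: when rubi/focal_loss/poe_loss is enabled,
# 'total_loss' adds h_loss_weight times a plain cross-entropy on the hypothesis-only
# classifier c1 to the debiased NLI loss, and grad_mul_const keeps that auxiliary loss
# from backpropagating into the shared sentence encoder.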
| 15,032 | 34.878282 | 115 | py |
robust-nli | robust-nli-master/src/InferSent/train_nli.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import time
import argparse
import os
import numpy as np
import torch
from torch.autograd import Variable
from data import get_nli, get_batch, build_vocab
from mutils import write_to_csv, get_optimizer, construct_model_name
from models import NLINet, DebiasNet
parser = argparse.ArgumentParser(description='NLI training')
# losses.
parser.add_argument("--poe_alpha", type=float, default=1.0)
parser.add_argument("--gamma_focal", type=float, default=2.0)
parser.add_argument("--nonlinear_h_classifier", action="store_true", help="If specified uses a nonlinear classifier for h model.")
parser.add_argument("--use_early_stopping", action="store_true")
parser.add_argument("--rubi", action="store_true")
parser.add_argument("--poe_loss", action="store_true", help="Uses the product of the expert loss.")
parser.add_argument("--focal_loss", action="store_true", help="Uses the focal loss for classification,\
where instead of the probabilities of the objects, we use the h only probabilities")
# paths
parser.add_argument("--outputfile", type=str, default="results.csv", help="writes the final results\
in this file in a csv format.")
parser.add_argument("--dataset", type=str, default="SNLI", help="this will be set automatically.")
parser.add_argument("--outputdir", type=str, default='savedir/', help="Output directory")
parser.add_argument("--outputmodelname", type=str, nargs='+', default=['model.pickle'])
parser.add_argument("--word_emb_path", type=str, default="../../data/GloVe/glove.840B.300d.txt", help="word embedding file path")
# training
parser.add_argument('--h_loss_weight', type=float, default=1.0, help="defines the weight of the adversary loss.")
parser.add_argument("--n_epochs", type=int, default=20)
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--dpout_model", type=float, default=0., help="encoder dropout")
parser.add_argument("--dpout_fc", type=float, default=0., help="classifier dropout")
parser.add_argument("--nonlinear_fc", action="store_true", help="use nonlinearity in fc")
parser.add_argument("--optimizer", type=str, default="sgd,lr=0.1", help="adam or sgd,lr=0.1")
parser.add_argument("--lrshrink", type=float, default=5, help="shrink factor for sgd")
parser.add_argument("--decay", type=float, default=0.99, help="lr decay")
parser.add_argument("--minlr", type=float, default=1e-5, help="minimum lr")
parser.add_argument("--max_norm", type=float, default=5., help="max norm (grad clipping)")
# model
parser.add_argument("--version", type=int, default=2, help="Defines the version of the model.")
parser.add_argument("--encoder_type", type=str, default='InferSent', choices=['InferSent'], help="see list of encoders")
parser.add_argument("--enc_lstm_dim", type=int, default=2048, help="encoder nhid dimension")
parser.add_argument("--n_enc_layers", type=int, default=1, help="encoder num layers")
parser.add_argument("--fc_dim", type=int, default=512, help="nhid of fc layers")
parser.add_argument("--pool_type", type=str, default='max', help="max or mean")
# gpu
parser.add_argument("--gpu_id", type=int, default=0, help="GPU ID")
parser.add_argument("--seed", type=int, default=1234, help="seed")
# data
parser.add_argument("--word_emb_dim", type=int, default=300, help="word embedding dimension")
params, unknowns = parser.parse_known_args()
if len(unknowns) != 0:
    raise AssertionError("Unknown parameters were passed: {}".format(unknowns))
all_datasets = {
'SNLI': {'path': "../../data/datasets/SNLI", 'n_classes': 3},
'SNLIHard': {'path': "../../data/datasets/SNLIHard", 'n_classes': 3},
'MNLIMatched': {'path': "../../data/datasets/MNLIMatched/", 'n_classes': 3},
'MNLIMismatched': {'path': "../../data/datasets/MNLIMismatched/", 'n_classes': 3},
'MNLIMismatchedHardWithHardTest': {'path': "../../data/datasets/MNLIMismatchedHardWithHardTest/", 'n_classes':3},
'MNLIMatchedHardWithHardTest': {'path': "../../data/datasets/MNLIMatchedHardWithHardTest/", 'n_classes':3},
'JOCI': {'path': "../../data/datasets/JOCI", 'n_classes': 3},
'SICK-E': {'path': "../../data/datasets/SICK-E", 'n_classes': 3},
'AddOneRTE': {'path': "../../data/datasets/AddOneRTE", 'n_classes': 2},
'DPR': {'path': "../../data/datasets/DPR", 'n_classes': 2},
'FNPLUS': {'path': "../../data/datasets/FNPLUS", 'n_classes': 2},
'SciTail': {'path': "../../data/datasets/SciTail", 'n_classes': 2},
'SPRL': {'path': "../../data/datasets/SPRL", 'n_classes': 2},
'MPE': {'path': "../../data/datasets/MPE", 'n_classes': 3},
'QQP': {'path': "../../data/datasets/QQP", 'n_classes': 2},
'GLUEDiagnostic': {'path': "../../data/datasets/GLUEDiagnostic", 'n_classes': 3},
}
params.nlipath = all_datasets[params.dataset]['path']
params.n_classes = all_datasets[params.dataset]['n_classes']
params.outputmodelname = construct_model_name(params, params.outputmodelname)
# set gpu device
torch.cuda.set_device(params.gpu_id)
# print parameters passed, and all parameters
print('\ntogrep : {0}\n'.format(sys.argv[1:]))
print(params)
# this function clears the gradient of the given model.
def clear_gradients(model, name):
for param in eval('model.'+name).parameters():
if param.grad is not None:
param.grad *= 0.0
"""
SEED
"""
np.random.seed(params.seed)
torch.manual_seed(params.seed)
torch.cuda.manual_seed(params.seed)
"""
DATA
"""
train, valid, test = get_nli(params.nlipath, params.n_classes)
word_vec = build_vocab(train['s1'] + train['s2'] +
valid['s1'] + valid['s2'] +
test['s1'] + test['s2'], params.word_emb_path)
for split in ['s1', 's2']:
for data_type in ['train', 'valid', 'test']:
eval(data_type)[split] = np.array([['<s>'] +
[word for word in sent.split() if word in word_vec] +
['</s>'] for sent in eval(data_type)[split]])
"""
MODEL
"""
# model config
config_nli_model = {
'n_words' : len(word_vec) ,
'word_emb_dim' : params.word_emb_dim ,
'enc_lstm_dim' : params.enc_lstm_dim ,
'n_enc_layers' : params.n_enc_layers ,
'dpout_model' : params.dpout_model ,
'dpout_fc' : params.dpout_fc ,
'fc_dim' : params.fc_dim ,
'bsize' : params.batch_size ,
'n_classes' : params.n_classes ,
'pool_type' : params.pool_type ,
'nonlinear_fc' : params.nonlinear_fc ,
'encoder_type' : params.encoder_type ,
'use_cuda' : True ,
'version' : params.version ,
}
nli_net = NLINet(config_nli_model)
print(nli_net)
config_debias_model = {
'n_words' : len(word_vec) ,
'word_emb_dim' : params.word_emb_dim ,
'enc_lstm_dim' : params.enc_lstm_dim ,
'n_enc_layers' : params.n_enc_layers ,
'dpout_model' : params.dpout_model ,
'dpout_fc' : params.dpout_fc ,
'fc_dim' : params.fc_dim ,
'bsize' : params.batch_size ,
'n_classes' : params.n_classes ,
'pool_type' : params.pool_type ,
'nonlinear_fc' : params.nonlinear_fc ,
'encoder_type' : params.encoder_type ,
'use_cuda' : True ,
'nli_net' : nli_net ,
'version' : params.version ,
"poe_loss" : params.poe_loss ,
"focal_loss" : params.focal_loss ,
"h_loss_weight" : params.h_loss_weight ,
"rubi" : params.rubi ,
"nonlinear_h_classifier" : params.nonlinear_h_classifier,
"gamma_focal" : params.gamma_focal,
"poe_alpha" : params.poe_alpha,
}
# model
encoder_types = ['InferSent']
assert params.encoder_type in encoder_types, "encoder_type must be in " + \
str(encoder_types)
debias_net = DebiasNet(config_debias_model)
print(debias_net)
# optimizer
optim_fn, optim_params = get_optimizer(params.optimizer)
optimizer = optim_fn(debias_net.parameters(), **optim_params)
# cuda by default
debias_net.cuda()
"""
TRAIN
"""
val_acc_best = -1e10
adam_stop = False
stop_training = False
lr = optim_params['lr'] if 'sgd' in params.optimizer else None
def trainepoch(epoch):
print('\nTRAINING : Epoch ' + str(epoch))
nli_net.train()
debias_net.train()
all_costs = []
logs = []
words_count = 0
last_time = time.time()
correct = 0.
# shuffle the data
permutation = np.random.permutation(len(train['s1']))
s1 = train['s1'][permutation]
s2 = train['s2'][permutation]
target = train['label'][permutation]
optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] * params.decay if epoch>1\
and 'sgd' in params.optimizer else optimizer.param_groups[0]['lr']
print('Learning rate : {0}'.format(optimizer.param_groups[0]['lr']))
for stidx in range(0, len(s1), params.batch_size):
# prepare batch
s1_batch, s1_len = get_batch(s1[stidx:stidx + params.batch_size],
word_vec, params.word_emb_dim)
s2_batch, s2_len = get_batch(s2[stidx:stidx + params.batch_size],
word_vec, params.word_emb_dim)
s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
tgt_batch = Variable(torch.LongTensor(target[stidx:stidx + params.batch_size])).cuda()
k = s1_batch.size(1) # actual batch size
# model forward
outputs = debias_net((s1_batch, s1_len), (s2_batch, s2_len), tgt_batch)
pred = outputs['nli'].data.max(1)[1]
correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()
assert len(pred) == len(s1[stidx:stidx + params.batch_size])
# define the losses here.
all_costs.append(outputs['total_loss'].item())
words_count += (s1_batch.nelement() + s2_batch.nelement()) / params.word_emb_dim
# backward
optimizer.zero_grad()
# lets do the backward in the several steps.
outputs['total_loss'].backward()
# gradient clipping (off by default)
shrink_factor = 1
total_norm = 0
for p in debias_net.parameters():
if p.requires_grad:
total_norm += p.grad.data.norm() ** 2
total_norm = np.sqrt(total_norm.cpu())
if total_norm > params.max_norm:
shrink_factor = params.max_norm / total_norm
current_lr = optimizer.param_groups[0]['lr'] # current lr (no external "lr", for adam)
optimizer.param_groups[0]['lr'] = current_lr * shrink_factor # just for update
# optimizer step
optimizer.step()
optimizer.param_groups[0]['lr'] = current_lr
if len(all_costs) == 100:
logs_outputs = '{0} ; total loss {1} ; sentence/s {2} ;\
words/s {3} ; accuracy train : {4}'.format(
stidx, round(np.mean(all_costs), 2),
int(len(all_costs) * params.batch_size / (time.time() - last_time)),
int(words_count * 1.0 / (time.time() - last_time)),
round(100.*correct.item()/(stidx+k), 2))
logs.append(logs_outputs)
print(logs[-1])
last_time = time.time()
words_count = 0
all_costs = []
train_acc = round(100 * correct.item()/len(s1), 2)
print('results : epoch {0} ; mean accuracy train : {1}'
.format(epoch, train_acc))
return train_acc
def evaluate(epoch, eval_type='valid', final_eval=False):
nli_net.eval()
debias_net.eval()
correct = 0.
global val_acc_best, lr, stop_training, adam_stop
if eval_type == 'valid':
print('\nVALIDATION : Epoch {0}'.format(epoch))
s1 = valid['s1'] if eval_type == 'valid' else test['s1']
s2 = valid['s2'] if eval_type == 'valid' else test['s2']
target = valid['label'] if eval_type == 'valid' else test['label']
for i in range(0, len(s1), params.batch_size):
# prepare batch
s1_batch, s1_len = get_batch(s1[i:i + params.batch_size], word_vec, params.word_emb_dim)
s2_batch, s2_len = get_batch(s2[i:i + params.batch_size], word_vec, params.word_emb_dim)
s1_batch, s2_batch = Variable(s1_batch.cuda()), Variable(s2_batch.cuda())
tgt_batch = Variable(torch.LongTensor(target[i:i + params.batch_size])).cuda()
# model forward
outputs = debias_net((s1_batch, s1_len), (s2_batch, s2_len), tgt_batch)
pred = outputs['nli'].data.max(1)[1]
correct += pred.long().eq(tgt_batch.data.long()).cpu().sum()
# save model
eval_acc = round(100 * correct.item() / len(s1), 2)
if final_eval:
print('finalgrep : accuracy {0} : {1}'.format(eval_type, eval_acc))
else:
print('togrep : results : epoch {0} ; mean accuracy {1} :\
{2}'.format(epoch, eval_type, eval_acc))
if eval_type == 'valid' and epoch <= params.n_epochs:
if eval_acc > val_acc_best:
print('saving model at epoch {0}'.format(epoch))
if not os.path.exists(params.outputdir):
os.makedirs(params.outputdir)
torch.save(debias_net, os.path.join(params.outputdir,
params.outputmodelname))
val_acc_best = eval_acc
else:
if 'sgd' in params.optimizer:
optimizer.param_groups[0]['lr'] = optimizer.param_groups[0]['lr'] / params.lrshrink
print('Shrinking lr by : {0}. New lr = {1}'
.format(params.lrshrink,
optimizer.param_groups[0]['lr']))
if optimizer.param_groups[0]['lr'] < params.minlr and params.use_early_stopping:
stop_training = True
if 'adam' in params.optimizer and params.use_early_stopping:
# early stopping (at 2nd decrease in accuracy)
stop_training = adam_stop
adam_stop = True
return eval_acc
"""
Train model on Natural Language Inference task
"""
epoch = 1
while not stop_training and epoch <= params.n_epochs:
train_acc = trainepoch(epoch)
eval_acc = evaluate(epoch, 'valid')
epoch += 1
# Run best model on test set.
debias_net = torch.load(os.path.join(params.outputdir, params.outputmodelname))
scores = {}
print('\nTEST : Epoch {0}'.format(epoch))
scores['NLI_val'] = evaluate(1e6, 'valid', True)
scores['NLI_test'] = evaluate(0, 'test', True)
write_to_csv(scores, params, params.outputfile)
| 14,974 | 39.582656 | 130 | py |
robust-nli | robust-nli-master/src/InferSent/mutils.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import inspect
from torch import optim
import torch
import os
import csv
def construct_model_name(params, names_params):
if len(names_params) == 1:
return names_params[0]
else:
params_dict = vars(params)
outputmodelname=""
for n in names_params:
outputmodelname += str(n) + ":" + str(params_dict[str(n)]) + "-"
return outputmodelname
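# Illustrative usage (a minimal sketch; the attribute values shown are assumptions,
# not defaults taken from this repo):
#   construct_model_name(params, ["model.pickle"])           -> "model.pickle"
#   construct_model_name(params, ["optimizer", "n_epochs"])  -> "optimizer:adam-n_epochs:10-"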
def write_to_csv(scores, params, outputfile):
"""
This function writes the parameters and the scores with their names in a
csv file.
"""
    # Creates the file if it does not exist.
file = open(outputfile, 'a')
    # If the file is empty, write the header (parameter and score names) first.
params_dict = vars(params)
if os.stat(outputfile).st_size == 0:
# Writes the configuration parameters
for key in params_dict.keys():
file.write(key+";")
for i, key in enumerate(scores.keys()):
ending = ";" if i < len(scores.keys())-1 else ""
file.write(key+ending)
file.write("\n")
file.close()
# Writes the values to each corresponding column.
with open(outputfile, 'r') as f:
reader = csv.reader(f, delimiter=';')
headers = next(reader)
# Iterates over the header names and write the corresponding values.
with open(outputfile, 'a') as f:
for i, key in enumerate(headers):
ending = ";" if i < len(headers)-1 else ""
if key in params_dict:
f.write(str(params_dict[key])+ending)
elif key in scores:
f.write(str(scores[key])+ending)
else:
raise AssertionError("Key not found in the given dictionary")
f.write("\n")
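# Minimal usage sketch: the score keys below mirror the ones produced by the training
# script, while "results.csv" is a placeholder path.
#   scores = {'NLI_val': 84.2, 'NLI_test': 83.7}
#   write_to_csv(scores, params, 'results.csv')
# The first call writes the header built from params plus the score names; later
# calls append one row of values per run.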
def get_optimizer(s):
"""
Parse optimizer parameters.
Input should be of the form:
- "sgd,lr=0.01"
- "adagrad,lr=0.1,lr_decay=0.05"
"""
if "," in s:
method = s[:s.find(',')]
optim_params = {}
for x in s[s.find(',') + 1:].split(','):
split = x.split('=')
assert len(split) == 2
            assert re.match(r"^[+-]?(\d+(\.\d*)?|\.\d+)$", split[1]) is not None
optim_params[split[0]] = float(split[1])
else:
method = s
optim_params = {}
if method == 'adadelta':
optim_fn = optim.Adadelta
elif method == 'adagrad':
optim_fn = optim.Adagrad
elif method == 'adam':
optim_fn = optim.Adam
elif method == 'adamax':
optim_fn = optim.Adamax
elif method == 'asgd':
optim_fn = optim.ASGD
elif method == 'rmsprop':
optim_fn = optim.RMSprop
elif method == 'rprop':
optim_fn = optim.Rprop
elif method == 'sgd':
optim_fn = optim.SGD
assert 'lr' in optim_params
else:
raise Exception('Unknown optimization method: "%s"' % method)
# check that we give good parameters to the optimizer
    expected_args = inspect.getfullargspec(optim_fn.__init__)[0]
assert expected_args[:2] == ['self', 'params']
if not all(k in expected_args[2:] for k in optim_params.keys()):
raise Exception('Unexpected parameters: expected "%s", got "%s"' % (
str(expected_args[2:]), str(optim_params.keys())))
return optim_fn, optim_params
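# Minimal usage sketch (the optimizer string follows the format documented above;
# "model" stands in for any nn.Module):
#   optim_fn, optim_params = get_optimizer("sgd,lr=0.1")
#   optimizer = optim_fn(model.parameters(), **optim_params)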
"""
Importing batcher and prepare for SentEval
"""
def batcher(batch, params):
# batch contains list of words
batch = [['<s>'] + s + ['</s>'] for s in batch]
sentences = [' '.join(s) for s in batch]
embeddings = params.infersent.encode(sentences, bsize=params.batch_size,
tokenize=False)
return embeddings
def prepare(params, samples):
params.infersent.build_vocab([' '.join(s) for s in samples],
params.glove_path, tokenize=False)
class dotdict(dict):
""" dot.notation access to dictionary attributes """
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
# Multiplies the gradient of the given parameter by a constant.
class GradMulConst(torch.autograd.Function):
@staticmethod
def forward(ctx, x, const):
ctx.const = const
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output*ctx.const, None
def grad_mul_const(x, const):
return GradMulConst.apply(x, const)
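# Usage sketch (illustrative call site, not taken from this file): the forward pass is
# the identity, so const=-1.0 turns this into a gradient-reversal layer, while
# 0 < const < 1 merely down-weights the gradient flowing back into the encoder.
#   h = grad_mul_const(h, -1.0)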
| 4,580 | 28.941176 | 79 | py |
robust-nli | robust-nli-master/data/scripts/nli_hardset.py | import json_lines
import os
import argparse
def process_nli_hardset(datapth, outdir):
if not os.path.exists(outdir):
os.makedirs(outdir)
# Writes data in the file.
sentences2 = []
sentences1 = []
labels = []
pair_ids = []
with open(datapth, 'rb') as f:
for item in json_lines.reader(f):
sentences2.append(item['sentence2'])
sentences1.append(item['sentence1'])
labels.append(item['gold_label'])
pair_ids.append(item['pairID'])
with open(os.path.join(outdir, 'labels.test'), 'w') as f:
f.write('\n'.join(labels))
with open(os.path.join(outdir, 's1.test'), 'w') as f:
f.write('\n'.join(sentences1))
with open(os.path.join(outdir, 's2.test'), 'w') as f:
f.write('\n'.join(sentences2))
with open(os.path.join(outdir, 'ids.test'), 'w') as f:
f.write('\n'.join(pair_ids))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Processing Hard NLI dataset.")
parser.add_argument("--datapath", type=str,
help="Defines the path to the hardset of NLI dataset")
parser.add_argument("--outputpath", type=str,
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
process_nli_hardset(params.datapath, params.outputpath)
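# Example invocation (both paths are placeholders, not files shipped with this repo):
#   python nli_hardset.py --datapath snli_1.0_test_hard.jsonl --outputpath SNLIHard/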
| 1,411 | 33.439024 | 103 | py |
robust-nli | robust-nli-master/data/scripts/glue_diagnostic.py | # This scripts process the SICK-E dataset and
# writes it in the format of SNLI dataset.
import os
import argparse
import pandas as pd
import numpy as np
class GlueDiagnosticDataset(object):
def __init__(self, testpath, outputdir):
self.testpath = testpath
self.outputdir = outputdir
        # Creates the output directory if it does not exist.
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def loadFile(self, datapath):
df = pd.read_csv(datapath, sep='\t')
labels = df['Label'].values.tolist()
# Filters all nan labels.
indices = [i for i, x in enumerate(labels) if x is not np.nan]
data = {}
data['s1'] = np.array(df['Premise'].values.tolist())[indices]
data['s2'] = np.array(df['Hypothesis'].values.tolist())[indices]
data['labels'] = np.array(df['Label'].values.tolist())[indices]
assert (len(data['s1']) == len(data['s2']) == len(data['labels']))
return data
def process(self):
test = self.loadFile(os.path.join(self.testpath))
self.writeData(test['s1'], os.path.join(self.outputdir, 's1.test'))
self.writeData(test['s2'], os.path.join(self.outputdir, 's2.test'))
self.writeData(test['labels'], os.path.join(self.outputdir, 'labels.test'))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Process Glue Diagnostic test set.")
parser.add_argument("--testpath", type=str, #default="/idiap/temp/rkarimi/datasets/diagnostic-full.tsv?dl=1", \
help="Defines the path to GLUE test set")
parser.add_argument("--outputpath", type=str, #default="/idiap/temp/rkarimi/datasets/GlueDiagnostic/", \
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
GlueDiagnosticDataset(params.testpath, params.outputpath).process()
| 2,134 | 37.125 | 115 | py |
robust-nli | robust-nli-master/data/scripts/recast_white.py | import os
import argparse
import glob
class RecastWhiteDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def process_file(self, datapath):
data = {}
for type in ['train', 'dev', 'test']:
data[type] = {}
data[type]['s1'] = []
data[type]['s2'] = []
data[type]['labels'] = []
dataset_name = (datapath.split("/")[-1].split("_")[0]).upper()
orig_sent, hyp_sent, data_split, src, label = None, None, None, None, None
for line in open(datapath):
if line.startswith("entailed: "):
label = "entailment"
if "not-entailed" in line:
label = "contradiction"
elif line.startswith("text: "):
orig_sent = " ".join(line.split("text: ")[1:]).strip()
elif line.startswith("hypothesis: "):
hyp_sent = " ".join(line.split("hypothesis: ")[1:]).strip()
elif line.startswith("partof: "):
data_split = line.split("partof: ")[-1].strip()
elif line.startswith("provenance: "):
src = line.split("provenance: ")[-1].strip()
elif not line.strip():
assert orig_sent != None
assert hyp_sent != None
assert data_split != None
assert src != None
assert label != None
data[data_split]['labels'].append(label)
data[data_split]['s1'].append(orig_sent)
data[data_split]['s2'].append(hyp_sent)
orig_sent, hyp_sent, data_split, src, label = None, None, None, None, None
        # Creates the output directory if it does not exist.
if not os.path.exists(os.path.join(self.outputdir, dataset_name)):
os.makedirs(os.path.join(self.outputdir, dataset_name))
# Writes the dataset.
for name, data in data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, dataset_name, 's1.' + name))
self.writeData(data['s2'], os.path.join(self.outputdir, dataset_name, 's2.' + name))
self.writeData(data['labels'], os.path.join(self.outputdir, dataset_name, 'labels.' + name))
def process(self):
input_files = glob.glob(os.path.join(self.datadir,"*_data.txt"))
for file in input_files:
self.process_file(file)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Recast White datasets.")
parser.add_argument("--datadir", type=str, default="/idiap/temp/rkarimi/datasets/rte/", \
help="Defines the path to the datasets recats by White et al")
parser.add_argument("--outputpath", type=str, default="/idiap/temp/rkarimi/datasets/", \
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
RecastWhiteDataset(params.datadir, params.outputpath).process()
| 3,255 | 40.74359 | 104 | py |
robust-nli | robust-nli-master/data/scripts/scitail.py | # This scripts process the Scitail dataset and writes it in the
# format of SNLI dataset.
import os
import json_lines
import argparse
class SciTailDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
        # Creates the output directory if it does not exist.
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def loadFile(self, datapath):
sentences2 = []
sentences1 = []
labels = []
with open(datapath, 'rb') as f:
for item in json_lines.reader(f):
sentences2.append(item['sentence2'])
sentences1.append(item['sentence1'])
labels.append(item['gold_label'])
data = {}
data['s1'] = sentences1
data['s2'] = sentences2
data['labels'] = labels
return data
def process(self):
train = self.loadFile(os.path.join(self.datadir, 'scitail_1.0_train.txt'))
dev = self.loadFile(os.path.join(self.datadir, 'scitail_1.0_dev.txt'))
test = self.loadFile(os.path.join(self.datadir, 'scitail_1.0_test.txt'))
scitail_data = {'train':train, 'dev':dev, 'test':test}
for name, data in scitail_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Process SciTail dataset.")
parser.add_argument("--datadir", type=str,
help="Defines the path to the nli_format folder of SciTail dataset")
parser.add_argument("--outputpath", type=str,
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
SciTailDataset(params.datadir, params.outputpath).process()
| 2,217 | 36.59322 | 88 | py |
robust-nli | robust-nli-master/data/scripts/download_glue.py | # The codes are adapted from https://raw.githubusercontent.com/nyu-mll/jiant/master/scripts/download_glue_data.py
import os
import sys
import shutil
import argparse
import tempfile
import urllib.request
import zipfile
TASKS = ["MNLI", "SNLI"]
TASK2PATH = {
"MNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FMNLI.zip?alt=media&token=50329ea1-e339-40e2-809c-10c40afff3ce',
"SNLI":'https://firebasestorage.googleapis.com/v0/b/mtl-sentence-representations.appspot.com/o/data%2FSNLI.zip?alt=media&token=4afcfbb2-ff0c-4b2d-a09a-dbf07926f4df',
}
def download_and_extract(task, data_dir):
print("Downloading and extracting %s..." % task)
data_file = "%s.zip" % task
urllib.request.urlretrieve(TASK2PATH[task], data_file)
with zipfile.ZipFile(data_file) as zip_ref:
zip_ref.extractall(data_dir)
os.remove(data_file)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', help='directory to save data to', type=str)
parser.add_argument('--tasks', nargs="+", help='list of tasks to get downloaded', type=str)
args = parser.parse_args()
if not os.path.isdir(args.data_dir):
os.mkdir(args.data_dir)
tasks = args.tasks
for task in tasks:
download_and_extract(task, args.data_dir)
if __name__ == '__main__':
main()
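# Example invocation (the data directory is a placeholder):
#   python download_glue.py --data_dir ./data --tasks MNLI SNLI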
| 1,366 | 34.973684 | 169 | py |
robust-nli | robust-nli-master/data/scripts/joci.py | import csv
import random
import os
import argparse
class JOCIDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
        # Creates the output directory if it does not exist.
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def convert_label(self, num):
if num == 1:
return 'contradiction'
if num == 5:
return 'entailment'
return 'neutral'
def loadFile(self, split):
sentences1 = []
sentences2 = []
labels = []
        with open(os.path.join(self.datadir, 'joci.csv'), 'r') as csvfile:
csv_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
line_num = -1
for row in csv_reader:
line_num += 1
if line_num == 0:
continue
hyp_src = row[4]
                # TODO: check why this commented-out filter is in the code.
'''
# This is only for processing B.
if "AGCI" not in hyp_src:
continue
'''
premise, hypothesis, label = row[0], row[1], self.convert_label(int(row[2]))
sentences1.append(premise.strip())
sentences2.append(hypothesis.strip())
labels.append(label)
# Now we have all the data in both section A and B.
combined = list(zip(sentences1, sentences2, labels))
random.shuffle(combined)
sentences1[:], sentences2[:], labels[:] = zip(*combined)
data = {}
data['s1'] = sentences1
data['s2'] = sentences2
data['labels'] = labels
return data
def process(self):
train = self.loadFile('train')
dev = self.loadFile('dev')
test = self.loadFile('test')
joci_data = {'train':train, 'dev':dev, 'test':test}
for name, data in joci_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Processing Joci datasets.")
parser.add_argument("--datadir", type=str,
help="Defines the path to the joci datasets")
parser.add_argument("--outputpath", type=str,
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
JOCIDataset(params.datadir, params.outputpath).process()
| 2,952 | 33.337209 | 97 | py |
robust-nli | robust-nli-master/data/scripts/add_one_rte.py | import os
import argparse
class AddOneRTEDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
        # Creates the output directory if it does not exist.
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def convert_label(self, score, is_test):
""" Converts not_entailed to contradiction, since we convert
contradiction and neutral to one label, it does not matter to
which label we convert the not_entailed labels.
"""
score = float(score)
if is_test:
if score <= 3:
return "contradiction"
elif score >= 4:
return "entailment"
return
if score < 3.5:
return "contradiction"
return "entailment"
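    # For example (values are illustrative): convert_label("4.5", is_test=False)
    # returns "entailment", while convert_label("3.2", is_test=True) returns None,
    # so borderline test pairs are skipped by the caller.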
def loadFile(self, type):
sentences1 = []
sentences2 = []
labels = []
line_count = -1
for line in open(os.path.join(self.datadir,"addone-entailment/splits/data.%s" % (type))):
line_count += 1
line = line.split("\t")
            assert (len(line) == 7)  # each line of the add-one-rte file must have 7 tab-separated fields
label = self.convert_label(line[0], type == "test")
if not label:
continue
labels.append(label)
hypothesis = line[-1].replace("<b><u>", "").replace("</u></b>", "").strip()
premise = line[-2].replace("<b><u>", "").replace("</u></b>", "").strip()
sentences2.append(hypothesis)
sentences1.append(premise)
assert (len(labels) == len(sentences2) == len(sentences1))
data = {}
data['s1'] = sentences1
data['s2'] = sentences2
data['labels'] = labels
return data
def process(self):
train = self.loadFile('train')
dev = self.loadFile('dev')
test = self.loadFile('test')
add_one_rte_data = {'train':train, 'dev':dev, 'test':test}
for name, data in add_one_rte_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Processing ADD-ONE-RTE dataset.")
parser.add_argument("--datadir", type=str, #default="/idiap/temp/rkarimi/datasets/AN-composition/", \
help="Defines the path to the nli_format folder of Add-One-RTE dataset")
parser.add_argument("--outputpath", type=str, #default="/idiap/temp/rkarimi/datasets/AddOneRTE", \
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
AddOneRTEDataset(params.datadir, params.outputpath).process()
| 3,144 | 36.891566 | 105 | py |
robust-nli | robust-nli-master/data/scripts/sick.py | # This scripts process the SICK-E dataset and
# writes it in the format of SNLI dataset.
import os
import pandas as pd
import argparse
class SickDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
        # Creates the output directory if it does not exist.
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def loadFile(self, datapath):
label_dict = {"NEUTRAL": "neutral", "CONTRADICTION":"contradiction",\
"ENTAILMENT":"entailment"}
df = pd.read_csv(datapath, sep="\t")
sentences1 = df['sentence_A'].tolist()
sentences2 = df['sentence_B'].tolist()
labels = df['entailment_judgment'].tolist()
labels = [label_dict[label] for label in labels]
data = {}
data['s1'] = sentences1
data['s2'] = sentences2
data['labels'] = labels
return data
def process(self):
train = self.loadFile(os.path.join(self.datadir, 'SICK_train.txt'))
dev = self.loadFile(os.path.join(self.datadir, 'SICK_trial.txt'))
test = self.loadFile(os.path.join(self.datadir, 'SICK_test_annotated.txt'))
scitail_data = {'train':train, 'dev':dev, 'test':test}
for name, data in scitail_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Process Sick dataset.")
parser.add_argument("--datadir", type=str, default="/idiap/temp/rkarimi/datasets/sick/", \
help="Defines the path to the nli_format folder of Sick dataset")
parser.add_argument("--outputpath", type=str, default="/idiap/temp/rkarimi/datasets/SICK/", \
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
SickDataset(params.datadir, params.outputpath).process()
| 2,346 | 40.175439 | 97 | py |
robust-nli | robust-nli-master/data/scripts/qqp.py | import csv
import random
import os
import argparse
import numpy as np
import csv
class QQPDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
if not os.path.exists(outputdir):
os.makedirs(outputdir)
self.label_dict = {'1': "entailment", '0': "neutral"}
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def loadFile(self, datapath):
sentences1 = []
sentences2= []
labels = []
with open(datapath) as tsvfile:
reader = csv.reader(tsvfile, delimiter='\t')
for row in reader:
sentences1.append(row[1])
sentences2.append(row[2])
labels.append(row[0])
labels = [self.label_dict[label] for label in labels]
data = {}
data['s1'] = sentences1
data['s2'] = sentences2
data['labels'] = labels
return data
def process(self):
train = self.loadFile(os.path.join(self.datadir, 'train.tsv'))
dev = self.loadFile(os.path.join(self.datadir, 'dev.tsv'))
test = self.loadFile(os.path.join(self.datadir, 'test.tsv'))
mpe_data = {'train':train, 'dev':dev, 'test':test}
for name, data in mpe_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Processing QQP datasets.")
parser.add_argument("--datadir", type=str, default="Quora_question_pair_partition", \
help="Defines the path to the qqp datasets")
parser.add_argument("--outputpath", type=str, \
help="Path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
QQPDataset(params.datadir, params.outputpath).process()
| 2,200 | 34.5 | 91 | py |
robust-nli | robust-nli-master/data/scripts/hans.py | import csv
import sys
from os.path import join
import os
import argparse
def read_tsv(input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
def split_hans_dataset(input_file, outputdir):
lines = read_tsv(input_file)
types = ["lexical_overlap", "constituent", "subsequence"]
lines_types = [[], [], []]
for i, line in enumerate(lines):
if i == 0:
first_line = "\t".join(line)
if line[8] == types[0]:
lines_types[0].append("\t".join(line))
elif line[8] == types[1]:
lines_types[1].append("\t".join(line))
elif line[8] == types[2]:
lines_types[2].append("\t".join(line))
# Write the splitted files.
for i, heuristic in enumerate(types):
datadir = join(outputdir, heuristic)
if not os.path.exists(datadir):
os.makedirs(datadir)
filepath = join(datadir, "heuristics_evaluation_set.txt")
lines = [first_line]+lines_types[i]
with open(filepath, "w") as f:
for line in lines:
f.write(line+"\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser("process HANS dataset.")
parser.add_argument("--inputfile", type=str)
parser.add_argument("--outputdir", type=str)
args = parser.parse_args()
split_hans_dataset(args.inputfile, args.outputdir)
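# Example invocation (paths are placeholders; the input is the official HANS
# evaluation file, which is split into one folder per heuristic):
#   python hans.py --inputfile heuristics_evaluation_set.txt --outputdir ./hans_splits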
| 1,703 | 31.769231 | 68 | py |
robust-nli | robust-nli-master/data/scripts/mpe.py | import csv
import random
import os
import argparse
import pandas as pd
import numpy as np
class MPEDataset(object):
def __init__(self, datadir, outputdir):
self.datadir = datadir
self.outputdir = outputdir
if not os.path.exists(outputdir):
os.makedirs(outputdir)
def writeData(self, lines, fpath):
"""Writes the given data in the format of each
data in one line.
"""
with open(fpath, 'w') as f:
for line in lines:
print(line, file=f)
def loadFile(self, datapath):
df = pd.read_csv(datapath, sep="\t")
premise1 = df['premise1'].tolist()
premise2 = df['premise2'].tolist()
premise3 = df['premise3'].tolist()
premise4 = df['premise4'].tolist()
premise1 = [s.split('/')[1] for s in premise1]
premise2 = [s.split('/')[1] for s in premise2]
premise3 = [s.split('/')[1] for s in premise3]
premise4 = [s.split('/')[1] for s in premise4]
sentences1 = [" ".join([s1, s2, s3, s4]) for s1, s2, s3, s4 in zip(premise1, premise2, premise3, premise4)]
sentences2 = df['hypothesis'].tolist()
labels = df['gold_label'].tolist()
indices = [i for i, x in enumerate(labels) if x is not np.nan]
data = {}
data['s1'] = np.array(sentences1)[indices]
data['s2'] = np.array(sentences2)[indices]
data['labels'] = np.array(labels)[indices]
return data
def process(self):
train = self.loadFile(os.path.join(self.datadir, 'mpe_train.txt'))
dev = self.loadFile(os.path.join(self.datadir, 'mpe_dev.txt'))
test = self.loadFile(os.path.join(self.datadir, 'mpe_test.txt'))
mpe_data = {'train':train, 'dev':dev, 'test':test}
for name, data in mpe_data.items():
self.writeData(data['s1'], os.path.join(self.outputdir, 's1.'+name))
self.writeData(data['s2'], os.path.join(self.outputdir, 's2.'+name))
self.writeData(data['labels'], os.path.join(self.outputdir, 'labels.'+name))
if __name__ == "__main__":
parser = argparse.ArgumentParser("Processing MPE datasets.")
parser.add_argument("--datadir", type=str, default="/idiap/temp/rkarimi/datasets/mpe/", \
help="Defines the path to the mpe datasets")
parser.add_argument("--outputpath", type=str, default="/idiap/temp/rkarimi/datasets/MPE", \
help="Defines the path to the output folder for the processed dataset")
params, unknowns = parser.parse_known_args()
MPEDataset(params.datadir, params.outputpath).process()
| 2,653 | 39.212121 | 115 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/main.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Main function for this repo. """
import argparse
import torch
from utils.misc import pprint
from utils.gpu_tools import set_gpu
from trainer.meta import MetaTrainer
from trainer.pre import PreTrainer
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Basic parameters
parser.add_argument('--model_type', type=str, default='ResNet', choices=['ResNet']) # The network architecture
    parser.add_argument('--dataset', type=str, default='miniImageNet', choices=['miniImageNet', 'tieredImageNet', 'FC100']) # Dataset
parser.add_argument('--phase', type=str, default='meta_train', choices=['pre_train', 'meta_train', 'meta_eval']) # Phase
parser.add_argument('--seed', type=int, default=0) # Manual seed for PyTorch, "0" means using random seed
parser.add_argument('--gpu', default='1') # GPU id
parser.add_argument('--dataset_dir', type=str, default='./data/mini/') # Dataset folder
# Parameters for meta-train phase
parser.add_argument('--max_epoch', type=int, default=100) # Epoch number for meta-train phase
parser.add_argument('--num_batch', type=int, default=100) # The number for different tasks used for meta-train
parser.add_argument('--shot', type=int, default=1) # Shot number, how many samples for one class in a task
parser.add_argument('--way', type=int, default=5) # Way number, how many classes in a task
parser.add_argument('--train_query', type=int, default=15) # The number of training samples for each class in a task
parser.add_argument('--val_query', type=int, default=15) # The number of test samples for each class in a task
parser.add_argument('--meta_lr1', type=float, default=0.0001) # Learning rate for SS weights
parser.add_argument('--meta_lr2', type=float, default=0.001) # Learning rate for FC weights
parser.add_argument('--base_lr', type=float, default=0.01) # Learning rate for the inner loop
parser.add_argument('--update_step', type=int, default=50) # The number of updates for the inner loop
parser.add_argument('--step_size', type=int, default=10) # The number of epochs to reduce the meta learning rates
parser.add_argument('--gamma', type=float, default=0.5) # Gamma for the meta-train learning rate decay
parser.add_argument('--init_weights', type=str, default=None) # The pre-trained weights for meta-train phase
parser.add_argument('--eval_weights', type=str, default=None) # The meta-trained weights for meta-eval phase
parser.add_argument('--meta_label', type=str, default='exp1') # Additional label for meta-train
# Parameters for pretain phase
parser.add_argument('--pre_max_epoch', type=int, default=100) # Epoch number for pre-train phase
parser.add_argument('--pre_batch_size', type=int, default=128) # Batch size for pre-train phase
parser.add_argument('--pre_lr', type=float, default=0.1) # Learning rate for pre-train phase
parser.add_argument('--pre_gamma', type=float, default=0.2) # Gamma for the pre-train learning rate decay
parser.add_argument('--pre_step_size', type=int, default=30) # The number of epochs to reduce the pre-train learning rate
parser.add_argument('--pre_custom_momentum', type=float, default=0.9) # Momentum for the optimizer during pre-train
parser.add_argument('--pre_custom_weight_decay', type=float, default=0.0005) # Weight decay for the optimizer during pre-train
# Set and print the parameters
args = parser.parse_args()
pprint(vars(args))
# Set the GPU id
set_gpu(args.gpu)
# Set manual seed for PyTorch
if args.seed==0:
print ('Using random seed.')
torch.backends.cudnn.benchmark = True
else:
print ('Using manual seed:', args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Start trainer for pre-train, meta-train or meta-eval
if args.phase=='meta_train':
trainer = MetaTrainer(args)
trainer.train()
elif args.phase=='meta_eval':
trainer = MetaTrainer(args)
trainer.eval()
elif args.phase=='pre_train':
trainer = PreTrainer(args)
trainer.train()
else:
raise ValueError('Please set correct phase.')
| 4,678 | 54.702381 | 133 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/run_pre.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Generate commands for pre-train phase. """
import os
def run_exp(lr=0.1, gamma=0.2, step_size=30):
max_epoch = 110
shot = 1
query = 15
way = 5
gpu = 1
base_lr = 0.01
the_command = 'python3 main.py' \
+ ' --pre_max_epoch=' + str(max_epoch) \
+ ' --shot=' + str(shot) \
+ ' --train_query=' + str(query) \
+ ' --way=' + str(way) \
+ ' --pre_step_size=' + str(step_size) \
+ ' --pre_gamma=' + str(gamma) \
+ ' --gpu=' + str(gpu) \
+ ' --base_lr=' + str(base_lr) \
+ ' --pre_lr=' + str(lr) \
+ ' --phase=pre_train'
os.system(the_command)
run_exp(lr=0.1, gamma=0.2, step_size=30)
| 1,079 | 29 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/run_meta.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Generate commands for meta-train phase. """
import os
def run_exp(num_batch=1000, shot=1, query=15, lr1=0.0001, lr2=0.001, base_lr=0.01, update_step=10, gamma=0.5):
max_epoch = 100
way = 5
step_size = 10
gpu = 1
the_command = 'python3 main.py' \
+ ' --max_epoch=' + str(max_epoch) \
+ ' --num_batch=' + str(num_batch) \
+ ' --shot=' + str(shot) \
+ ' --train_query=' + str(query) \
+ ' --way=' + str(way) \
+ ' --meta_lr1=' + str(lr1) \
+ ' --meta_lr2=' + str(lr2) \
+ ' --step_size=' + str(step_size) \
+ ' --gamma=' + str(gamma) \
+ ' --gpu=' + str(gpu) \
+ ' --base_lr=' + str(base_lr) \
+ ' --update_step=' + str(update_step)
os.system(the_command + ' --phase=meta_train')
os.system(the_command + ' --phase=meta_eval')
run_exp(num_batch=100, shot=1, query=15, lr1=0.0001, lr2=0.001, base_lr=0.01, update_step=100, gamma=0.5)
run_exp(num_batch=100, shot=5, query=15, lr1=0.0001, lr2=0.001, base_lr=0.01, update_step=100, gamma=0.5)
| 1,455 | 37.315789 | 110 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/trainer/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/trainer/meta.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for meta-train phase. """
import os.path as osp
import os
import tqdm
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataloader.samplers import CategoriesSampler
from models.mtl import MtlLearner
from utils.misc import Averager, Timer, count_acc, compute_confidence_interval, ensure_path
from tensorboardX import SummaryWriter
from dataloader.dataset_loader import DatasetLoader as Dataset
class MetaTrainer(object):
"""The class that contains the code for the meta-train phase and meta-eval phase."""
def __init__(self, args):
# Set the folder to save the records and checkpoints
log_base_dir = './logs/'
if not osp.exists(log_base_dir):
os.mkdir(log_base_dir)
meta_base_dir = osp.join(log_base_dir, 'meta')
if not osp.exists(meta_base_dir):
os.mkdir(meta_base_dir)
save_path1 = '_'.join([args.dataset, args.model_type, 'MTL'])
save_path2 = 'shot' + str(args.shot) + '_way' + str(args.way) + '_query' + str(args.train_query) + \
'_step' + str(args.step_size) + '_gamma' + str(args.gamma) + '_lr1' + str(args.meta_lr1) + '_lr2' + str(args.meta_lr2) + \
'_batch' + str(args.num_batch) + '_maxepoch' + str(args.max_epoch) + \
'_baselr' + str(args.base_lr) + '_updatestep' + str(args.update_step) + \
'_stepsize' + str(args.step_size) + '_' + args.meta_label
args.save_path = meta_base_dir + '/' + save_path1 + '_' + save_path2
ensure_path(args.save_path)
# Set args to be shareable in the class
self.args = args
# Load meta-train set
self.trainset = Dataset('train', self.args)
self.train_sampler = CategoriesSampler(self.trainset.label, self.args.num_batch, self.args.way, self.args.shot + self.args.train_query)
self.train_loader = DataLoader(dataset=self.trainset, batch_sampler=self.train_sampler, num_workers=8, pin_memory=True)
# Load meta-val set
self.valset = Dataset('val', self.args)
self.val_sampler = CategoriesSampler(self.valset.label, 600, self.args.way, self.args.shot + self.args.val_query)
self.val_loader = DataLoader(dataset=self.valset, batch_sampler=self.val_sampler, num_workers=8, pin_memory=True)
# Build meta-transfer learning model
self.model = MtlLearner(self.args)
# Set optimizer
self.optimizer = torch.optim.Adam([{'params': filter(lambda p: p.requires_grad, self.model.encoder.parameters())}, \
{'params': self.model.base_learner.parameters(), 'lr': self.args.meta_lr2}], lr=self.args.meta_lr1)
# Set learning rate scheduler
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.args.step_size, gamma=self.args.gamma)
# load pretrained model without FC classifier
self.model_dict = self.model.state_dict()
if self.args.init_weights is not None:
pretrained_dict = torch.load(self.args.init_weights)['params']
else:
pre_base_dir = osp.join(log_base_dir, 'pre')
pre_save_path1 = '_'.join([args.dataset, args.model_type])
pre_save_path2 = 'batchsize' + str(args.pre_batch_size) + '_lr' + str(args.pre_lr) + '_gamma' + str(args.pre_gamma) + '_step' + \
str(args.pre_step_size) + '_maxepoch' + str(args.pre_max_epoch)
pre_save_path = pre_base_dir + '/' + pre_save_path1 + '_' + pre_save_path2
pretrained_dict = torch.load(osp.join(pre_save_path, 'max_acc.pth'))['params']
pretrained_dict = {'encoder.'+k: v for k, v in pretrained_dict.items()}
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in self.model_dict}
print(pretrained_dict.keys())
self.model_dict.update(pretrained_dict)
self.model.load_state_dict(self.model_dict)
# Set model to GPU
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
self.model = self.model.cuda()
def save_model(self, name):
"""The function to save checkpoints.
Args:
name: the name for saved checkpoint
"""
torch.save(dict(params=self.model.state_dict()), osp.join(self.args.save_path, name + '.pth'))
def train(self):
"""The function for the meta-train phase."""
# Set the meta-train log
trlog = {}
trlog['args'] = vars(self.args)
trlog['train_loss'] = []
trlog['val_loss'] = []
trlog['train_acc'] = []
trlog['val_acc'] = []
trlog['max_acc'] = 0.0
trlog['max_acc_epoch'] = 0
# Set the timer
timer = Timer()
# Set global count to zero
global_count = 0
# Set tensorboardX
writer = SummaryWriter(comment=self.args.save_path)
# Generate the labels for train set of the episodes
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Start meta-train
for epoch in range(1, self.args.max_epoch + 1):
# Update learning rate
self.lr_scheduler.step()
# Set the model to train mode
self.model.train()
# Set averager classes to record training losses and accuracies
train_loss_averager = Averager()
train_acc_averager = Averager()
# Generate the labels for test set of the episodes during meta-train updates
label = torch.arange(self.args.way).repeat(self.args.train_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Using tqdm to read samples from train loader
tqdm_gen = tqdm.tqdm(self.train_loader)
for i, batch in enumerate(tqdm_gen, 1):
# Update global count number
global_count = global_count + 1
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
# Output logits for model
logits = self.model((data_shot, label_shot, data_query))
# Calculate meta-train loss
loss = F.cross_entropy(logits, label)
# Calculate meta-train accuracy
acc = count_acc(logits, label)
# Write the tensorboardX records
writer.add_scalar('data/loss', float(loss), global_count)
writer.add_scalar('data/acc', float(acc), global_count)
# Print loss and accuracy for this step
tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f}'.format(epoch, loss.item(), acc))
# Add loss and accuracy for the averagers
train_loss_averager.add(loss.item())
train_acc_averager.add(acc)
# Loss backwards and optimizer updates
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update the averagers
train_loss_averager = train_loss_averager.item()
train_acc_averager = train_acc_averager.item()
# Start validation for this epoch, set model to eval mode
self.model.eval()
# Set averager classes to record validation losses and accuracies
val_loss_averager = Averager()
val_acc_averager = Averager()
# Generate the labels for test set of the episodes during meta-val for this epoch
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Print previous information
if epoch % 10 == 0:
print('Best Epoch {}, Best Val Acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))
# Run meta-validation
for i, batch in enumerate(self.val_loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
logits = self.model((data_shot, label_shot, data_query))
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
val_loss_averager.add(loss.item())
val_acc_averager.add(acc)
# Update validation averagers
val_loss_averager = val_loss_averager.item()
val_acc_averager = val_acc_averager.item()
# Write the tensorboardX records
writer.add_scalar('data/val_loss', float(val_loss_averager), epoch)
writer.add_scalar('data/val_acc', float(val_acc_averager), epoch)
# Print loss and accuracy for this epoch
print('Epoch {}, Val, Loss={:.4f} Acc={:.4f}'.format(epoch, val_loss_averager, val_acc_averager))
# Update best saved model
if val_acc_averager > trlog['max_acc']:
trlog['max_acc'] = val_acc_averager
trlog['max_acc_epoch'] = epoch
self.save_model('max_acc')
# Save model every 10 epochs
if epoch % 10 == 0:
self.save_model('epoch'+str(epoch))
# Update the logs
trlog['train_loss'].append(train_loss_averager)
trlog['train_acc'].append(train_acc_averager)
trlog['val_loss'].append(val_loss_averager)
trlog['val_acc'].append(val_acc_averager)
# Save log
torch.save(trlog, osp.join(self.args.save_path, 'trlog'))
if epoch % 10 == 0:
print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.max_epoch)))
writer.close()
def eval(self):
"""The function for the meta-eval phase."""
# Load the logs
trlog = torch.load(osp.join(self.args.save_path, 'trlog'))
# Load meta-test set
test_set = Dataset('test', self.args)
sampler = CategoriesSampler(test_set.label, 600, self.args.way, self.args.shot + self.args.val_query)
loader = DataLoader(test_set, batch_sampler=sampler, num_workers=8, pin_memory=True)
# Set test accuracy recorder
test_acc_record = np.zeros((600,))
# Load model for meta-test phase
if self.args.eval_weights is not None:
self.model.load_state_dict(torch.load(self.args.eval_weights)['params'])
else:
self.model.load_state_dict(torch.load(osp.join(self.args.save_path, 'max_acc' + '.pth'))['params'])
# Set model to eval mode
self.model.eval()
# Set accuracy averager
ave_acc = Averager()
# Generate labels
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Start meta-test
for i, batch in enumerate(loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
k = self.args.way * self.args.shot
data_shot, data_query = data[:k], data[k:]
logits = self.model((data_shot, label_shot, data_query))
acc = count_acc(logits, label)
ave_acc.add(acc)
test_acc_record[i-1] = acc
if i % 100 == 0:
print('batch {}: {:.2f}({:.2f})'.format(i, ave_acc.item() * 100, acc * 100))
# Calculate the confidence interval, update the logs
m, pm = compute_confidence_interval(test_acc_record)
print('Val Best Epoch {}, Acc {:.4f}, Test Acc {:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc'], ave_acc.item()))
print('Test Acc {:.4f} + {:.4f}'.format(m, pm))
| 13,374 | 44.493197 | 143 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/trainer/pre.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for pretrain phase. """
import os.path as osp
import os
import tqdm
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader
from dataloader.samplers import CategoriesSampler
from models.mtl import MtlLearner
from utils.misc import Averager, Timer, count_acc, ensure_path
from tensorboardX import SummaryWriter
from dataloader.dataset_loader import DatasetLoader as Dataset
class PreTrainer(object):
"""The class that contains the code for the pretrain phase."""
def __init__(self, args):
# Set the folder to save the records and checkpoints
log_base_dir = './logs/'
if not osp.exists(log_base_dir):
os.mkdir(log_base_dir)
pre_base_dir = osp.join(log_base_dir, 'pre')
if not osp.exists(pre_base_dir):
os.mkdir(pre_base_dir)
save_path1 = '_'.join([args.dataset, args.model_type])
save_path2 = 'batchsize' + str(args.pre_batch_size) + '_lr' + str(args.pre_lr) + '_gamma' + str(args.pre_gamma) + '_step' + \
str(args.pre_step_size) + '_maxepoch' + str(args.pre_max_epoch)
args.save_path = pre_base_dir + '/' + save_path1 + '_' + save_path2
ensure_path(args.save_path)
# Set args to be shareable in the class
self.args = args
# Load pretrain set
self.trainset = Dataset('train', self.args, train_aug=True)
self.train_loader = DataLoader(dataset=self.trainset, batch_size=args.pre_batch_size, shuffle=True, num_workers=8, pin_memory=True)
# Load meta-val set
self.valset = Dataset('val', self.args)
self.val_sampler = CategoriesSampler(self.valset.label, 600, self.args.way, self.args.shot + self.args.val_query)
self.val_loader = DataLoader(dataset=self.valset, batch_sampler=self.val_sampler, num_workers=8, pin_memory=True)
# Set pretrain class number
num_class_pretrain = self.trainset.num_class
# Build pretrain model
self.model = MtlLearner(self.args, mode='pre', num_cls=num_class_pretrain)
# Set optimizer
self.optimizer = torch.optim.SGD([{'params': self.model.encoder.parameters(), 'lr': self.args.pre_lr}, \
{'params': self.model.pre_fc.parameters(), 'lr': self.args.pre_lr}], \
momentum=self.args.pre_custom_momentum, nesterov=True, weight_decay=self.args.pre_custom_weight_decay)
# Set learning rate scheduler
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=self.args.pre_step_size, \
gamma=self.args.pre_gamma)
# Set model to GPU
if torch.cuda.is_available():
torch.backends.cudnn.benchmark = True
self.model = self.model.cuda()
def save_model(self, name):
"""The function to save checkpoints.
Args:
name: the name for saved checkpoint
"""
torch.save(dict(params=self.model.encoder.state_dict()), osp.join(self.args.save_path, name + '.pth'))
def train(self):
"""The function for the pre-train phase."""
# Set the pretrain log
trlog = {}
trlog['args'] = vars(self.args)
trlog['train_loss'] = []
trlog['val_loss'] = []
trlog['train_acc'] = []
trlog['val_acc'] = []
trlog['max_acc'] = 0.0
trlog['max_acc_epoch'] = 0
# Set the timer
timer = Timer()
# Set global count to zero
global_count = 0
# Set tensorboardX
writer = SummaryWriter(comment=self.args.save_path)
# Start pretrain
for epoch in range(1, self.args.pre_max_epoch + 1):
# Update learning rate
self.lr_scheduler.step()
# Set the model to train mode
self.model.train()
self.model.mode = 'pre'
# Set averager classes to record training losses and accuracies
train_loss_averager = Averager()
train_acc_averager = Averager()
# Using tqdm to read samples from train loader
tqdm_gen = tqdm.tqdm(self.train_loader)
for i, batch in enumerate(tqdm_gen, 1):
# Update global count number
global_count = global_count + 1
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
label = batch[1]
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
# Output logits for model
logits = self.model(data)
# Calculate train loss
loss = F.cross_entropy(logits, label)
# Calculate train accuracy
acc = count_acc(logits, label)
# Write the tensorboardX records
writer.add_scalar('data/loss', float(loss), global_count)
writer.add_scalar('data/acc', float(acc), global_count)
# Print loss and accuracy for this step
tqdm_gen.set_description('Epoch {}, Loss={:.4f} Acc={:.4f}'.format(epoch, loss.item(), acc))
# Add loss and accuracy for the averagers
train_loss_averager.add(loss.item())
train_acc_averager.add(acc)
# Loss backwards and optimizer updates
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# Update the averagers
train_loss_averager = train_loss_averager.item()
train_acc_averager = train_acc_averager.item()
# Start validation for this epoch, set model to eval mode
self.model.eval()
self.model.mode = 'preval'
# Set averager classes to record validation losses and accuracies
val_loss_averager = Averager()
val_acc_averager = Averager()
# Generate the labels for test
label = torch.arange(self.args.way).repeat(self.args.val_query)
if torch.cuda.is_available():
label = label.type(torch.cuda.LongTensor)
else:
label = label.type(torch.LongTensor)
label_shot = torch.arange(self.args.way).repeat(self.args.shot)
if torch.cuda.is_available():
label_shot = label_shot.type(torch.cuda.LongTensor)
else:
label_shot = label_shot.type(torch.LongTensor)
# Print previous information
if epoch % 10 == 0:
print('Best Epoch {}, Best Val acc={:.4f}'.format(trlog['max_acc_epoch'], trlog['max_acc']))
# Run meta-validation
for i, batch in enumerate(self.val_loader, 1):
if torch.cuda.is_available():
data, _ = [_.cuda() for _ in batch]
else:
data = batch[0]
p = self.args.shot * self.args.way
data_shot, data_query = data[:p], data[p:]
logits = self.model((data_shot, label_shot, data_query))
loss = F.cross_entropy(logits, label)
acc = count_acc(logits, label)
val_loss_averager.add(loss.item())
val_acc_averager.add(acc)
# Update validation averagers
val_loss_averager = val_loss_averager.item()
val_acc_averager = val_acc_averager.item()
# Write the tensorboardX records
writer.add_scalar('data/val_loss', float(val_loss_averager), epoch)
writer.add_scalar('data/val_acc', float(val_acc_averager), epoch)
# Print loss and accuracy for this epoch
print('Epoch {}, Val, Loss={:.4f} Acc={:.4f}'.format(epoch, val_loss_averager, val_acc_averager))
# Update best saved model
if val_acc_averager > trlog['max_acc']:
trlog['max_acc'] = val_acc_averager
trlog['max_acc_epoch'] = epoch
self.save_model('max_acc')
# Save model every 10 epochs
if epoch % 10 == 0:
self.save_model('epoch'+str(epoch))
# Update the logs
trlog['train_loss'].append(train_loss_averager)
trlog['train_acc'].append(train_acc_averager)
trlog['val_loss'].append(val_loss_averager)
trlog['val_acc'].append(val_acc_averager)
# Save log
torch.save(trlog, osp.join(self.args.save_path, 'trlog'))
if epoch % 10 == 0:
                print('Running Time: {}, Estimated Time: {}'.format(timer.measure(), timer.measure(epoch / self.args.pre_max_epoch)))
writer.close()
| 9,314 | 42.528037 | 139 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Model for meta-transfer learning. """
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.resnet_mtl import ResNetMtl
class BaseLearner(nn.Module):
"""The class for inner loop."""
def __init__(self, args, z_dim):
super().__init__()
self.args = args
self.z_dim = z_dim
self.vars = nn.ParameterList()
self.fc1_w = nn.Parameter(torch.ones([self.args.way, self.z_dim]))
torch.nn.init.kaiming_normal_(self.fc1_w)
self.vars.append(self.fc1_w)
self.fc1_b = nn.Parameter(torch.zeros(self.args.way))
self.vars.append(self.fc1_b)
def forward(self, input_x, the_vars=None):
if the_vars is None:
the_vars = self.vars
fc1_w = the_vars[0]
fc1_b = the_vars[1]
net = F.linear(input_x, fc1_w, fc1_b)
return net
def parameters(self):
return self.vars
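# Usage sketch (shapes are illustrative): for a 5-way task with 640-d embeddings,
#   learner = BaseLearner(args, z_dim=640)
#   logits = learner(embeddings)                # uses the learner's own FC weights
#   logits = learner(embeddings, fast_weights)  # or any adapted [weight, bias] list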
class MtlLearner(nn.Module):
"""The class for outer loop."""
def __init__(self, args, mode='meta', num_cls=64):
super().__init__()
self.args = args
self.mode = mode
self.update_lr = args.base_lr
self.update_step = args.update_step
z_dim = 640
self.base_learner = BaseLearner(args, z_dim)
if self.mode == 'meta':
self.encoder = ResNetMtl()
else:
self.encoder = ResNetMtl(mtl=False)
self.pre_fc = nn.Sequential(nn.Linear(640, 1000), nn.ReLU(), nn.Linear(1000, num_cls))
def forward(self, inp):
"""The function to forward the model.
Args:
inp: input images.
Returns:
the outputs of MTL model.
"""
if self.mode=='pre':
return self.pretrain_forward(inp)
elif self.mode=='meta':
data_shot, label_shot, data_query = inp
return self.meta_forward(data_shot, label_shot, data_query)
elif self.mode=='preval':
data_shot, label_shot, data_query = inp
return self.preval_forward(data_shot, label_shot, data_query)
else:
raise ValueError('Please set the correct mode.')
def pretrain_forward(self, inp):
"""The function to forward pretrain phase.
Args:
inp: input images.
Returns:
the outputs of pretrain model.
"""
return self.pre_fc(self.encoder(inp))
def meta_forward(self, data_shot, label_shot, data_query):
"""The function to forward meta-train phase.
Args:
data_shot: train images for the task
label_shot: train labels for the task
data_query: test images for the task.
Returns:
logits_q: the predictions for the test samples.
"""
embedding_query = self.encoder(data_query)
embedding_shot = self.encoder(data_shot)
logits = self.base_learner(embedding_shot)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, self.base_learner.parameters())
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.base_learner.parameters())))
logits_q = self.base_learner(embedding_query, fast_weights)
for _ in range(1, self.update_step):
logits = self.base_learner(embedding_shot, fast_weights)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, fast_weights)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
logits_q = self.base_learner(embedding_query, fast_weights)
return logits_q
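    # The loop above is a MAML-style inner loop on the support (shot) set:
    # fast_weights <- fast_weights - base_lr * grad(CE(support), fast_weights),
    # and only the final fast_weights are used to score the query images.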
def preval_forward(self, data_shot, label_shot, data_query):
"""The function to forward meta-validation during pretrain phase.
Args:
data_shot: train images for the task
label_shot: train labels for the task
data_query: test images for the task.
Returns:
logits_q: the predictions for the test samples.
"""
embedding_query = self.encoder(data_query)
embedding_shot = self.encoder(data_shot)
logits = self.base_learner(embedding_shot)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, self.base_learner.parameters())
fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, self.base_learner.parameters())))
logits_q = self.base_learner(embedding_query, fast_weights)
for _ in range(1, 100):
logits = self.base_learner(embedding_shot, fast_weights)
loss = F.cross_entropy(logits, label_shot)
grad = torch.autograd.grad(loss, fast_weights)
fast_weights = list(map(lambda p: p[1] - 0.01 * p[0], zip(grad, fast_weights)))
logits_q = self.base_learner(embedding_query, fast_weights)
return logits_q
| 5,292 | 38.796992 | 115 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/conv2d_mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/pytorch/pytorch
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" MTL CONV layers. """
import math
import torch
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from torch.nn.modules.utils import _pair
class _ConvNdMtl(Module):
"""The class for meta-transfer convolution"""
def __init__(self, in_channels, out_channels, kernel_size, stride,
padding, dilation, transposed, output_padding, groups, bias):
super(_ConvNdMtl, self).__init__()
if in_channels % groups != 0:
raise ValueError('in_channels must be divisible by groups')
if out_channels % groups != 0:
raise ValueError('out_channels must be divisible by groups')
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
self.dilation = dilation
self.transposed = transposed
self.output_padding = output_padding
self.groups = groups
if transposed:
self.weight = Parameter(torch.Tensor(
in_channels, out_channels // groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(in_channels, out_channels // groups, 1, 1))
else:
self.weight = Parameter(torch.Tensor(
out_channels, in_channels // groups, *kernel_size))
self.mtl_weight = Parameter(torch.ones(out_channels, in_channels // groups, 1, 1))
self.weight.requires_grad=False
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
self.bias.requires_grad=False
self.mtl_bias = Parameter(torch.zeros(out_channels))
else:
self.register_parameter('bias', None)
self.register_parameter('mtl_bias', None)
self.reset_parameters()
def reset_parameters(self):
n = self.in_channels
for k in self.kernel_size:
n *= k
stdv = 1. / math.sqrt(n)
self.weight.data.uniform_(-stdv, stdv)
self.mtl_weight.data.uniform_(1, 1)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
self.mtl_bias.data.uniform_(0, 0)
def extra_repr(self):
s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'
', stride={stride}')
if self.padding != (0,) * len(self.padding):
s += ', padding={padding}'
if self.dilation != (1,) * len(self.dilation):
s += ', dilation={dilation}'
if self.output_padding != (0,) * len(self.output_padding):
s += ', output_padding={output_padding}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
class Conv2dMtl(_ConvNdMtl):
"""The class for meta-transfer convolution"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True):
kernel_size = _pair(kernel_size)
stride = _pair(stride)
padding = _pair(padding)
dilation = _pair(dilation)
super(Conv2dMtl, self).__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, _pair(0), groups, bias)
def forward(self, inp):
new_mtl_weight = self.mtl_weight.expand(self.weight.shape)
new_weight = self.weight.mul(new_mtl_weight)
if self.bias is not None:
new_bias = self.bias + self.mtl_bias
else:
new_bias = None
return F.conv2d(inp, new_weight, new_bias, self.stride,
self.padding, self.dilation, self.groups)
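# Example (illustrative sketch; the layer sizes and input shape are arbitrary
# assumptions): only the per-channel mtl_weight / mtl_bias require gradients,
# while the frozen base kernel is reused through elementwise scaling, which is
# the scaling-and-shifting idea behind meta-transfer learning.
if __name__ == '__main__':
    layer = Conv2dMtl(3, 16, kernel_size=3, padding=1)
    out = layer(torch.randn(2, 3, 32, 32))
    print(out.shape)  # torch.Size([2, 16, 32, 32])
    print(layer.weight.requires_grad, layer.mtl_weight.requires_grad)  # False True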
| 4,195 | 40.137255 | 94 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/resnet_mtl.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" ResNet with MTL. """
import torch.nn as nn
from models.conv2d_mtl import Conv2dMtl
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def conv3x3mtl(in_planes, out_planes, stride=1):
return Conv2dMtl(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlockMtl(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlockMtl, self).__init__()
self.conv1 = conv3x3mtl(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3mtl(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BottleneckMtl(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BottleneckMtl, self).__init__()
self.conv1 = Conv2dMtl(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = Conv2dMtl(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = Conv2dMtl(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNetMtl(nn.Module):
def __init__(self, layers=[4, 4, 4], mtl=True):
super(ResNetMtl, self).__init__()
if mtl:
self.Conv2d = Conv2dMtl
block = BasicBlockMtl
else:
self.Conv2d = nn.Conv2d
block = BasicBlock
cfg = [160, 320, 640]
self.inplanes = iChannels = int(cfg[0]/2)
self.conv1 = self.Conv2d(3, iChannels, kernel_size=3, stride=1, padding=1)
self.bn1 = nn.BatchNorm2d(iChannels)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, cfg[0], layers[0], stride=2)
self.layer2 = self._make_layer(block, cfg[1], layers[1], stride=2)
self.layer3 = self._make_layer(block, cfg[2], layers[2], stride=2)
self.avgpool = nn.AvgPool2d(10, stride=1)
for m in self.modules():
if isinstance(m, self.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
self.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
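# Example (illustrative sketch; the batch size and the 80x80 input resolution are
# assumptions matching the dataloader's default crop): the three stride-2 stages
# reduce an 80x80 input to a 10x10 map with 640 channels, so the flattened
# embedding returned by forward() is 640-dimensional.
if __name__ == '__main__':
    import torch
    encoder = ResNetMtl(mtl=False)  # plain nn.Conv2d backbone without SS weights
    feats = encoder(torch.randn(2, 3, 80, 80))
    print(feats.shape)  # torch.Size([2, 640])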
| 6,842 | 30.246575 | 90 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/models/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/utils/misc.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Additional utility functions. """
import os
import time
import pprint
import torch
import numpy as np
import torch.nn.functional as F
def ensure_path(path):
"""The function to make log path.
Args:
path: the generated saving path.
"""
    if not os.path.exists(path):
        os.mkdir(path)
class Averager():
"""The class to calculate the average."""
def __init__(self):
self.n = 0
self.v = 0
def add(self, x):
self.v = (self.v * self.n + x) / (self.n + 1)
self.n += 1
def item(self):
return self.v
def count_acc(logits, label):
"""The function to calculate the .
Args:
logits: input logits.
label: ground truth labels.
Return:
The output accuracy.
"""
pred = F.softmax(logits, dim=1).argmax(dim=1)
if torch.cuda.is_available():
return (pred == label).type(torch.cuda.FloatTensor).mean().item()
return (pred == label).type(torch.FloatTensor).mean().item()
class Timer():
"""The class for timer."""
def __init__(self):
self.o = time.time()
def measure(self, p=1):
x = (time.time() - self.o) / p
x = int(x)
if x >= 3600:
return '{:.1f}h'.format(x / 3600)
if x >= 60:
return '{}m'.format(round(x / 60))
return '{}s'.format(x)
_utils_pp = pprint.PrettyPrinter()
def pprint(x):
_utils_pp.pprint(x)
def compute_confidence_interval(data):
"""The function to calculate the .
Args:
data: input records
label: ground truth labels.
Return:
m: mean value
pm: confidence interval.
"""
a = 1.0 * np.array(data)
m = np.mean(a)
std = np.std(a)
pm = 1.96 * (std / np.sqrt(len(a)))
return m, pm
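# Example (illustrative values): compute_confidence_interval returns the sample
# mean and the half-width of a normal-approximation 95% interval, 1.96*std/sqrt(n).
if __name__ == '__main__':
    m, pm = compute_confidence_interval([0.61, 0.58, 0.64, 0.60, 0.62])
    print('{:.4f} +- {:.4f}'.format(m, pm))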
| 2,219 | 24.227273 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/utils/gpu_tools.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Tools for GPU. """
import os
import torch
def set_gpu(cuda_device):
os.environ['CUDA_VISIBLE_DEVICES'] = cuda_device
print('Using gpu:', cuda_device)
| 547 | 31.235294 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/utils/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/dataloader/dataset_loader.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Dataloader for all datasets. """
import os.path as osp
import os
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
class DatasetLoader(Dataset):
"""The class to load the dataset"""
def __init__(self, setname, args, train_aug=False):
# Set the path according to train, val and test
if setname=='train':
THE_PATH = osp.join(args.dataset_dir, 'train')
label_list = os.listdir(THE_PATH)
elif setname=='test':
THE_PATH = osp.join(args.dataset_dir, 'test')
label_list = os.listdir(THE_PATH)
elif setname=='val':
THE_PATH = osp.join(args.dataset_dir, 'val')
label_list = os.listdir(THE_PATH)
else:
raise ValueError('Wrong setname.')
# Generate empty list for data and label
data = []
label = []
# Get folders' name
folders = [osp.join(THE_PATH, the_label) for the_label in label_list if os.path.isdir(osp.join(THE_PATH, the_label))]
# Get the images' paths and labels
for idx, this_folder in enumerate(folders):
this_folder_images = os.listdir(this_folder)
for image_path in this_folder_images:
data.append(osp.join(this_folder, image_path))
label.append(idx)
        # Set data, label and class number to be accessible from outside
self.data = data
self.label = label
self.num_class = len(set(label))
# Transformation
if train_aug:
image_size = 80
self.transform = transforms.Compose([
transforms.Resize(92),
transforms.RandomResizedCrop(88),
transforms.CenterCrop(image_size),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))])
else:
image_size = 80
self.transform = transforms.Compose([
transforms.Resize(92),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize(np.array([x / 255.0 for x in [125.3, 123.0, 113.9]]),
np.array([x / 255.0 for x in [63.0, 62.1, 66.7]]))])
def __len__(self):
return len(self.data)
def __getitem__(self, i):
path, label = self.data[i], self.label[i]
image = self.transform(Image.open(path).convert('RGB'))
return image, label
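# Example (illustrative sketch; `dataset_dir` is an assumed path that must contain
# train/val/test sub-folders of per-class image directories, as required above):
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(dataset_dir='./data/mini-imagenet')
    trainset = DatasetLoader('train', args, train_aug=True)
    print(len(trainset), trainset.num_class)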
| 3,153 | 37.463415 | 125 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/dataloader/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/pytorch/dataloader/samplers.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/Sha-Lab/FEAT
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Sampler for dataloader. """
import torch
import numpy as np
class CategoriesSampler():
"""The class to generate episodic data"""
def __init__(self, label, n_batch, n_cls, n_per):
self.n_batch = n_batch
self.n_cls = n_cls
self.n_per = n_per
label = np.array(label)
self.m_ind = []
for i in range(max(label) + 1):
ind = np.argwhere(label == i).reshape(-1)
ind = torch.from_numpy(ind)
self.m_ind.append(ind)
def __len__(self):
return self.n_batch
def __iter__(self):
for i_batch in range(self.n_batch):
batch = []
classes = torch.randperm(len(self.m_ind))[:self.n_cls]
for c in classes:
l = self.m_ind[c]
pos = torch.randperm(len(l))[:self.n_per]
batch.append(l[pos])
batch = torch.stack(batch).t().reshape(-1)
yield batch
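# Example (illustrative toy labels): each yielded batch holds n_cls * n_per indices;
# after the transpose and reshape above they are interleaved so that the first
# n_cls entries are one sample from each sampled class, the next n_cls entries the
# second sample, and so on.
if __name__ == '__main__':
    sampler = CategoriesSampler([0, 0, 0, 1, 1, 1, 2, 2, 2], n_batch=2, n_cls=2, n_per=3)
    for batch in sampler:
        print(batch)  # a 1-D LongTensor of 6 dataset indices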
| 1,381 | 32.707317 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/main.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import os
from tensorflow.python.platform import flags
from trainer.meta import MetaTrainer
from trainer.pre import PreTrainer
FLAGS = flags.FLAGS
### Basic options
flags.DEFINE_integer('img_size', 84, 'image size')
flags.DEFINE_integer('device_id', 0, 'GPU device ID to run the job.')
flags.DEFINE_float('gpu_rate', 0.9, 'the parameter for the full_gpu_memory_mode')
flags.DEFINE_string('phase', 'meta', 'pre or meta')
flags.DEFINE_string('exp_log_label', 'experiment_results', 'directory for summaries and checkpoints')
flags.DEFINE_string('logdir_base', './logs/', 'directory for logs')
flags.DEFINE_bool('full_gpu_memory_mode', False, 'in this mode, the code occupies GPU memory in advance')
flags.DEFINE_string('backbone_arch', 'resnet12', 'network backbone')
### Pre-train phase options
flags.DEFINE_integer('pre_lr_dropstep', 5000, 'the step number to drop pre_lr')
flags.DEFINE_integer('pretrain_class_num', 64, 'number of classes used in the pre-train phase')
flags.DEFINE_integer('pretrain_batch_size', 64, 'batch_size for the pre-train phase')
flags.DEFINE_integer('pretrain_iterations', 30000, 'number of pretraining iterations.')
flags.DEFINE_integer('pre_sum_step', 10, 'the step number to summary during pretraining')
flags.DEFINE_integer('pre_save_step', 1000, 'the step number to save the pretrain model')
flags.DEFINE_integer('pre_print_step', 1000, 'the step number to print the pretrain results')
flags.DEFINE_float('pre_lr', 0.001, 'the pretrain learning rate')
flags.DEFINE_float('min_pre_lr', 0.0001, 'the pretrain learning rate min')
flags.DEFINE_float('pretrain_dropout_keep', 0.9, 'the dropout keep parameter in the pre-train phase')
flags.DEFINE_string('pretrain_folders', './data/mini-imagenet/train', 'directory for pre-train data')
flags.DEFINE_string('pretrain_label', 'mini_normal', 'additional label for the pre-train log folder')
flags.DEFINE_bool('pre_lr_stop', False, 'whether stop decrease the pre_lr when it is low')
### Meta phase options
flags.DEFINE_integer('way_num', 5, 'number of classes (e.g. 5-way classification)')
flags.DEFINE_integer('shot_num', 1, 'number of examples per class (K for K-shot learning)')
flags.DEFINE_integer('metatrain_epite_sample_num', 15, 'number of meta train episode-test samples')
flags.DEFINE_integer('metatest_epite_sample_num', 0, 'number of meta test episode-test samples, 0 means metatest_epite_sample_num=shot_num')
flags.DEFINE_integer('meta_sum_step', 10, 'the step number to summary during meta-training')
flags.DEFINE_integer('meta_save_step', 500, 'the step number to save the model')
flags.DEFINE_integer('meta_intrain_val_sample', 600, 'the number of samples used for val during meta-train')
flags.DEFINE_integer('meta_print_step', 100, 'the step number to print the meta-train results')
flags.DEFINE_integer('meta_val_print_step', 100, 'the step number to print the meta-val results during meta-training')
flags.DEFINE_integer('metatrain_iterations', 15000, 'number of meta-train iterations.')
flags.DEFINE_integer('meta_batch_size', 2, 'number of tasks sampled per meta-update')
flags.DEFINE_integer('train_base_epoch_num', 20, 'number of inner gradient updates during training.')
flags.DEFINE_integer('test_base_epoch_num', 100, 'number of inner gradient updates during test.')
flags.DEFINE_integer('lr_drop_step', 5000, 'the step number to drop meta_lr')
flags.DEFINE_integer('test_iter', 1000, 'iteration to load model')
flags.DEFINE_float('meta_lr', 0.001, 'the meta learning rate of the generator')
flags.DEFINE_float('lr_drop_rate', 0.5, 'the step number to drop meta_lr')
flags.DEFINE_float('min_meta_lr', 0.0001, 'the min meta learning rate of the generator')
flags.DEFINE_float('base_lr', 1e-3, 'step size alpha for inner gradient update.')
flags.DEFINE_string('metatrain_dir', './data/mini-imagenet/train', 'directory for meta-train set')
flags.DEFINE_string('metaval_dir', './data/mini-imagenet/val', 'directory for meta-val set')
flags.DEFINE_string('metatest_dir', './data/mini-imagenet/test', 'directory for meta-test set')
flags.DEFINE_string('activation', 'leaky_relu', 'leaky_relu, relu, or None')
flags.DEFINE_string('norm', 'batch_norm', 'batch_norm, layer_norm, or None')
flags.DEFINE_bool('metatrain', True, 'is this the meta-train phase')
flags.DEFINE_bool('base_augmentation', True, 'whether do data augmentation during base learning')
flags.DEFINE_bool('redo_init', True, 're-build the initialization weights')
flags.DEFINE_bool('load_saved_weights', False, 'load the downloaded weights')
# Generate experiment key words string
exp_string = 'arch(' + FLAGS.backbone_arch + ')'
exp_string += '.cls(' + str(FLAGS.way_num) + ')'
exp_string += '.shot(' + str(FLAGS.shot_num) + ')'
exp_string += '.meta_batch(' + str(FLAGS.meta_batch_size) + ')'
exp_string += '.base_epoch(' + str(FLAGS.train_base_epoch_num) + ')'
exp_string += '.meta_lr(' + str(FLAGS.meta_lr) + ')'
exp_string += '.base_lr(' + str(FLAGS.base_lr) + ')'
exp_string += '.pre_iterations(' + str(FLAGS.pretrain_iterations) + ')'
exp_string += '.pre_dropout(' + str(FLAGS.pretrain_dropout_keep) + ')'
exp_string += '.acti(' + str(FLAGS.activation) + ')'
exp_string += '.lr_drop_step(' + str(FLAGS.lr_drop_step) + ')'
exp_string += '.lr_drop_rate(' + str(FLAGS.lr_drop_rate) + ')'
exp_string += '.pre_label(' + str(FLAGS.pretrain_label) + ')'
if FLAGS.norm == 'batch_norm':
exp_string += '.norm(batch)'
elif FLAGS.norm == 'layer_norm':
exp_string += '.norm(layer)'
elif FLAGS.norm == 'None':
exp_string += '.norm(none)'
else:
raise Exception('Norm setting is not recognized')
FLAGS.exp_string = exp_string
print('Parameters: ' + exp_string)
# Generate pre-train key words string
pre_save_str = 'arch(' + FLAGS.backbone_arch + ')'
pre_save_str += '.pre_lr(' + str(FLAGS.pre_lr) + ')'
pre_save_str += '.pre_lrdrop(' + str(FLAGS.pre_lr_dropstep) + ')'
pre_save_str += '.pre_class(' + str(FLAGS.pretrain_class_num) + ')'
pre_save_str += '.pre_batch(' + str(FLAGS.pretrain_batch_size) + ')'
pre_save_str += '.pre_dropout(' + str(FLAGS.pretrain_dropout_keep) + ')'
if FLAGS.pre_lr_stop:
pre_save_str += '.pre_lr_stop(True)'
else:
pre_save_str += '.pre_lr_stop(False)'
pre_save_str += '.pre_label(' + FLAGS.pretrain_label + ')'
FLAGS.pre_string = pre_save_str
# Generate log folders
FLAGS.logdir = FLAGS.logdir_base + FLAGS.exp_log_label
FLAGS.pretrain_dir = FLAGS.logdir_base + 'pretrain_weights'
if not os.path.exists(FLAGS.logdir_base):
os.mkdir(FLAGS.logdir_base)
if not os.path.exists(FLAGS.logdir):
os.mkdir(FLAGS.logdir)
if not os.path.exists(FLAGS.pretrain_dir):
os.mkdir(FLAGS.pretrain_dir)
# If FLAGS.redo_init is true, delete the previously saved initialization weights.
if FLAGS.redo_init:
    if os.path.exists('./logs/init_weights'):
os.system('rm -r ./logs/init_weights')
print('Init weights have been deleted')
else:
print('No init weights')
def main():
# Set GPU device id
print('Using GPU ' + str(FLAGS.device_id))
os.environ['CUDA_VISIBLE_DEVICES'] = str(FLAGS.device_id)
# Select pre-train phase or meta-learning phase
if FLAGS.phase=='pre':
trainer = PreTrainer()
elif FLAGS.phase=='meta':
trainer = MetaTrainer()
else:
raise Exception('Please set correct phase')
if __name__ == "__main__":
main()
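# Example invocations (illustrative; the paths and values are assumptions, see the
# flag definitions above for the full option list):
#   python main.py --phase=pre --pretrain_iterations=20000 --pretrain_folders=./data/mini-imagenet/train
#   python main.py --phase=meta --way_num=5 --shot_num=1 --metatrain=True
#   python main.py --phase=meta --metatrain=False --test_iter=1000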
| 7,702 | 51.401361 | 140 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/run_experiment.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Generate commands for main.py """
import os
import sys
def run_experiment(PHASE='META'):
"""The function to generate commands to run the experiments.
Arg:
PHASE: the phase for MTL. 'PRE' means pre-train phase, and 'META' means meta-train and meta-test phases.
"""
# Some important options
# Please note that not all the options are shown here. For more detailed options, please edit main.py
# Basic options
LOG_DIR = 'experiment_results' # Name of the folder to save the log files
GPU_ID = 1 # GPU device id
    NET_ARCH = 'resnet12' # Network backbone architecture
# Pre-train phase options
PRE_TRA_LABEL = 'normal' # Additional label for pre-train model
PRE_TRA_ITER_MAX = 20000 # Iteration number for the pre-train phase
PRE_TRA_DROP = 0.9 # Dropout keep rate for the pre-train phase
PRE_DROP_STEP = 5000 # Iteration number for the pre-train learning rate reducing
PRE_LR = 0.001 # Pre-train learning rate
# Meta options
SHOT_NUM = 1 # Shot number for the few-shot tasks
WAY_NUM = 5 # Class number for the few-shot tasks
MAX_ITER = 20000 # Iteration number for meta-train
META_BATCH_SIZE = 2 # Meta batch size
PRE_ITER = 10000 # Iteration number for the pre-train model used in the meta-train phase
UPDATE_NUM = 20 # Epoch number for the base learning
SAVE_STEP = 100 # Iteration number to save the meta model
META_LR = 0.001 # Meta learning rate
META_LR_MIN = 0.0001 # Meta learning rate min value
LR_DROP_STEP = 1000 # The iteration number for the meta learning rate reducing
BASE_LR = 0.001 # Base learning rate
# Data directories
PRE_TRA_DIR = './data/mini-imagenet/train' # Directory for the pre-train phase images
META_TRA_DIR = './data/mini-imagenet/train' # Directory for the meta-train images
META_VAL_DIR = './data/mini-imagenet/val' # Directory for the meta-validation images
META_TES_DIR = './data/mini-imagenet/test' # Directory for the meta-test images
# Generate the base command for main.py
base_command = 'python main.py' \
+ ' --backbone_arch=' + str(NET_ARCH) \
+ ' --metatrain_iterations=' + str(MAX_ITER) \
+ ' --meta_batch_size=' + str(META_BATCH_SIZE) \
+ ' --shot_num=' + str(SHOT_NUM) \
+ ' --meta_lr=' + str(META_LR) \
+ ' --min_meta_lr=' + str(META_LR_MIN) \
+ ' --base_lr=' + str(BASE_LR)\
+ ' --train_base_epoch_num=' + str(UPDATE_NUM) \
+ ' --way_num=' + str(WAY_NUM) \
+ ' --exp_log_label=' + LOG_DIR \
+ ' --pretrain_dropout_keep=' + str(PRE_TRA_DROP) \
+ ' --activation=leaky_relu' \
+ ' --pre_lr=' + str(PRE_LR)\
+ ' --pre_lr_dropstep=' + str(PRE_DROP_STEP) \
+ ' --meta_save_step=' + str(SAVE_STEP) \
+ ' --lr_drop_step=' + str(LR_DROP_STEP) \
+ ' --pretrain_folders=' + PRE_TRA_DIR \
+ ' --pretrain_label=' + PRE_TRA_LABEL \
+ ' --device_id=' + str(GPU_ID) \
+ ' --metatrain_dir=' + META_TRA_DIR \
+ ' --metaval_dir=' + META_VAL_DIR \
+ ' --metatest_dir=' + META_TES_DIR
def process_test_command(TEST_STEP, in_command):
"""The function to adapt the base command to the meta-test phase.
Args:
TEST_STEP: the iteration number for the meta model to be loaded.
in_command: the input base command.
Return:
Processed command.
"""
output_test_command = in_command \
+ ' --phase=meta' \
+ ' --pretrain_iterations=' + str(PRE_ITER) \
+ ' --metatrain=False' \
+ ' --test_iter=' + str(TEST_STEP)
return output_test_command
if PHASE=='PRE':
print('****** Start Pre-train Phase ******')
pre_command = base_command + ' --phase=pre' + ' --pretrain_iterations=' + str(PRE_TRA_ITER_MAX)
os.system(pre_command)
if PHASE=='META':
print('****** Start Meta-train Phase ******')
meta_train_command = base_command + ' --phase=meta' + ' --pretrain_iterations=' + str(PRE_ITER)
os.system(meta_train_command)
print('****** Start Meta-test Phase ******')
for idx in range(MAX_ITER):
if idx % SAVE_STEP == 0:
print('[*] Runing meta-test, load model for ' + str(idx) + ' iterations')
test_command = process_test_command(idx, base_command)
os.system(test_command)
if PHASE=='META_LOAD':
print('****** Start Meta-train Phase with Downloaded Weights ******')
meta_train_command = base_command + ' --phase=meta' + ' --pretrain_iterations=' + str(PRE_ITER) + ' --load_saved_weights=True'
os.system(meta_train_command)
if PHASE=='TEST_LOAD':
print('****** Start Meta-test Phase with Downloaded Weights ******')
test_command = process_test_command(0, base_command) + ' --load_saved_weights=True'
os.system(test_command)
THE_INPUT_PHASE = sys.argv[1]
run_experiment(PHASE=THE_INPUT_PHASE)
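# Example invocations (illustrative): the phase is read from the first command-line
# argument, so valid calls include
#   python run_experiment.py PRE         # pre-train phase
#   python run_experiment.py META        # meta-train followed by meta-test
#   python run_experiment.py META_LOAD   # meta-train with downloaded pre-trained weights
#   python run_experiment.py TEST_LOAD   # meta-test with downloaded weights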
| 5,436 | 43.203252 | 134 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/trainer/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/trainer/meta.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for meta-learning. """
import os
import csv
import pickle
import random
import numpy as np
import tensorflow as tf
from tqdm import trange
from data_generator.meta_data_generator import MetaDataGenerator
from models.meta_model import MakeMetaModel
from tensorflow.python.platform import flags
from utils.misc import process_batch
FLAGS = flags.FLAGS
class MetaTrainer:
"""The class that contains the code for the meta-train and meta-test."""
def __init__(self):
# Remove the saved datalist for a new experiment
os.system('rm -r ./logs/processed_data/*')
data_generator = MetaDataGenerator()
if FLAGS.metatrain:
# Build model for meta-train phase
print('Building meta-train model')
self.model = MakeMetaModel()
self.model.construct_model()
print('Meta-train model is built')
# Start tensorflow session
self.start_session()
# Generate data for meta-train phase
if FLAGS.load_saved_weights:
random.seed(5)
data_generator.generate_data(data_type='train')
if FLAGS.load_saved_weights:
random.seed(7)
data_generator.generate_data(data_type='test')
if FLAGS.load_saved_weights:
random.seed(9)
data_generator.generate_data(data_type='val')
else:
# Build model for meta-test phase
            print('Building meta-test model')
self.model = MakeMetaModel()
self.model.construct_test_model()
self.model.summ_op = tf.summary.merge_all()
print('Meta-test model is built')
# Start tensorflow session
self.start_session()
# Generate data for meta-test phase
if FLAGS.load_saved_weights:
random.seed(7)
data_generator.generate_data(data_type='test')
# Load the experiment setting string from FLAGS
exp_string = FLAGS.exp_string
# Global initialization and starting queue
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
if FLAGS.metatrain:
# Process initialization weights for meta-train
init_dir = FLAGS.logdir_base + 'init_weights/'
if not os.path.exists(init_dir):
os.mkdir(init_dir)
pre_save_str = FLAGS.pre_string
this_init_dir = init_dir + pre_save_str + '.pre_iter(' + str(FLAGS.pretrain_iterations) + ')/'
if not os.path.exists(this_init_dir):
                # If there are no saved initialization weights for meta-train, load the pre-train model and save the initialization weights
os.mkdir(this_init_dir)
if FLAGS.load_saved_weights:
print('Loading downloaded pretrain weights')
weights = np.load('logs/download_weights/weights.npy', allow_pickle=True, encoding="latin1").tolist()
else:
print('Loading pretrain weights')
weights_save_dir_base = FLAGS.pretrain_dir
weights_save_dir = os.path.join(weights_save_dir_base, pre_save_str)
weights = np.load(os.path.join(weights_save_dir, "weights_{}.npy".format(FLAGS.pretrain_iterations)), \
allow_pickle=True, encoding="latin1").tolist()
                bias_list = [bias_item for bias_item in weights.keys() if '_bias' in bias_item]
                # Assign the bias weights to the SS model so that they are trained during meta-train
                for bias_key in bias_list:
                    self.sess.run(tf.assign(self.model.ss_weights[bias_key], weights[bias_key]))
# Assign pretrained weights to tensorflow variables
for key in weights.keys():
self.sess.run(tf.assign(self.model.weights[key], weights[key]))
print('Pretrain weights loaded, saving init weights')
# Load and save init weights for the model
new_weights = self.sess.run(self.model.weights)
ss_weights = self.sess.run(self.model.ss_weights)
fc_weights = self.sess.run(self.model.fc_weights)
np.save(this_init_dir + 'weights_init.npy', new_weights)
np.save(this_init_dir + 'ss_weights_init.npy', ss_weights)
np.save(this_init_dir + 'fc_weights_init.npy', fc_weights)
else:
                # If the initialization weights have already been generated, load the previously saved ones
                # This process is deactivated in the default settings; you may activate it for an ablation study
print('Loading previous saved init weights')
weights = np.load(this_init_dir + 'weights_init.npy', allow_pickle=True, encoding="latin1").tolist()
ss_weights = np.load(this_init_dir + 'ss_weights_init.npy', allow_pickle=True, encoding="latin1").tolist()
fc_weights = np.load(this_init_dir + 'fc_weights_init.npy', allow_pickle=True, encoding="latin1").tolist()
for key in weights.keys():
self.sess.run(tf.assign(self.model.weights[key], weights[key]))
for key in ss_weights.keys():
self.sess.run(tf.assign(self.model.ss_weights[key], ss_weights[key]))
for key in fc_weights.keys():
self.sess.run(tf.assign(self.model.fc_weights[key], fc_weights[key]))
print('Init weights loaded')
else:
# Load the saved meta model for meta-test phase
if FLAGS.load_saved_weights:
# Load the downloaded weights
weights = np.load('./logs/download_weights/weights.npy', allow_pickle=True, encoding="latin1").tolist()
ss_weights = np.load('./logs/download_weights/ss_weights.npy', allow_pickle=True, encoding="latin1").tolist()
fc_weights = np.load('./logs/download_weights/fc_weights.npy', allow_pickle=True, encoding="latin1").tolist()
else:
# Load the saved weights of meta-train
weights = np.load(FLAGS.logdir + '/' + exp_string + '/weights_' + str(FLAGS.test_iter) + '.npy', \
allow_pickle=True, encoding="latin1").tolist()
ss_weights = np.load(FLAGS.logdir + '/' + exp_string + '/ss_weights_' + str(FLAGS.test_iter) + '.npy', \
allow_pickle=True, encoding="latin1").tolist()
fc_weights = np.load(FLAGS.logdir + '/' + exp_string + '/fc_weights_' + str(FLAGS.test_iter) + '.npy', \
allow_pickle=True, encoding="latin1").tolist()
# Assign the weights to the tensorflow variables
for key in weights.keys():
self.sess.run(tf.assign(self.model.weights[key], weights[key]))
for key in ss_weights.keys():
self.sess.run(tf.assign(self.model.ss_weights[key], ss_weights[key]))
for key in fc_weights.keys():
self.sess.run(tf.assign(self.model.fc_weights[key], fc_weights[key]))
print('Weights loaded')
if FLAGS.load_saved_weights:
print('Meta test using downloaded model')
else:
print('Test iter: ' + str(FLAGS.test_iter))
if FLAGS.metatrain:
self.train(data_generator)
else:
self.test(data_generator)
def start_session(self):
"""The function to start tensorflow session."""
if FLAGS.full_gpu_memory_mode:
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_rate
self.sess = tf.InteractiveSession(config=gpu_config)
else:
self.sess = tf.InteractiveSession()
def train(self, data_generator):
"""The function for the meta-train phase
Arg:
data_generator: the data generator class for this phase
"""
# Load the experiment setting string from FLAGS
exp_string = FLAGS.exp_string
# Generate tensorboard file writer
train_writer = tf.summary.FileWriter(FLAGS.logdir + '/' + exp_string, self.sess.graph)
print('Start meta-train phase')
# Generate empty list to record losses and accuracies
loss_list, acc_list = [], []
# Load the meta learning rate from FLAGS
train_lr = FLAGS.meta_lr
# Load data for meta-train and meta validation
data_generator.load_data(data_type='train')
data_generator.load_data(data_type='val')
for train_idx in trange(FLAGS.metatrain_iterations):
# Load the episodes for this meta batch
inputa = []
labela = []
inputb = []
labelb = []
for meta_batch_idx in range(FLAGS.meta_batch_size):
this_episode = data_generator.load_episode(index=train_idx*FLAGS.meta_batch_size+meta_batch_idx, data_type='train')
inputa.append(this_episode[0])
labela.append(this_episode[1])
inputb.append(this_episode[2])
labelb.append(this_episode[3])
inputa = np.array(inputa)
labela = np.array(labela)
inputb = np.array(inputb)
labelb = np.array(labelb)
# Generate feed dict for the tensorflow graph
feed_dict = {self.model.inputa: inputa, self.model.inputb: inputb, \
self.model.labela: labela, self.model.labelb: labelb, self.model.meta_lr: train_lr}
# Set the variables to load from the tensorflow graph
input_tensors = [self.model.metatrain_op] # The meta train optimizer
input_tensors.extend([self.model.total_loss]) # The meta train loss
input_tensors.extend([self.model.total_accuracy]) # The meta train accuracy
input_tensors.extend([self.model.training_summ_op]) # The tensorboard summary operation
# run this meta-train iteration
result = self.sess.run(input_tensors, feed_dict)
# record losses, accuracies and tensorboard
loss_list.append(result[1])
acc_list.append(result[2])
train_writer.add_summary(result[3], train_idx)
# print meta-train information on the screen after several iterations
if (train_idx!=0) and train_idx % FLAGS.meta_print_step == 0:
print_str = 'Iteration:' + str(train_idx)
print_str += ' Loss:' + str(np.mean(loss_list)) + ' Acc:' + str(np.mean(acc_list))
print(print_str)
loss_list, acc_list = [], []
            # Save the model during meta-train
if train_idx % FLAGS.meta_save_step == 0:
weights = self.sess.run(self.model.weights)
ss_weights = self.sess.run(self.model.ss_weights)
fc_weights = self.sess.run(self.model.fc_weights)
np.save(FLAGS.logdir + '/' + exp_string + '/weights_' + str(train_idx) + '.npy', weights)
np.save(FLAGS.logdir + '/' + exp_string + '/ss_weights_' + str(train_idx) + '.npy', ss_weights)
np.save(FLAGS.logdir + '/' + exp_string + '/fc_weights_' + str(train_idx) + '.npy', fc_weights)
# Run the meta-validation during meta-train
if train_idx % FLAGS.meta_val_print_step == 0:
test_loss = []
test_accs = []
for test_itr in range(FLAGS.meta_intrain_val_sample):
this_episode = data_generator.load_episode(index=test_itr, data_type='val')
test_inputa = this_episode[0][np.newaxis, :]
test_labela = this_episode[1][np.newaxis, :]
test_inputb = this_episode[2][np.newaxis, :]
test_labelb = this_episode[3][np.newaxis, :]
test_feed_dict = {self.model.inputa: test_inputa, self.model.inputb: test_inputb, \
self.model.labela: test_labela, self.model.labelb: test_labelb, \
self.model.meta_lr: 0.0}
test_input_tensors = [self.model.total_loss, self.model.total_accuracy]
test_result = self.sess.run(test_input_tensors, test_feed_dict)
test_loss.append(test_result[0])
test_accs.append(test_result[1])
valsum_feed_dict = {self.model.input_val_loss: \
np.mean(test_loss)*np.float(FLAGS.meta_batch_size)/np.float(FLAGS.shot_num), \
self.model.input_val_acc: np.mean(test_accs)*np.float(FLAGS.meta_batch_size)}
valsum = self.sess.run(self.model.val_summ_op, valsum_feed_dict)
train_writer.add_summary(valsum, train_idx)
print_str = '[***] Val Loss:' + str(np.mean(test_loss)*FLAGS.meta_batch_size) + \
' Val Acc:' + str(np.mean(test_accs)*FLAGS.meta_batch_size)
print(print_str)
            # Decay the meta learning rate by lr_drop_rate every lr_drop_step iterations (floored at 0.1 * meta_lr)
if (train_idx!=0) and train_idx % FLAGS.lr_drop_step == 0:
train_lr = train_lr * FLAGS.lr_drop_rate
if train_lr < 0.1 * FLAGS.meta_lr:
train_lr = 0.1 * FLAGS.meta_lr
print('Train LR: {}'.format(train_lr))
# Save the final model
weights = self.sess.run(self.model.weights)
ss_weights = self.sess.run(self.model.ss_weights)
fc_weights = self.sess.run(self.model.fc_weights)
np.save(FLAGS.logdir + '/' + exp_string + '/weights_' + str(train_idx+1) + '.npy', weights)
np.save(FLAGS.logdir + '/' + exp_string + '/ss_weights_' + str(train_idx+1) + '.npy', ss_weights)
np.save(FLAGS.logdir + '/' + exp_string + '/fc_weights_' + str(train_idx+1) + '.npy', fc_weights)
def test(self, data_generator):
"""The function for the meta-test phase
Arg:
data_generator: the data generator class for this phase
"""
# Set meta-test episode number
NUM_TEST_POINTS = 600
# Load the experiment setting string from FLAGS
exp_string = FLAGS.exp_string
print('Start meta-test phase')
np.random.seed(1)
# Generate empty list to record accuracies
metaval_accuracies = []
# Load data for meta-test
data_generator.load_data(data_type='test')
for test_idx in trange(NUM_TEST_POINTS):
# Load one episode for meta-test
this_episode = data_generator.load_episode(index=test_idx, data_type='test')
inputa = this_episode[0][np.newaxis, :]
labela = this_episode[1][np.newaxis, :]
inputb = this_episode[2][np.newaxis, :]
labelb = this_episode[3][np.newaxis, :]
feed_dict = {self.model.inputa: inputa, self.model.inputb: inputb, \
self.model.labela: labela, self.model.labelb: labelb, self.model.meta_lr: 0.0}
result = self.sess.run(self.model.metaval_total_accuracies, feed_dict)
metaval_accuracies.append(result)
        # Calculate the mean accuracies and the 95% confidence intervals
metaval_accuracies = np.array(metaval_accuracies)
means = np.mean(metaval_accuracies, 0)
stds = np.std(metaval_accuracies, 0)
ci95 = 1.96*stds/np.sqrt(NUM_TEST_POINTS)
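        # Each column of metaval_accuracies corresponds to one recorded base-update
        # step, so means[i] and ci95[i] give the accuracy statistics for that step;
        # they are written below as the 'update<i>' columns of the csv file.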
# Print the meta-test results
print('Test accuracies and confidence intervals')
print((means, ci95))
# Save the meta-test results in the csv files
if not FLAGS.load_saved_weights:
out_filename = FLAGS.logdir +'/'+ exp_string + '/' + 'result_' + str(FLAGS.shot_num) + 'shot_' + str(FLAGS.test_iter) + '.csv'
out_pkl = FLAGS.logdir +'/'+ exp_string + '/' + 'result_' + str(FLAGS.shot_num) + 'shot_' + str(FLAGS.test_iter) + '.pkl'
with open(out_pkl, 'wb') as f:
pickle.dump({'mses': metaval_accuracies}, f)
with open(out_filename, 'w') as f:
writer = csv.writer(f, delimiter=',')
writer.writerow(['update'+str(i) for i in range(len(means))])
writer.writerow(means)
writer.writerow(stds)
writer.writerow(ci95)
| 16,927 | 51.571429 | 138 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/trainer/pre.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Trainer for pre-train phase. """
import os
import numpy as np
import tensorflow as tf
from tqdm import trange
from data_generator.pre_data_generator import PreDataGenerator
from models.pre_model import MakePreModel
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
class PreTrainer:
"""The class that contains the code for the pre-train phase"""
def __init__(self):
# This class defines the pre-train phase trainer
print('Generating pre-training classes')
# Generate Pre-train Data Tensors
pre_train_data_generator = PreDataGenerator()
pretrain_input, pretrain_label = pre_train_data_generator.make_data_tensor()
pre_train_input_tensors = {'pretrain_input': pretrain_input, 'pretrain_label': pretrain_label}
# Build Pre-train Model
self.model = MakePreModel()
self.model.construct_pretrain_model(input_tensors=pre_train_input_tensors)
self.model.pretrain_summ_op = tf.summary.merge_all()
# Start the TensorFlow Session
if FLAGS.full_gpu_memory_mode:
gpu_config = tf.ConfigProto()
gpu_config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_rate
self.sess = tf.InteractiveSession(config=gpu_config)
else:
self.sess = tf.InteractiveSession()
# Initialize and Start the Pre-train Phase
tf.global_variables_initializer().run()
tf.train.start_queue_runners()
self.pre_train()
def pre_train(self):
# Load Parameters from FLAGS
pretrain_iterations = FLAGS.pretrain_iterations
weights_save_dir_base = FLAGS.pretrain_dir
pre_save_str = FLAGS.pre_string
# Build Pre-train Log Folder
weights_save_dir = os.path.join(weights_save_dir_base, pre_save_str)
if not os.path.exists(weights_save_dir):
os.mkdir(weights_save_dir)
pretrain_writer = tf.summary.FileWriter(weights_save_dir, self.sess.graph)
pre_lr = FLAGS.pre_lr
print('Start pre-train phase')
print('Pre-train Hyper parameters: ' + pre_save_str)
# Start the iterations
for itr in trange(pretrain_iterations):
# Generate the Feed Dict and Run the Optimizer
feed_dict = {self.model.pretrain_lr: pre_lr}
input_tensors = [self.model.pretrain_op, self.model.pretrain_summ_op]
input_tensors.extend([self.model.pretrain_task_loss, self.model.pretrain_task_accuracy])
result = self.sess.run(input_tensors, feed_dict)
# Print Results during Training
if (itr!=0) and itr % FLAGS.pre_print_step == 0:
print_str = '[*] Pre Loss: ' + str(result[-2]) + ', Pre Acc: ' + str(result[-1])
print(print_str)
            # Write the TensorFlow summary
if itr % FLAGS.pre_sum_step == 0:
pretrain_writer.add_summary(result[1], itr)
# Decrease the Learning Rate after Some Iterations
if (itr!=0) and itr % FLAGS.pre_lr_dropstep == 0:
pre_lr = pre_lr * 0.5
if FLAGS.pre_lr_stop and pre_lr < FLAGS.min_pre_lr:
pre_lr = FLAGS.min_pre_lr
# Save Pre-train Model
if (itr!=0) and itr % FLAGS.pre_save_step == 0:
print('Saving pretrain weights to npy')
weights = self.sess.run(self.model.weights)
np.save(os.path.join(weights_save_dir, "weights_{}.npy".format(itr)), weights)
| 3,972 | 39.958763 | 102 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/models/resnet18.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" ResNet-18 class. """
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from utils.misc import mse, softmaxloss, xent, resnet_conv_block, resnet_nob_conv_block
FLAGS = flags.FLAGS
class Models:
"""The class that contains the code for the basic resnet models and SS weights"""
def __init__(self):
# Set the dimension number for the input feature maps
self.dim_input = FLAGS.img_size * FLAGS.img_size * 3
# Set the dimension number for the outputs
self.dim_output = FLAGS.way_num
# Load base learning rates from FLAGS
self.update_lr = FLAGS.base_lr
# Load the pre-train phase class number from FLAGS
self.pretrain_class_num = FLAGS.pretrain_class_num
# Set the initial meta learning rate
self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
# Set the initial pre-train learning rate
self.pretrain_lr = tf.placeholder_with_default(FLAGS.pre_lr, ())
# Set the default objective functions for meta-train and pre-train
self.loss_func = xent
self.pretrain_loss_func = softmaxloss
# Set the default channel number to 3
self.channels = 3
# Load the image size from FLAGS
self.img_size = FLAGS.img_size
def process_ss_weights(self, weights, ss_weights, label):
"""The function to process the scaling operation
Args:
weights: the weights for the resnet.
ss_weights: the weights for scaling and shifting operation.
label: the label to indicate which layer we are operating.
Return:
The processed weights for the new resnet.
"""
[dim0, dim1] = weights[label].get_shape().as_list()[0:2]
this_ss_weights = tf.tile(ss_weights[label], multiples=[dim0, dim1, 1, 1])
return tf.multiply(weights[label], this_ss_weights)
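    # Note on process_ss_weights: the learned [1, 1, C_in, C_out] scaling tensor is
    # tiled across the k x k kernel dimensions and multiplied elementwise with the
    # frozen convolution kernel (W' = W * Phi_scale); the corresponding shifting is
    # applied through the '_bias' entries of ss_weights passed to resnet_conv_block.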
def forward_pretrain_resnet(self, inp, weights, reuse=False, scope=''):
"""The function to forward the resnet during pre-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])
inp = tf.image.resize_images(inp, size=[224,224], method=tf.image.ResizeMethod.BILINEAR)
net = self.pretrain_first_block_forward(inp, weights, 'block0_1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block1_1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block1_2', reuse, scope, block_last_layer=True)
net = self.pretrain_block_forward(net, weights, 'block2_1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block2_2', reuse, scope, block_last_layer=True)
net = self.pretrain_block_forward(net, weights, 'block3_1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block3_2', reuse, scope, block_last_layer=True)
net = self.pretrain_block_forward(net, weights, 'block4_1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block4_2', reuse, scope, block_last_layer=True)
net = tf.nn.avg_pool(net, [1,7,7,1], [1,7,7,1], 'SAME')
net = tf.reshape(net, [-1, np.prod([int(dim) for dim in net.get_shape()[1:]])])
return net
def forward_resnet(self, inp, weights, ss_weights, reuse=False, scope=''):
"""The function to forward the resnet during meta-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
ss_weights: input scaling weights.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])
inp = tf.image.resize_images(inp, size=[224,224], method=tf.image.ResizeMethod.BILINEAR)
net = self.first_block_forward(inp, weights, ss_weights, 'block0_1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block1_1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block1_2', reuse, scope, block_last_layer=True)
net = self.block_forward(net, weights, ss_weights, 'block2_1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block2_2', reuse, scope, block_last_layer=True)
net = self.block_forward(net, weights, ss_weights, 'block3_1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block3_2', reuse, scope, block_last_layer=True)
net = self.block_forward(net, weights, ss_weights, 'block4_1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block4_2', reuse, scope, block_last_layer=True)
net = tf.nn.avg_pool(net, [1,7,7,1], [1,7,7,1], 'SAME')
net = tf.reshape(net, [-1, np.prod([int(dim) for dim in net.get_shape()[1:]])])
return net
def forward_fc(self, inp, fc_weights):
"""The function to forward the fc layer
Args:
inp: input feature maps.
fc_weights: input fc weights.
Return:
The processed feature maps.
"""
net = tf.matmul(inp, fc_weights['w5']) + fc_weights['b5']
return net
def pretrain_block_forward(self, inp, weights, block, reuse, scope, block_last_layer=False):
"""The function to forward a resnet block during pre-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
block_last_layer: whether it is the last layer of this block.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, weights[block + '_conv1'], weights[block + '_bias1'], reuse, scope+block+'0')
net = resnet_conv_block(net, weights[block + '_conv2'], weights[block + '_bias2'], reuse, scope+block+'1')
res = resnet_nob_conv_block(inp, weights[block + '_conv_res'], reuse, scope+block+'res')
net = net + res
if block_last_layer:
net = tf.nn.max_pool(net, [1,2,2,1], [1,2,2,1], 'SAME')
net = tf.nn.dropout(net, keep_prob=FLAGS.pretrain_dropout_keep)
return net
def block_forward(self, inp, weights, ss_weights, block, reuse, scope, block_last_layer=False):
"""The function to forward a resnet block during meta-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
ss_weights: input scaling weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
block_last_layer: whether it is the last layer of this block.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, self.process_ss_weights(weights, ss_weights, block + '_conv1'), \
ss_weights[block + '_bias1'], reuse, scope+block+'0')
net = resnet_conv_block(net, self.process_ss_weights(weights, ss_weights, block + '_conv2'), \
ss_weights[block + '_bias2'], reuse, scope+block+'1')
res = resnet_nob_conv_block(inp, weights[block + '_conv_res'], reuse, scope+block+'res')
net = net + res
if block_last_layer:
net = tf.nn.max_pool(net, [1,2,2,1], [1,2,2,1], 'SAME')
net = tf.nn.dropout(net, keep_prob=1)
return net
def pretrain_first_block_forward(self, inp, weights, block, reuse, scope):
"""The function to forward the first resnet block during pre-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, weights[block + '_conv1'], weights[block + '_bias1'], reuse, scope+block+'0')
net = tf.nn.max_pool(net, [1,3,3,1], [1,2,2,1], 'SAME')
net = tf.nn.dropout(net, keep_prob=FLAGS.pretrain_dropout_keep)
return net
def first_block_forward(self, inp, weights, ss_weights, block, reuse, scope, block_last_layer=False):
"""The function to forward the first resnet block during meta-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, self.process_ss_weights(weights, ss_weights, block + '_conv1'), \
ss_weights[block + '_bias1'], reuse, scope+block+'0')
net = tf.nn.max_pool(net, [1,3,3,1], [1,2,2,1], 'SAME')
net = tf.nn.dropout(net, keep_prob=1)
return net
def construct_fc_weights(self):
"""The function to construct fc weights.
Return:
The fc weights.
"""
dtype = tf.float32
fc_weights = {}
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
if FLAGS.phase=='pre':
fc_weights['w5'] = tf.get_variable('fc_w5', [512, FLAGS.pretrain_class_num], initializer=fc_initializer)
fc_weights['b5'] = tf.Variable(tf.zeros([FLAGS.pretrain_class_num]), name='fc_b5')
else:
fc_weights['w5'] = tf.get_variable('fc_w5', [512, self.dim_output], initializer=fc_initializer)
fc_weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='fc_b5')
return fc_weights
def construct_resnet_weights(self):
"""The function to construct resnet weights.
Return:
The resnet weights.
"""
weights = {}
dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
weights = self.construct_first_block_weights(weights, 7, 3, 64, conv_initializer, dtype, 'block0_1')
weights = self.construct_residual_block_weights(weights, 3, 64, 64, conv_initializer, dtype, 'block1_1')
weights = self.construct_residual_block_weights(weights, 3, 64, 64, conv_initializer, dtype, 'block1_2')
weights = self.construct_residual_block_weights(weights, 3, 64, 128, conv_initializer, dtype, 'block2_1')
weights = self.construct_residual_block_weights(weights, 3, 128, 128, conv_initializer, dtype, 'block2_2')
weights = self.construct_residual_block_weights(weights, 3, 128, 256, conv_initializer, dtype, 'block3_1')
weights = self.construct_residual_block_weights(weights, 3, 256, 256, conv_initializer, dtype, 'block3_2')
weights = self.construct_residual_block_weights(weights, 3, 256, 512, conv_initializer, dtype, 'block4_1')
weights = self.construct_residual_block_weights(weights, 3, 512, 512, conv_initializer, dtype, 'block4_2')
weights['w5'] = tf.get_variable('w5', [512, FLAGS.pretrain_class_num], initializer=fc_initializer)
weights['b5'] = tf.Variable(tf.zeros([FLAGS.pretrain_class_num]), name='b5')
return weights
def construct_residual_block_weights(self, weights, k, last_dim_hidden, dim_hidden, conv_initializer, dtype, scope='block0'):
"""The function to construct one block of the resnet weights.
Args:
weights: the resnet weight list.
k: the dimension number of the convolution kernel.
last_dim_hidden: the hidden dimension number of last block.
dim_hidden: the hidden dimension number of the block.
conv_initializer: the convolution initializer.
dtype: the dtype for numpy.
scope: the label to indicate which block we are processing.
Return:
The resnet block weights.
"""
weights[scope + '_conv1'] = tf.get_variable(scope + '_conv1', [k, k, last_dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
weights[scope + '_conv2'] = tf.get_variable(scope + '_conv2', [k, k, dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias2'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias2')
weights[scope + '_conv_res'] = tf.get_variable(scope + '_conv_res', [1, 1, last_dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
return weights
def construct_first_block_weights(self, weights, k, last_dim_hidden, dim_hidden, conv_initializer, dtype, scope='block0'):
"""The function to construct the first block of the resnet weights.
Args:
weights: the resnet weight list.
k: the dimension number of the convolution kernel.
last_dim_hidden: the hidden dimension number of last block.
dim_hidden: the hidden dimension number of the block.
conv_initializer: the convolution initializer.
dtype: the dtype for numpy.
scope: the label to indicate which block we are processing.
Return:
The resnet block weights.
"""
weights[scope + '_conv1'] = tf.get_variable(scope + '_conv1', [k, k, last_dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
return weights
def construct_first_block_ss_weights(self, ss_weights, last_dim_hidden, dim_hidden, scope='block0'):
"""The function to construct first block's ss weights.
Return:
The ss weights.
"""
ss_weights[scope + '_conv1'] = tf.Variable(tf.ones([1, 1, last_dim_hidden, dim_hidden]), name=scope + '_conv1')
ss_weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
return ss_weights
def construct_resnet_ss_weights(self):
"""The function to construct ss weights.
Return:
The ss weights.
"""
ss_weights = {}
ss_weights = self.construct_first_block_ss_weights(ss_weights, 3, 64, 'block0_1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 64, 64, 'block1_1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 64, 64, 'block1_2')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 64, 128, 'block2_1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 128, 128, 'block2_2')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 128, 256, 'block3_1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 256, 256, 'block3_2')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 256, 512, 'block4_1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 512, 512, 'block4_2')
return ss_weights
def construct_residual_block_ss_weights(self, ss_weights, last_dim_hidden, dim_hidden, scope='block0'):
"""The function to construct one block ss weights.
Args:
ss_weights: the ss weight list.
last_dim_hidden: the hidden dimension number of last block.
dim_hidden: the hidden dimension number of the block.
scope: the label to indicate which block we are processing.
Return:
The ss block weights.
"""
ss_weights[scope + '_conv1'] = tf.Variable(tf.ones([1, 1, last_dim_hidden, dim_hidden]), name=scope + '_conv1')
ss_weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
ss_weights[scope + '_conv2'] = tf.Variable(tf.ones([1, 1, dim_hidden, dim_hidden]), name=scope + '_conv2')
ss_weights[scope + '_bias2'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias2')
return ss_weights
| 17,202 | 49.89645 | 129 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/models/resnet12.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" ResNet-12 class. """
import numpy as np
import tensorflow as tf
from tensorflow.python.platform import flags
from utils.misc import mse, softmaxloss, xent, resnet_conv_block, resnet_nob_conv_block
FLAGS = flags.FLAGS
class Models:
"""The class that contains the code for the basic resnet models and SS weights"""
def __init__(self):
# Set the dimension number for the input feature maps
self.dim_input = FLAGS.img_size * FLAGS.img_size * 3
# Set the dimension number for the outputs
self.dim_output = FLAGS.way_num
# Load base learning rates from FLAGS
self.update_lr = FLAGS.base_lr
# Load the pre-train phase class number from FLAGS
self.pretrain_class_num = FLAGS.pretrain_class_num
# Set the initial meta learning rate
self.meta_lr = tf.placeholder_with_default(FLAGS.meta_lr, ())
# Set the initial pre-train learning rate
self.pretrain_lr = tf.placeholder_with_default(FLAGS.pre_lr, ())
# Set the default objective functions for meta-train and pre-train
self.loss_func = xent
self.pretrain_loss_func = softmaxloss
# Set the default channel number to 3
self.channels = 3
# Load the image size from FLAGS
self.img_size = FLAGS.img_size
def process_ss_weights(self, weights, ss_weights, label):
"""The function to process the scaling operation
Args:
weights: the weights for the resnet.
ss_weights: the weights for scaling and shifting operation.
label: the label to indicate which layer we are operating.
Return:
The processed weights for the new resnet.
"""
[dim0, dim1] = weights[label].get_shape().as_list()[0:2]
this_ss_weights = tf.tile(ss_weights[label], multiples=[dim0, dim1, 1, 1])
return tf.multiply(weights[label], this_ss_weights)
def forward_pretrain_resnet(self, inp, weights, reuse=False, scope=''):
"""The function to forward the resnet during pre-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])
net = self.pretrain_block_forward(inp, weights, 'block1', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block2', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block3', reuse, scope)
net = self.pretrain_block_forward(net, weights, 'block4', reuse, scope)
net = tf.nn.avg_pool(net, [1,5,5,1], [1,5,5,1], 'VALID')
net = tf.reshape(net, [-1, np.prod([int(dim) for dim in net.get_shape()[1:]])])
return net
def forward_resnet(self, inp, weights, ss_weights, reuse=False, scope=''):
"""The function to forward the resnet during meta-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
ss_weights: input scaling weights.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
inp = tf.reshape(inp, [-1, self.img_size, self.img_size, self.channels])
net = self.block_forward(inp, weights, ss_weights, 'block1', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block2', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block3', reuse, scope)
net = self.block_forward(net, weights, ss_weights, 'block4', reuse, scope)
net = tf.nn.avg_pool(net, [1,5,5,1], [1,5,5,1], 'VALID')
net = tf.reshape(net, [-1, np.prod([int(dim) for dim in net.get_shape()[1:]])])
return net
def forward_fc(self, inp, fc_weights):
"""The function to forward the fc layer
Args:
inp: input feature maps.
fc_weights: input fc weights.
Return:
The processed feature maps.
"""
net = tf.matmul(inp, fc_weights['w5']) + fc_weights['b5']
return net
def pretrain_block_forward(self, inp, weights, block, reuse, scope):
"""The function to forward a resnet block during pre-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, weights[block + '_conv1'], weights[block + '_bias1'], reuse, scope+block+'0')
net = resnet_conv_block(net, weights[block + '_conv2'], weights[block + '_bias2'], reuse, scope+block+'1')
net = resnet_conv_block(net, weights[block + '_conv3'], weights[block + '_bias3'], reuse, scope+block+'2')
res = resnet_nob_conv_block(inp, weights[block + '_conv_res'], reuse, scope+block+'res')
net = net + res
net = tf.nn.max_pool(net, [1,2,2,1], [1,2,2,1], 'VALID')
net = tf.nn.dropout(net, keep_prob=FLAGS.pretrain_dropout_keep)
return net
def block_forward(self, inp, weights, ss_weights, block, reuse, scope):
"""The function to forward a resnet block during meta-train phase
Args:
inp: input feature maps.
weights: input resnet weights.
ss_weights: input scaling weights.
block: the string to indicate which block we are processing.
reuse: reuse the batch norm weights or not.
scope: the label to indicate which layer we are processing.
Return:
The processed feature maps.
"""
net = resnet_conv_block(inp, self.process_ss_weights(weights, ss_weights, block + '_conv1'), \
ss_weights[block + '_bias1'], reuse, scope+block+'0')
net = resnet_conv_block(net, self.process_ss_weights(weights, ss_weights, block + '_conv2'), \
ss_weights[block + '_bias2'], reuse, scope+block+'1')
net = resnet_conv_block(net, self.process_ss_weights(weights, ss_weights, block + '_conv3'), \
ss_weights[block + '_bias3'], reuse, scope+block+'2')
res = resnet_nob_conv_block(inp, weights[block + '_conv_res'], reuse, scope+block+'res')
net = net + res
net = tf.nn.max_pool(net, [1,2,2,1], [1,2,2,1], 'VALID')
net = tf.nn.dropout(net, keep_prob=1)
return net
def construct_fc_weights(self):
"""The function to construct fc weights.
Return:
The fc weights.
"""
dtype = tf.float32
fc_weights = {}
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
if FLAGS.phase=='pre':
fc_weights['w5'] = tf.get_variable('fc_w5', [512, FLAGS.pretrain_class_num], initializer=fc_initializer)
fc_weights['b5'] = tf.Variable(tf.zeros([FLAGS.pretrain_class_num]), name='fc_b5')
else:
fc_weights['w5'] = tf.get_variable('fc_w5', [512, self.dim_output], initializer=fc_initializer)
fc_weights['b5'] = tf.Variable(tf.zeros([self.dim_output]), name='fc_b5')
return fc_weights
def construct_resnet_weights(self):
"""The function to construct resnet weights.
Return:
The resnet weights.
"""
weights = {}
dtype = tf.float32
conv_initializer = tf.contrib.layers.xavier_initializer_conv2d(dtype=dtype)
fc_initializer = tf.contrib.layers.xavier_initializer(dtype=dtype)
weights = self.construct_residual_block_weights(weights, 3, 3, 64, conv_initializer, dtype, 'block1')
weights = self.construct_residual_block_weights(weights, 3, 64, 128, conv_initializer, dtype, 'block2')
weights = self.construct_residual_block_weights(weights, 3, 128, 256, conv_initializer, dtype, 'block3')
weights = self.construct_residual_block_weights(weights, 3, 256, 512, conv_initializer, dtype, 'block4')
weights['w5'] = tf.get_variable('w5', [512, FLAGS.pretrain_class_num], initializer=fc_initializer)
weights['b5'] = tf.Variable(tf.zeros([FLAGS.pretrain_class_num]), name='b5')
return weights
def construct_residual_block_weights(self, weights, k, last_dim_hidden, dim_hidden, conv_initializer, dtype, scope='block0'):
"""The function to construct one block of the resnet weights.
Args:
weights: the resnet weight list.
k: the dimension number of the convolution kernel.
last_dim_hidden: the hidden dimension number of last block.
dim_hidden: the hidden dimension number of the block.
conv_initializer: the convolution initializer.
dtype: the dtype for numpy.
scope: the label to indicate which block we are processing.
Return:
The resnet block weights.
"""
weights[scope + '_conv1'] = tf.get_variable(scope + '_conv1', [k, k, last_dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
weights[scope + '_conv2'] = tf.get_variable(scope + '_conv2', [k, k, dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias2'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias2')
weights[scope + '_conv3'] = tf.get_variable(scope + '_conv3', [k, k, dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
weights[scope + '_bias3'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias3')
weights[scope + '_conv_res'] = tf.get_variable(scope + '_conv_res', [1, 1, last_dim_hidden, dim_hidden], \
initializer=conv_initializer, dtype=dtype)
return weights
def construct_resnet_ss_weights(self):
"""The function to construct ss weights.
Return:
The ss weights.
"""
ss_weights = {}
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 3, 64, 'block1')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 64, 128, 'block2')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 128, 256, 'block3')
ss_weights = self.construct_residual_block_ss_weights(ss_weights, 256, 512, 'block4')
return ss_weights
def construct_residual_block_ss_weights(self, ss_weights, last_dim_hidden, dim_hidden, scope='block0'):
"""The function to construct one block ss weights.
Args:
ss_weights: the ss weight list.
last_dim_hidden: the hidden dimension number of last block.
dim_hidden: the hidden dimension number of the block.
scope: the label to indicate which block we are processing.
Return:
The ss block weights.
"""
ss_weights[scope + '_conv1'] = tf.Variable(tf.ones([1, 1, last_dim_hidden, dim_hidden]), name=scope + '_conv1')
ss_weights[scope + '_bias1'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias1')
ss_weights[scope + '_conv2'] = tf.Variable(tf.ones([1, 1, dim_hidden, dim_hidden]), name=scope + '_conv2')
ss_weights[scope + '_bias2'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias2')
ss_weights[scope + '_conv3'] = tf.Variable(tf.ones([1, 1, dim_hidden, dim_hidden]), name=scope + '_conv3')
ss_weights[scope + '_bias3'] = tf.Variable(tf.zeros([dim_hidden]), name=scope + '_bias3')
return ss_weights
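# Illustrative sketch (not part of the original repo): the effect of process_ss_weights
# above, reproduced with plain numpy. A [1, 1, C_in, C_out] scaling tensor is tiled to
# the kernel size and multiplied elementwise, i.e. every spatial tap of a given
# (C_in, C_out) filter pair is scaled by the same learned factor. The helper name
# _ss_scaling_numpy_sketch is hypothetical.
def _ss_scaling_numpy_sketch():
    k, c_in, c_out = 3, 4, 8
    conv = np.random.randn(k, k, c_in, c_out)       # a resnet conv kernel
    ss = np.full((1, 1, c_in, c_out), 0.5)          # the per-channel scaling weights
    scaled = conv * np.tile(ss, (k, k, 1, 1))       # same idea as tf.multiply(tf.tile(...))
    assert scaled.shape == conv.shape
    return scaled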
| 12,316 | 50.320833 | 129 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/models/pre_model.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Models for pre-train phase. """
import tensorflow as tf
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def MakePreModel():
"""The function to make pre model.
Arg:
Pre-train model class.
"""
if FLAGS.backbone_arch=='resnet12':
try:#python2
from resnet12 import Models
except ImportError:#python3
from models.resnet12 import Models
elif FLAGS.backbone_arch=='resnet18':
try:#python2
from resnet18 import Models
except ImportError:#python3
from models.resnet18 import Models
else:
print('Please set the correct backbone')
class PreModel(Models):
"""The class for pre-train model."""
def construct_pretrain_model(self, input_tensors=None, is_val=False):
"""The function to construct pre-train model.
Args:
input_tensors: the input tensor to construct pre-train model.
is_val: whether the model is for validation.
"""
self.input = input_tensors['pretrain_input']
self.label = input_tensors['pretrain_label']
with tf.variable_scope('pretrain-model', reuse=None) as training_scope:
self.weights = weights = self.construct_resnet_weights()
self.fc_weights = fc_weights = self.construct_fc_weights()
if is_val is False:
self.pretrain_task_output = self.forward_fc(self.forward_pretrain_resnet(self.input, weights, reuse=False), fc_weights)
self.pretrain_task_loss = self.pretrain_loss_func(self.pretrain_task_output, self.label)
optimizer = tf.train.AdamOptimizer(self.pretrain_lr)
                    self.pretrain_op = optimizer.minimize(self.pretrain_task_loss, var_list=list(weights.values())+list(fc_weights.values()))
self.pretrain_task_accuracy = tf.reduce_mean(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax( \
self.pretrain_task_output), 1), tf.argmax(self.label, 1)))
tf.summary.scalar('pretrain train loss', self.pretrain_task_loss)
tf.summary.scalar('pretrain train accuracy', self.pretrain_task_accuracy)
else:
self.pretrain_task_output_val = self.forward_fc(self.forward_pretrain_resnet(self.input, weights, reuse=True), fc_weights)
self.pretrain_task_accuracy_val = tf.reduce_mean(tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax( \
self.pretrain_task_output_val), 1), tf.argmax(self.label, 1)))
tf.summary.scalar('pretrain val accuracy', self.pretrain_task_accuracy_val)
return PreModel()
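# Illustrative usage sketch (not part of the original file): wiring MakePreModel up to a
# batch of pre-train images and labels (e.g. the tensors returned by
# PreDataGenerator.make_data_tensor). The helper name is hypothetical.
def _pre_model_usage_sketch(image_batch, label_batch):
    model = MakePreModel()
    model.construct_pretrain_model(input_tensors={'pretrain_input': image_batch,
                                                  'pretrain_label': label_batch})
    # After construction, model.pretrain_op runs one optimization step, and
    # model.pretrain_task_loss / model.pretrain_task_accuracy can be fetched alongside it.
    return model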
| 3,134 | 47.230769 | 142 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/models/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/models/meta_model.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Models for meta-learning. """
import tensorflow as tf
from tensorflow.python.platform import flags
from utils.misc import mse, softmaxloss, xent, resnet_conv_block, resnet_nob_conv_block
FLAGS = flags.FLAGS
def MakeMetaModel():
"""The function to make meta model.
Arg:
Meta-train model class.
"""
if FLAGS.backbone_arch=='resnet12':
try:#python2
from resnet12 import Models
except ImportError:#python3
from models.resnet12 import Models
elif FLAGS.backbone_arch=='resnet18':
try:#python2
from resnet18 import Models
except ImportError:#python3
from models.resnet18 import Models
else:
print('Please set the correct backbone')
class MetaModel(Models):
"""The class for the meta models. This class is inheritance from Models, so some variables are in the Models class."""
def construct_model(self):
"""The function to construct meta-train model."""
# Set the placeholder for the input episode
self.inputa = tf.placeholder(tf.float32) # episode train images
self.inputb = tf.placeholder(tf.float32) # episode test images
self.labela = tf.placeholder(tf.float32) # episode train labels
self.labelb = tf.placeholder(tf.float32) # episode test labels
with tf.variable_scope('meta-model', reuse=None) as training_scope:
# construct the model weights
self.ss_weights = ss_weights = self.construct_resnet_ss_weights()
self.weights = weights = self.construct_resnet_weights()
self.fc_weights = fc_weights = self.construct_fc_weights()
# Load base epoch number from FLAGS
num_updates = FLAGS.train_base_epoch_num
def task_metalearn(inp, reuse=True):
"""The function to process one episode in a meta-batch.
Args:
inp: the input episode.
reuse: whether reuse the variables for the normalization.
Returns:
                  A series of outputs such as losses and accuracies.
                  """
                # Separate inp into different variables
inputa, inputb, labela, labelb = inp
# Generate empty list to record losses
lossa_list = [] # Base train loss list
lossb_list = [] # Base test loss list
# Embed the input images to embeddings with ss weights
emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse) # Embed episode train
emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True) # Embed episode test
# Run the first epoch of the base learning
# Forward fc layer for episode train
outputa = self.forward_fc(emb_outputa, fc_weights)
# Calculate base train loss
lossa = self.loss_func(outputa, labela)
# Record base train loss
lossa_list.append(lossa)
# Forward fc layer for episode test
outputb = self.forward_fc(emb_outputb, fc_weights)
# Calculate base test loss
lossb = self.loss_func(outputb, labelb)
# Record base test loss
lossb_list.append(lossb)
# Calculate the gradients for the fc layer
grads = tf.gradients(lossa, list(fc_weights.values()))
gradients = dict(zip(fc_weights.keys(), grads))
                # Use gradient descent to update the fc layer
fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \
self.update_lr*gradients[key] for key in fc_weights.keys()]))
for j in range(num_updates - 1):
                    # Run the remaining base epochs; these are similar to the first base epoch
lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)
lossa_list.append(lossa)
lossb = self.loss_func(self.forward_fc(emb_outputb, fast_fc_weights), labelb)
lossb_list.append(lossb)
grads = tf.gradients(lossa, list(fast_fc_weights.values()))
gradients = dict(zip(fast_fc_weights.keys(), grads))
fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \
self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))
# Calculate final episode test predictions
outputb = self.forward_fc(emb_outputb, fast_fc_weights)
# Calculate the final episode test loss, it is the loss for the episode on meta-train
final_lossb = self.loss_func(outputb, labelb)
# Calculate the final episode test accuarcy
accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
# Reorganize all the outputs to a list
task_output = [final_lossb, lossb_list, lossa_list, accb]
return task_output
            # Initialize the batch normalization weights
if FLAGS.norm is not None:
unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)
# Set the dtype of the outputs
out_dtype = [tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, tf.float32]
# Run two episodes for a meta batch using parallel setting
result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \
dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
            # Separate the outputs into different variables
lossb, lossesb, lossesa, accsb = result
# Set the variables to output from the tensorflow graph
self.total_loss = total_loss = tf.reduce_sum(lossb) / tf.to_float(FLAGS.meta_batch_size)
self.total_accuracy = total_accuracy = tf.reduce_sum(accsb) / tf.to_float(FLAGS.meta_batch_size)
self.total_lossa = total_lossa = [tf.reduce_sum(lossesa[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
self.total_lossb = total_lossb = [tf.reduce_sum(lossesb[j]) / tf.to_float(FLAGS.meta_batch_size) for j in range(num_updates)]
# Set the meta-train optimizer
optimizer = tf.train.AdamOptimizer(self.meta_lr)
self.metatrain_op = optimizer.minimize(total_loss, var_list=list(ss_weights.values()) + list(fc_weights.values()))
            # Set up the TensorBoard summaries
self.training_summaries = []
self.training_summaries.append(tf.summary.scalar('Meta Train Loss', (total_loss / tf.to_float(FLAGS.metatrain_epite_sample_num))))
self.training_summaries.append(tf.summary.scalar('Meta Train Accuracy', total_accuracy))
for j in range(num_updates):
self.training_summaries.append(tf.summary.scalar('Base Train Loss Step' + str(j+1), total_lossa[j]))
for j in range(num_updates):
self.training_summaries.append(tf.summary.scalar('Base Val Loss Step' + str(j+1), total_lossb[j]))
self.training_summ_op = tf.summary.merge(self.training_summaries)
self.input_val_loss = tf.placeholder(tf.float32)
self.input_val_acc = tf.placeholder(tf.float32)
self.val_summaries = []
self.val_summaries.append(tf.summary.scalar('Meta Val Loss', self.input_val_loss))
self.val_summaries.append(tf.summary.scalar('Meta Val Accuracy', self.input_val_acc))
self.val_summ_op = tf.summary.merge(self.val_summaries)
def construct_test_model(self):
"""The function to construct meta-test model."""
# Set the placeholder for the input episode
self.inputa = tf.placeholder(tf.float32)
self.inputb = tf.placeholder(tf.float32)
self.labela = tf.placeholder(tf.float32)
self.labelb = tf.placeholder(tf.float32)
with tf.variable_scope('meta-test-model', reuse=None) as training_scope:
# construct the model weights
self.ss_weights = ss_weights = self.construct_resnet_ss_weights()
self.weights = weights = self.construct_resnet_weights()
self.fc_weights = fc_weights = self.construct_fc_weights()
# Load test base epoch number from FLAGS
num_updates = FLAGS.test_base_epoch_num
def task_metalearn(inp, reuse=True):
"""The function to process one episode in a meta-batch.
Args:
inp: the input episode.
reuse: whether reuse the variables for the normalization.
Returns:
                  A series of outputs such as losses and accuracies.
                  """
                # Separate inp into different variables
inputa, inputb, labela, labelb = inp
# Generate empty list to record accuracies
accb_list = []
# Embed the input images to embeddings with ss weights
emb_outputa = self.forward_resnet(inputa, weights, ss_weights, reuse=reuse)
emb_outputb = self.forward_resnet(inputb, weights, ss_weights, reuse=True)
# This part is similar to the meta-train function, you may refer to the comments above
outputa = self.forward_fc(emb_outputa, fc_weights)
lossa = self.loss_func(outputa, labela)
grads = tf.gradients(lossa, list(fc_weights.values()))
gradients = dict(zip(fc_weights.keys(), grads))
fast_fc_weights = dict(zip(fc_weights.keys(), [fc_weights[key] - \
self.update_lr*gradients[key] for key in fc_weights.keys()]))
outputb = self.forward_fc(emb_outputb, fast_fc_weights)
accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
accb_list.append(accb)
for j in range(num_updates - 1):
lossa = self.loss_func(self.forward_fc(emb_outputa, fast_fc_weights), labela)
grads = tf.gradients(lossa, list(fast_fc_weights.values()))
gradients = dict(zip(fast_fc_weights.keys(), grads))
fast_fc_weights = dict(zip(fast_fc_weights.keys(), [fast_fc_weights[key] - \
self.update_lr*gradients[key] for key in fast_fc_weights.keys()]))
outputb = self.forward_fc(emb_outputb, fast_fc_weights)
accb = tf.contrib.metrics.accuracy(tf.argmax(tf.nn.softmax(outputb), 1), tf.argmax(labelb, 1))
accb_list.append(accb)
lossb = self.loss_func(outputb, labelb)
task_output = [lossb, accb, accb_list]
return task_output
if FLAGS.norm is not None:
unused = task_metalearn((self.inputa[0], self.inputb[0], self.labela[0], self.labelb[0]), False)
out_dtype = [tf.float32, tf.float32, [tf.float32]*num_updates]
result = tf.map_fn(task_metalearn, elems=(self.inputa, self.inputb, self.labela, self.labelb), \
dtype=out_dtype, parallel_iterations=FLAGS.meta_batch_size)
lossesb, accsb, accsb_list = result
self.metaval_total_loss = total_loss = tf.reduce_sum(lossesb)
self.metaval_total_accuracy = total_accuracy = tf.reduce_sum(accsb)
self.metaval_total_accuracies = total_accuracies =[tf.reduce_sum(accsb_list[j]) for j in range(num_updates)]
return MetaModel()
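# Illustrative usage sketch (not part of the original file): running one meta-train step
# with a meta-batch of episodes (e.g. produced by MetaDataGenerator.load_episode). The
# helper name and the feed arrays are hypothetical; the placeholder and op names follow
# construct_model above.
def _meta_train_step_sketch(sess, model, inputa, labela, inputb, labelb, meta_lr=0.001):
    feed_dict = {model.inputa: inputa, model.labela: labela,
                 model.inputb: inputb, model.labelb: labelb,
                 model.meta_lr: meta_lr}
    loss, acc, _ = sess.run([model.total_loss, model.total_accuracy, model.metatrain_op],
                            feed_dict=feed_dict)
    return loss, acc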
| 12,839 | 54.107296 | 142 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/data_generator/meta_data_generator.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Data generator for meta-learning. """
import numpy as np
import os
import random
import tensorflow as tf
from tqdm import trange
from tensorflow.python.platform import flags
from utils.misc import get_images, process_batch, process_batch_augmentation
FLAGS = flags.FLAGS
class MetaDataGenerator(object):
"""The class to generate data lists and episodes for meta-train and meta-test."""
def __init__(self):
# Set the base folder to save the data lists
filename_dir = FLAGS.logdir_base + 'processed_data/'
if not os.path.exists(filename_dir):
os.mkdir(filename_dir)
# Set the detailed folder name for saving the data lists
self.this_setting_filename_dir = filename_dir + 'shot(' + str(FLAGS.shot_num) + ').way(' + str(FLAGS.way_num) \
+ ').metatr_epite(' + str(FLAGS.metatrain_epite_sample_num) + ').metate_epite(' + str(FLAGS.metatest_epite_sample_num) + ')/'
if not os.path.exists(self.this_setting_filename_dir):
os.mkdir(self.this_setting_filename_dir)
def generate_data(self, data_type='train'):
"""The function to generate the data lists.
Arg:
data_type: the phase for meta-learning.
"""
if data_type=='train':
metatrain_folder = FLAGS.metatrain_dir
folders = [os.path.join(metatrain_folder, label) \
for label in os.listdir(metatrain_folder) \
if os.path.isdir(os.path.join(metatrain_folder, label)) \
]
num_total_batches = FLAGS.metatrain_iterations * FLAGS.meta_batch_size + 10
num_samples_per_class = FLAGS.shot_num + FLAGS.metatrain_epite_sample_num
elif data_type=='test':
metatest_folder = FLAGS.metatest_dir
folders = [os.path.join(metatest_folder, label) \
for label in os.listdir(metatest_folder) \
if os.path.isdir(os.path.join(metatest_folder, label)) \
]
num_total_batches = 600
if FLAGS.metatest_epite_sample_num==0:
num_samples_per_class = FLAGS.shot_num*2
else:
num_samples_per_class = FLAGS.shot_num + FLAGS.metatest_epite_sample_num
elif data_type=='val':
metaval_folder = FLAGS.metaval_dir
folders = [os.path.join(metaval_folder, label) \
for label in os.listdir(metaval_folder) \
if os.path.isdir(os.path.join(metaval_folder, label)) \
]
num_total_batches = 600
if FLAGS.metatest_epite_sample_num==0:
num_samples_per_class = FLAGS.shot_num*2
else:
num_samples_per_class = FLAGS.shot_num + FLAGS.metatest_epite_sample_num
else:
raise Exception('Please check data list type')
task_num = FLAGS.way_num * num_samples_per_class
epitr_sample_num = FLAGS.shot_num
if not os.path.exists(self.this_setting_filename_dir+'/' + data_type + '_data.npy'):
print('Generating ' + data_type + ' data')
data_list = []
for epi_idx in trange(num_total_batches):
sampled_character_folders = random.sample(folders, FLAGS.way_num)
random.shuffle(sampled_character_folders)
labels_and_images = get_images(sampled_character_folders, \
range(FLAGS.way_num), nb_samples=num_samples_per_class, shuffle=False)
labels = [li[0] for li in labels_and_images]
filenames = [li[1] for li in labels_and_images]
this_task_tr_filenames = []
this_task_tr_labels = []
this_task_te_filenames = []
this_task_te_labels = []
for class_idx in range(FLAGS.way_num):
this_class_filenames = filenames[class_idx*num_samples_per_class:(class_idx+1)*num_samples_per_class]
this_class_label = labels[class_idx*num_samples_per_class:(class_idx+1)*num_samples_per_class]
this_task_tr_filenames += this_class_filenames[0:epitr_sample_num]
this_task_tr_labels += this_class_label[0:epitr_sample_num]
this_task_te_filenames += this_class_filenames[epitr_sample_num:]
this_task_te_labels += this_class_label[epitr_sample_num:]
this_batch_data = {'filenamea': this_task_tr_filenames, 'filenameb': this_task_te_filenames, 'labela': this_task_tr_labels, \
'labelb': this_task_te_labels}
data_list.append(this_batch_data)
np.save(self.this_setting_filename_dir+'/' + data_type + '_data.npy', data_list)
print('The ' + data_type + ' data are saved')
else:
print('The ' + data_type + ' data have already been created')
def load_data(self, data_type='test'):
"""The function to load the data lists.
Arg:
data_type: the phase for meta-learning.
"""
data_list = np.load(self.this_setting_filename_dir+'/' + data_type + '_data.npy', allow_pickle=True, encoding="latin1")
if data_type=='train':
self.train_data = data_list
elif data_type=='test':
self.test_data = data_list
elif data_type=='val':
self.val_data = data_list
else:
print('[Error] Please check data list type')
def load_episode(self, index, data_type='train'):
"""The function to load the episodes.
Args:
index: the index for the episodes.
data_type: the phase for meta-learning.
"""
if data_type=='train':
data_list = self.train_data
epite_sample_num = FLAGS.metatrain_epite_sample_num
elif data_type=='test':
data_list = self.test_data
if FLAGS.metatest_epite_sample_num==0:
epite_sample_num = FLAGS.shot_num
else:
epite_sample_num = FLAGS.metatest_episode_test_sample
elif data_type=='val':
data_list = self.val_data
if FLAGS.metatest_epite_sample_num==0:
epite_sample_num = FLAGS.shot_num
else:
epite_sample_num = FLAGS.metatest_episode_test_sample
else:
raise Exception('Please check data list type')
dim_input = FLAGS.img_size * FLAGS.img_size * 3
epitr_sample_num = FLAGS.shot_num
this_episode = data_list[index]
this_task_tr_filenames = this_episode['filenamea']
this_task_te_filenames = this_episode['filenameb']
this_task_tr_labels = this_episode['labela']
this_task_te_labels = this_episode['labelb']
if FLAGS.metatrain is False and FLAGS.base_augmentation:
this_inputa, this_labela = process_batch_augmentation(this_task_tr_filenames, \
this_task_tr_labels, dim_input, epitr_sample_num)
this_inputb, this_labelb = process_batch(this_task_te_filenames, \
this_task_te_labels, dim_input, epite_sample_num)
else:
this_inputa, this_labela = process_batch(this_task_tr_filenames, \
this_task_tr_labels, dim_input, epitr_sample_num)
this_inputb, this_labelb = process_batch(this_task_te_filenames, \
this_task_te_labels, dim_input, epite_sample_num)
return this_inputa, this_labela, this_inputb, this_labelb
| 7,979 | 45.941176 | 141 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/data_generator/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/data_generator/pre_data_generator.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Data generator for pre-train phase. """
import numpy as np
import os
import random
import tensorflow as tf
from tqdm import trange
from tensorflow.python.platform import flags
from utils.misc import get_pretrain_images
FLAGS = flags.FLAGS
class PreDataGenerator(object):
"""The class to generate episodes for pre-train phase."""
def __init__(self):
self.num_classes = FLAGS.way_num
self.img_size = (FLAGS.img_size, FLAGS.img_size)
self.dim_input = np.prod(self.img_size)*3
self.pretrain_class_num = FLAGS.pretrain_class_num
self.pretrain_batch_size = FLAGS.pretrain_batch_size
pretrain_folder = FLAGS.pretrain_folders
pretrain_folders = [os.path.join(pretrain_folder, label) for label in os.listdir(pretrain_folder) if os.path.isdir(os.path.join(pretrain_folder, label))]
self.pretrain_character_folders = pretrain_folders
def make_data_tensor(self):
"""The function to make tensor for the tensorflow model."""
print('Generating pre-training data')
all_filenames_and_labels = []
folders = self.pretrain_character_folders
for idx, path in enumerate(folders):
all_filenames_and_labels += get_pretrain_images(path, idx)
random.shuffle(all_filenames_and_labels)
all_labels = [li[0] for li in all_filenames_and_labels]
all_filenames = [li[1] for li in all_filenames_and_labels]
filename_queue = tf.train.string_input_producer(tf.convert_to_tensor(all_filenames), shuffle=False)
label_queue = tf.train.slice_input_producer([tf.convert_to_tensor(all_labels)], shuffle=False)
image_reader = tf.WholeFileReader()
_, image_file = image_reader.read(filename_queue)
image = tf.image.decode_jpeg(image_file, channels=3)
image.set_shape((self.img_size[0],self.img_size[1],3))
image = tf.reshape(image, [self.dim_input])
image = tf.cast(image, tf.float32) / 255.0
num_preprocess_threads = 1
min_queue_examples = 256
batch_image_size = self.pretrain_batch_size
image_batch, label_batch = tf.train.batch([image, label_queue], batch_size = batch_image_size, num_threads=num_preprocess_threads,capacity=min_queue_examples + 3 * batch_image_size)
label_batch = tf.one_hot(tf.reshape(label_batch, [-1]), self.pretrain_class_num)
return image_batch, label_batch
| 2,850 | 43.546875 | 189 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/utils/misc.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Modified from: https://github.com/cbfinn/maml
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
""" Additional utility functions. """
import numpy as np
import os
import cv2
import random
import tensorflow as tf
from matplotlib.pyplot import imread
from tensorflow.contrib.layers.python import layers as tf_layers
from tensorflow.python.platform import flags
FLAGS = flags.FLAGS
def get_smallest_k_index(input_, k):
"""The function to get the smallest k items' indices.
Args:
input_: the list to be processed.
k: the number of indices to return.
Return:
The index list with k dimensions.
"""
input_copy = np.copy(input_)
k_list = []
for idx in range(k):
this_index = np.argmin(input_copy)
k_list.append(this_index)
input_copy[this_index]=np.max(input_copy)
return k_list
def one_hot(inp):
"""The function to make the input to one-hot vectors.
Arg:
inp: the input numpy array.
Return:
        The reorganized one-hot array.
"""
n_class = inp.max() + 1
n_sample = inp.shape[0]
out = np.zeros((n_sample, n_class))
for idx in range(n_sample):
out[idx, inp[idx]] = 1
return out
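# Illustrative sketch (not part of the original file): what one_hot returns for a small
# label array. The helper name _one_hot_demo is hypothetical.
def _one_hot_demo():
    labels = np.array([0, 2, 1, 2])
    # one_hot infers 3 classes from labels.max() + 1 and returns a (4, 3) array:
    # [[1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1]]
    return one_hot(labels)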
def one_hot_class(inp, n_class):
"""The function to make the input to n-class one-hot vectors.
Args:
inp: the input numpy array.
n_class: the number of classes.
Return:
        The reorganized n-class one-hot array.
"""
n_sample = inp.shape[0]
out = np.zeros((n_sample, n_class))
for idx in range(n_sample):
out[idx, inp[idx]] = 1
return out
def process_batch(input_filename_list, input_label_list, dim_input, batch_sample_num):
"""The function to process a part of an episode.
Args:
input_filename_list: the image files' directory list.
        input_label_list: the image files' corresponding label list.
        dim_input: the dimension number of the images.
        batch_sample_num: the number of samples per class in the input batch.
Returns:
img_array: the numpy array of processed images.
label_array: the numpy array of processed labels.
"""
new_path_list = []
new_label_list = []
for k in range(batch_sample_num):
class_idxs = list(range(0, FLAGS.way_num))
random.shuffle(class_idxs)
for class_idx in class_idxs:
true_idx = class_idx*batch_sample_num + k
new_path_list.append(input_filename_list[true_idx])
new_label_list.append(input_label_list[true_idx])
img_list = []
for filepath in new_path_list:
this_img = imread(filepath)
this_img = np.reshape(this_img, [-1, dim_input])
this_img = this_img / 255.0
img_list.append(this_img)
img_array = np.array(img_list).reshape([FLAGS.way_num*batch_sample_num, dim_input])
label_array = one_hot(np.array(new_label_list)).reshape([FLAGS.way_num*batch_sample_num, -1])
return img_array, label_array
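# Illustrative sketch (not part of the original file): the index reordering performed by
# process_batch. The inputs arrive grouped per class (batch_sample_num files for class 0,
# then class 1, ...); the loop re-interleaves them so that each consecutive group of
# way_num samples contains one image from every class, in a freshly shuffled class order.
# The helper name is hypothetical.
def _episode_index_order_sketch(way_num, batch_sample_num):
    order = []
    for k in range(batch_sample_num):
        class_idxs = list(range(way_num))
        random.shuffle(class_idxs)
        order += [class_idx * batch_sample_num + k for class_idx in class_idxs]
    return order  # e.g. way_num=3, batch_sample_num=2 -> a permutation such as [2, 0, 4, 1, 5, 3]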
def process_batch_augmentation(input_filename_list, input_label_list, dim_input, batch_sample_num):
"""The function to process a part of an episode. All the images will be augmented by flipping.
Args:
input_filename_list: the image files' directory list.
        input_label_list: the image files' corresponding label list.
        dim_input: the dimension number of the images.
        batch_sample_num: the number of samples per class in the input batch.
Returns:
img_array: the numpy array of processed images.
label_array: the numpy array of processed labels.
"""
new_path_list = []
new_label_list = []
for k in range(batch_sample_num):
class_idxs = list(range(0, FLAGS.way_num))
random.shuffle(class_idxs)
for class_idx in class_idxs:
true_idx = class_idx*batch_sample_num + k
new_path_list.append(input_filename_list[true_idx])
new_label_list.append(input_label_list[true_idx])
img_list = []
img_list_h = []
for filepath in new_path_list:
this_img = imread(filepath)
this_img_h = cv2.flip(this_img, 1)
this_img = np.reshape(this_img, [-1, dim_input])
this_img = this_img / 255.0
img_list.append(this_img)
this_img_h = np.reshape(this_img_h, [-1, dim_input])
this_img_h = this_img_h / 255.0
img_list_h.append(this_img_h)
img_list_all = img_list + img_list_h
label_list_all = new_label_list + new_label_list
img_array = np.array(img_list_all).reshape([FLAGS.way_num*batch_sample_num*2, dim_input])
label_array = one_hot(np.array(label_list_all)).reshape([FLAGS.way_num*batch_sample_num*2, -1])
return img_array, label_array
def get_images(paths, labels, nb_samples=None, shuffle=True):
"""The function to get the image files' directories with given class labels.
Args:
paths: the base path for the images.
labels: the class name labels.
nb_samples: the number of samples.
        shuffle: whether to shuffle the generated image list.
Return:
The list for the image files' directories.
"""
if nb_samples is not None:
sampler = lambda x: random.sample(x, nb_samples)
else:
sampler = lambda x: x
images = [(i, os.path.join(path, image)) \
for i, path in zip(labels, paths) \
for image in sampler(os.listdir(path))]
if shuffle:
random.shuffle(images)
return images
def get_pretrain_images(path, label):
"""The function to get the image files' directories for pre-train phase.
Args:
        path: the base path for the images of one class.
        label: the class label for this path.
Return:
The list for the image files' directories.
"""
images = []
for image in os.listdir(path):
images.append((label, os.path.join(path, image)))
return images
def get_images_tc(paths, labels, nb_samples=None, shuffle=True, is_val=False):
"""The function to get the image files' directories with given class labels for pre-train phase.
Args:
paths: the base path for the images.
labels: the class name labels.
nb_samples: the number of samples.
        shuffle: whether to shuffle the generated image list.
is_val: whether the images are for the validation phase during pre-training.
Return:
The list for the image files' directories.
"""
if nb_samples is not None:
sampler = lambda x: random.sample(x, nb_samples)
else:
sampler = lambda x: x
if is_val is False:
images = [(i, os.path.join(path, image)) \
for i, path in zip(labels, paths) \
for image in sampler(os.listdir(path)[0:500])]
else:
images = [(i, os.path.join(path, image)) \
for i, path in zip(labels, paths) \
for image in sampler(os.listdir(path)[500:])]
if shuffle:
random.shuffle(images)
return images
## Network helpers
def leaky_relu(x, leak=0.1):
"""The leaky relu function.
Args:
x: the input feature maps.
leak: the parameter for leaky relu.
Return:
        The feature maps processed by the leaky ReLU non-linearity.
"""
return tf.maximum(x, leak*x)
def resnet_conv_block(inp, cweight, bweight, reuse, scope, activation=leaky_relu):
"""The function to forward a conv layer.
Args:
inp: the input feature maps.
cweight: the filters' weights for this conv layer.
bweight: the biases' weights for this conv layer.
reuse: whether reuse the variables for the batch norm.
scope: the label for this conv layer.
activation: the activation function for this conv layer.
Return:
The processed feature maps.
"""
stride, no_stride = [1,2,2,1], [1,1,1,1]
if FLAGS.activation == 'leaky_relu':
activation = leaky_relu
elif FLAGS.activation == 'relu':
activation = tf.nn.relu
else:
activation = None
conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME') + bweight
normed = normalize(conv_output, activation, reuse, scope)
return normed
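# Illustrative shape sketch (not part of the original file): resnet_conv_block keeps the
# spatial size (stride 1, 'SAME' padding) and only changes the channel count, so a
# [batch, H, W, C_in] input convolved with a [k, k, C_in, C_out] kernel comes out as
# [batch, H, W, C_out]. Assumes FLAGS.activation and FLAGS.norm have been defined by the
# main script, as everywhere else in this module. The helper name is hypothetical.
def _conv_block_shape_sketch():
    inp = tf.zeros([4, 80, 80, 3])
    cweight = tf.zeros([3, 3, 3, 64])
    bweight = tf.zeros([64])
    out = resnet_conv_block(inp, cweight, bweight, reuse=False, scope='shape_sketch')
    return out.get_shape().as_list()  # [4, 80, 80, 64]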
def resnet_nob_conv_block(inp, cweight, reuse, scope):
"""The function to forward a conv layer without biases, normalization and non-liner layer.
Args:
inp: the input feature maps.
cweight: the filters' weights for this conv layer.
reuse: whether reuse the variables for the batch norm.
scope: the label for this conv layer.
Return:
The processed feature maps.
"""
stride, no_stride = [1,2,2,1], [1,1,1,1]
conv_output = tf.nn.conv2d(inp, cweight, no_stride, 'SAME')
return conv_output
def normalize(inp, activation, reuse, scope):
"""The function to forward the normalization.
Args:
inp: the input feature maps.
reuse: whether reuse the variables for the batch norm.
scope: the label for this conv layer.
activation: the activation function for this conv layer.
Return:
The processed feature maps.
"""
if FLAGS.norm == 'batch_norm':
return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
elif FLAGS.norm == 'layer_norm':
return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
elif FLAGS.norm == 'None':
if activation is not None:
return activation(inp)
return inp
else:
raise ValueError('Please set correct normalization.')
## Loss functions
def mse(pred, label):
"""The MSE loss function.
Args:
pred: the predictions.
label: the ground truth labels.
Return:
The Loss.
"""
pred = tf.reshape(pred, [-1])
label = tf.reshape(label, [-1])
return tf.reduce_mean(tf.square(pred-label))
def softmaxloss(pred, label):
"""The softmax cross entropy loss function.
Args:
pred: the predictions.
label: the ground truth labels.
Return:
The Loss.
"""
return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label))
def xent(pred, label):
"""The softmax cross entropy loss function. The losses will be normalized by the shot number.
Args:
pred: the predictions.
label: the ground truth labels.
Return:
The Loss.
Note: with tf version <=0.12, this loss has incorrect 2nd derivatives
"""
return tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label) / FLAGS.shot_num
| 10,805 | 33.634615 | 100 | py |
meta-transfer-learning | meta-transfer-learning-main/tensorflow/utils/__init__.py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Yaoyao Liu
## Tianjin University
## [email protected]
## Copyright (c) 2019
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
| 380 | 37.1 | 75 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tf_dataset_tutorial.py | import tensorflow as tf
import numpy as np
from sklearn.datasets import load_digits
def simple_dataset_with_error():
x = np.arange(0, 10)
# create dataset object from the numpy array
dx = tf.data.Dataset.from_tensor_slices(x)
# create a one-shot iterator
iterator = dx.make_one_shot_iterator()
# extract an element
next_element = iterator.get_next()
with tf.Session() as sess:
for i in range(11):
val = sess.run(next_element)
print(val)
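# Illustrative variant (not from the original post): the loop above deliberately overruns
# the 10-element dataset, so the 11th sess.run raises tf.errors.OutOfRangeError. A common
# pattern is to catch that exception to detect the end of the data. The function name is
# hypothetical.
def simple_dataset_catch_end():
    x = np.arange(0, 10)
    dx = tf.data.Dataset.from_tensor_slices(x)
    next_element = dx.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        while True:
            try:
                print(sess.run(next_element))
            except tf.errors.OutOfRangeError:
                break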
def simple_dataset_initializer():
x = np.arange(0, 10)
dx = tf.data.Dataset.from_tensor_slices(x)
# create an initializable iterator
iterator = dx.make_initializable_iterator()
# extract an element
next_element = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
for i in range(15):
val = sess.run(next_element)
print(val)
if i % 9 == 0 and i > 0:
sess.run(iterator.initializer)
def simple_dataset_batch():
x = np.arange(0, 10)
dx = tf.data.Dataset.from_tensor_slices(x).batch(3)
    # create an initializable iterator
iterator = dx.make_initializable_iterator()
# extract an element
next_element = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
for i in range(15):
val = sess.run(next_element)
print(val)
if (i + 1) % (10 // 3) == 0 and i > 0:
sess.run(iterator.initializer)
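# Illustrative alternative (not from the original post): instead of manually re-running
# iterator.initializer once the batched dataset is exhausted, the pipeline can be made to
# loop forever by chaining repeat(), which lets a plain one-shot iterator be used. The
# function name is hypothetical.
def simple_dataset_batch_repeat():
    x = np.arange(0, 10)
    dx = tf.data.Dataset.from_tensor_slices(x).batch(3).repeat()
    next_element = dx.make_one_shot_iterator().get_next()
    with tf.Session() as sess:
        for i in range(15):
            print(sess.run(next_element))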
def simple_zip_example():
x = np.arange(0, 10)
y = np.arange(1, 11)
# create dataset objects from the arrays
dx = tf.data.Dataset.from_tensor_slices(x)
dy = tf.data.Dataset.from_tensor_slices(y)
# zip the two datasets together
dcomb = tf.data.Dataset.zip((dx, dy)).batch(3)
iterator = dcomb.make_initializable_iterator()
# extract an element
next_element = iterator.get_next()
with tf.Session() as sess:
sess.run(iterator.initializer)
for i in range(15):
val = sess.run(next_element)
print(val)
if (i + 1) % (10 // 3) == 0 and i > 0:
sess.run(iterator.initializer)
def MNIST_dataset_example():
# load the data
digits = load_digits(return_X_y=True)
# split into train and validation sets
train_images = digits[0][:int(len(digits[0]) * 0.8)]
train_labels = digits[1][:int(len(digits[0]) * 0.8)]
valid_images = digits[0][int(len(digits[0]) * 0.8):]
valid_labels = digits[1][int(len(digits[0]) * 0.8):]
# create the training datasets
dx_train = tf.data.Dataset.from_tensor_slices(train_images)
# apply a one-hot transformation to each label for use in the neural network
dy_train = tf.data.Dataset.from_tensor_slices(train_labels).map(lambda z: tf.one_hot(z, 10))
# zip the x and y training data together and shuffle, batch etc.
train_dataset = tf.data.Dataset.zip((dx_train, dy_train)).shuffle(500).repeat().batch(30)
# do the same operations for the validation set
dx_valid = tf.data.Dataset.from_tensor_slices(valid_images)
dy_valid = tf.data.Dataset.from_tensor_slices(valid_labels).map(lambda z: tf.one_hot(z, 10))
valid_dataset = tf.data.Dataset.zip((dx_valid, dy_valid)).shuffle(500).repeat().batch(30)
# create general iterator
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
train_dataset.output_shapes)
next_element = iterator.get_next()
# make datasets that we can initialize separately, but using the same structure via the common iterator
training_init_op = iterator.make_initializer(train_dataset)
validation_init_op = iterator.make_initializer(valid_dataset)
# create the neural network model
logits = nn_model(next_element[0])
# add the optimizer and loss
loss = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(labels=next_element[1], logits=logits))
optimizer = tf.train.AdamOptimizer().minimize(loss)
# get accuracy
prediction = tf.argmax(logits, 1)
equality = tf.equal(prediction, tf.argmax(next_element[1], 1))
accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))
init_op = tf.global_variables_initializer()
# run the training
epochs = 600
with tf.Session() as sess:
sess.run(init_op)
sess.run(training_init_op)
for i in range(epochs):
l, _, acc = sess.run([loss, optimizer, accuracy])
if i % 50 == 0:
print("Epoch: {}, loss: {:.3f}, training accuracy: {:.2f}%".format(i, l, acc * 100))
# now setup the validation run
valid_iters = 100
# re-initialize the iterator, but this time with validation data
sess.run(validation_init_op)
avg_acc = 0
for i in range(valid_iters):
acc = sess.run([accuracy])
avg_acc += acc[0]
print("Average validation set accuracy over {} iterations is {:.2f}%".format(valid_iters,
(avg_acc / valid_iters) * 100))
def nn_model(in_data):
bn = tf.layers.batch_normalization(in_data)
fc1 = tf.layers.dense(bn, 50)
fc2 = tf.layers.dense(fc1, 50)
fc2 = tf.layers.dropout(fc2)
fc3 = tf.layers.dense(fc2, 10)
return fc3
if __name__ == "__main__":
# simple_dataset_with_error()
# simple_dataset_initializer()
# simple_dataset_batch()
# simple_zip_example()
MNIST_dataset_example() | 5,550 | 39.816176 | 116 | py |
adventures-in-ml-code | adventures-in-ml-code-master/lstm_tutorial.py | import tensorflow as tf
import numpy as np
import collections
import os
import argparse
import datetime as dt
"""To run this code, you'll need to first download and extract the text dataset
from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. Change the
data_path variable below to your local extraction path"""
data_path = "C:\\Users\Andy\Documents\simple-examples\data"
parser = argparse.ArgumentParser()
parser.add_argument('run_opt', type=int, nargs='?', default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
def read_words(filename):
with tf.gfile.GFile(filename, "rb") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def build_vocab(filename):
data = read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
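# Illustrative sketch (not part of the original tutorial): what build_vocab produces for a
# toy corpus. Words are ranked by frequency (ties broken alphabetically), so the most
# frequent word gets id 0. The helper name is hypothetical.
def _toy_vocab_sketch():
    words = "the cat sat on the mat <eos>".split()
    counter = collections.Counter(words)
    count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
    vocab, _ = list(zip(*count_pairs))
    return dict(zip(vocab, range(len(vocab))))  # {'the': 0, '<eos>': 1, 'cat': 2, ...}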
def file_to_word_ids(filename, word_to_id):
data = read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def load_data():
# get the data paths
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
# build the complete vocabulary, then convert text data to list of integers
word_to_id = build_vocab(train_path)
train_data = file_to_word_ids(train_path, word_to_id)
valid_data = file_to_word_ids(valid_path, word_to_id)
test_data = file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
print(train_data[:5])
print(word_to_id)
print(vocabulary)
print(" ".join([reversed_dictionary[x] for x in train_data[:10]]))
return train_data, valid_data, test_data, vocabulary, reversed_dictionary
def batch_producer(raw_data, batch_size, num_steps):
raw_data = tf.convert_to_tensor(raw_data, name="raw_data", dtype=tf.int32)
data_len = tf.size(raw_data)
batch_len = data_len // batch_size
data = tf.reshape(raw_data[0: batch_size * batch_len],
[batch_size, batch_len])
epoch_size = (batch_len - 1) // num_steps
i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()
x = data[:, i * num_steps:(i + 1) * num_steps]
x.set_shape([batch_size, num_steps])
y = data[:, i * num_steps + 1: (i + 1) * num_steps + 1]
y.set_shape([batch_size, num_steps])
return x, y
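# Illustrative sketch (not part of the original tutorial): the windowing done by
# batch_producer, reproduced with plain numpy so it is easy to inspect. With batch_size=2
# and num_steps=3 on the integers 0..11, the data matrix is [[0..5], [6..11]] and the
# first (x, y) pair is x=[[0,1,2],[6,7,8]], y=[[1,2,3],[7,8,9]]: y is simply x shifted one
# step ahead. The helper name is hypothetical.
def _batch_producer_numpy_sketch(raw_data, batch_size, num_steps):
    raw_data = np.asarray(raw_data)
    batch_len = len(raw_data) // batch_size
    data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)
    for i in range((batch_len - 1) // num_steps):
        x = data[:, i * num_steps:(i + 1) * num_steps]
        y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]
        yield x, y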
class Input(object):
def __init__(self, batch_size, num_steps, data):
self.batch_size = batch_size
self.num_steps = num_steps
self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
self.input_data, self.targets = batch_producer(data, batch_size, num_steps)
# create the main model
class Model(object):
def __init__(self, input, is_training, hidden_size, vocab_size, num_layers,
dropout=0.5, init_scale=0.05):
self.is_training = is_training
self.input_obj = input
self.batch_size = input.batch_size
self.num_steps = input.num_steps
self.hidden_size = hidden_size
# create the word embeddings
with tf.device("/cpu:0"):
embedding = tf.Variable(tf.random_uniform([vocab_size, self.hidden_size], -init_scale, init_scale))
inputs = tf.nn.embedding_lookup(embedding, self.input_obj.input_data)
if is_training and dropout < 1:
inputs = tf.nn.dropout(inputs, dropout)
# set up the state storage / extraction
self.init_state = tf.placeholder(tf.float32, [num_layers, 2, self.batch_size, self.hidden_size])
state_per_layer_list = tf.unstack(self.init_state, axis=0)
rnn_tuple_state = tuple(
[tf.contrib.rnn.LSTMStateTuple(state_per_layer_list[idx][0], state_per_layer_list[idx][1])
for idx in range(num_layers)]
)
# create an LSTM cell to be unrolled
cell = tf.contrib.rnn.LSTMCell(hidden_size, forget_bias=1.0)
# add a dropout wrapper if training
if is_training and dropout < 1:
cell = tf.contrib.rnn.DropoutWrapper(cell, output_keep_prob=dropout)
if num_layers > 1:
cell = tf.contrib.rnn.MultiRNNCell([cell for _ in range(num_layers)], state_is_tuple=True)
output, self.state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32, initial_state=rnn_tuple_state)
# reshape to (batch_size * num_steps, hidden_size)
output = tf.reshape(output, [-1, hidden_size])
softmax_w = tf.Variable(tf.random_uniform([hidden_size, vocab_size], -init_scale, init_scale))
softmax_b = tf.Variable(tf.random_uniform([vocab_size], -init_scale, init_scale))
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
# Reshape logits to be a 3-D tensor for sequence loss
logits = tf.reshape(logits, [self.batch_size, self.num_steps, vocab_size])
# Use the contrib sequence loss and average over the batches
loss = tf.contrib.seq2seq.sequence_loss(
logits,
self.input_obj.targets,
tf.ones([self.batch_size, self.num_steps], dtype=tf.float32),
average_across_timesteps=False,
average_across_batch=True)
# Update the cost
self.cost = tf.reduce_sum(loss)
# get the prediction accuracy
self.softmax_out = tf.nn.softmax(tf.reshape(logits, [-1, vocab_size]))
self.predict = tf.cast(tf.argmax(self.softmax_out, axis=1), tf.int32)
correct_prediction = tf.equal(self.predict, tf.reshape(self.input_obj.targets, [-1]))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
if not is_training:
return
self.learning_rate = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars), 5)
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
# optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.contrib.framework.get_or_create_global_step())
# self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.cost)
self.new_lr = tf.placeholder(tf.float32, shape=[])
self.lr_update = tf.assign(self.learning_rate, self.new_lr)
def assign_lr(self, session, lr_value):
session.run(self.lr_update, feed_dict={self.new_lr: lr_value})
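# training loop: run num_epochs passes over the data, decaying the learning rate by lr_decay per epoch after the first max_lr_epoch epochs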
def train(train_data, vocabulary, num_layers, num_epochs, batch_size, model_save_name,
learning_rate=1.0, max_lr_epoch=10, lr_decay=0.93, print_iter=50):
# setup data and models
training_input = Input(batch_size=batch_size, num_steps=35, data=train_data)
m = Model(training_input, is_training=True, hidden_size=650, vocab_size=vocabulary,
num_layers=num_layers)
init_op = tf.global_variables_initializer()
orig_decay = lr_decay
with tf.Session() as sess:
# start threads
sess.run([init_op])
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
saver = tf.train.Saver()
for epoch in range(num_epochs):
new_lr_decay = orig_decay ** max(epoch + 1 - max_lr_epoch, 0.0)
m.assign_lr(sess, learning_rate * new_lr_decay)
# m.assign_lr(sess, learning_rate)
# print(m.learning_rate.eval(), new_lr_decay)
current_state = np.zeros((num_layers, 2, batch_size, m.hidden_size))
curr_time = dt.datetime.now()
for step in range(training_input.epoch_size):
# cost, _ = sess.run([m.cost, m.optimizer])
if step % print_iter != 0:
cost, _, current_state = sess.run([m.cost, m.train_op, m.state],
feed_dict={m.init_state: current_state})
else:
seconds = (float((dt.datetime.now() - curr_time).seconds) / print_iter)
curr_time = dt.datetime.now()
cost, _, current_state, acc = sess.run([m.cost, m.train_op, m.state, m.accuracy],
feed_dict={m.init_state: current_state})
print("Epoch {}, Step {}, cost: {:.3f}, accuracy: {:.3f}, Seconds per step: {:.3f}".format(epoch,
step, cost, acc, seconds))
# save a model checkpoint
saver.save(sess, data_path + '\\' + model_save_name, global_step=epoch)
# do a final save
saver.save(sess, data_path + '\\' + model_save_name + '-final')
# close threads
coord.request_stop()
coord.join(threads)
def test(model_path, test_data, reversed_dictionary):
test_input = Input(batch_size=20, num_steps=35, data=test_data)
m = Model(test_input, is_training=False, hidden_size=650, vocab_size=vocabulary,
num_layers=2)
saver = tf.train.Saver()
with tf.Session() as sess:
# start threads
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
current_state = np.zeros((2, 2, m.batch_size, m.hidden_size))
# restore the trained model
saver.restore(sess, model_path)
# get an average accuracy over num_acc_batches
num_acc_batches = 30
check_batch_idx = 25
acc_check_thresh = 5
accuracy = 0
for batch in range(num_acc_batches):
if batch == check_batch_idx:
true_vals, pred, current_state, acc = sess.run([m.input_obj.targets, m.predict, m.state, m.accuracy],
feed_dict={m.init_state: current_state})
pred_string = [reversed_dictionary[x] for x in pred[:m.num_steps]]
true_vals_string = [reversed_dictionary[x] for x in true_vals[0]]
print("True values (1st line) vs predicted values (2nd line):")
print(" ".join(true_vals_string))
print(" ".join(pred_string))
else:
acc, current_state = sess.run([m.accuracy, m.state], feed_dict={m.init_state: current_state})
if batch >= acc_check_thresh:
accuracy += acc
print("Average accuracy: {:.3f}".format(accuracy / (num_acc_batches-acc_check_thresh)))
# close threads
coord.request_stop()
coord.join(threads)
if args.data_path:
data_path = args.data_path
train_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()
if args.run_opt == 1:
train(train_data, vocabulary, num_layers=2, num_epochs=60, batch_size=20,
model_save_name='two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr')
else:
trained_model = args.data_path + "\\two-layer-lstm-medium-config-60-epoch-0p93-lr-decay-10-max-lr-38"
test(trained_model, test_data, reversed_dictionary)
| 11,368 | 42.895753 | 117 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_word2vec.py | from keras.models import Model
from keras.layers import Input, Dense, Reshape, merge
from keras.layers.embeddings import Embedding
from keras.preprocessing.sequence import skipgrams
from keras.preprocessing import sequence
import urllib.request
import collections
import os
import zipfile
import numpy as np
import tensorflow as tf
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
def collect_data(vocabulary_size=10000):
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
vocabulary = read_data(filename)
print(vocabulary[:7])
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
return data, count, dictionary, reverse_dictionary
vocab_size = 10000
data, count, dictionary, reverse_dictionary = collect_data(vocabulary_size=vocab_size)
print(data[:7])
window_size = 3
vector_dim = 300
epochs = 200000
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
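# build skip-gram (target, context) training pairs with negative samples; the sampling table down-weights very frequent words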
sampling_table = sequence.make_sampling_table(vocab_size)
couples, labels = skipgrams(data, vocab_size, window_size=window_size, sampling_table=sampling_table)
word_target, word_context = zip(*couples)
word_target = np.array(word_target, dtype="int32")
word_context = np.array(word_context, dtype="int32")
print(couples[:10], labels[:10])
# create some input variables
input_target = Input((1,))
input_context = Input((1,))
embedding = Embedding(vocab_size, vector_dim, input_length=1, name='embedding')
target = embedding(input_target)
target = Reshape((vector_dim, 1))(target)
context = embedding(input_context)
context = Reshape((vector_dim, 1))(context)
# setup a cosine similarity operation which will be output in a secondary model
similarity = merge([target, context], mode='cos', dot_axes=0)
# now perform the dot product operation to get a similarity measure
dot_product = merge([target, context], mode='dot', dot_axes=1)
dot_product = Reshape((1,))(dot_product)
# add the sigmoid output layer
output = Dense(1, activation='sigmoid')(dot_product)
# create the primary training model
model = Model(input=[input_target, input_context], output=output)
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
# create a secondary validation model to run our similarity checks during training
validation_model = Model(input=[input_target, input_context], output=similarity)
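# callback that prints the eight nearest words (by cosine similarity) to each validation word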
class SimilarityCallback:
def run_sim(self):
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
sim = self._get_sim(valid_examples[i])
nearest = (-sim).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
@staticmethod
def _get_sim(valid_word_idx):
sim = np.zeros((vocab_size,))
in_arr1 = np.zeros((1,))
in_arr2 = np.zeros((1,))
in_arr1[0,] = valid_word_idx
for i in range(vocab_size):
in_arr2[0,] = i
out = validation_model.predict_on_batch([in_arr1, in_arr2])
sim[i] = out
return sim
sim_cb = SimilarityCallback()
arr_1 = np.zeros((1,))
arr_2 = np.zeros((1,))
arr_3 = np.zeros((1,))
for cnt in range(epochs):
idx = np.random.randint(0, len(labels)-1)
arr_1[0,] = word_target[idx]
arr_2[0,] = word_context[idx]
arr_3[0,] = labels[idx]
loss = model.train_on_batch([arr_1, arr_2], arr_3)
if cnt % 100 == 0:
print("Iteration {}, loss={}".format(cnt, loss))
if cnt % 10000 == 0:
sim_cb.run_sim()
| 5,397 | 34.513158 | 101 | py |
adventures-in-ml-code | adventures-in-ml-code-master/convolutional_neural_network_tutorial.py | import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
def run_cnn():
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Python optimisation variables
learning_rate = 0.0001
epochs = 10
batch_size = 50
# declare the training data placeholders
# input x - for 28 x 28 pixels = 784 - this is the flattened image data that is drawn from mnist.train.nextbatch()
x = tf.placeholder(tf.float32, [None, 784])
# reshape the input data so that it is a 4D tensor. The first value (-1) tells function to dynamically shape that
# dimension based on the amount of data passed to it. The two middle dimensions are set to the image size (i.e. 28
# x 28). The final dimension is 1 as there is only a single colour channel i.e. grayscale. If this was RGB, this
# dimension would be 3
x_shaped = tf.reshape(x, [-1, 28, 28, 1])
# now declare the output data placeholder - 10 digits
y = tf.placeholder(tf.float32, [None, 10])
# create some convolutional layers
layer1 = create_new_conv_layer(x_shaped, 1, 32, [5, 5], [2, 2], name='layer1')
layer2 = create_new_conv_layer(layer1, 32, 64, [5, 5], [2, 2], name='layer2')
# flatten the output ready for the fully connected output stage - after two layers of stride 2 pooling, we go
# from 28 x 28, to 14 x 14 to 7 x 7 x,y co-ordinates, but with 64 output channels. To create the fully connected,
# "dense" layer, the new shape needs to be [-1, 7 x 7 x 64]
flattened = tf.reshape(layer2, [-1, 7 * 7 * 64])
# setup some weights and bias values for this layer, then activate with ReLU
wd1 = tf.Variable(tf.truncated_normal([7 * 7 * 64, 1000], stddev=0.03), name='wd1')
bd1 = tf.Variable(tf.truncated_normal([1000], stddev=0.01), name='bd1')
dense_layer1 = tf.matmul(flattened, wd1) + bd1
dense_layer1 = tf.nn.relu(dense_layer1)
# another layer with softmax activations
wd2 = tf.Variable(tf.truncated_normal([1000, 10], stddev=0.03), name='wd2')
bd2 = tf.Variable(tf.truncated_normal([10], stddev=0.01), name='bd2')
dense_layer2 = tf.matmul(dense_layer1, wd2) + bd2
y_ = tf.nn.softmax(dense_layer2)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=dense_layer2, labels=y))
# add an optimiser
optimiser = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# define an accuracy assessment operation
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# setup the initialisation operator
init_op = tf.global_variables_initializer()
# setup recording variables
# add a summary to store the accuracy
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('C:\\Users\\Andy\\PycharmProjects')
with tf.Session() as sess:
# initialise the variables
sess.run(init_op)
total_batch = int(len(mnist.train.labels) / batch_size)
for epoch in range(epochs):
avg_cost = 0
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size)
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y})
avg_cost += c / total_batch
test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
print("Epoch:", (epoch + 1), "cost =", "{:.3f}".format(avg_cost), " test accuracy: {:.3f}".format(test_acc))
summary = sess.run(merged, feed_dict={x: mnist.test.images, y: mnist.test.labels})
writer.add_summary(summary, epoch)
print("\nTraining complete!")
writer.add_graph(sess.graph)
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
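# helper that builds a convolution + ReLU + max-pooling layer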
def create_new_conv_layer(input_data, num_input_channels, num_filters, filter_shape, pool_shape, name):
# setup the filter input shape for tf.nn.conv_2d
conv_filt_shape = [filter_shape[0], filter_shape[1], num_input_channels, num_filters]
# initialise weights and bias for the filter
weights = tf.Variable(tf.truncated_normal(conv_filt_shape, stddev=0.03), name=name+'_W')
bias = tf.Variable(tf.truncated_normal([num_filters]), name=name+'_b')
# setup the convolutional layer operation
out_layer = tf.nn.conv2d(input_data, weights, [1, 1, 1, 1], padding='SAME')
# add the bias
out_layer += bias
# apply a ReLU non-linear activation
out_layer = tf.nn.relu(out_layer)
# now perform max pooling
# ksize is the argument which defines the size of the max pooling window (i.e. the area over which the maximum is
# calculated). It must be 4D to match the convolution - in this case, for each image we want to use a 2 x 2 area
# applied to each channel
ksize = [1, pool_shape[0], pool_shape[1], 1]
# strides defines how the max pooling area moves through the image - a stride of 2 in the x direction will lead to
# max pooling areas starting at x=0, x=2, x=4 etc. through your image. If the stride is 1, we will get max pooling
# overlapping previous max pooling areas (and no reduction in the number of parameters). In this case, we want
# to do strides of 2 in the x and y directions.
strides = [1, 2, 2, 1]
out_layer = tf.nn.max_pool(out_layer, ksize=ksize, strides=strides, padding='SAME')
return out_layer
if __name__ == "__main__":
run_cnn()
| 5,605 | 46.508475 | 120 | py |
adventures-in-ml-code | adventures-in-ml-code-master/dueling_q_tf2_atari.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
STORE_PATH = 'C:\\Users\\Andy\\TensorFlowBook\\TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.1
EPSILON_MIN_ITER = 500000
GAMMA = 0.99
BATCH_SIZE = 32
TAU = 0.08
POST_PROCESS_IMAGE_SIZE = (105, 80, 1)
DELAY_TRAINING = 50000
NUM_FRAMES = 4
GIF_RECORDING_FREQ = 100
env = gym.make("SpaceInvaders-v0")
num_actions = env.action_space.n
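# dueling Q-network: two convolutional layers feed an advantage stream, plus (when dueling=True) a value stream recombined into Q values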
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
self.flatten = keras.layers.Flatten()
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.conv1(input)
x = self.conv2(x)
x = self.flatten(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
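# circular replay buffer that stores single frames; full states are rebuilt by stacking NUM_FRAMES consecutive frames at sample time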
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._actions = np.zeros(max_memory, dtype=np.int32)
self._rewards = np.zeros(max_memory, dtype=np.float32)
self._frames = np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], max_memory), dtype=np.float32)
self._terminal = np.zeros(max_memory, dtype=np.bool)
self._i = 0
def add_sample(self, frame, action, reward, terminal):
self._actions[self._i] = action
self._rewards[self._i] = reward
self._frames[:, :, self._i] = frame[:, :, 0]
self._terminal[self._i] = terminal
if self._i % (self._max_memory - 1) == 0 and self._i != 0:
self._i = BATCH_SIZE + NUM_FRAMES + 1
else:
self._i += 1
def sample(self):
if self._i < BATCH_SIZE + NUM_FRAMES + 1:
raise ValueError("Not enough memory to extract a batch")
else:
rand_idxs = np.random.randint(NUM_FRAMES + 1, self._i, size=BATCH_SIZE)
states = np.zeros((BATCH_SIZE, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
next_states = np.zeros((BATCH_SIZE, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
for i, idx in enumerate(rand_idxs):
states[i] = self._frames[:, :, idx - 1 - NUM_FRAMES:idx - 1]
next_states[i] = self._frames[:, :, idx - NUM_FRAMES:idx]
return states, self._actions[rand_idxs], self._rewards[rand_idxs], next_states, self._terminal[rand_idxs]
memory = Memory(500000)
# memory = Memory(100)
def image_preprocess(image, new_size=(105, 80)):
# convert to greyscale, resize and normalize the image
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize(image, new_size)
image = image / 255
return image
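# epsilon-greedy action selection; purely random actions until training begins (step < DELAY_TRAINING)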
def choose_action(state, primary_network, eps, step):
if step < DELAY_TRAINING:
return random.randint(0, num_actions - 1)
else:
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()))
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
def process_state_stack(state_stack, state):
for i in range(1, state_stack.shape[-1]):
state_stack[:, :, i - 1].assign(state_stack[:, :, i])
state_stack[:, :, -1].assign(state[:, :, 0])
return state_stack
def record_gif(frame_list, episode, fps=50):
imageio.mimsave(STORE_PATH + f"/SPACE_INVADERS_EPISODE-{episode}.gif", frame_list, fps=fps) #duration=duration_per_frame)
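# sample a batch from the replay memory and fit the primary network to the (double) Q-learning targets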
def train(primary_network, memory, target_network=None):
states, actions, rewards, next_states, terminal = memory.sample()
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
valid_idxs = terminal != True
batch_idxs = np.arange(BATCH_SIZE)
if target_network is None:
updates[valid_idxs] += GAMMA * np.amax(prim_qtp1.numpy()[valid_idxs, :], axis=1)
else:
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
q_from_target = target_network(next_states)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
target_q[batch_idxs, actions] = updates
loss = primary_network.train_on_batch(states, target_q)
return loss
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DuelingQSI_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
double_q = True
steps = 0
for i in range(num_episodes):
state = env.reset()
state = image_preprocess(state)
state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1],
NUM_FRAMES)))
cnt = 1
avg_loss = 0
tot_reward = 0
if i % GIF_RECORDING_FREQ == 0:
frame_list = []
while True:
if render:
env.render()
action = choose_action(state_stack, primary_network, eps, steps)
next_state, reward, done, info = env.step(action)
tot_reward += reward
if i % GIF_RECORDING_FREQ == 0:
frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
next_state = image_preprocess(next_state)
state_stack = process_state_stack(state_stack, next_state)
# store in memory
memory.add_sample(next_state, action, reward, done)
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network if double_q else None)
update_network(primary_network, target_network)
else:
loss = -1
avg_loss += loss
# linearly decay the eps value
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {tot_reward}, avg loss: {avg_loss:.5f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print(f"Pre-training...Episode: {i}")
if i % GIF_RECORDING_FREQ == 0:
record_gif(frame_list, i)
break
cnt += 1 | 8,874 | 39.711009 | 125 | py |
adventures-in-ml-code | adventures-in-ml-code-master/policy_gradient_reinforce_tf2.py | import gym
import tensorflow as tf
from tensorflow import keras
import numpy as np
import datetime as dt
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard/PolicyGradientCartPole'
GAMMA = 0.95
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions, activation='softmax')
])
network.compile(loss='categorical_crossentropy',optimizer=keras.optimizers.Adam())
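# sample an action from the policy network's softmax output for the given state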
def get_action(network, state, num_actions):
softmax_out = network(state.reshape((1, -1)))
selected_action = np.random.choice(num_actions, p=softmax_out.numpy()[0])
return selected_action
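# discount and standardise the episode's rewards, then update the policy network on the episode's states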
def update_network(network, rewards, states):
reward_sum = 0
discounted_rewards = []
for reward in rewards[::-1]: # reverse buffer r
reward_sum = reward + GAMMA * reward_sum
discounted_rewards.append(reward_sum)
discounted_rewards.reverse()
discounted_rewards = np.array(discounted_rewards)
# standardise the rewards
discounted_rewards -= np.mean(discounted_rewards)
discounted_rewards /= np.std(discounted_rewards)
states = np.vstack(states)
loss = network.train_on_batch(states, discounted_rewards)
return loss
num_episodes = 10000000
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/PGCartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
for episode in range(num_episodes):
state = env.reset()
rewards = []
states = []
actions = []
while True:
action = get_action(network, state, num_actions)
new_state, reward, done, _ = env.step(action)
states.append(state)
rewards.append(reward)
actions.append(action)
if done:
loss = update_network(network, rewards, states)
tot_reward = sum(rewards)
print(f"Episode: {episode}, Reward: {tot_reward}, avg loss: {loss:.5f}")
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=episode)
tf.summary.scalar('avg loss', loss, step=episode)
break
state = new_state | 2,344 | 34 | 116 | py |
adventures-in-ml-code | adventures-in-ml-code-master/r_learning_tensorflow.py | import gym
import numpy as np
import tensorflow as tf
import matplotlib.pylab as plt
import random
import math
MAX_EPSILON = 1
MIN_EPSILON = 0.01
LAMBDA = 0.0001
GAMMA = 0.99
BATCH_SIZE = 50
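# Q-network with two 50-unit hidden ReLU layers, trained with a mean-squared-error loss via Adam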
class Model:
def __init__(self, num_states, num_actions, batch_size):
self._num_states = num_states
self._num_actions = num_actions
self._batch_size = batch_size
# define the placeholders
self._states = None
self._actions = None
# the output operations
self._logits = None
self._optimizer = None
self._var_init = None
# now setup the model
self._define_model()
def _define_model(self):
self._states = tf.placeholder(shape=[None, self._num_states], dtype=tf.float32)
self._q_s_a = tf.placeholder(shape=[None, self._num_actions], dtype=tf.float32)
# create a couple of fully connected hidden layers
fc1 = tf.layers.dense(self._states, 50, activation=tf.nn.relu)
fc2 = tf.layers.dense(fc1, 50, activation=tf.nn.relu)
self._logits = tf.layers.dense(fc2, self._num_actions)
loss = tf.losses.mean_squared_error(self._q_s_a, self._logits)
self._optimizer = tf.train.AdamOptimizer().minimize(loss)
self._var_init = tf.global_variables_initializer()
def predict_one(self, state, sess):
return sess.run(self._logits, feed_dict={self._states:
state.reshape(1, self.num_states)})
def predict_batch(self, states, sess):
return sess.run(self._logits, feed_dict={self._states: states})
def train_batch(self, sess, x_batch, y_batch):
sess.run(self._optimizer, feed_dict={self._states: x_batch, self._q_s_a: y_batch})
@property
def num_states(self):
return self._num_states
@property
def num_actions(self):
return self._num_actions
@property
def batch_size(self):
return self._batch_size
@property
def var_init(self):
return self._var_init
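# fixed-size memory of (state, action, reward, next_state) samples with uniform random sampling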
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
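# runs episodes against the environment: epsilon-greedy exploration, reward shaping for MountainCar, experience storage and replay training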
class GameRunner:
def __init__(self, sess, model, env, memory, max_eps, min_eps,
decay, render=True):
self._sess = sess
self._env = env
self._model = model
self._memory = memory
self._render = render
self._max_eps = max_eps
self._min_eps = min_eps
self._decay = decay
self._eps = self._max_eps
self._steps = 0
self._reward_store = []
self._max_x_store = []
def run(self):
state = self._env.reset()
tot_reward = 0
max_x = -100
while True:
if self._render:
self._env.render()
action = self._choose_action(state)
next_state, reward, done, info = self._env.step(action)
# shaped reward: check the largest position threshold first so the bigger bonuses are reachable
if next_state[0] >= 0.5:
reward += 100
elif next_state[0] >= 0.25:
reward += 20
elif next_state[0] >= 0.1:
reward += 10
if next_state[0] > max_x:
max_x = next_state[0]
# is the game complete? If so, set the next state to
# None for storage sake
if done:
next_state = None
self._memory.add_sample((state, action, reward, next_state))
self._replay()
# exponentially decay the eps value
self._steps += 1
self._eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) \
* math.exp(-LAMBDA * self._steps)
# move the agent to the next state and accumulate the reward
state = next_state
tot_reward += reward
# if the game is done, break the loop
if done:
self._reward_store.append(tot_reward)
self._max_x_store.append(max_x)
break
print("Step {}, Total reward: {}, Eps: {}".format(self._steps, tot_reward, self._eps))
def _choose_action(self, state):
if random.random() < self._eps:
return random.randint(0, self._model.num_actions - 1)
else:
return np.argmax(self._model.predict_one(state, self._sess))
def _replay(self):
batch = self._memory.sample(self._model.batch_size)
states = np.array([val[0] for val in batch])
next_states = np.array([(np.zeros(self._model.num_states)
if val[3] is None else val[3]) for val in batch])
# predict Q(s,a) given the batch of states
q_s_a = self._model.predict_batch(states, self._sess)
# predict Q(s',a') - so that we can do gamma * max(Q(s'a')) below
q_s_a_d = self._model.predict_batch(next_states, self._sess)
# setup training arrays
x = np.zeros((len(batch), self._model.num_states))
y = np.zeros((len(batch), self._model.num_actions))
for i, b in enumerate(batch):
state, action, reward, next_state = b[0], b[1], b[2], b[3]
# get the current q values for all actions in state
current_q = q_s_a[i]
# update the q value for action
if next_state is None:
# in this case, the game completed after action, so there is no max Q(s',a')
# prediction possible
current_q[action] = reward
else:
current_q[action] = reward + GAMMA * np.amax(q_s_a_d[i])
x[i] = state
y[i] = current_q
self._model.train_batch(self._sess, x, y)
@property
def reward_store(self):
return self._reward_store
@property
def max_x_store(self):
return self._max_x_store
if __name__ == "__main__":
env_name = 'MountainCar-v0'
env = gym.make(env_name)
num_states = env.env.observation_space.shape[0]
num_actions = env.env.action_space.n
model = Model(num_states, num_actions, BATCH_SIZE)
mem = Memory(50000)
with tf.Session() as sess:
sess.run(model.var_init)
gr = GameRunner(sess, model, env, mem, MAX_EPSILON, MIN_EPSILON,
LAMBDA)
num_episodes = 300
cnt = 0
while cnt < num_episodes:
if cnt % 10 == 0:
print('Episode {} of {}'.format(cnt+1, num_episodes))
gr.run()
cnt += 1
plt.plot(gr.reward_store)
plt.show()
plt.close("all")
plt.plot(gr.max_x_store)
plt.show()
| 7,025 | 31.37788 | 94 | py |
adventures-in-ml-code | adventures-in-ml-code-master/sum_tree_intro.py | import numpy as np
class Node:
def __init__(self, left, right, is_leaf: bool = False, idx = None):
self.left = left
self.right = right
self.is_leaf = is_leaf
if not self.is_leaf:
self.value = self.left.value + self.right.value
self.parent = None
self.idx = idx # this value is only set for leaf nodes
if left is not None:
left.parent = self
if right is not None:
right.parent = self
@classmethod
def create_leaf(cls, value, idx):
leaf = cls(None, None, is_leaf=True, idx=idx)
leaf.value = value
return leaf
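# build the sum tree bottom-up: pair up nodes level by level until a single root remains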
def create_tree(input: list):
nodes = [Node.create_leaf(v, i) for i, v in enumerate(input)]
leaf_nodes = nodes
while len(nodes) > 1:
inodes = iter(nodes)
nodes = [Node(*pair) for pair in zip(inodes, inodes)]
return nodes[0], leaf_nodes
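# walk down the tree, choosing left or right based on the remaining value, until a leaf is reached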
def retrieve(value: float, node: Node):
if node.is_leaf:
return node
if node.left.value >= value:
return retrieve(value, node.left)
else:
return retrieve(value - node.left.value, node.right)
def update(node: Node, new_value: float):
change = new_value - node.value
node.value = new_value
propagate_changes(change, node.parent)
def propagate_changes(change: float, node: Node):
node.value += change
if node.parent is not None:
propagate_changes(change, node.parent)
def demonstrate_sampling(root_node: Node):
tree_total = root_node.value
iterations = 1000000
selected_vals = []
for i in range(iterations):
rand_val = np.random.uniform(0, tree_total)
selected_val = retrieve(rand_val, root_node).value
selected_vals.append(selected_val)
return selected_vals
input = [1, 4, 2, 3]
root_node, leaf_nodes = create_tree(input)
selected_vals = demonstrate_sampling(root_node)
# the below print statement should output ~4
print(f"Should be ~4: {sum([1 for x in selected_vals if x == 4]) / sum([1 for y in selected_vals if y == 1])}")
update(leaf_nodes[1], 6)
selected_vals = demonstrate_sampling(root_node)
# the below print statement should output ~6
print(f"Should be ~6: {sum([1 for x in selected_vals if x == 6]) / sum([1 for y in selected_vals if y == 1])}")
# the below print statement should output ~2
print(f"Should be ~2: {sum([1 for x in selected_vals if x == 6]) / sum([1 for y in selected_vals if y == 3])}")
| 2,459 | 27.941176 | 111 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tf_queuing.py | import tensorflow as tf
data_path = "C:\\Users\Andy\PycharmProjects\data\cifar-10-batches-bin\\"
def FIFO_queue_demo_no_coord():
# first let's create a simple random normal Tensor to act as dummy input data
# this operation should be run more than once, everytime the queue needs filling
# back up. However, it isn't in this case, because of our lack of a co-ordinator/
# proper threading
dummy_input = tf.random_normal([3], mean=0, stddev=1)
# let's print so we can see when this operation is called
dummy_input = tf.Print(dummy_input, data=[dummy_input],
message='New dummy inputs have been created: ', summarize=6)
# create a FIFO queue object
q = tf.FIFOQueue(capacity=3, dtypes=tf.float32)
# load up the queue with our dummy input data
enqueue_op = q.enqueue_many(dummy_input)
# grab some data out of the queue
data = q.dequeue()
# now print how much is left in the queue
data = tf.Print(data, data=[q.size()], message='This is how many items are left in q: ')
# create a fake graph that we can call upon
fg = data + 1
# now run some operations
with tf.Session() as sess:
# first load up the queue
sess.run(enqueue_op)
# now dequeue a few times, and we should see the number of items
# in the queue decrease
sess.run(fg)
sess.run(fg)
sess.run(fg)
# by this stage the queue will be emtpy, if we run the next time, the queue
# will block waiting for new data
sess.run(fg)
# this will never print:
print("We're here!")
def FIFO_queue_demo_with_coord():
# first let's create a simple random normal Tensor to act as dummy input data
# this operation should be run more than once, everytime the queue needs filling
# back up. However, it isn't in this case, because of our lack of a co-ordinator/
# proper threading
dummy_input = tf.random_normal([5], mean=0, stddev=1)
# let's print so we can see when this operation is called
dummy_input = tf.Print(dummy_input, data=[dummy_input],
message='New dummy inputs have been created: ', summarize=6)
# create a FIFO queue object
q = tf.FIFOQueue(capacity=3, dtypes=tf.float32)
# load up the queue with our dummy input data
enqueue_op = q.enqueue_many(dummy_input)
# now setup a queue runner to handle enqueue_op outside of the main thread asynchronously
qr = tf.train.QueueRunner(q, [enqueue_op] * 1)
# now we need to add qr to the TensorFlow queue runners collection
tf.train.add_queue_runner(qr)
# grab some data out of the queue
data = q.dequeue()
# now print how much is left in the queue
data = tf.Print(data, data=[q.size(), data], message='This is how many items are left in q: ')
# create a fake graph that we can call upon
fg = data + 1
# now run some operations
with tf.Session() as sess:
# we first create a TensorFlow coordinator instance which will handle
# all the asynchronous threads and their interactions
coord = tf.train.Coordinator()
# now we have to start all our queue runners - if we neglect to do this
# the main thread will hang waiting for them to be started
threads = tf.train.start_queue_runners(coord=coord)
# As opposed to the previous function, we don't have to call sess.run(enqueue_op)
# because our queue runner will figure out when this needs to be called. It
# will do so at the beginning, and also when the queue runs out of values
# now dequeue a few times, and we should see the number of items
# in the queue decrease
sess.run(fg)
sess.run(fg)
sess.run(fg)
# previously the main thread blocked / hung at this point, as it was waiting
# for the queue to be filled. However, it won't this time around, as we
# now have a queue runner on another thread making sure the queue is
# filled asynchronously
sess.run(fg)
sess.run(fg)
sess.run(fg)
# this will print, but not necessarily after the 6th call of sess.run(fg)
# due to the asynchronous operations
print("We're here!")
# we have to request all threads now stop, then we can join the queue runner
# thread back to the main thread and finish up
coord.request_stop()
coord.join(threads)
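# build a shuffled-batch input pipeline for the CIFAR-10 binary files using a filename queue and tf.train.shuffle_batch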
def cifar_shuffle_batch():
batch_size = 128
num_threads = 16
# create a list of all our filenames
filename_list = [data_path + 'data_batch_{}.bin'.format(i + 1) for i in range(5)]
# create a filename queue
# file_q = cifar_filename_queue(filename_list)
file_q = tf.train.string_input_producer(filename_list)
# read the data - this contains a FixedLengthRecordReader object which handles the
# de-queueing of the files. It returns a processed image and label, with shapes
# ready for a convolutional neural network
image, label = read_data(file_q)
# setup minimum number of examples that can remain in the queue after dequeuing before blocking
# occurs (i.e. enqueuing is forced) - the higher the number the better the mixing but
# longer initial load time
min_after_dequeue = 10000
# setup the capacity of the queue - this is based on recommendations by TensorFlow to ensure
# good mixing
capacity = min_after_dequeue + (num_threads + 1) * batch_size
# image_batch, label_batch = cifar_shuffle_queue_batch(image, label, batch_size, num_threads)
image_batch, label_batch = tf.train.shuffle_batch([image, label], batch_size, capacity, min_after_dequeue,
num_threads=num_threads)
# now run the training
cifar_run(image_batch, label_batch)
def cifar_run(image, label):
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
for i in range(5):
image_batch, label_batch = sess.run([image, label])
print(image_batch.shape, label_batch.shape)
coord.request_stop()
coord.join(threads)
def cifar_filename_queue(filename_list):
# convert the list to a tensor
string_tensor = tf.convert_to_tensor(filename_list, dtype=tf.string)
# randomize the tensor
tf.random_shuffle(string_tensor)
# create the queue
fq = tf.FIFOQueue(capacity=10, dtypes=tf.string)
# create our enqueue_op for this q
fq_enqueue_op = fq.enqueue_many([string_tensor])
# create a QueueRunner and add to queue runner list
# we only need one thread for this simple queue
tf.train.add_queue_runner(tf.train.QueueRunner(fq, [fq_enqueue_op] * 1))
return fq
def cifar_shuffle_queue_batch(image, label, batch_size, capacity, min_after_dequeue, threads):
tensor_list = [image, label]
dtypes = [tf.float32, tf.int32]
shapes = [image.get_shape(), label.get_shape()]
q = tf.RandomShuffleQueue(capacity=capacity, min_after_dequeue=min_after_dequeue,
dtypes=dtypes, shapes=shapes)
enqueue_op = q.enqueue(tensor_list)
# add to the queue runner
tf.train.add_queue_runner(tf.train.QueueRunner(q, [enqueue_op] * threads))
# now extract the batch
image_batch, label_batch = q.dequeue_many(batch_size)
return image_batch, label_batch
def read_data(file_q):
# Code from https://github.com/tensorflow/models/blob/master/tutorials/image/cifar10/cifar10_input.py
class CIFAR10Record(object):
pass
result = CIFAR10Record()
# Dimensions of the images in the CIFAR-10 dataset.
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
label_bytes = 1 # 2 for CIFAR-100
result.height = 32
result.width = 32
result.depth = 3
image_bytes = result.height * result.width * result.depth
# Every record consists of a label followed by the image, with a
# fixed number of bytes for each.
record_bytes = label_bytes + image_bytes
# Read a record, getting filenames from the filename_queue. No
# header or footer in the CIFAR-10 format, so we leave header_bytes
# and footer_bytes at their default of 0.
reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
result.key, value = reader.read(file_q)
# Convert from a string to a vector of uint8 that is record_bytes long.
record_bytes = tf.decode_raw(value, tf.uint8)
# The first bytes represent the label, which we convert from uint8->int32.
result.label = tf.cast(
tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)
# The remaining bytes after the label represent the image, which we reshape
# from [depth * height * width] to [depth, height, width].
depth_major = tf.reshape(
tf.strided_slice(record_bytes, [label_bytes],
[label_bytes + image_bytes]),
[result.depth, result.height, result.width])
# Convert from [depth, height, width] to [height, width, depth].
result.uint8image = tf.transpose(depth_major, [1, 2, 0])
reshaped_image = tf.cast(result.uint8image, tf.float32)
height = 24
width = 24
# Image processing for evaluation.
# Crop the central [height, width] of the image.
resized_image = tf.image.resize_image_with_crop_or_pad(reshaped_image,
height, width)
# Subtract off the mean and divide by the variance of the pixels.
float_image = tf.image.per_image_standardization(resized_image)
# Set the shapes of tensors.
float_image.set_shape([height, width, 3])
result.label.set_shape([1])
return float_image, result.label
if __name__ == "__main__":
run_opt = 3
if run_opt == 1:
FIFO_queue_demo_no_coord()
elif run_opt == 2:
FIFO_queue_demo_with_coord()
elif run_opt == 3:
cifar_shuffle_batch()
| 10,026 | 41.487288 | 110 | py |
adventures-in-ml-code | adventures-in-ml-code-master/vanishing_gradient.py | from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf
base_path = "C:\\Users\\Andy\\PycharmProjects\\Tensorboard\\"
class Model(object):
def __init__(self, input_size, label_size, activation, num_layers=6,
hidden_size=10):
self._input_size = input_size
self._label_size = label_size
self._activation = activation
# num layers does not include the input layer
self._num_layers = num_layers
self._hidden_size = hidden_size
self._model_def()
def _model_def(self):
# create placeholder variables
self.input_images = tf.placeholder(tf.float32, shape=[None, self._input_size])
self.labels = tf.placeholder(tf.float32, shape=[None, self._label_size])
# create self._num_layers dense layers as the model
input = self.input_images
for i in range(self._num_layers - 1):
input = tf.layers.dense(input, self._hidden_size, activation=self._activation,
name='layer{}'.format(i+1))
# don't supply an activation for the final layer - the loss definition will
# supply softmax activation. This defaults to a linear activation i.e. f(x) = x
logits = tf.layers.dense(input, 10, name='layer{}'.format(self._num_layers))
# use softmax cross entropy with logits - no need to apply softmax activation to
# logits
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
labels=self.labels))
# add the loss to the summary
tf.summary.scalar('loss', self.loss)
self._log_gradients(self._num_layers)
self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)
self.accuracy = self._compute_accuracy(logits, self.labels)
tf.summary.scalar('acc', self.accuracy)
self.merged = tf.summary.merge_all()
self.init_op = tf.global_variables_initializer()
def _compute_accuracy(self, logits, labels):
prediction = tf.argmax(logits, 1)
equality = tf.equal(prediction, tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))
return accuracy
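# add TensorBoard summaries of the gradient magnitudes for each layer's weights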
def _log_gradients(self, num_layers):
gr = tf.get_default_graph()
for i in range(num_layers):
weight = gr.get_tensor_by_name('layer{}/kernel:0'.format(i + 1))
grad = tf.gradients(self.loss, weight)[0]
mean = tf.reduce_mean(tf.abs(grad))
tf.summary.scalar('mean_{}'.format(i + 1), mean)
tf.summary.histogram('histogram_{}'.format(i + 1), grad)
tf.summary.histogram('hist_weights_{}'.format(i + 1), weight)
def run_training(model, mnist, sub_folder, iterations=2500, batch_size=30):
with tf.Session() as sess:
sess.run(model.init_op)
train_writer = tf.summary.FileWriter(base_path + sub_folder,
sess.graph)
for i in range(iterations):
image_batch, label_batch = mnist.train.next_batch(batch_size)
l, _, acc = sess.run([model.loss, model.optimizer, model.accuracy],
feed_dict={model.input_images: image_batch, model.labels: label_batch})
if i % 200 == 0:
summary = sess.run(model.merged, feed_dict={model.input_images: image_batch,
model.labels: label_batch})
train_writer.add_summary(summary, i)
print("Iteration {} of {}, loss: {:.3f}, train accuracy: "
"{:.2f}%".format(i, iterations, l, acc * 100))
if __name__ == "__main__":
scenarios = ["sigmoid", "relu", "leaky_relu"]
act_funcs = [tf.sigmoid, tf.nn.relu, tf.nn.leaky_relu]
assert len(scenarios) == len(act_funcs)
# collect the training data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
for i in range(len(scenarios)):
tf.reset_default_graph()
print("Running scenario: {}".format(scenarios[i]))
model = Model(784, 10, act_funcs[i], 6, 10)
run_training(model, mnist, scenarios[i]) | 4,262 | 49.75 | 104 | py |
adventures-in-ml-code | adventures-in-ml-code-master/per_duelingq_spaceinv_tf2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import imageio
import os
# STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
# STORE_PATH = "tensorboard"
STORE_PATH = "C:\\Users\\Andy\\TensorFlowBook\\TensorBoard"
MAX_EPSILON = 1
MIN_EPSILON = 0.1
EPSILON_MIN_ITER = 500000
GAMMA = 0.99
BATCH_SIZE = 32
TAU = 0.08
POST_PROCESS_IMAGE_SIZE = (105, 80, 1)
DELAY_TRAINING = 50000
BETA_DECAY_ITERS = 500000
MIN_BETA = 0.4
MAX_BETA = 1.0
NUM_FRAMES = 4
GIF_RECORDING_FREQ = 100
MODEL_SAVE_FREQ = 100
env = gym.make("SpaceInvaders-v0")
num_actions = env.action_space.n
# huber_loss = keras.losses.Huber()
def huber_loss(loss):
return 0.5 * loss ** 2 if abs(loss) < 1.0 else abs(loss) - 0.5
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.conv1 = keras.layers.Conv2D(16, (8, 8), (4, 4), activation='relu')
self.conv2 = keras.layers.Conv2D(32, (4, 4), (2, 2), activation='relu')
self.flatten = keras.layers.Flatten()
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.conv1(input)
x = self.conv2(x)
x = self.flatten(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
primary_network = DQModel(256, num_actions, True)
target_network = DQModel(256, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
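# sum tree node: leaves hold sample priorities, internal nodes hold the sum of their children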
class Node:
def __init__(self, left, right, is_leaf: bool = False, idx = None):
self.left = left
self.right = right
self.is_leaf = is_leaf
self.value = sum(n.value for n in (left, right) if n is not None)
self.parent = None
self.idx = idx # this value is only set for leaf nodes
if left is not None:
left.parent = self
if right is not None:
right.parent = self
@classmethod
def create_leaf(cls, value, idx):
leaf = cls(None, None, is_leaf=True, idx=idx)
leaf.value = value
return leaf
def create_tree(input: list):
nodes = [Node.create_leaf(v, i) for i, v in enumerate(input)]
leaf_nodes = nodes
while len(nodes) > 1:
inodes = iter(nodes)
nodes = [Node(*pair) for pair in zip(inodes, inodes)]
return nodes[0], leaf_nodes
def retrieve(value: float, node: Node):
if node.is_leaf:
return node
if node.left.value >= value:
return retrieve(value, node.left)
else:
return retrieve(value - node.left.value, node.right)
def update(node: Node, new_value: float):
change = new_value - node.value
node.value = new_value
propagate_changes(change, node.parent)
def propagate_changes(change: float, node: Node):
node.value += change
if node.parent is not None:
propagate_changes(change, node.parent)
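# prioritised experience replay buffer: priorities live in the sum tree, samples are drawn proportionally to priority, and importance-sampling weights are returned with each batch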
class Memory(object):
def __init__(self, size: int):
self.size = size
self.curr_write_idx = 0
self.available_samples = 0
self.buffer = [(np.zeros((POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1]),
dtype=np.float32), 0.0, 0.0, 0.0) for i in range(self.size)]
self.base_node, self.leaf_nodes = create_tree([0 for i in range(self.size)])
self.frame_idx = 0
self.action_idx = 1
self.reward_idx = 2
self.terminal_idx = 3
self.beta = 0.4
self.alpha = 0.6
self.min_priority = 0.01
def append(self, experience: tuple, priority: float):
self.buffer[self.curr_write_idx] = experience
self.update(self.curr_write_idx, priority)
self.curr_write_idx += 1
# reset the current writer position index if creater than the allowed size
if self.curr_write_idx >= self.size:
self.curr_write_idx = 0
# max out available samples at the memory buffer size
if self.available_samples + 1 < self.size:
self.available_samples += 1
else:
self.available_samples = self.size - 1
def update(self, idx: int, priority: float):
update(self.leaf_nodes[idx], self.adjust_priority(priority))
def adjust_priority(self, priority: float):
return np.power(priority + self.min_priority, self.alpha)
def sample(self, num_samples: int):
sampled_idxs = []
is_weights = []
sample_no = 0
while sample_no < num_samples:
sample_val = np.random.uniform(0, self.base_node.value)
samp_node = retrieve(sample_val, self.base_node)
if NUM_FRAMES - 1 < samp_node.idx < self.available_samples - 1:
sampled_idxs.append(samp_node.idx)
p = samp_node.value / self.base_node.value
is_weights.append((self.available_samples + 1) * p)
sample_no += 1
# apply the beta factor and normalise so that the maximum is_weight < 1
is_weights = np.array(is_weights)
is_weights = np.power(is_weights, -self.beta)
is_weights = is_weights / np.max(is_weights)
# now load up the state and next state variables according to sampled idxs
states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
next_states = np.zeros((num_samples, POST_PROCESS_IMAGE_SIZE[0], POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES),
dtype=np.float32)
actions, rewards, terminal = [], [], []
for i, idx in enumerate(sampled_idxs):
for j in range(NUM_FRAMES):
states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 1][self.frame_idx][:, :, 0]
next_states[i, :, :, j] = self.buffer[idx + j - NUM_FRAMES + 2][self.frame_idx][:, :, 0]
actions.append(self.buffer[idx][self.action_idx])
rewards.append(self.buffer[idx][self.reward_idx])
terminal.append(self.buffer[idx][self.terminal_idx])
return states, np.array(actions), np.array(rewards), next_states, np.array(terminal), sampled_idxs, is_weights
memory = Memory(200000)
def image_preprocess(image, new_size=(105, 80)):
# convert to greyscale, resize and normalize the image
image = tf.image.rgb_to_grayscale(image)
image = tf.image.resize(image, new_size)
image = image / 255
return image
def choose_action(state, primary_network, eps, step):
if step < DELAY_TRAINING:
return random.randint(0, num_actions - 1)
else:
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(tf.reshape(state, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)).numpy()))
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
def process_state_stack(state_stack, state):
for i in range(1, state_stack.shape[-1]):
state_stack[:, :, i - 1].assign(state_stack[:, :, i])
state_stack[:, :, -1].assign(state[:, :, 0])
return state_stack
def record_gif(frame_list, episode, reward, fps=50):
imageio.mimsave(STORE_PATH + "\\SPACE_INVADERS_EPISODE-eps{}-r{}.gif".format(episode, reward), frame_list, fps=fps)
def get_per_error(states, actions, rewards, next_states, terminal, primary_network, target_network):
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
# the action selection from the primary / online network
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
# the q value for the prim_action_tp1 from the target network
q_from_target = target_network(next_states)
# index per sample: each row's update uses the target network's Q value for that row's selected action
batch_idxs = np.arange(target_q.shape[0])
updates = rewards + (1 - terminal) * GAMMA * q_from_target.numpy()[batch_idxs, prim_action_tp1]
target_q[batch_idxs, actions] = updates
# calculate the loss / error to update priorites
error = [huber_loss(target_q[i, actions[i]] - prim_qt.numpy()[i, actions[i]]) for i in range(states.shape[0])]
return target_q, error
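# sample a prioritised batch, refresh the sampled priorities with the new errors, and train the primary network using the importance-sampling weights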
def train(primary_network, memory, target_network):
states, actions, rewards, next_states, terminal, idxs, is_weights = memory.sample(BATCH_SIZE)
target_q, error = get_per_error(states, actions, rewards, next_states, terminal,
primary_network, target_network)
for i in range(len(idxs)):
memory.update(idxs[i], error[i])
loss = primary_network.train_on_batch(states, target_q, is_weights)
return loss
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + "/DuelingQPERSI_{}".format(dt.datetime.now().strftime('%d%m%Y%H%M')))
steps = 0
for i in range(num_episodes):
state = env.reset()
state = image_preprocess(state)
state_stack = tf.Variable(np.repeat(state.numpy(), NUM_FRAMES).reshape((POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1],
NUM_FRAMES)))
cnt = 1
avg_loss = 0
tot_reward = 0
if i % GIF_RECORDING_FREQ == 0:
frame_list = []
while True:
if render:
env.render()
action = choose_action(state_stack, primary_network, eps, steps)
next_state, reward, done, info = env.step(action)
tot_reward += reward
if i % GIF_RECORDING_FREQ == 0:
frame_list.append(tf.cast(tf.image.resize(next_state, (480, 320)), tf.uint8).numpy())
next_state = image_preprocess(next_state)
old_state_stack = state_stack
state_stack = process_state_stack(state_stack, next_state)
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network)
update_network(primary_network, target_network)
_, error = get_per_error(tf.reshape(old_state_stack, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)),
np.array([action]), np.array([reward]),
tf.reshape(state_stack, (1, POST_PROCESS_IMAGE_SIZE[0],
POST_PROCESS_IMAGE_SIZE[1], NUM_FRAMES)), np.array([done]))
# store in memory
memory.append((next_state, action, reward, done), error[0])
else:
loss = -1
# store in memory - default the priority to the reward
memory.append((next_state, action, reward, done), reward)
avg_loss += loss
# linearly decay the eps and PER beta values
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
beta = MIN_BETA + ((steps - DELAY_TRAINING) / BETA_DECAY_ITERS) * \
(MAX_BETA - MIN_BETA) if steps < BETA_DECAY_ITERS else \
MAX_BETA
memory.beta = beta
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print("Episode: {}, Reward: {}, avg loss: {:.5f}, eps: {:.3f}".format(i, tot_reward, avg_loss, eps))
with train_writer.as_default():
tf.summary.scalar('reward', tot_reward, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print("Pre-training...Episode: {}".format(i))
if i % GIF_RECORDING_FREQ == 0:
record_gif(frame_list, i, tot_reward)
break
cnt += 1
if i % MODEL_SAVE_FREQ == 0: # and i != 0:
primary_network.save_weights(STORE_PATH + "/checkpoints/cp_primary_network_episode_{}.ckpt".format(i))
target_network.save_weights(STORE_PATH + "/checkpoints/cp_target_network_episode_{}.ckpt".format(i)) | 13,766 | 40.844985 | 165 | py |
adventures-in-ml-code | adventures-in-ml-code-master/cntk_tutorial.py | import os
os.environ['PATH'] = "C:\\Users\Andy\Anaconda2\envs\TensorFlow" + ';' + os.environ['PATH']
import cntk as C
from cntk.train import Trainer
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs
from cntk.learners import adadelta, learning_rate_schedule, UnitType
from cntk.ops import relu, element_times, constant
from cntk.layers import Dense, Sequential, For, default_options
from cntk.losses import cross_entropy_with_softmax
from cntk.metrics import classification_error
from cntk.train.training_session import *
from cntk.logging import ProgressPrinter
abs_path = os.path.dirname(os.path.abspath(__file__))
# Creates and trains a feedforward classification model for MNIST images
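# The network is 784 scaled inputs -> two dense ReLU layers of 200 units -> 10 output logits,
# trained with softmax cross-entropy and an Adadelta learner on CTF-format MNIST text files,
# then evaluated in minibatches on the held-out test file.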
def simple_mnist():
input_dim = 784
num_output_classes = 10
num_hidden_layers = 2
hidden_layers_dim = 200
# Input variables denoting the features and label data
feature = C.input_variable(input_dim)
label = C.input_variable(num_output_classes)
# Instantiate the feedforward classification model
scaled_input = element_times(constant(0.00390625), feature)
# z = Sequential([
# Dense(hidden_layers_dim, activation=relu),
# Dense(hidden_layers_dim, activation=relu),
# Dense(num_output_classes)])(scaled_input)
with default_options(activation=relu, init=C.glorot_uniform()):
z = Sequential([For(range(num_hidden_layers),
lambda i: Dense(hidden_layers_dim)),
Dense(num_output_classes, activation=None)])(scaled_input)
ce = cross_entropy_with_softmax(z, label)
pe = classification_error(z, label)
# setup the data
path = abs_path + "\Train-28x28_cntk_text.txt"
reader_train = MinibatchSource(CTFDeserializer(path, StreamDefs(
features=StreamDef(field='features', shape=input_dim),
labels=StreamDef(field='labels', shape=num_output_classes))))
input_map = {
feature: reader_train.streams.features,
label: reader_train.streams.labels
}
# Training config
minibatch_size = 64
num_samples_per_sweep = 60000
num_sweeps_to_train_with = 10
# Instantiate progress writers.
progress_writers = [ProgressPrinter(
tag='Training',
num_epochs=num_sweeps_to_train_with)]
# Instantiate the trainer object to drive the model training
lr = learning_rate_schedule(1, UnitType.sample)
trainer = Trainer(z, (ce, pe), [adadelta(z.parameters, lr)], progress_writers)
training_session(
trainer=trainer,
mb_source=reader_train,
mb_size=minibatch_size,
model_inputs_to_streams=input_map,
max_samples=num_samples_per_sweep * num_sweeps_to_train_with,
progress_frequency=num_samples_per_sweep
).train()
# Load test data
path = abs_path + "\Test-28x28_cntk_text.txt"
reader_test = MinibatchSource(CTFDeserializer(path, StreamDefs(
features=StreamDef(field='features', shape=input_dim),
labels=StreamDef(field='labels', shape=num_output_classes))))
input_map = {
feature: reader_test.streams.features,
label: reader_test.streams.labels
}
# Test data for trained model
test_minibatch_size = 1024
num_samples = 10000
num_minibatches_to_test = num_samples / test_minibatch_size
test_result = 0.0
for i in range(0, int(num_minibatches_to_test)):
mb = reader_test.next_minibatch(test_minibatch_size, input_map=input_map)
eval_error = trainer.test_minibatch(mb)
test_result = test_result + eval_error
# Average of evaluation errors of all test minibatches
return test_result / num_minibatches_to_test
if __name__ == '__main__':
error = simple_mnist()
print("Error: %f" % error)
| 3,759 | 33.495413 | 90 | py |
adventures-in-ml-code | adventures-in-ml-code-master/gensim_word2vec.py | import gensim
from gensim.models import word2vec
import logging
from keras.layers import Input, Embedding, merge
from keras.models import Model
import tensorflow as tf
import numpy as np
import urllib.request
import os
import zipfile
vector_dim = 300
root_path = "C:\\Users\Andy\PycharmProjects\\adventures-in-ml-code\\"
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = f.read(f.namelist()[0]).split()
return data
# convert the input data into a list of integer indexes aligning with the wv indexes
def convert_data_to_index(string_data, wv):
index_data = []
for word in string_data:
if word in wv:
index_data.append(wv.vocab[word].index)
return index_data
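# gensim_demo() downloads and unzips the text8 corpus, trains a 300-dimensional Word2Vec model
# over it with gensim, runs a few vocabulary / similarity sanity checks, and saves the trained
# model to disk for reuse by the TensorFlow and Keras demos below.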
def gensim_demo():
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
if not os.path.exists((root_path + filename).strip('.zip')):
zipfile.ZipFile(root_path+filename).extractall()
sentences = word2vec.Text8Corpus((root_path + filename).strip('.zip'))
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
model = word2vec.Word2Vec(sentences, iter=10, min_count=10, size=300, workers=4)
# get the word vector of "the"
print(model.wv['the'])
# get the most common words
print(model.wv.index2word[0], model.wv.index2word[1], model.wv.index2word[2])
# get the least common words
vocab_size = len(model.wv.vocab)
print(model.wv.index2word[vocab_size - 1], model.wv.index2word[vocab_size - 2], model.wv.index2word[vocab_size - 3])
# find the index of the 2nd most common word ("of")
print('Index of "of" is: {}'.format(model.wv.vocab['of'].index))
# some similarity fun
print(model.wv.similarity('woman', 'man'), model.wv.similarity('man', 'elephant'))
# what doesn't fit?
print(model.wv.doesnt_match("green blue red zebra".split()))
str_data = read_data(root_path + filename)
index_data = convert_data_to_index(str_data, model.wv)
print(str_data[:4], index_data[:4])
# save and reload the model
model.save(root_path + "mymodel")
def create_embedding_matrix(model):
# convert the wv word vectors into a numpy matrix that is suitable for insertion
# into our TensorFlow and Keras models
embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))
for i in range(len(model.wv.vocab)):
embedding_vector = model.wv[model.wv.index2word[i]]
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
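# tf_model() loads the gensim embedding matrix into a frozen (non-trainable) TensorFlow variable,
# L2-normalises each word vector, and uses a single matrix multiply to get cosine similarities
# between a random set of validation words and the whole vocabulary, printing the nearest
# neighbours of each validation word.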
def tf_model(embedding_matrix, wv):
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# embedding layer weights are frozen to avoid updating embeddings while training
saved_embeddings = tf.constant(embedding_matrix)
embedding = tf.Variable(initial_value=saved_embeddings, trainable=False)
# create the cosine similarity operations
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embeddings = embedding / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# call our similarity operation
sim = similarity.eval()
# run through each valid example, finding closest words
for i in range(valid_size):
valid_word = wv.index2word[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = wv.index2word[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
def keras_model(embedding_matrix, wv):
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
# input words - in this case we do sample by sample evaluations of the similarity
valid_word = Input((1,), dtype='int32')
other_word = Input((1,), dtype='int32')
# setup the embedding layer
embeddings = Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
weights=[embedding_matrix])
embedded_a = embeddings(valid_word)
embedded_b = embeddings(other_word)
similarity = merge([embedded_a, embedded_b], mode='cos', dot_axes=2)
# create the Keras model
k_model = Model(input=[valid_word, other_word], output=similarity)
def get_sim(valid_word_idx, vocab_size):
sim = np.zeros((vocab_size,))
in_arr1 = np.zeros((1,))
in_arr2 = np.zeros((1,))
in_arr1[0,] = valid_word_idx
for i in range(vocab_size):
in_arr2[0,] = i
out = k_model.predict_on_batch([in_arr1, in_arr2])
sim[i] = out
return sim
# now run the model and get the closest words to the valid examples
for i in range(valid_size):
valid_word = wv.index2word[valid_examples[i]]
top_k = 8 # number of nearest neighbors
sim = get_sim(valid_examples[i], len(wv.vocab))
nearest = (-sim).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = wv.index2word[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
if __name__ == "__main__":
run_opt = 2
if run_opt == 1:
gensim_demo()
elif run_opt == 2:
model = gensim.models.Word2Vec.load(root_path + "mymodel")
embedding_matrix = create_embedding_matrix(model)
tf_model(embedding_matrix, model.wv)
elif run_opt == 3:
model = gensim.models.Word2Vec.load(root_path + "mymodel")
embedding_matrix = create_embedding_matrix(model)
keras_model(embedding_matrix, model.wv)
| 7,078 | 38.327778 | 120 | py |
adventures-in-ml-code | adventures-in-ml-code-master/a2c_tf2_cartpole.py | import tensorflow as tf
from tensorflow import keras
import numpy as np
import gym
import datetime as dt
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard/A2CCartPole'
CRITIC_LOSS_WEIGHT = 0.5
ACTOR_LOSS_WEIGHT = 1.0
ENTROPY_LOSS_WEIGHT = 0.05
BATCH_SIZE = 64
GAMMA = 0.95
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
class Model(keras.Model):
def __init__(self, num_actions):
super().__init__()
self.num_actions = num_actions
self.dense1 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.value = keras.layers.Dense(1)
self.policy_logits = keras.layers.Dense(num_actions)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.value(x), self.policy_logits(x)
def action_value(self, state):
value, logits = self.predict_on_batch(state)
action = tf.random.categorical(logits, 1)[0]
return action, value
def critic_loss(discounted_rewards, predicted_values):
return keras.losses.mean_squared_error(discounted_rewards, predicted_values) * CRITIC_LOSS_WEIGHT
def actor_loss(combined, policy_logits):
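    # Keras losses only receive (y_true, y_pred), so the training loop below packs the actions and
    # advantages into a single 'combined' array (column 0 = actions, column 1 = advantages)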
actions = combined[:, 0]
advantages = combined[:, 1]
sparse_ce = keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.SUM
)
actions = tf.cast(actions, tf.int32)
policy_loss = sparse_ce(actions, policy_logits, sample_weight=advantages)
probs = tf.nn.softmax(policy_logits)
entropy_loss = keras.losses.categorical_crossentropy(probs, probs)
return policy_loss * ACTOR_LOSS_WEIGHT - entropy_loss * ENTROPY_LOSS_WEIGHT
def discounted_rewards_advantages(rewards, dones, values, next_value):
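    # n-step bootstrapped returns, computed backwards through the batch:
    #   G_t = r_t + GAMMA * G_{t+1} * (1 - done_t)
    # with the recursion seeded by the critic's value estimate of the state reached after the batch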
discounted_rewards = np.array(rewards + [next_value[0]])
for t in reversed(range(len(rewards))):
discounted_rewards[t] = rewards[t] + GAMMA * discounted_rewards[t+1] * (1-dones[t])
discounted_rewards = discounted_rewards[:-1]
# advantages are bootstrapped discounted rewards - values, using Bellman's equation
advantages = discounted_rewards - np.stack(values)[:, 0]
return discounted_rewards, advantages
model = Model(num_actions)
model.compile(optimizer=keras.optimizers.Adam(), loss=[critic_loss, actor_loss])
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/A2C-CartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
num_steps = 10000000
episode_reward_sum = 0
state = env.reset()
episode = 1
loss = None  # no training loss is available until the first full batch has been collected
for step in range(num_steps):
rewards = []
actions = []
values = []
states = []
dones = []
for _ in range(BATCH_SIZE):
_, policy_logits = model(state.reshape(1, -1))
action, value = model.action_value(state.reshape(1, -1))
new_state, reward, done, _ = env.step(action.numpy()[0])
actions.append(action)
values.append(value.numpy()[0])
states.append(state)
dones.append(done)
episode_reward_sum += reward
state = new_state
if done:
rewards.append(0.0)
state = env.reset()
print(f"Episode: {episode}, latest episode reward: {episode_reward_sum}, loss: {loss}")
with train_writer.as_default():
tf.summary.scalar('rewards', episode_reward_sum, episode)
episode_reward_sum = 0
episode += 1
else:
rewards.append(reward)
_, next_value = model.action_value(state.reshape(1, -1))
discounted_rewards, advantages = discounted_rewards_advantages(rewards, dones, values, next_value.numpy()[0])
# combine the actions and advantages into a combined array for passing to
# actor_loss function
combined = np.zeros((len(actions), 2))
combined[:, 0] = actions
combined[:, 1] = advantages
loss = model.train_on_batch(tf.stack(states), [discounted_rewards, combined])
with train_writer.as_default():
tf.summary.scalar('tot_loss', np.sum(loss), step) | 4,312 | 32.96063 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/pytorch_nn.py | import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
def simple_gradient():
# print the gradient of 2x^2 + 5x
x = Variable(torch.ones(2, 2) * 2, requires_grad=True)
z = 2 * (x * x) + 5 * x
# run the backpropagation
z.backward(torch.ones(2, 2))
print(x.grad)
def create_nn(batch_size=200, learning_rate=0.01, epochs=10,
log_interval=10):
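    # Builds normalised MNIST train/test DataLoaders, defines a 784-200-200-10 fully connected
    # network with a log-softmax output, trains it with SGD + momentum against the negative
    # log-likelihood loss, and finally reports the test-set loss and accuracy.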
train_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=True, download=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=batch_size, shuffle=True)
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(28 * 28, 200)
self.fc2 = nn.Linear(200, 200)
self.fc3 = nn.Linear(200, 10)
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x)
net = Net()
print(net)
# create a stochastic gradient descent optimizer
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
# create a loss function
criterion = nn.NLLLoss()
# run the main training loop
for epoch in range(epochs):
for batch_idx, (data, target) in enumerate(train_loader):
data, target = Variable(data), Variable(target)
# resize data from (batch_size, 1, 28, 28) to (batch_size, 28*28)
data = data.view(-1, 28*28)
optimizer.zero_grad()
net_out = net(data)
loss = criterion(net_out, target)
loss.backward()
optimizer.step()
if batch_idx % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.data[0]))
# run a test loop
test_loss = 0
correct = 0
for data, target in test_loader:
data, target = Variable(data, volatile=True), Variable(target)
data = data.view(-1, 28 * 28)
net_out = net(data)
# sum up batch loss
test_loss += criterion(net_out, target).data[0]
pred = net_out.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data).sum()
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
if __name__ == "__main__":
run_opt = 2
if run_opt == 1:
simple_gradient()
elif run_opt == 2:
create_nn() | 3,316 | 33.915789 | 81 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tensor_flow_tutorial.py | import tensorflow as tf
import numpy as np
import datetime as dt
from tensorflow.keras.datasets import mnist
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorBoard'
def run_simple_graph():
# create TensorFlow variables
const = tf.Variable(2.0, name="const")
b = tf.Variable(2.0, name='b')
c = tf.Variable(1.0, name='c')
# now create some operations
d = tf.add(b, c, name='d')
e = tf.add(c, const, name='e')
a = tf.multiply(d, e, name='a')
# alternatively (and more naturally)
d = b + c
e = c + 2
a = d * e
print(f"Variable a is {a.numpy()}")
def run_simple_graph_multiple():
const = tf.Variable(2.0, name="const")
b = tf.Variable(np.arange(0, 10), name='b')
c = tf.Variable(1.0, name='c')
d = tf.cast(b, tf.float32) + c
e = c + const
a = d * e
print(f"Variable a is {a.numpy()}")
# the line below would cause an error - tensors are immutable
# b[1] = 10
# need to use assignment instead
b[1].assign(10)
d = tf.cast(b, tf.float32) + c
e = c + const
a = d * e
print(f"Variable a is {a.numpy()}")
b[6:9].assign([10, 10, 10])
f = b[2:5]
print(f.numpy())
def get_batch(x_data, y_data, batch_size):
idxs = np.random.randint(0, len(y_data), batch_size)
return x_data[idxs,:,:], y_data[idxs]
def nn_model(x_input, W1, b1, W2, b2):
# flatten the input image from 28 x 28 to 784
x_input = tf.reshape(x_input, (x_input.shape[0], -1))
x = tf.add(tf.matmul(tf.cast(x_input, tf.float32), W1), b1)
x = tf.nn.relu(x)
logits = tf.add(tf.matmul(x, W2), b2)
return logits
def loss_fn(logits, labels):
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=logits))
return cross_entropy
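# nn_example() is a hand-rolled TensorFlow 2 training loop: the weights are plain tf.Variables,
# gradients come from tf.GradientTape, the Adam optimizer applies them, and per-epoch loss and
# test accuracy are written to TensorBoard via tf.summary.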
def nn_example():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Python optimisation variables
epochs = 10
batch_size = 100
# normalize the input images by dividing by 255.0
x_train = x_train / 255.0
x_test = x_test / 255.0
# convert x_test to tensor to pass through model (train data will be converted to
# tensors on the fly)
x_test = tf.Variable(x_test)
# now declare the weights connecting the input to the hidden layer
W1 = tf.Variable(tf.random.normal([784, 300], stddev=0.03), name='W1')
b1 = tf.Variable(tf.random.normal([300]), name='b1')
# and the weights connecting the hidden layer to the output layer
W2 = tf.Variable(tf.random.normal([300, 10], stddev=0.03), name='W2')
b2 = tf.Variable(tf.random.normal([10]), name='b2')
# setup the optimizer
optimizer = tf.keras.optimizers.Adam()
# create a summary writer to view loss in TensorBoard
train_summary_writer = tf.summary.create_file_writer(STORE_PATH +
"/TensorFlow_Intro_Chapter_" +
f"{dt.datetime.now().strftime('%d%m%Y%H%M')}")
total_batch = int(len(y_train) / batch_size)
for epoch in range(epochs):
avg_loss = 0
for i in range(total_batch):
batch_x, batch_y = get_batch(x_train, y_train, batch_size=batch_size)
# create tensors
batch_x = tf.Variable(batch_x)
batch_y = tf.Variable(batch_y)
# create a one hot vector
batch_y = tf.one_hot(batch_y, 10)
with tf.GradientTape() as tape:
logits = nn_model(batch_x, W1, b1, W2, b2)
loss = loss_fn(logits, batch_y)
gradients = tape.gradient(loss, [W1, b1, W2, b2])
optimizer.apply_gradients(zip(gradients, [W1, b1, W2, b2]))
avg_loss += loss / total_batch
test_logits = nn_model(x_test, W1, b1, W2, b2)
max_idxs = tf.argmax(test_logits, axis=1)
test_acc = np.sum(max_idxs.numpy() == y_test) / len(y_test)
print(f"Epoch: {epoch + 1}, loss={avg_loss:.3f}, test set accuracy={test_acc*100:.3f}%")
with train_summary_writer.as_default():
tf.summary.scalar('loss', avg_loss, step=epoch)
tf.summary.scalar('accuracy', test_acc, step=epoch)
print("\nTraining complete!")
if __name__ == "__main__":
# run_simple_graph()
# run_simple_graph_multiple()
nn_example() | 4,411 | 32.424242 | 103 | py |
adventures-in-ml-code | adventures-in-ml-code-master/double_q_tensorflow2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import math
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.01
LAMBDA = 0.0005
GAMMA = 0.95
BATCH_SIZE = 32
TAU = 0.08
RANDOM_REWARD_STD = 1.0
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
primary_network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions)
])
target_network = keras.Sequential([
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(30, activation='relu', kernel_initializer=keras.initializers.he_normal()),
keras.layers.Dense(num_actions)
])
primary_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
@property
def num_samples(self):
return len(self._samples)
memory = Memory(500000)
def choose_action(state, primary_network, eps):
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(state.reshape(1, -1)))
def train(primary_network, memory, target_network=None):
if memory.num_samples < BATCH_SIZE * 3:
return 0
batch = memory.sample(BATCH_SIZE)
states = np.array([val[0] for val in batch])
actions = np.array([val[1] for val in batch])
rewards = np.array([val[2] for val in batch])
next_states = np.array([(np.zeros(state_size)
if val[3] is None else val[3]) for val in batch])
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
valid_idxs = np.array(next_states).sum(axis=1) != 0
batch_idxs = np.arange(BATCH_SIZE)
if target_network is None:
updates[valid_idxs] += GAMMA * np.amax(prim_qtp1.numpy()[valid_idxs, :], axis=1)
else:
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
q_from_target = target_network(next_states)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
target_q[batch_idxs, actions] = updates
loss = primary_network.train_on_batch(states, target_q)
if target_network is not None:
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
return loss
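# With double_q=False the target network is ignored and the update is vanilla Q-learning
# (max over the primary network's own next-state estimates). With double_q=True the next
# action is chosen by the primary network but evaluated by the target network, and the
# target network tracks the primary one through the soft (TAU-weighted) update above.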
num_episodes = 1000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DoubleQ_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
double_q = False
steps = 0
for i in range(num_episodes):
state = env.reset()
cnt = 0
avg_loss = 0
while True:
if render:
env.render()
action = choose_action(state, primary_network, eps)
next_state, reward, done, info = env.step(action)
reward = np.random.normal(1.0, RANDOM_REWARD_STD)
if done:
next_state = None
# store in memory
memory.add_sample((state, action, reward, next_state))
loss = train(primary_network, memory, target_network if double_q else None)
avg_loss += loss
state = next_state
# exponentially decay the eps value
steps += 1
eps = MIN_EPSILON + (MAX_EPSILON - MIN_EPSILON) * math.exp(-LAMBDA * steps)
if done:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {cnt}, avg loss: {avg_loss:.3f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', cnt, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
break
cnt += 1
| 4,711 | 32.41844 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_lstm.py | from __future__ import print_function
import collections
import os
import tensorflow as tf
from keras.models import Sequential, load_model
from keras.layers import Dense, Activation, Embedding, Dropout, TimeDistributed
from keras.layers import LSTM
from keras.optimizers import Adam
from keras.utils import to_categorical
from keras.callbacks import ModelCheckpoint
import numpy as np
import argparse
"""To run this code, you'll need to first download and extract the text dataset
from here: http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz. Change the
data_path variable below to your local extraction path"""
data_path = "C:\\Users\Andy\Documents\simple-examples\data"
parser = argparse.ArgumentParser()
parser.add_argument('run_opt', type=int, default=1, help='An integer: 1 to train, 2 to test')
parser.add_argument('--data_path', type=str, default=data_path, help='The full path of the training data')
args = parser.parse_args()
if args.data_path:
data_path = args.data_path
def read_words(filename):
with tf.gfile.GFile(filename, "r") as f:
return f.read().decode("utf-8").replace("\n", "<eos>").split()
def build_vocab(filename):
data = read_words(filename)
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
word_to_id = dict(zip(words, range(len(words))))
return word_to_id
def file_to_word_ids(filename, word_to_id):
data = read_words(filename)
return [word_to_id[word] for word in data if word in word_to_id]
def load_data():
# get the data paths
train_path = os.path.join(data_path, "ptb.train.txt")
valid_path = os.path.join(data_path, "ptb.valid.txt")
test_path = os.path.join(data_path, "ptb.test.txt")
# build the complete vocabulary, then convert text data to list of integers
word_to_id = build_vocab(train_path)
train_data = file_to_word_ids(train_path, word_to_id)
valid_data = file_to_word_ids(valid_path, word_to_id)
test_data = file_to_word_ids(test_path, word_to_id)
vocabulary = len(word_to_id)
reversed_dictionary = dict(zip(word_to_id.values(), word_to_id.keys()))
print(train_data[:5])
print(word_to_id)
print(vocabulary)
print(" ".join([reversed_dictionary[x] for x in train_data[:10]]))
return train_data, valid_data, test_data, vocabulary, reversed_dictionary
train_data, valid_data, test_data, vocabulary, reversed_dictionary = load_data()
class KerasBatchGenerator(object):
def __init__(self, data, num_steps, batch_size, vocabulary, skip_step=5):
self.data = data
self.num_steps = num_steps
self.batch_size = batch_size
self.vocabulary = vocabulary
# this will track the progress of the batches sequentially through the
# data set - once the data reaches the end of the data set it will reset
# back to zero
self.current_idx = 0
# skip_step is the number of words which will be skipped before the next
# batch is skimmed from the data set
self.skip_step = skip_step
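    # generate() is an infinite generator for Keras' fit_generator: x is a (batch_size, num_steps)
    # array of word indices and y is the same window shifted one word ahead, one-hot encoded to
    # shape (batch_size, num_steps, vocabulary)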
def generate(self):
x = np.zeros((self.batch_size, self.num_steps))
y = np.zeros((self.batch_size, self.num_steps, self.vocabulary))
while True:
for i in range(self.batch_size):
if self.current_idx + self.num_steps >= len(self.data):
# reset the index back to the start of the data set
self.current_idx = 0
x[i, :] = self.data[self.current_idx:self.current_idx + self.num_steps]
temp_y = self.data[self.current_idx + 1:self.current_idx + self.num_steps + 1]
# convert all of temp_y into a one hot representation
y[i, :, :] = to_categorical(temp_y, num_classes=self.vocabulary)
self.current_idx += self.skip_step
yield x, y
num_steps = 30
batch_size = 20
train_data_generator = KerasBatchGenerator(train_data, num_steps, batch_size, vocabulary,
skip_step=num_steps)
valid_data_generator = KerasBatchGenerator(valid_data, num_steps, batch_size, vocabulary,
skip_step=num_steps)
hidden_size = 500
use_dropout=True
model = Sequential()
model.add(Embedding(vocabulary, hidden_size, input_length=num_steps))
model.add(LSTM(hidden_size, return_sequences=True))
model.add(LSTM(hidden_size, return_sequences=True))
if use_dropout:
model.add(Dropout(0.5))
model.add(TimeDistributed(Dense(vocabulary)))
model.add(Activation('softmax'))
optimizer = Adam()
model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['categorical_accuracy'])
print(model.summary())
checkpointer = ModelCheckpoint(filepath=data_path + '/model-{epoch:02d}.hdf5', verbose=1)
num_epochs = 50
if args.run_opt == 1:
model.fit_generator(train_data_generator.generate(), len(train_data)//(batch_size*num_steps), num_epochs,
validation_data=valid_data_generator.generate(),
validation_steps=len(valid_data)//(batch_size*num_steps), callbacks=[checkpointer])
# model.fit_generator(train_data_generator.generate(), 2000, num_epochs,
# validation_data=valid_data_generator.generate(),
# validation_steps=10)
    model.save(data_path + "/final_model.hdf5")
elif args.run_opt == 2:
model = load_model(data_path + "\model-40.hdf5")
dummy_iters = 40
example_training_generator = KerasBatchGenerator(train_data, num_steps, 1, vocabulary,
skip_step=1)
print("Training data:")
for i in range(dummy_iters):
dummy = next(example_training_generator.generate())
num_predict = 10
true_print_out = "Actual words: "
pred_print_out = "Predicted words: "
for i in range(num_predict):
data = next(example_training_generator.generate())
prediction = model.predict(data[0])
predict_word = np.argmax(prediction[:, num_steps-1, :])
true_print_out += reversed_dictionary[train_data[num_steps + dummy_iters + i]] + " "
pred_print_out += reversed_dictionary[predict_word] + " "
print(true_print_out)
print(pred_print_out)
# test data set
dummy_iters = 40
example_test_generator = KerasBatchGenerator(test_data, num_steps, 1, vocabulary,
skip_step=1)
print("Test data:")
for i in range(dummy_iters):
dummy = next(example_test_generator.generate())
num_predict = 10
true_print_out = "Actual words: "
pred_print_out = "Predicted words: "
for i in range(num_predict):
data = next(example_test_generator.generate())
prediction = model.predict(data[0])
predict_word = np.argmax(prediction[:, num_steps - 1, :])
true_print_out += reversed_dictionary[test_data[num_steps + dummy_iters + i]] + " "
pred_print_out += reversed_dictionary[predict_word] + " "
print(true_print_out)
print(pred_print_out)
| 7,148 | 39.619318 | 109 | py |
adventures-in-ml-code | adventures-in-ml-code-master/dueling_q_tensorflow2.py | import gym
import tensorflow as tf
from tensorflow import keras
import random
import numpy as np
import datetime as dt
import math
STORE_PATH = '/Users/andrewthomas/Adventures in ML/TensorFlowBook/TensorBoard'
MAX_EPSILON = 1
MIN_EPSILON = 0.01
EPSILON_MIN_ITER = 5000
DELAY_TRAINING = 300
GAMMA = 0.95
BATCH_SIZE = 32
TAU = 0.08
RANDOM_REWARD_STD = 1.0
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
class DQModel(keras.Model):
def __init__(self, hidden_size: int, num_actions: int, dueling: bool):
super(DQModel, self).__init__()
self.dueling = dueling
self.dense1 = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.adv_out = keras.layers.Dense(num_actions,
kernel_initializer=keras.initializers.he_normal())
if dueling:
self.v_dense = keras.layers.Dense(hidden_size, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.v_out = keras.layers.Dense(1, kernel_initializer=keras.initializers.he_normal())
self.lambda_layer = keras.layers.Lambda(lambda x: x - tf.reduce_mean(x))
self.combine = keras.layers.Add()
def call(self, input):
x = self.dense1(input)
x = self.dense2(x)
adv = self.adv_dense(x)
adv = self.adv_out(adv)
if self.dueling:
v = self.v_dense(x)
v = self.v_out(v)
norm_adv = self.lambda_layer(adv)
combined = self.combine([v, norm_adv])
return combined
return adv
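# Dueling architecture: the Q-values are assembled as Q(s, a) = V(s) + (A(s, a) - mean A), where
# the Lambda layer above subtracts the mean advantage (taken here with tf.reduce_mean over the
# whole tensor) to keep V and A identifiable. With dueling=False the model reduces to a standard
# DQN head that returns the advantage stream directly as Q(s, a).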
primary_network = DQModel(30, num_actions, True)
target_network = DQModel(30, num_actions, True)
primary_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
# make target_network = primary_network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(e)
def update_network(primary_network, target_network):
# update target network parameters slowly from primary network
for t, e in zip(target_network.trainable_variables, primary_network.trainable_variables):
t.assign(t * (1 - TAU) + e * TAU)
class Memory:
def __init__(self, max_memory):
self._max_memory = max_memory
self._samples = []
def add_sample(self, sample):
self._samples.append(sample)
if len(self._samples) > self._max_memory:
self._samples.pop(0)
def sample(self, no_samples):
if no_samples > len(self._samples):
return random.sample(self._samples, len(self._samples))
else:
return random.sample(self._samples, no_samples)
@property
def num_samples(self):
return len(self._samples)
memory = Memory(500000)
def choose_action(state, primary_network, eps):
if random.random() < eps:
return random.randint(0, num_actions - 1)
else:
return np.argmax(primary_network(state.reshape(1, -1)))
def train(primary_network, memory, target_network):
batch = memory.sample(BATCH_SIZE)
states = np.array([val[0] for val in batch])
actions = np.array([val[1] for val in batch])
rewards = np.array([val[2] for val in batch])
next_states = np.array([(np.zeros(state_size)
if val[3] is None else val[3]) for val in batch])
# predict Q(s,a) given the batch of states
prim_qt = primary_network(states)
# predict Q(s',a') from the evaluation network
prim_qtp1 = primary_network(next_states)
# copy the prim_qt tensor into the target_q tensor - we then will update one index corresponding to the max action
target_q = prim_qt.numpy()
updates = rewards
valid_idxs = np.array(next_states).sum(axis=1) != 0
batch_idxs = np.arange(BATCH_SIZE)
# extract the best action from the next state
prim_action_tp1 = np.argmax(prim_qtp1.numpy(), axis=1)
# get all the q values for the next state
q_from_target = target_network(next_states)
# add the discounted estimated reward from the selected action (prim_action_tp1)
updates[valid_idxs] += GAMMA * q_from_target.numpy()[batch_idxs[valid_idxs], prim_action_tp1[valid_idxs]]
# update the q target to train towards
target_q[batch_idxs, actions] = updates
# run a training batch
loss = primary_network.train_on_batch(states, target_q)
return loss
num_episodes = 1000000
eps = MAX_EPSILON
render = False
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/DuelingQ_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
steps = 0
for i in range(num_episodes):
cnt = 1
avg_loss = 0
tot_reward = 0
state = env.reset()
while True:
if render:
env.render()
action = choose_action(state, primary_network, eps)
next_state, _, done, info = env.step(action)
reward = np.random.normal(1.0, RANDOM_REWARD_STD)
tot_reward += reward
if done:
next_state = None
# store in memory
memory.add_sample((state, action, reward, next_state))
if steps > DELAY_TRAINING:
loss = train(primary_network, memory, target_network)
update_network(primary_network, target_network)
else:
loss = -1
avg_loss += loss
# linearly decay the eps value
if steps > DELAY_TRAINING:
eps = MAX_EPSILON - ((steps - DELAY_TRAINING) / EPSILON_MIN_ITER) * \
(MAX_EPSILON - MIN_EPSILON) if steps < EPSILON_MIN_ITER else \
MIN_EPSILON
steps += 1
if done:
if steps > DELAY_TRAINING:
avg_loss /= cnt
print(f"Episode: {i}, Reward: {cnt}, avg loss: {avg_loss:.5f}, eps: {eps:.3f}")
with train_writer.as_default():
tf.summary.scalar('reward', cnt, step=i)
tf.summary.scalar('avg loss', avg_loss, step=i)
else:
print(f"Pre-training...Episode: {i}")
break
state = next_state
cnt += 1
| 6,519 | 35.629213 | 118 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tf_visualization.py | import tensorflow as tf
import numpy as np
from tensorflow.keras.datasets import mnist
STORE_PATH = 'C:\\Users\\Andy\\TensorFlowBook\\TensorBoard'
def get_batch(x_data, y_data, batch_size):
idxs = np.random.randint(0, len(y_data), batch_size)
return x_data[idxs,:,:], y_data[idxs]
def nn_example():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Python optimisation variables
learning_rate = 0.5
epochs = 20
batch_size = 100
with tf.name_scope("inputs"):
# declare the training data placeholders
x = tf.placeholder(tf.float32, [None, 28, 28])
# reshape input x - for 28 x 28 pixels = 784
x_rs = tf.reshape(x, [-1, 784])
# scale the input data (maximum is 255.0, minimum is 0.0)
x_sc = tf.div(x_rs, 255.0)
# now declare the output data placeholder - 10 digits
y = tf.placeholder(tf.int64, [None, 1])
# convert the y data to one hot values
y_one_hot = tf.reshape(tf.one_hot(y, 10), [-1, 10])
with tf.name_scope("layer_1"):
# now declare the weights connecting the input to the hidden layer
W1 = tf.Variable(tf.random_normal([784, 300], stddev=0.01), name='W')
b1 = tf.Variable(tf.random_normal([300]), name='b')
hidden_logits = tf.add(tf.matmul(x_sc, W1), b1)
hidden_out = tf.nn.sigmoid(hidden_logits)
tf.summary.histogram("Hidden_logits", hidden_logits)
tf.summary.histogram("Hidden_output", hidden_out)
with tf.name_scope("layer_2"):
# and the weights connecting the hidden layer to the output layer
W2 = tf.Variable(tf.random_normal([300, 10], stddev=0.05), name='W')
b2 = tf.Variable(tf.random_normal([10]), name='b')
logits = tf.add(tf.matmul(hidden_out, W2), b2)
# now let's define the cost function which we are going to train the model on
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot,
logits=logits))
# add an optimiser
optimiser = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cross_entropy)
# finally setup the initialisation operator
init_op = tf.global_variables_initializer()
# define an accuracy assessment operation
with tf.name_scope("accuracy"):
correct_prediction = tf.equal(tf.argmax(y_one_hot, 1), tf.argmax(logits, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
with tf.variable_scope("getimages"):
correct_inputs = tf.boolean_mask(x_sc, correct_prediction)
image_summary_true = tf.summary.image('correct_images', tf.reshape(correct_inputs, (-1, 28, 28, 1)),
max_outputs=5)
incorrect_inputs = tf.boolean_mask(x_sc, tf.logical_not(correct_prediction))
image_summary_false = tf.summary.image('incorrect_images', tf.reshape(incorrect_inputs, (-1, 28, 28, 1)),
max_outputs=5)
# add a summary to store the accuracy
tf.summary.scalar('acc_summary', accuracy)
merged = tf.summary.merge_all()
# start the session
with tf.Session() as sess:
sess.run(init_op)
writer = tf.summary.FileWriter(STORE_PATH, sess.graph)
# initialise the variables
total_batch = int(len(y_train) / batch_size)
for epoch in range(epochs):
avg_cost = 0
for i in range(total_batch):
batch_x, batch_y = get_batch(x_train, y_train, batch_size=batch_size)
_, c = sess.run([optimiser, cross_entropy], feed_dict={x: batch_x, y: batch_y.reshape(-1, 1)})
avg_cost += c / total_batch
acc, summary = sess.run([accuracy, merged], feed_dict={x: x_test, y: y_test.reshape(-1, 1)})
print("Epoch: {}, cost={:.3f}, test set accuracy={:.3f}%".format(epoch + 1, avg_cost, acc*100))
writer.add_summary(summary, epoch)
print("\nTraining complete!")
if __name__ == "__main__":
nn_example() | 4,100 | 44.065934 | 113 | py |
adventures-in-ml-code | adventures-in-ml-code-master/ppo_tf2_cartpole.py | import tensorflow as tf
from tensorflow import keras
import tensorflow_probability as tfp
import numpy as np
import gym
import datetime as dt
STORE_PATH = 'C:\\Users\\andre\\TensorBoard\\PPOCartpole'
CRITIC_LOSS_WEIGHT = 0.5
ENTROPY_LOSS_WEIGHT = 0.01
ENT_DISCOUNT_RATE = 0.995
BATCH_SIZE = 64
GAMMA = 0.99
CLIP_VALUE = 0.2
LR = 0.001
NUM_TRAIN_EPOCHS = 10
env = gym.make("CartPole-v0")
state_size = 4
num_actions = env.action_space.n
ent_discount_val = ENTROPY_LOSS_WEIGHT
class Model(keras.Model):
def __init__(self, num_actions):
super().__init__()
self.num_actions = num_actions
self.dense1 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.dense2 = keras.layers.Dense(64, activation='relu',
kernel_initializer=keras.initializers.he_normal())
self.value = keras.layers.Dense(1)
self.policy_logits = keras.layers.Dense(num_actions)
def call(self, inputs):
x = self.dense1(inputs)
x = self.dense2(x)
return self.value(x), self.policy_logits(x)
def action_value(self, state):
value, logits = self.predict_on_batch(state)
dist = tfp.distributions.Categorical(logits=logits)
action = dist.sample()
return action, value
def critic_loss(discounted_rewards, value_est):
return tf.cast(tf.reduce_mean(keras.losses.mean_squared_error(discounted_rewards, value_est)) * CRITIC_LOSS_WEIGHT,
tf.float32)
def entropy_loss(policy_logits, ent_discount_val):
probs = tf.nn.softmax(policy_logits)
entropy_loss = -tf.reduce_mean(keras.losses.categorical_crossentropy(probs, probs))
return entropy_loss * ent_discount_val
def actor_loss(advantages, old_probs, action_inds, policy_logits):
probs = tf.nn.softmax(policy_logits)
new_probs = tf.gather_nd(probs, action_inds)
ratio = new_probs / old_probs
policy_loss = -tf.reduce_mean(tf.math.minimum(
ratio * advantages,
tf.clip_by_value(ratio, 1.0 - CLIP_VALUE, 1.0 + CLIP_VALUE) * advantages
))
return policy_loss
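# actor_loss implements the PPO clipped surrogate objective: the ratio pi_new(a|s) / pi_old(a|s)
# is multiplied by the advantage but also clipped to [1 - CLIP_VALUE, 1 + CLIP_VALUE], and the
# minimum of the two terms is maximised (minimised here with a leading minus sign), which
# discourages policy updates that move too far from the behaviour policy in a single batch.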
def train_model(action_inds, old_probs, states, advantages, discounted_rewards, optimizer, ent_discount_val):
with tf.GradientTape() as tape:
values, policy_logits = model.call(tf.stack(states))
act_loss = actor_loss(advantages, old_probs, action_inds, policy_logits)
ent_loss = entropy_loss(policy_logits, ent_discount_val)
c_loss = critic_loss(discounted_rewards, values)
tot_loss = act_loss + ent_loss + c_loss
grads = tape.gradient(tot_loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
return tot_loss, c_loss, act_loss, ent_loss
def get_advantages(rewards, dones, values, next_value):
discounted_rewards = np.array(rewards + [next_value[0]])
for t in reversed(range(len(rewards))):
discounted_rewards[t] = rewards[t] + GAMMA * discounted_rewards[t+1] * (1-dones[t])
discounted_rewards = discounted_rewards[:-1]
# advantages are bootstrapped discounted rewards - values, using Bellman's equation
advantages = discounted_rewards - np.stack(values)[:, 0]
# standardise advantages
advantages -= np.mean(advantages)
advantages /= (np.std(advantages) + 1e-10)
# standardise rewards too
discounted_rewards -= np.mean(discounted_rewards)
discounted_rewards /= (np.std(discounted_rewards) + 1e-8)
return discounted_rewards, advantages
model = Model(num_actions)
optimizer = keras.optimizers.Adam(learning_rate=LR)
train_writer = tf.summary.create_file_writer(STORE_PATH + f"/PPO-CartPole_{dt.datetime.now().strftime('%d%m%Y%H%M')}")
num_steps = 10000000
episode_reward_sum = 0
state = env.reset()
episode = 1
total_loss = None
for step in range(num_steps):
rewards = []
actions = []
values = []
states = []
dones = []
probs = []
for _ in range(BATCH_SIZE):
_, policy_logits = model(state.reshape(1, -1))
action, value = model.action_value(state.reshape(1, -1))
new_state, reward, done, _ = env.step(action.numpy()[0])
actions.append(action)
values.append(value[0])
states.append(state)
dones.append(done)
probs.append(policy_logits)
episode_reward_sum += reward
state = new_state
if done:
rewards.append(0.0)
state = env.reset()
if total_loss is not None:
print(f"Episode: {episode}, latest episode reward: {episode_reward_sum}, "
f"total loss: {np.mean(total_loss)}, critic loss: {np.mean(c_loss)}, "
f"actor loss: {np.mean(act_loss)}, entropy loss {np.mean(ent_loss)}")
with train_writer.as_default():
tf.summary.scalar('rewards', episode_reward_sum, episode)
episode_reward_sum = 0
episode += 1
else:
rewards.append(reward)
_, next_value = model.action_value(state.reshape(1, -1))
discounted_rewards, advantages = get_advantages(rewards, dones, values, next_value[0])
actions = tf.squeeze(tf.stack(actions))
probs = tf.nn.softmax(tf.squeeze(tf.stack(probs)))
action_inds = tf.stack([tf.range(0, actions.shape[0]), tf.cast(actions, tf.int32)], axis=1)
total_loss = np.zeros((NUM_TRAIN_EPOCHS))
act_loss = np.zeros((NUM_TRAIN_EPOCHS))
    c_loss = np.zeros((NUM_TRAIN_EPOCHS))
ent_loss = np.zeros((NUM_TRAIN_EPOCHS))
for epoch in range(NUM_TRAIN_EPOCHS):
loss_tuple = train_model(action_inds, tf.gather_nd(probs, action_inds),
states, advantages, discounted_rewards, optimizer,
ent_discount_val)
total_loss[epoch] = loss_tuple[0]
c_loss[epoch] = loss_tuple[1]
act_loss[epoch] = loss_tuple[2]
ent_loss[epoch] = loss_tuple[3]
ent_discount_val *= ENT_DISCOUNT_RATE
with train_writer.as_default():
tf.summary.scalar('tot_loss', np.mean(total_loss), step)
tf.summary.scalar('critic_loss', np.mean(c_loss), step)
tf.summary.scalar('actor_loss', np.mean(act_loss), step)
tf.summary.scalar('entropy_loss', np.mean(ent_loss), step) | 6,351 | 35.297143 | 119 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_eager_tf_2.py | import tensorflow as tf
from tensorflow import keras
import datetime as dt
tf.enable_eager_execution()
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# prepare training data
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32).shuffle(10000)
train_dataset = train_dataset.map(lambda x, y: (tf.div(tf.cast(x, tf.float32), 255.0), tf.reshape(tf.one_hot(y, 10), (-1, 10))))
train_dataset = train_dataset.map(lambda x, y: (tf.image.random_flip_left_right(x), y))
train_dataset = train_dataset.repeat()
# prepare validation data
valid_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(5000).shuffle(10000)
valid_dataset = valid_dataset.map(lambda x, y: (tf.div(tf.cast(x, tf.float32),255.0), tf.reshape(tf.one_hot(y, 10), (-1, 10))))
valid_dataset = valid_dataset.repeat()
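# The tf.data pipelines scale the CIFAR-10 images to [0, 1], one-hot encode the labels, and (for
# the training set only) apply a random left-right flip as light augmentation; repeat() lets Keras
# draw batches indefinitely, controlled by steps_per_epoch / validation_steps in model.fit below.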
class CIFAR10Model(keras.Model):
def __init__(self):
super(CIFAR10Model, self).__init__(name='cifar_cnn')
self.conv1 = keras.layers.Conv2D(64, 5,
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.max_pool2d = keras.layers.MaxPooling2D((3, 3), (2, 2), padding='same')
self.max_norm = keras.layers.BatchNormalization()
self.conv2 = keras.layers.Conv2D(64, 5,
padding='same',
activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.flatten = keras.layers.Flatten()
self.fc1 = keras.layers.Dense(750, activation=tf.nn.relu,
kernel_initializer=tf.initializers.variance_scaling,
kernel_regularizer=keras.regularizers.l2(l=0.001))
self.dropout = keras.layers.Dropout(0.5)
self.fc2 = keras.layers.Dense(10)
self.softmax = keras.layers.Softmax()
def call(self, x):
x = self.max_pool2d(self.conv1(x))
x = self.max_norm(x)
x = self.max_pool2d(self.conv2(x))
x = self.max_norm(x)
x = self.flatten(x)
x = self.dropout(self.fc1(x))
x = self.fc2(x)
return self.softmax(x)
model = CIFAR10Model()
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='categorical_crossentropy',
metrics=['accuracy'])
callbacks = [
# Write TensorBoard logs to `./logs` directory
keras.callbacks.TensorBoard(log_dir='./log/{}'.format(dt.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")), write_images=True)
]
model.fit(train_dataset, epochs=200, steps_per_epoch=1500,
validation_data=valid_dataset,
validation_steps=3, callbacks=callbacks)
| 3,037 | 44.343284 | 128 | py |
adventures-in-ml-code | adventures-in-ml-code-master/tf_word2vec.py | import urllib.request
import collections
import math
import os
import random
import zipfile
import datetime as dt
import numpy as np
import tensorflow as tf
def maybe_download(filename, url, expected_bytes):
"""Download a file if not present, and make sure it's the right size."""
if not os.path.exists(filename):
filename, _ = urllib.request.urlretrieve(url + filename, filename)
statinfo = os.stat(filename)
if statinfo.st_size == expected_bytes:
print('Found and verified', filename)
else:
print(statinfo.st_size)
raise Exception(
'Failed to verify ' + filename + '. Can you get to it with a browser?')
return filename
# Read the data into a list of strings.
def read_data(filename):
"""Extract the first file enclosed in a zip file as a list of words."""
with zipfile.ZipFile(filename) as f:
data = tf.compat.as_str(f.read(f.namelist()[0])).split()
return data
def build_dataset(words, n_words):
"""Process raw inputs into a dataset."""
count = [['UNK', -1]]
count.extend(collections.Counter(words).most_common(n_words - 1))
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0 # dictionary['UNK']
unk_count += 1
data.append(index)
count[0][1] = unk_count
reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reversed_dictionary
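# build_dataset() returns the corpus encoded as integer ids ('data'), the word-frequency list with
# the out-of-vocabulary 'UNK' token at index 0 ('count'), and the word <-> id mappings in both
# directions.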
def collect_data(vocabulary_size=10000):
url = 'http://mattmahoney.net/dc/'
filename = maybe_download('text8.zip', url, 31344016)
vocabulary = read_data(filename)
print(vocabulary[:7])
data, count, dictionary, reverse_dictionary = build_dataset(vocabulary,
vocabulary_size)
del vocabulary # Hint to reduce memory.
return data, count, dictionary, reverse_dictionary
data_index = 0
# generate batch data
def generate_batch(data, batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
context = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
span = 2 * skip_window + 1 # [ skip_window input_word skip_window ]
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window # input word at the center of the buffer
targets_to_avoid = [skip_window]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window] # this is the input word
context[i * num_skips + j, 0] = buffer[target] # these are the context words
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
# Backtrack a little bit to avoid skipping words in the end of a batch
data_index = (data_index + len(data) - span) % len(data)
return batch, context
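# generate_batch() produces skip-gram training pairs: for each centre word it samples num_skips
# distinct context words from a window of skip_window words on either side, returning the centre
# words as 'batch' and the sampled context words as 'context'.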
vocabulary_size = 10000
data, count, dictionary, reverse_dictionary = collect_data(vocabulary_size=vocabulary_size)
batch_size = 128
embedding_size = 300 # Dimension of the embedding vector.
skip_window = 2 # How many words to consider left and right.
num_skips = 2 # How many times to reuse an input to generate a label.
# We pick a random validation set to sample nearest neighbors. Here we limit the
# validation samples to the words that have a low numeric ID, which by
# construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
valid_examples = np.random.choice(valid_window, valid_size, replace=False)
num_sampled = 64 # Number of negative examples to sample.
graph = tf.Graph()
with graph.as_default():
# Input data.
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_context = tf.placeholder(tf.int32, shape=[batch_size, 1])
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the softmax
weights = tf.Variable(
tf.truncated_normal([embedding_size, vocabulary_size],
stddev=1.0 / math.sqrt(embedding_size)))
biases = tf.Variable(tf.zeros([vocabulary_size]))
hidden_out = tf.transpose(tf.matmul(tf.transpose(weights), tf.transpose(embed))) + biases
# convert train_context to a one-hot format
train_one_hot = tf.one_hot(train_context, vocabulary_size)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hidden_out, labels=train_one_hot))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(cross_entropy)
# Compute the cosine similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(
valid_embeddings, normalized_embeddings, transpose_b=True)
# Add variable initializer.
init = tf.global_variables_initializer()
def run(graph, num_steps):
with tf.Session(graph=graph) as session:
# We must initialize all variables before we use them.
init.run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_inputs, batch_context = generate_batch(data,
batch_size, num_skips, skip_window)
feed_dict = {train_inputs: batch_inputs, train_context: batch_context}
# We perform one update step by evaluating the optimizer op (including it
# in the list of returned values for session.run()
_, loss_val = session.run([optimizer, cross_entropy], feed_dict=feed_dict)
average_loss += loss_val
if step % 2000 == 0:
if step > 0:
average_loss /= 2000
# The average loss is an estimate of the loss over the last 2000 batches.
print('Average loss at step ', step, ': ', average_loss)
average_loss = 0
# Note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k + 1]
log_str = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log_str = '%s %s,' % (log_str, close_word)
print(log_str)
final_embeddings = normalized_embeddings.eval()
num_steps = 100
softmax_start_time = dt.datetime.now()
run(graph, num_steps=num_steps)
softmax_end_time = dt.datetime.now()
print("Softmax method took {} minutes to run 100 iterations".format((softmax_end_time-softmax_start_time).total_seconds()))
with graph.as_default():
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
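    # Noise Contrastive Estimation replaces the full softmax over the whole vocabulary with a
    # binary classification against num_sampled randomly drawn negative words per example, which
    # makes each training step far cheaper than the explicit softmax version above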
nce_loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_context,
inputs=embed,
num_sampled=num_sampled,
num_classes=vocabulary_size))
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(nce_loss)
# Add variable initializer.
init = tf.global_variables_initializer()
num_steps = 50000
nce_start_time = dt.datetime.now()
run(graph, num_steps)
nce_end_time = dt.datetime.now()
print("NCE method took {} minutes to run 100 iterations".format((nce_end_time-nce_start_time).total_seconds()))
| 8,637 | 38.263636 | 123 | py |
adventures-in-ml-code | adventures-in-ml-code-master/r_learning_python.py | import gym
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, InputLayer
import matplotlib.pylab as plt
env = gym.make('NChain-v0')
def naive_sum_reward_agent(env, num_episodes=500):
# this is the table that will hold our summated rewards for
# each action in each state
r_table = np.zeros((5, 2))
for g in range(num_episodes):
s = env.reset()
done = False
while not done:
if np.sum(r_table[s, :]) == 0:
# make a random selection of actions
a = np.random.randint(0, 2)
else:
# select the action with highest cummulative reward
a = np.argmax(r_table[s, :])
new_s, r, done, _ = env.step(a)
r_table[s, a] += r
s = new_s
return r_table
def q_learning_with_table(env, num_episodes=500):
q_table = np.zeros((5, 2))
y = 0.95
lr = 0.8
for i in range(num_episodes):
s = env.reset()
done = False
while not done:
if np.sum(q_table[s,:]) == 0:
# make a random selection of actions
a = np.random.randint(0, 2)
else:
# select the action with largest q value in state s
a = np.argmax(q_table[s, :])
new_s, r, done, _ = env.step(a)
q_table[s, a] += r + lr*(y*np.max(q_table[new_s, :]) - q_table[s, a])
s = new_s
return q_table
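# Tabular Q-learning update used above (and below with epsilon-greedy exploration):
#   Q(s, a) += r + lr * (y * max_a' Q(s', a') - Q(s, a))
# note that, unlike the textbook rule Q(s, a) += lr * (r + y * max_a' Q(s', a') - Q(s, a)),
# the immediate reward is added here without being scaled by the learning rate.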
def eps_greedy_q_learning_with_table(env, num_episodes=500):
q_table = np.zeros((5, 2))
y = 0.95
eps = 0.5
lr = 0.8
decay_factor = 0.999
for i in range(num_episodes):
s = env.reset()
eps *= decay_factor
done = False
while not done:
if np.random.random() < eps or np.sum(q_table[s, :]) == 0:
a = np.random.randint(0, 2)
else:
a = np.argmax(q_table[s, :])
# pdb.set_trace()
new_s, r, done, _ = env.step(a)
            # standard Q-learning temporal-difference update
            q_table[s, a] += lr * (r + y * np.max(q_table[new_s, :]) - q_table[s, a])
s = new_s
return q_table
def test_methods(env, num_iterations=100):
winner = np.zeros((3,))
for g in range(num_iterations):
m0_table = naive_sum_reward_agent(env, 500)
m1_table = q_learning_with_table(env, 500)
m2_table = eps_greedy_q_learning_with_table(env, 500)
m0 = run_game(m0_table, env)
m1 = run_game(m1_table, env)
m2 = run_game(m2_table, env)
w = np.argmax(np.array([m0, m1, m2]))
winner[w] += 1
print("Game {} of {}".format(g + 1, num_iterations))
return winner
def run_game(table, env):
s = env.reset()
tot_reward = 0
done = False
while not done:
a = np.argmax(table[s, :])
s, r, done, _ = env.step(a)
tot_reward += r
return tot_reward
def q_learning_keras(env, num_episodes=1000):
# create the keras model
model = Sequential()
model.add(InputLayer(batch_input_shape=(1, 5)))
model.add(Dense(10, activation='sigmoid'))
model.add(Dense(2, activation='linear'))
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# now execute the q learning
y = 0.95
eps = 0.5
decay_factor = 0.999
r_avg_list = []
for i in range(num_episodes):
s = env.reset()
eps *= decay_factor
if i % 100 == 0:
print("Episode {} of {}".format(i + 1, num_episodes))
done = False
r_sum = 0
while not done:
if np.random.random() < eps:
a = np.random.randint(0, 2)
else:
a = np.argmax(model.predict(np.identity(5)[s:s + 1]))
new_s, r, done, _ = env.step(a)
target = r + y * np.max(model.predict(np.identity(5)[new_s:new_s + 1]))
target_vec = model.predict(np.identity(5)[s:s + 1])[0]
target_vec[a] = target
model.fit(np.identity(5)[s:s + 1], target_vec.reshape(-1, 2), epochs=1, verbose=0)
s = new_s
r_sum += r
r_avg_list.append(r_sum / 1000)
plt.plot(r_avg_list)
plt.ylabel('Average reward per game')
plt.xlabel('Number of games')
plt.show()
for i in range(5):
print("State {} - action {}".format(i, model.predict(np.identity(5)[i:i + 1])))
if __name__ == "__main__":
q_learning_keras(env) | 4,424 | 32.522727 | 94 | py |
adventures-in-ml-code | adventures-in-ml-code-master/conv_net_py_torch.py | import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import torchvision.transforms as transforms
import torchvision.datasets
from bokeh.plotting import figure
from bokeh.io import show
from bokeh.models import LinearAxis, Range1d
import numpy as np
# Hyperparameters
num_epochs = 6
num_classes = 10
batch_size = 100
learning_rate = 0.001
DATA_PATH = 'C:\\Users\Andy\PycharmProjects\MNISTData'
MODEL_STORE_PATH = 'C:\\Users\Andy\PycharmProjects\pytorch_models\\'
# transforms to apply to the data
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))])
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=True, transform=trans, download=True)
test_dataset = torchvision.datasets.MNIST(root=DATA_PATH, train=False, transform=trans)
# Data loader
train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)
# Convolutional neural network (two convolutional layers)
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(1, 32, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.layer2 = nn.Sequential(
nn.Conv2d(32, 64, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2))
self.drop_out = nn.Dropout()
self.fc1 = nn.Linear(7 * 7 * 64, 1000)
self.fc2 = nn.Linear(1000, 10)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = out.reshape(out.size(0), -1)
out = self.drop_out(out)
out = self.fc1(out)
out = self.fc2(out)
return out
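# Added helper (illustrative sketch, not part of the original script; the function name is ours).
def _check_conv_shapes():
    # A 28x28 MNIST image keeps its spatial size through each padded 5x5 convolution and is halved
    # by each 2x2 max-pool (28 -> 14 -> 7), so the tensor entering fc1 has 7 * 7 * 64 = 3136 features.
    dummy = torch.zeros(1, 1, 28, 28)
    net = ConvNet()
    out = net.layer2(net.layer1(dummy))
    assert out.shape == (1, 64, 7, 7)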
model = ConvNet()
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(train_loader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
# Run the forward pass
outputs = model(images)
loss = criterion(outputs, labels)
loss_list.append(loss.item())
# Backprop and perform Adam optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Track the accuracy
total = labels.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
acc_list.append(correct / total)
if (i + 1) % 100 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
(correct / total) * 100))
# Test the model
model.eval()
with torch.no_grad():
correct = 0
total = 0
for images, labels in test_loader:
outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format((correct / total) * 100))
# Save the model and plot
torch.save(model.state_dict(), MODEL_STORE_PATH + 'conv_net_model.ckpt')
p = figure(y_axis_label='Loss', width=850, y_range=(0, 1), title='PyTorch ConvNet results')
p.extra_y_ranges = {'Accuracy': Range1d(start=0, end=100)}
p.add_layout(LinearAxis(y_range_name='Accuracy', axis_label='Accuracy (%)'), 'right')
p.line(np.arange(len(loss_list)), loss_list)
p.line(np.arange(len(loss_list)), np.array(acc_list) * 100, y_range_name='Accuracy', color='red')
show(p)
| 3,793 | 32.575221 | 102 | py |
adventures-in-ml-code | adventures-in-ml-code-master/keras_cnn.py | from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.layers import Dense, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Sequential
import matplotlib.pylab as plt
batch_size = 128
num_classes = 10
epochs = 10
# input image dimensions
img_x, img_y = 28, 28
# load the MNIST data set, which already splits into train and test sets for us
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape the data into a 4D tensor - (sample_number, x_img_size, y_img_size, num_channels)
# because the MNIST is greyscale, we only have a single channel - RGB colour images would have 3
x_train = x_train.reshape(x_train.shape[0], img_x, img_y, 1)
x_test = x_test.reshape(x_test.shape[0], img_x, img_y, 1)
input_shape = (img_x, img_y, 1)
# convert the data to the right type
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices - this is for use in the
# categorical_crossentropy loss below
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, kernel_size=(5, 5), strides=(1, 1),
activation='relu',
input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Conv2D(64, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
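# Note (added annotation, not part of the original tutorial): the first Conv2D layer above has
# 32 * (5 * 5 * 1) + 32 = 832 trainable parameters (25 weights per filter plus one bias for each
# of the 32 filters); model.summary() prints the full per-layer breakdown.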
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=['accuracy'])
class AccuracyHistory(keras.callbacks.Callback):
def on_train_begin(self, logs={}):
self.acc = []
def on_epoch_end(self, batch, logs={}):
self.acc.append(logs.get('acc'))
history = AccuracyHistory()
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
callbacks=[history])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
plt.plot(range(1, epochs + 1), history.acc)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.show()
| 2,477 | 31.181818 | 96 | py |
adventures-in-ml-code | adventures-in-ml-code-master/neural_network_tutorial.py | from sklearn.datasets import load_digits
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import numpy as np
import numpy.random as r
import matplotlib.pyplot as plt
def convert_y_to_vect(y):
y_vect = np.zeros((len(y), 10))
for i in range(len(y)):
y_vect[i, y[i]] = 1
return y_vect
def f(x):
return 1 / (1 + np.exp(-x))
def f_deriv(x):
return f(x) * (1 - f(x))
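# Added illustrative check (not part of the original tutorial; the helper name is ours): f_deriv
# uses the identity f'(x) = f(x) * (1 - f(x)), so the slope of the sigmoid at x = 0 is 0.5 * 0.5 = 0.25.
def _check_sigmoid_derivative():
    assert abs(f_deriv(0.0) - 0.25) < 1e-12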
def setup_and_init_weights(nn_structure):
W = {}
b = {}
for l in range(1, len(nn_structure)):
W[l] = r.random_sample((nn_structure[l], nn_structure[l-1]))
b[l] = r.random_sample((nn_structure[l],))
return W, b
def init_tri_values(nn_structure):
tri_W = {}
tri_b = {}
for l in range(1, len(nn_structure)):
tri_W[l] = np.zeros((nn_structure[l], nn_structure[l-1]))
tri_b[l] = np.zeros((nn_structure[l],))
return tri_W, tri_b
def feed_forward(x, W, b):
h = {1: x}
z = {}
for l in range(1, len(W) + 1):
# if it is the first layer, then the input into the weights is x, otherwise,
# it is the output from the last layer
if l == 1:
node_in = x
else:
node_in = h[l]
z[l+1] = W[l].dot(node_in) + b[l] # z^(l+1) = W^(l)*h^(l) + b^(l)
h[l+1] = f(z[l+1]) # h^(l) = f(z^(l))
return h, z
def calculate_out_layer_delta(y, h_out, z_out):
# delta^(nl) = -(y_i - h_i^(nl)) * f'(z_i^(nl))
return -(y-h_out) * f_deriv(z_out)
def calculate_hidden_delta(delta_plus_1, w_l, z_l):
# delta^(l) = (transpose(W^(l)) * delta^(l+1)) * f'(z^(l))
return np.dot(np.transpose(w_l), delta_plus_1) * f_deriv(z_l)
def train_nn(nn_structure, X, y, iter_num=3000, alpha=0.25):
W, b = setup_and_init_weights(nn_structure)
cnt = 0
m = len(y)
avg_cost_func = []
print('Starting gradient descent for {} iterations'.format(iter_num))
while cnt < iter_num:
if cnt%1000 == 0:
print('Iteration {} of {}'.format(cnt, iter_num))
tri_W, tri_b = init_tri_values(nn_structure)
avg_cost = 0
for i in range(len(y)):
delta = {}
# perform the feed forward pass and return the stored h and z values, to be used in the
# gradient descent step
h, z = feed_forward(X[i, :], W, b)
# loop from nl-1 to 1 backpropagating the errors
for l in range(len(nn_structure), 0, -1):
if l == len(nn_structure):
delta[l] = calculate_out_layer_delta(y[i,:], h[l], z[l])
avg_cost += np.linalg.norm((y[i,:]-h[l]))
else:
if l > 1:
delta[l] = calculate_hidden_delta(delta[l+1], W[l], z[l])
# triW^(l) = triW^(l) + delta^(l+1) * transpose(h^(l))
tri_W[l] += np.dot(delta[l+1][:,np.newaxis], np.transpose(h[l][:,np.newaxis]))
# trib^(l) = trib^(l) + delta^(l+1)
tri_b[l] += delta[l+1]
# perform the gradient descent step for the weights in each layer
for l in range(len(nn_structure) - 1, 0, -1):
W[l] += -alpha * (1.0/m * tri_W[l])
b[l] += -alpha * (1.0/m * tri_b[l])
# complete the average cost calculation
avg_cost = 1.0/m * avg_cost
avg_cost_func.append(avg_cost)
cnt += 1
return W, b, avg_cost_func
def predict_y(W, b, X, n_layers):
m = X.shape[0]
y = np.zeros((m,))
for i in range(m):
h, z = feed_forward(X[i, :], W, b)
y[i] = np.argmax(h[n_layers])
return y
if __name__ == "__main__":
# load data and scale
digits = load_digits()
X_scale = StandardScaler()
X = X_scale.fit_transform(digits.data)
y = digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
# convert digits to vectors
y_v_train = convert_y_to_vect(y_train)
y_v_test = convert_y_to_vect(y_test)
# setup the NN structure
nn_structure = [64, 30, 10]
# train the NN
W, b, avg_cost_func = train_nn(nn_structure, X_train, y_v_train)
# plot the avg_cost_func
plt.plot(avg_cost_func)
plt.ylabel('Average J')
plt.xlabel('Iteration number')
plt.show()
# get the prediction accuracy and print
y_pred = predict_y(W, b, X_test, 3)
print('Prediction accuracy is {}%'.format(accuracy_score(y_test, y_pred) * 100))
| 4,528 | 31.582734 | 99 | py |
adventures-in-ml-code | adventures-in-ml-code-master/weight_init_tensorflow.py | import tensorflow as tf
import os
from tensorflow.examples.tutorials.mnist import input_data
from functools import partial
base_path = "C:\\Users\\Andy\\PycharmProjects\\Tensorboard\\weights\\"
def maybe_create_folder_structure(sub_folders):
for fold in sub_folders:
if not os.path.isdir(base_path + fold):
os.makedirs(base_path + fold)
class Model(object):
def __init__(self, input_size, label_size, initialization, activation, num_layers=3,
hidden_size=100):
self._input_size = input_size
self._label_size = label_size
self._init = initialization
self._activation = activation
# num layers does not include the input layer
self._num_layers = num_layers
self._hidden_size = hidden_size
self._model_def()
def _model_def(self):
# create placeholder variables
self.input_images = tf.placeholder(tf.float32, shape=[None, self._input_size])
self.labels = tf.placeholder(tf.float32, shape=[None, self._label_size])
# create self._num_layers dense layers as the model
input = self.input_images
tf.summary.scalar("input_var", self._calculate_variance(input))
for i in range(self._num_layers - 1):
input = tf.layers.dense(input, self._hidden_size, kernel_initializer=self._init,
activation=self._activation, name='layer{}'.format(i+1))
# get the input to the nodes (sans bias)
mat_mul_in = tf.get_default_graph().get_tensor_by_name("layer{}/MatMul:0".format(i + 1))
# log pre and post activation function histograms
tf.summary.histogram("mat_mul_hist_{}".format(i + 1), mat_mul_in)
tf.summary.histogram("fc_out_{}".format(i + 1), input)
# also log the variance of mat mul
tf.summary.scalar("mat_mul_var_{}".format(i + 1), self._calculate_variance(mat_mul_in))
# don't supply an activation for the final layer - the loss definition will
# supply softmax activation. This defaults to a linear activation i.e. f(x) = x
logits = tf.layers.dense(input, 10, name='layer{}'.format(self._num_layers))
mat_mul_in = tf.get_default_graph().get_tensor_by_name("layer{}/MatMul:0".format(self._num_layers))
tf.summary.histogram("mat_mul_hist_{}".format(self._num_layers), mat_mul_in)
tf.summary.histogram("fc_out_{}".format(self._num_layers), input)
# use softmax cross entropy with logits - no need to apply softmax activation to
# logits
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
labels=self.labels))
# add the loss to the summary
tf.summary.scalar('loss', self.loss)
self.optimizer = tf.train.AdamOptimizer().minimize(self.loss)
self.accuracy = self._compute_accuracy(logits, self.labels)
tf.summary.scalar('acc', self.accuracy)
self.merged = tf.summary.merge_all()
self.init_op = tf.global_variables_initializer()
def _compute_accuracy(self, logits, labels):
prediction = tf.argmax(logits, 1)
equality = tf.equal(prediction, tf.argmax(labels, 1))
accuracy = tf.reduce_mean(tf.cast(equality, tf.float32))
return accuracy
def _calculate_variance(self, x):
mean = tf.reduce_mean(x)
sqr = tf.square(x - mean)
return tf.reduce_mean(sqr)
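# Added helper (illustrative, not part of the original experiment; the function name is ours): the
# variance targets behind the initializers compared below. variance_scaling_initializer with
# factor=1.0 and mode='FAN_AVG' targets Var[w] of about 2 / (fan_in + fan_out) (Xavier/Glorot),
# while factor=2.0 with mode='FAN_IN' targets about 2 / fan_in (He), which is matched to ReLU units.
def _target_init_variances(fan_in, fan_out):
    xavier_var = 2.0 / (fan_in + fan_out)
    he_var = 2.0 / fan_in
    return xavier_var, he_var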
def init_pass_through(model, fold):
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
with tf.Session() as sess:
sess.run(model.init_op)
train_writer = tf.summary.FileWriter(base_path + fold,
sess.graph)
image_batch, label_batch = mnist.train.next_batch(100)
summary = sess.run(model.merged, feed_dict={model.input_images: image_batch,
model.labels: label_batch})
train_writer.add_summary(summary, 0)
def train_model(model, fold, batch_size, epochs):
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
with tf.Session() as sess:
sess.run(model.init_op)
train_writer = tf.summary.FileWriter(base_path + fold,
sess.graph)
for i in range(epochs):
image_batch, label_batch = mnist.train.next_batch(batch_size)
loss, _, acc = sess.run([model.loss, model.optimizer, model.accuracy],
feed_dict={model.input_images: image_batch,
model.labels: label_batch})
if i % 50 == 0:
print("Iteration {} of {} - loss: {:.3f}, training accuracy: {:.2f}%".
format(i, epochs, loss, acc*100))
summary = sess.run(model.merged, feed_dict={model.input_images: image_batch,
model.labels: label_batch})
train_writer.add_summary(summary, i)
if __name__ == "__main__":
sub_folders = ['first_pass_normal', 'first_pass_variance',
'full_train_normal', 'full_train_variance',
'full_train_normal_relu', 'full_train_variance_relu',
'full_train_he_relu']
initializers = [tf.random_normal_initializer,
tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=False),
tf.random_normal_initializer,
tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=False),
tf.random_normal_initializer,
tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=False),
tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False)]
activations = [tf.sigmoid, tf.sigmoid, tf.sigmoid, tf.sigmoid, tf.nn.relu, tf.nn.relu, tf.nn.relu]
assert len(sub_folders) == len(initializers) == len(activations)
maybe_create_folder_structure(sub_folders)
for i in range(len(sub_folders)):
tf.reset_default_graph()
model = Model(784, 10, initializers[i], activations[i])
if "first_pass" in sub_folders[i]:
init_pass_through(model, sub_folders[i])
else:
train_model(model, sub_folders[i], 30, 1000)
| 6,524 | 51.620968 | 110 | py |
ModProp | ModProp-main/setup.py | """
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import re
from setuptools import setup, find_packages
__author__ = "Guillaume Bellec, Darjan Salaj, Anand Subramoney, Arjun Rao"
__version__ = "2.0.0"
__description__ = """
Tensorflow code for Recurrent Spiking Neural Network (so called Long short term memory Spiking Neural Network, LSNN).
For more details:
Long short-term memory and learning-to-learn in networks of spiking neurons
Guillaume Bellec*, Darjan Salaj*, Anand Subramoney*, Robert Legenstein, Wolfgang Maass
* equal contributions
Authors contributions:
GB implemented the initial code of the spiking RNN in Tensorflow. DS lead most of the sequential-MNIST simulation.
AS added features to the model and helped make the code distributable. AR helped to optimize the code.
"""
__copyright__ = "Copyright (C) 2019 the LSNN team"
__license__ = "The Clear BSD License"
def get_requirements(filename):
"""
Helper function to read the list of requirements from a file
"""
dependency_links = []
with open(filename) as requirements_file:
requirements = requirements_file.read().strip('\n').splitlines()
requirements = [req for req in requirements if not req.startswith('#')]
for i, req in enumerate(requirements):
if ':' in req:
match_obj = re.match(r"git\+(?:https|ssh|http):.*#egg=(.*)-(.*)", req)
assert match_obj, "Cannot make sense of url {}".format(req)
requirements[i] = "{req}=={ver}".format(req=match_obj.group(1), ver=match_obj.group(2))
dependency_links.append(req)
return requirements, dependency_links
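# Example (added annotation with a hypothetical requirements.txt entry): a line such as
# "git+https://github.com/example/somepkg.git#egg=somepkg-1.2.3" is rewritten by the regex above
# to the pin "somepkg==1.2.3", while the original URL is appended to dependency_links.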
requirements, dependency_links = get_requirements('requirements.txt')
setup(
name="LSNN",
version=__version__,
packages=find_packages('.'),
author=__author__,
description=__description__,
license=__copyright__,
copyright=__copyright__,
author_email="[email protected]",
provides=['lsnn'],
install_requires=requirements,
dependency_links=dependency_links,
)
| 3,706 | 53.514706 | 844 | py |
ModProp | ModProp-main/bin/rewiring_tools_NP2.py | """
Code modified to enable dense connectivity when Dale's law is enforced
and for type-specific approximation of feedback weights for ModProp
gradient approximation via automatic differentiation
Modified from https://github.com/IGITUGraz/LSNN-official
with the following copyright message retained from the original code:
##
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import tensorflow as tf
import numpy as np
import numpy.random as rd
import numpy.linalg as la
import matplotlib.pyplot as plt
def balance_matrix_per_neuron(M):
M = M.copy()
n_in, n_out = M.shape
for k in range(n_out):
        # Change only non-zero synapses to keep as many zeros as possible
e_act = M[:, k] > 0
i_act = M[:, k] < 0
if np.sum(i_act) == 0:
M[:, k] = 0
print(
                'Warning: Neuron {} has no incoming synapses from inhibitory neurons. Setting all incoming weights to 0 to avoid un-balanced behaviour.'.format(
k))
if np.sum(e_act) == 0:
M[:, k] = 0
print(
                'Warning: Neuron {} has no incoming synapses from excitatory neurons. Setting all incoming weights to 0 to avoid un-balanced behaviour.'.format(
k))
s_e = M[e_act, k].sum()
s_i = M[i_act, k].sum()
# Add a small portion to compensate if the mean is not balanced
if s_e + s_i < 0:
M[e_act, k] += np.abs(s_e + s_i) / np.sum(e_act)
else:
M[i_act, k] -= np.abs(s_e + s_i) / np.sum(i_act)
sum_check = M[:, k].sum()
        assert sum_check ** 2 < 1e-5, 'Mismatch of row balancing for neuron {}, sum is {} with excitatory sum {} and inhibitory sum {}'.format(
k, sum_check, s_e, s_i)
return M
def max_eigen_value_on_unit_circle(w):
vals = np.abs(la.eig(w)[0])
factor = 1. / np.max(vals)
return w * factor, factor
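# Added illustrative check (not part of the original toolbox; the helper name is ours): after the
# rescaling above, the spectral radius (largest eigenvalue magnitude) of the returned matrix is 1.
def _check_unit_spectral_radius(n=10):
    w = rd.randn(n, n)
    w_scaled, factor = max_eigen_value_on_unit_circle(w)
    assert np.isclose(np.max(np.abs(la.eig(w_scaled)[0])), 1.0)
    return factor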
def random_sparse_signed_matrix(neuron_sign, p=1., balance_zero_mean_per_neuron=True, n_out=None):
'''
Provide a good initialization for a matrix with restricted sign.
This is a personal recipe.
:param neuron_sign:
:param p:
:param balance_zero_mean_per_neuron:
:param n_out:
:return:
'''
E = neuron_sign > 0
I = neuron_sign < 0
n = neuron_sign.__len__()
if n_out is None:
n_out = n
# Random numbers
is_con = rd.rand(n, n) < p
theta = np.abs(rd.randn(n, n))
theta = (2 * is_con - 1) * theta
sign = np.tile(np.expand_dims(neuron_sign, 1), (1, n))
w = lambda theta, sign: (theta) * (theta > 0) * sign
_w = w(theta, sign)
if (np.sum(I) > 0):
# Normalize a first time, but this is obsolete if the stabilization happens also on a single neuron basis
val_E = np.sum(_w[E, :])
val_I = - np.sum(_w[I, :])
assert val_I > 0 and val_E > 0, 'Sign error'
theta[I, :] *= val_E / val_I
_w = w(theta, sign)
if balance_zero_mean_per_neuron:
w_balanced = balance_matrix_per_neuron(_w)
theta[theta > 0] = np.abs(w_balanced[theta > 0])
_w = w(theta, sign)
            assert (_w[np.logical_not(is_con)] == 0).all(), 'Balancing the neurons produced a sign error'
else:
print("Warning: no inhibitory neurons detected, no balancing is performed")
# Normalize to scale the eigenvalues
_, factor = max_eigen_value_on_unit_circle(_w)
theta *= factor
_w = w(theta, sign)
assert (_w[E] >= 0).all(), 'Found negative excitatory weights'
    assert (_w[I] <= 0).all(), 'Found positive inhibitory weights'
if n_out is None:
return w, sign, theta, is_con
elif n < n_out:
sel = np.random.choice(n, size=n_out)
else:
sel = np.arange(n_out)
theta = theta[:, sel]
sign = sign[:, sel]
is_con = is_con[:, sel]
return w(theta, sign), sign, theta, is_con
def test_random_sparse_signed_matrix():
# Define parameter
p = .33
p_e = .75
mean_E = .4
std_E = 0
n_in = 400
neuron_sign = rd.choice([1, -1], n_in, p=[p_e, 1 - p_e])
M1, M1_sign, M1_theta, M1_is_con = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=p,
balance_zero_mean_per_neuron=True)
s1, _ = la.eig(M1)
assert np.all(np.abs(M1[M1_is_con]) == M1_theta[M1_is_con])
assert np.all(np.sign(M1) == M1_sign * M1_is_con)
assert np.all(M1_is_con == (M1_theta > 0))
M2, _, _, _ = random_sparse_signed_matrix(neuron_sign=neuron_sign, p=1., balance_zero_mean_per_neuron=True)
M2 = M2 * (rd.rand(n_in, n_in) < p)
s2, _ = la.eig(M2)
fig, ax_list = plt.subplots(2)
ax_list[0].set_title('Random sign constrained without neuron specific balance (p={:.3g})'.format(p))
ax_list[1].set_title('Random sign constrained, probability mask taken after scaling')
ax_list[0].scatter(s1.real, s1.imag)
ax_list[1].scatter(s2.real, s2.imag)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[0].add_artist(c)
c = plt.Circle(xy=(0, 0), radius=1, edgecolor='r', alpha=.5)
ax_list[1].add_artist(c)
for ax in ax_list:
ax.set_xlim([-2, 2])
ax.set_ylim([-2, 2])
plt.show()
def sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(theta_list, ps, upper_bound_check=False):
with tf.name_scope('NBreconnectGenerator'):
theta_vals = [theta.read_value() for theta in theta_list]
# Compute size and probability of connections
nb_possible_connections_list = [tf.cast(tf.size(th), dtype=tf.float32) * p for th, p in zip(theta_list, ps)]
total_possible_connections = tf.reduce_sum(nb_possible_connections_list)
max_total_connections = tf.cast(total_possible_connections, dtype=tf.int32)
sampling_probs = [nb_possible_connections / total_possible_connections \
for nb_possible_connections in nb_possible_connections_list]
def nb_connected(theta_val):
is_con = tf.greater(theta_val, 0)
n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
return n_connected
total_connected = tf.reduce_sum([nb_connected(theta) for theta in theta_vals])
if upper_bound_check:
assert_upper_bound_check = tf.Assert(tf.less_equal(total_connected, max_total_connections),
data=[max_total_connections, total_connected],
name='RewiringUpperBoundCheck')
else:
assert_upper_bound_check = tf.Assert(True,
data=[max_total_connections, total_connected],
name='SkippedRewiringUpperBoundCheck')
with tf.control_dependencies([assert_upper_bound_check]):
nb_reconnect = tf.maximum(0, max_total_connections - total_connected)
sample_split = tf.distributions.Categorical(probs=sampling_probs).sample(nb_reconnect)
is_class_i_list = [tf.equal(sample_split, i) for i in range(len(theta_list))]
counts = [tf.reduce_sum(tf.cast(is_class_i, dtype=tf.int32)) for is_class_i in is_class_i_list]
return counts
def compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list):
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
if dEdWi is not None:
rewiring_gradient_list[0] = dEdWi
rewiring_gradient_list[1] = dEdWr
else:
rewiring_gradient_list[0] = dEdWr[1]
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
ii = 0
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if (dEdWi is not None) or (ii>0):
if v not in rewiring_var_list:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
else:
gathered_grads_and_vars.append((dEdWr[0], v))
ii+=1
return gathered_grads_and_vars
def compute_gradients_with_rewiring_variables(opt, loss, var_list):
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if v not in rewiring_var_list:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
return gathered_grads_and_vars
def compute_gradients_with_rewiring_variables_mprop(opt, loss, var_list, mProp_tuple):
'''
This is where type-specific feedback weight approximation happens
'''
rewiring_w_list = tf.get_collection('Rewiring/Weights')
rewiring_sign_list = tf.get_collection('Rewiring/Signs')
rewiring_var_list = tf.get_collection('Rewiring/Variables')
# replace Wrec with Wab in grad calculation
mode3, cell = mProp_tuple
Wr_old = tf.Variable(tf.zeros_like(cell.w_rec_var), trainable=False)
Wr_old_assign = Wr_old.assign(cell.w_rec_var)
with tf.control_dependencies([Wr_old_assign]):
Wab_assign = cell.w_rec_var.assign(tf.abs(cell.Wab)) # for ModProp_Wab, replace Wrec with Wab when estimating grad
with tf.control_dependencies([Wab_assign]):
# generate the two sets of variables
grads_and_vars = opt.compute_gradients(loss, var_list=var_list)
# compute the gradients of rewired variables (disconnected vars have non zero gradients to avoid irregularities for optimizers with momentum)
rewiring_gradient_list = tf.gradients(loss, rewiring_w_list)
grad_list_cond = rewiring_gradient_list
if cell.w_in_var not in rewiring_var_list: # this code ensures Win grad will be computed
w_in_grad = tf.gradients(loss, cell.w_in_var)[0]
grad_list_cond.append(w_in_grad)
with tf.control_dependencies(grad_list_cond): # set Wrec back after grad of Win and Wrec is computed
Wr_new_assign = cell.w_rec_var.assign(Wr_old)
with tf.control_dependencies([Wr_new_assign]):
rewiring_gradient_list = [g * s if g is not None else None for g, s in
zip(rewiring_gradient_list, rewiring_sign_list)]
rewiring_gradient_dict = dict([(v, g) for g, v in zip(rewiring_gradient_list, rewiring_var_list)])
# OP to apply all gradient descent updates
gathered_grads_and_vars = []
for (g, v) in grads_and_vars:
if v not in rewiring_var_list:
if v == cell.w_in_var:
gathered_grads_and_vars.append((w_in_grad, v))
else:
gathered_grads_and_vars.append((g, v))
else:
gathered_grads_and_vars.append((rewiring_gradient_dict[v], v))
return gathered_grads_and_vars
def get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities):
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th.read_value(), 0) for th in rewiring_var_list]
n_connected_list = [tf.reduce_sum(tf.cast(is_pos, dtype=tf.float32)) for is_pos in is_positive_theta_list]
size_list = [tf.size(is_pos) for is_pos in is_positive_theta_list]
init_n_connected_list = [tf.cast(size, dtype=tf.float32) * p for size, p in
zip(size_list, rewiring_connectivities)]
total_connected = tf.reduce_sum(n_connected_list)
limit_connected = tf.reduce_sum(init_n_connected_list)
check_connectivity = tf.Assert(total_connected <= 1.2*limit_connected, [total_connected, limit_connected],
name='CheckRewiringConnectivityBound') # TEMP!!!!
return check_connectivity
def rewiring_optimizer_wrapper(opt, loss, learning_rate, l1s, temperatures,
rewiring_connectivities, global_step=None,
var_list=None,
grads_and_vars=None, mProp_tuple=(False, )):
if var_list is None:
var_list = tf.trainable_variables()
mode3 = mProp_tuple[0]
# Select the rewired variable in the given list of variable to train
rewiring_var_list = []
rewiring_con_list = []
for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
if v in var_list:
rewiring_var_list.append(v)
rewiring_con_list.append(c)
if grads_and_vars is None:
if mode3:
grads_and_vars = compute_gradients_with_rewiring_variables_mprop(opt, loss, var_list, mProp_tuple)
else:
grads_and_vars = compute_gradients_with_rewiring_variables(opt, loss, var_list)
else:
grads_and_vars = grads_and_vars
assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
for v, gv in zip(var_list, grads_and_vars):
assert v == gv[1]
if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
with tf.control_dependencies(is_positive_theta_list):
check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
with tf.control_dependencies([check_connectivity]):
gradient_check_list = [
tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
(g, v) in grads_and_vars]
with tf.control_dependencies(gradient_check_list):
apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
if len(rewiring_var_list) == 0:
print('Warning: No variable to rewire are found by the rewiring optimizer wrapper')
return apply_gradients
with tf.control_dependencies([apply_gradients]):
# This is to make sure that the algorithms does not reconnect synapses by mistakes,
# This can happen with optimizers like Adam
disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
zip(rewiring_var_list, is_positive_theta_list)]
with tf.control_dependencies(disconnection_guards):
rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
+ tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
for th, th_, l1, temp in
zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
with tf.control_dependencies(apply_regularization):
number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
rewiring_var_list, rewiring_connectivities)
apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
with tf.control_dependencies(apply_rewiring):
train_step = tf.no_op('Train')
return train_step
def rewiring_optimizer_wrapper_NP(dEdWi, dEdWr, opt, loss, learning_rate, l1s, temperatures,
rewiring_connectivities, global_step=None,
var_list=None,
grads_and_vars=None):
if var_list is None:
var_list = tf.trainable_variables()
# Select the rewired variable in the given list of variable to train
rewiring_var_list = []
rewiring_con_list = []
for v,c in zip(tf.get_collection('Rewiring/Variables'), tf.get_collection('Rewiring/ini_con')):
if v in var_list:
rewiring_var_list.append(v)
rewiring_con_list.append(c)
if grads_and_vars is None:
grads_and_vars = compute_gradients_with_rewiring_variables_NP(dEdWi, dEdWr, opt, loss, var_list)
else:
grads_and_vars = grads_and_vars
assert len(var_list) == len(grads_and_vars), 'Found {} elements in var_list and {} in grads_and_vars'.format(len(var_list),len(grads_and_vars))
for v, gv in zip(var_list, grads_and_vars):
assert v == gv[1]
if np.isscalar(l1s): l1s = [l1s for _ in range(len(rewiring_var_list))]
if np.isscalar(temperatures): temperatures = [temperatures for _ in range(len(rewiring_var_list))]
if np.isscalar(rewiring_connectivities): rewiring_connectivities = [rewiring_connectivities for _ in
range(len(rewiring_var_list))]
is_positive_theta_list = [tf.greater(th, 0) for th in rewiring_var_list]
with tf.control_dependencies(is_positive_theta_list):
check_connectivity = get_global_connectivity_bound_assertion(rewiring_var_list, rewiring_connectivities)
with tf.control_dependencies([check_connectivity]):
gradient_check_list = [
tf.check_numerics(g, message='Found NaN or Inf in gradients with respect to the variable ' + v.name) for
(g, v) in grads_and_vars]
with tf.control_dependencies(gradient_check_list):
apply_gradients = opt.apply_gradients(grads_and_vars, global_step=global_step)
if len(rewiring_var_list) == 0:
print('Warning: No variable to rewire are found by the rewiring optimizer wrapper')
return apply_gradients
with tf.control_dependencies([apply_gradients]):
# This is to make sure that the algorithms does not reconnect synapses by mistakes,
# This can happen with optimizers like Adam
disconnection_guards = [tf.assign(var, tf.where(is_pos, var, tf.zeros_like(var))) for var, is_pos in
zip(rewiring_var_list, is_positive_theta_list)]
with tf.control_dependencies(disconnection_guards):
rewiring_var_value_list = [th.read_value() for th in rewiring_var_list]
mask_connected = lambda th: tf.cast(tf.greater(th, 0), tf.float32)
# 0*
noise_update = lambda th: mask_connected(th) * tf.random_normal(shape=tf.shape(th))
apply_regularization = [tf.assign_add(th, - learning_rate * mask_connected(th_) * l1 \
+ tf.sqrt(2 * learning_rate * temp) * noise_update(th_))
for th, th_, l1, temp in
zip(rewiring_var_list, rewiring_var_value_list, l1s, temperatures)]
with tf.control_dependencies(apply_regularization):
number_of_rewired_connections = sample_matrix_specific_reconnection_number_for_global_fixed_connectivity(
rewiring_var_list, rewiring_connectivities)
apply_rewiring = [rewiring(th, ic, nb_reconnect=nb) for th, ic, nb in
zip(rewiring_var_list, rewiring_con_list, number_of_rewired_connections)]
with tf.control_dependencies(apply_rewiring):
train_step = tf.no_op('Train')
return train_step
def weight_sampler(n_in, n_out, p, dtype=tf.float32, neuron_sign=None, w_scale=1., eager=False, is_con_0=None):
'''
    Returns a weight matrix and its underlying variable and sign matrices needed for rewiring.
    :param n_in: number of input units
    :param n_out: number of output units
    :param p: connection probability of the initial mask
    :param dtype: data type of the weight tensors
    :return: signed weight tensor w, sign matrix w_sign, trainable magnitude th, and initial connectivity mask ini_con
'''
if eager:
Variable = tf.contrib.eager.Variable
else:
Variable = tf.Variable
with tf.name_scope('SynapticSampler'):
# Gererate the random mask
# if (p>0.3) and (neuron_sign is not None):
# # Random numbers
# np.random.seed(24)
# is_con_0 = rd.rand(n_in, n_out) < p
# else:
if is_con_0 is None:
nb_non_zero = int(n_in * n_out * p)
is_con_0 = np.zeros((n_in, n_out), dtype=bool)
if p>0.95:
is_con_0 = True
else:
ind_in = rd.choice(np.arange(n_in), size=nb_non_zero)
ind_out = rd.choice(np.arange(n_out), size=nb_non_zero)
is_con_0[ind_in, ind_out] = True
# Generate random signs
if neuron_sign is None:
theta_0 = np.abs(rd.randn(n_in, n_out) / np.sqrt(p*n_in)) # initial weight values
theta_0 = theta_0 * is_con_0
sign_0 = np.sign(rd.randn(n_in, n_out))
else:
assert np.size(neuron_sign) == n_in, 'Size of neuron_sign vector {}, for n_in {} expected'.format(
np.size(neuron_sign), n_in)
_, sign_0, theta_0, _ = random_sparse_signed_matrix(neuron_sign, n_out=n_out) # p=1
theta_0 *= is_con_0
# _, sign_0, theta_0, is_con_0 = random_sparse_signed_matrix(neuron_sign, p=p,
# balance_zero_mean_per_neuron=True, n_out=n_out)
# Define the tensorflow matrices
th = Variable(theta_0 * w_scale, dtype=dtype, name='theta')
w_sign = Variable(sign_0, dtype=dtype, trainable=False, name='sign')
is_connected = tf.greater(th, 0, name='mask')
w = tf.where(condition=is_connected, x=w_sign * th, y=tf.zeros((n_in, n_out), dtype=dtype), name='weight')
ini_con = tf.greater(theta_0 * w_scale, 0)
# Add to collections to by pass and fetch them in the rewiring wrapper function
tf.add_to_collection('Rewiring/Variables', th)
tf.add_to_collection('Rewiring/Signs', w_sign)
tf.add_to_collection('Rewiring/Weights', w)
tf.add_to_collection('Rewiring/ini_con', ini_con)
return w, w_sign, th, ini_con
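# Added usage sketch (illustrative, not part of the original toolbox; the helper name is ours):
# sample a sign-constrained, 10%-connected 100x100 recurrent weight matrix with ~80% excitatory
# units. th is the trainable parameter, w the masked signed weight actually used in the network,
# and ini_con the initial connectivity mask that rewiring is allowed to reconnect within.
def _weight_sampler_usage_sketch(n=100):
    neuron_sign = rd.choice([1, -1], n, p=[0.8, 0.2])
    w, w_sign, th, ini_con = weight_sampler(n, n, p=0.1, neuron_sign=neuron_sign)
    return w, th, ini_con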
def assert_connection_number(theta, targeted_number):
'''
    Function to check during the tensorflow simulation that the number of connections is well defined after each iteration.
:param theta:
:param targeted_number:
:return:
'''
th = theta.read_value()
is_con = tf.greater(th, 0)
nb_is_con = tf.reduce_sum(tf.cast(is_con, tf.int32))
assert_is_con = tf.Assert(tf.equal(nb_is_con, targeted_number), data=[nb_is_con, targeted_number],
name='NumberOfConnectionCheck')
return assert_is_con
def rewiring(theta, ini_con, target_nb_connection=None, nb_reconnect=None, epsilon=1e-12, check_zero_numbers=False):
'''
The rewiring operation to use after each iteration.
:param theta:
:param target_nb_connection:
:return:
'''
with tf.name_scope('rewiring'):
th = theta.read_value()
is_con = tf.greater(th, 0)
#reconnect_candidate_coord = tf.where(tf.logical_not(is_con), name='CandidateCoord')
reconnect_candidate_coord = tf.where(tf.logical_and(tf.logical_not(is_con),ini_con), name='CandidateCoord')
n_candidates = tf.shape(reconnect_candidate_coord)[0]
if nb_reconnect is None:
n_connected = tf.reduce_sum(tf.cast(is_con, tf.int32))
nb_reconnect = target_nb_connection - n_connected
nb_reconnect = tf.clip_by_value(nb_reconnect, 0, n_candidates)
reconnect_sample_id = tf.random_shuffle(tf.range(n_candidates))[:nb_reconnect]
reconnect_sample_coord = tf.gather(reconnect_candidate_coord, reconnect_sample_id, name='SelectedCoord')
# Apply the rewiring
reconnect_vals = tf.fill(dims=[nb_reconnect], value=epsilon, name='InitValues')
reconnect_op = tf.scatter_nd_update(theta, reconnect_sample_coord, reconnect_vals, name='Reconnect')
with tf.control_dependencies([reconnect_op]):
if check_zero_numbers and target_nb_connection is not None:
connection_check = assert_connection_number(theta=theta, targeted_number=target_nb_connection)
with tf.control_dependencies([connection_check]):
return tf.no_op('Rewiring')
else:
return tf.no_op('Rewiring')
if __name__ == '__main__':
test_random_sparse_signed_matrix() | 29,033 | 46.286645 | 844 | py |
ModProp | ModProp-main/bin/neuron_models.py | """
Code modified for rate-based neurons (ReLU activation) and for activation derivative
approximation (for nonlocal terms only) for ModProp via automatic differentiation
The overall ModProp framework proposed is "communicating the credit information
via cell-type-specific neuromodulators and processing it at the receiving cells
via pre-determined temporal filtering taps."
Current approximations are proof of concept for the framework, and better approximations
can be developed as a part of the future work
Modified from https://github.com/IGITUGraz/LSNN-official
with the following copyright message retained from the original code:
##
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from distutils.version import LooseVersion
import datetime
from collections import OrderedDict
from collections import namedtuple
import numpy as np
import numpy.random as rd
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.framework.ops import Tensor
from tensorflow.python.ops.parallel_for.gradients import batch_jacobian
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
from tensorflow.python.ops.variables import Variable, RefVariable
else:
print("Using tensorflow version older then 1.11 -> skipping RefVariable storing")
from tensorflow.python.ops.variables import Variable
from rewiring_tools_NP2 import weight_sampler
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bi_ijk_to_bjk, einsum_bij_jk_to_bik, einsum_bi_bij_to_bj
from lsnn.toolbox.tensorflow_utils import tf_roll
from time import time
Cell = tf.contrib.rnn.BasicRNNCell
def placeholder_container_for_rnn_state(cell_state_size, dtype, batch_size, name='TupleStateHolder'):
with tf.name_scope(name):
default_dict = cell_state_size._asdict()
placeholder_dict = OrderedDict({})
for k, v in default_dict.items():
if np.shape(v) == ():
v = [v]
shape = np.concatenate([[batch_size], v])
placeholder_dict[k] = tf.placeholder(shape=shape, dtype=dtype, name=k)
placeholder_tuple = cell_state_size.__class__(**placeholder_dict)
return placeholder_tuple
def feed_dict_with_placeholder_container(dict_to_update, state_holder, state_value, batch_selection=None):
if state_value is None:
return dict_to_update
assert state_holder.__class__ == state_value.__class__, 'Should have the same class, got {} and {}'.format(
state_holder.__class__, state_value.__class__)
for k, v in state_value._asdict().items():
if batch_selection is None:
dict_to_update.update({state_holder._asdict()[k]: v})
else:
dict_to_update.update({state_holder._asdict()[k]: v[batch_selection]})
return dict_to_update
@tf.custom_gradient
def rateFunction_1(v_, dampening_factor):
'''
Not used
'''
z_ = tf.nn.relu(v_)
def grad(dy):
psi = tf.where(tf.greater(v_, 0.), tf.ones_like(v_), tf.zeros_like(v_))
psi_av = tf.reduce_mean(psi, axis=-1, keepdims=True) * dampening_factor
dv = dy * psi_av
return [dv, tf.zeros_like(dampening_factor)]
return z_, grad
@tf.custom_gradient
def rateFunction_5(v_, dampening_factor, psiT):
'''
Not used
'''
z_ = tf.nn.relu(v_)
def grad(dy):
# psi_av = tf.where(tf.greater(v_, 0.), tf.ones_like(v_), tf.zeros_like(v_))
psi_av = psiT * dampening_factor # use a constant psi
dv = dy * psi_av
return [dv, tf.zeros_like(dampening_factor), tf.zeros_like(psiT)]
return z_, grad
@tf.custom_gradient
def actFunction(v_, mu):
'''
Replaces ReLU activation function with customized gradient,
where mu is used as the approximate activation derivative
Input:
v_: n_batch x n_unit, pre-activation (voltage)
mu: scalar, smeared activation derivative
Output:
z_: n_batch x n_unit, firing rate
'''
z_ = tf.nn.relu(v_)
def grad(dy):
dv = dy * mu
return [dv, tf.zeros_like(mu)]
return z_, grad
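# Added illustrative sketch (not part of the original model; the helper name is ours): the forward
# pass of actFunction is plain ReLU, but the backward pass propagates the constant surrogate
# derivative mu instead of the true ReLU derivative, regardless of the sign of the pre-activation.
def _act_function_grad_sketch():
    v = tf.constant([[-1.0, 2.0]])
    z = actFunction(v, tf.constant(0.5))
    dz_dv = tf.gradients(z, v)[0]  # evaluates to [[0.5, 0.5]] in a session
    return dz_dv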
@tf.custom_gradient
def MDGL_output(activity, Wout, Wab, hj, decay):
# Auto diff for MDGL
logits = einsum_bij_jk_to_bik(activity, Wout)
def grad(dy):
dWout = tf.einsum('bij,bik->jk', activity, dy)
dEdz = tf.einsum('btk,pk->btp', dy, Wout) #btp
decay_ = tf.expand_dims(tf.expand_dims(decay,axis=0),axis=0)
aj = dEdz*hj*(1-decay_) # (btj)
mod1 = tf.pad(einsum_bij_jk_to_bik(aj[:,1:], Wab), ((0, 0), (0, 1), (0, 0)), 'constant') #(btj,jp->btp)
dz = dEdz + mod1
return [dz, dWout, tf.zeros_like(Wab), tf.zeros_like(hj), tf.zeros_like(decay)]
return logits, grad
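# Note on MDGL_output above (added annotation, not in the original code): the forward pass is an
# ordinary linear readout, logits = activity @ Wout, while the custom backward pass augments the
# usual dE/dz term with one extra step of credit routed back in time through the (approximate)
# recurrent weight matrix Wab, scaled by the surrogate derivative hj and the filter gain (1 - decay).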
@tf.custom_gradient
def custom_rec(z, Wrec, Wback):
out = einsum_bi_ijk_to_bjk(z, Wrec)
def grad(dy):
drec = tf.squeeze(tf.einsum('bjk,pjk->bpk',dy, Wback))
pre_activity = tf.expand_dims(z, axis=-2) #(b,1,i)
dWrec = tf.reduce_sum(dy * pre_activity, axis=0) #(b,1,i)*(b,j,1)->(j,i)
dWrec = tf.expand_dims(tf.transpose(dWrec), axis=-1) # (i,j,1)
return drec, dWrec, tf.zeros_like(Wback)
return out, grad
@tf.custom_gradient
def filt_v(v_ghost, i_t, kappa, decay):
out = tf.zeros_like(v_ghost) # v = kappa * v + (1 - decay) * i_t
def grad(dy):
dv = dy * kappa # (b,j)*(,j)
dit = dy * (1 - decay)
return [dv, dit, tf.zeros_like(kappa), tf.zeros_like(decay)]
return out, grad
def weight_matrix_with_delay_dimension(w, d, n_delay):
"""
Generate the tensor of shape n_in x n_out x n_delay that represents the synaptic weights with the right delays.
:param w: synaptic weight value, float tensor of shape (n_in x n_out)
:param d: delay number, int tensor of shape (n_in x n_out)
:param n_delay: number of possible delays
:return:
"""
with tf.name_scope('WeightDelayer'):
w_d_list = []
for kd in range(n_delay):
mask = tf.equal(d, kd)
w_d = tf.where(condition=mask, x=w, y=tf.zeros_like(w))
w_d_list.append(w_d)
delay_axis = len(d.shape)
WD = tf.stack(w_d_list, axis=delay_axis)
return WD
# PSP on output layer: exponential low-pass filter y_t = decay * y_{t-1} + (1 - decay) * x_t along the time axis
def exp_convolve(tensor, decay): # tensor shape (trial, time, neuron)
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float32]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tf.zeros_like(tensor_time_major[0])
filtered_tensor = tf.scan(lambda a, x: a * decay + (1 - decay) * x, tensor_time_major, initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor, perm=[1, 0, 2])
return filtered_tensor
def exp_convolve2(tensor, decay): # tensor shape (trial, time, neuron)
'''
Not used
'''
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float32]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tensor_time_major[0]
filtered_tensor = tf.scan(lambda a, x: a * decay + x, tensor_time_major, initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor, perm=[1, 0, 2])
return filtered_tensor
GruStateTuple = namedtuple('GruStateTuple', ('z'))
class Gru(Cell):
'''
Not used
'''
def __init__(self, n_in, n_rec, dtype=tf.float32, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None, custom_mode=0, w_adj=None, task='MNIST', wsig=0.0):
self.custom_mode = custom_mode
# Parameters
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
self.wsig = wsig
if w_adj is not None:
W_zg_adj = w_adj['W_zg_adj']
W_zr_adj = w_adj['W_zr_adj']
W_zi_adj = w_adj['W_zi_adj']
else:
W_zg_adj = None
W_zr_adj = None
W_zi_adj = None
with tf.variable_scope('InputWeights'):
# Input weights
if (0 < rewiring_connectivity < 1):
self.W_ig, _, _, _ = weight_sampler(n_in, n_rec, rewiring_connectivity, neuron_sign=in_neuron_sign)
else:
self.W_ig = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputaWeight")
if (0 < rewiring_connectivity < 1):
self.W_ir, _, _, _ = weight_sampler(n_in, n_rec, rewiring_connectivity, neuron_sign=in_neuron_sign)
else:
self.W_ir = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputrWeight")
if (0 < rewiring_connectivity < 1):
self.W_ii, _, _, _ = weight_sampler(n_in, n_rec, rewiring_connectivity, neuron_sign=in_neuron_sign)
else:
self.W_ii = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputiWeight")
self.w_in_val = self.W_ii # for saving results
with tf.variable_scope('RecWeights'):
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
if 0 < rewiring_connectivity < 1:
self.W_zg, _,_,_ = weight_sampler(n_rec, n_rec, rewiring_connectivity, neuron_sign=rec_neuron_sign, is_con_0=W_zg_adj)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.W_zg = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype, name='RecurrentaWeight')
self.W_zg_ = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.W_zg), self.W_zg) # Disconnect autotapse
if 0 < rewiring_connectivity < 1:
self.W_zr, _,_,_ = weight_sampler(n_rec, n_rec, rewiring_connectivity, neuron_sign=rec_neuron_sign, is_con_0=W_zr_adj)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.W_zr = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype, name='RecurrentrWeight')
self.W_zr_ = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.W_zr), self.W_zr) # Disconnect autotapse
if 0 < rewiring_connectivity < 1:
self.W_zi, _,_,_ = weight_sampler(n_rec, n_rec, rewiring_connectivity, neuron_sign=rec_neuron_sign, is_con_0=W_zi_adj)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.W_zi = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype, name='RecurrentiWeight')
self.W_zi_ = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.W_zi), self.W_zi) # Disconnect autotapse
self.w_rec_val = self.W_zi # for saving results
with tf.variable_scope('RecBiases'):
self.b_i = tf.Variable(tf.zeros_like(self.W_zi[0]), name='rec_bias_i')
if task=='MNIST':
self.b_g = tf.Variable(-3.0*tf.ones_like(self.b_i), trainable=True, name='rec_bias_g')
else:
self.b_g = tf.Variable(-0.0*tf.ones_like(self.b_i), trainable=True, name='rec_bias_g')
self.b_r = tf.Variable(tf.zeros_like(self.W_zi[0]), name='rec_bias_r')
@property
def state_size(self):
return GruStateTuple(z=self.n_rec)
@property
def output_size(self):
return [self.n_rec, self.n_rec]
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
return GruStateTuple(z=z0)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
# This MDGL implementation is very inefficient,
# but takes advantage of automatic differentiation
add_noise = tf.random.normal(state.z.shape, mean=0.0, stddev=self.wsig)
mult_noise = tf.random.normal(state.z.shape, mean=1.0, stddev=self.wsig)
if self.custom_mode == 2: #MDGL
inputs1 = inputs[:,:self.n_in]
inputs2 = inputs[:,self.n_in:]
z_stop = tf.stop_gradient(state.z)
g_t = tf.sigmoid(tf.matmul(inputs1, self.W_ig) + tf.matmul(z_stop, self.W_zg_) + self.b_g ) # bi,ij->bj
r_t = tf.sigmoid(tf.matmul(inputs1, self.W_ir) + tf.matmul(z_stop, self.W_zr_) + self.b_r )
i_t = tf.tanh(tf.matmul(inputs1, self.W_ii) + r_t*tf.matmul(z_stop, self.W_zi_) + self.b_i )
z_1 = ((1-g_t)*state.z + g_t * i_t)*mult_noise + add_noise
g_t = tf.sigmoid(tf.matmul(inputs2, self.W_ig) + tf.matmul(z_1, self.W_zg_) + self.b_g ) # bi,ij->bj
r_t = tf.sigmoid(tf.matmul(inputs2, self.W_ir) + tf.matmul(z_1, self.W_zr_) + self.b_r )
i_t = tf.tanh(tf.matmul(inputs2, self.W_ii) + r_t*tf.matmul(z_1, self.W_zi_) + self.b_i )
new_z = ((1-g_t)*z_1 + g_t * i_t)*mult_noise + add_noise
else:
if self.custom_mode>0:
z_stop = tf.stop_gradient(state.z)
else:
z_stop = state.z
# Explore sparse w to speed up sims; note, z is sparse for spiking neurons
g_t = tf.sigmoid(tf.matmul(inputs, self.W_ig) + tf.matmul(z_stop, self.W_zg_) + self.b_g ) # bi,ij->bj
r_t = tf.sigmoid(tf.matmul(inputs, self.W_ir) + tf.matmul(z_stop, self.W_zr_) + self.b_r )
i_t = tf.tanh(tf.matmul(inputs, self.W_ii) + r_t*tf.matmul(z_stop, self.W_zi_) + self.b_i )
new_z = ((1-g_t)*state.z + g_t * i_t)*mult_noise + add_noise
hpsi = new_z
if self.custom_mode == 2:
new_state = GruStateTuple(z=z_1) # progress one step at a time
else:
new_state = GruStateTuple(z=new_z)
return [new_z, hpsi], new_state
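# --- Illustrative sketch (added for exposition; not part of the original model) ---
# The Gru cell above uses the standard gated update z_new = (1 - g) * z + g * i,
# applied twice per call in MDGL mode (custom_mode == 2). The helper below restates
# one such update step in plain NumPy; all names, sizes and random weights here are
# assumptions made only for this sketch.
def _gru_update_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    n_in, n_rec = 3, 4
    x = rng.randn(1, n_in)                    # input at one time step
    z = np.zeros((1, n_rec))                  # previous hidden state
    W_ig, W_ir, W_ii = (rng.randn(n_in, n_rec) / np.sqrt(n_in) for _ in range(3))
    W_zg, W_zr, W_zi = (rng.randn(n_rec, n_rec) / np.sqrt(n_rec) for _ in range(3))
    sigmoid = lambda a: 1. / (1. + np.exp(-a))
    g = sigmoid(x @ W_ig + z @ W_zg)          # update gate
    r = sigmoid(x @ W_ir + z @ W_zr)          # reset gate
    i = np.tanh(x @ W_ii + r * (z @ W_zi))    # candidate state
    return (1 - g) * z + g * i                # same convex combination as in __call__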
LIFStateTuple = namedtuple('LIFStateTuple', ('v', 'z', 'i_future_buffer', 'z_buffer'))
def tf_cell_to_savable_dict(cell, sess, supplement={}):
"""
    Useful function to return a python/numpy object from one of the tensorflow cell objects defined here.
    The idea is simply that variables and Tensors given as attributes of the object will be replaced by their numpy values evaluated on the current tensorflow session.
    :param cell: tensorflow cell object
    :param sess: tensorflow session
    :param supplement: additional entries to store alongside the cell attributes
:return:
"""
dict_to_save = {}
dict_to_save['cell_type'] = str(cell.__class__)
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
dict_to_save['time_stamp'] = time_stamp
dict_to_save.update(supplement)
tftypes = [Variable, Tensor]
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
tftypes.append(RefVariable)
for k, v in cell.__dict__.items():
if k == 'self':
pass
elif type(v) in tftypes:
dict_to_save[k] = sess.run(v)
elif type(v) in [bool, int, float, np.int64, np.ndarray]:
dict_to_save[k] = v
else:
print('WARNING: attribute of key {} and value {} has type {}, recoding it as string.'.format(k, v, type(v)))
dict_to_save[k] = str(v)
return dict_to_save
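# --- Illustrative usage sketch (added for exposition; not in the original file) ---
# Given a cell and an active session, the dictionary returned above can be dumped to
# disk with numpy. `path` and the supplement entries are placeholder names chosen for
# this sketch only.
def _save_cell_snapshot_sketch(cell, sess, path='cell_snapshot.npy'):
    import numpy as np
    snapshot = tf_cell_to_savable_dict(cell, sess, supplement={'note': 'illustrative snapshot'})
    np.save(path, snapshot)  # reload later with np.load(path, allow_pickle=True)
    return snapshot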
class LIF(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1., custom_mode=0, w_adj=None, task='mnist'):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
:param reset: method of resetting membrane potential after spike thr-> by fixed threshold amount, zero-> to zero
"""
if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
tau = tf.cast(tau, dtype=dtype)
dt = tf.cast(dt, dtype=dtype)
self.dampening_factor = dampening_factor
self.custom_mode = custom_mode
# Parameters
self.n_delay = n_delay
self.n_refractory = n_refractory
self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self.n_E = int(0.8 * n_rec) + 1
self.n_I = n_rec - self.n_E
self._num_units = self.n_rec
self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
self.V0 = V0
self.injected_noise_current = injected_noise_current
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
self.task = task
if w_adj is not None:
wrec_adj = w_adj['wrec_adj']
else:
wrec_adj = None
with tf.variable_scope('InputWeights'):
# Input weights
if task=='seqPred':
self.W_in = tf.constant(np.expand_dims(rd.randn(n_in, n_rec) / np.sqrt(n_in), axis=-1), dtype=dtype, name="WinConst")
else:
if (0 < rewiring_connectivity < 1) and (n_in>2):
self.w_in_val, self.w_in_sign, self.w_in_var, self.w_in_inicon = weight_sampler(n_in, n_rec, rewiring_connectivity,
neuron_sign=in_neuron_sign)
else:
if (task=='mnist_row') or (task=='dlyXOR') or (task=='PGrate'):
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in/2), dtype=dtype, name="InputWeight")
else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
self.w_in_val = self.V0 * self.w_in_val
self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
if 0 < rewiring_connectivity < 1:
self.w_rec_val, self.w_rec_sign, self.w_rec_var, self.w_rec_inicon = weight_sampler(n_rec, n_rec,
rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
if (task=='mnist_row') or (task=='dlyXOR') or (task=='PGrate'):
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec/2), dtype=dtype,
name='RecurrentWeight')
else:
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
self.w_rec_val = self.w_rec_val * self.V0
            self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
                                      self.w_rec_val)  # Disconnect autapse
self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
@property
def output_size(self):
return [self.n_rec, self.n_rec]
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
if self.custom_mode>0:
z_stop = tf.stop_gradient(state.z)
else:
z_stop = state.z
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
z_stop, self.W_rec)
        new_v, new_z, psi, _ = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return [new_z, psi], new_state
    def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, vghost=None, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Function that generates the next spike and voltage tensors for a given cell state.
:param v
:param z
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
if self.injected_noise_current > 0:
add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0] + add_current
# I_reset = z * thr * self.dt
new_v = decay * v + (1 - decay) * i_t #- I_reset
if False: #self.custom_mode == 3:
new_vghost = filt_v(vghost, i_t, self.kappa * (1-decay), decay)
else:
new_vghost = tf.zeros_like(v)
# # Spike generation
v_scaled = new_v - thr #(new_v - thr) / thr
new_z = tf.nn.relu(v_scaled)
new_z = new_z * 1 / self.dt
psi = tf.gradients(new_z, new_v)[0]
return new_v, new_z, psi, new_vghost
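# --- Illustrative sketch (added for exposition; not part of the original model) ---
# LIF_dynamic above is a leaky integrator followed by a rectified-linear readout:
# new_v = decay * v + (1 - decay) * i and new_z = relu(new_v - thr) / dt, with psi the
# derivative of new_z w.r.t. new_v. The NumPy restatement below mirrors one step;
# the sizes and constants are assumptions made only for this sketch.
def _lif_rate_update_sketch():
    import numpy as np
    dt, tau, thr = 1.0, 100.0, 0.0
    decay = np.exp(-dt / tau)
    v = np.array([0.02, -0.01, 0.05])     # previous membrane potentials
    i_t = np.array([0.10, 0.00, -0.02])   # synaptic input current at this step
    new_v = decay * v + (1 - decay) * i_t
    new_z = np.maximum(new_v - thr, 0.0) / dt     # rate output
    psi = (new_v > thr).astype(float)             # (sub)derivative of new_z w.r.t. new_v
    return new_v, new_z, psi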
ALIFStateTuple = namedtuple('ALIFState', (
'z',
'v',
'b',
'i_future_buffer',
'z_buffer', 'vghost'))
class ALIF(LIF):
def __init__(self, n_in, n_rec, tau=20, thr=0.01,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1,
tau_adaptation=200., beta=1.6,
rewiring_connectivity=-1, dampening_factor=0.3,
in_neuron_sign=None, rec_neuron_sign=None, injected_noise_current=0.,
V0=1., custom_mode=0, trainable_adapt_mode=0, w_adj=None, task='mnist', MDGLpp_mode=-1):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
:param tau_adaptation: adaptation time constant for the threshold voltage
        :param beta: amplitude of adaptation
:param rewiring_connectivity: number of non-zero synapses in weight matrices (at initialization)
:param in_neuron_sign: vector of +1, -1 to specify input neuron signs
        :param rec_neuron_sign: same for recurrent neurons
:param injected_noise_current: amplitude of current noise
:param V0: to choose voltage unit, specify the value of V0=1 Volt in the desired unit (example V0=1000 to set voltage in millivolts)
"""
super(ALIF, self).__init__(n_in=n_in, n_rec=n_rec, tau=tau, thr=thr, dt=dt, n_refractory=n_refractory,
dtype=dtype, n_delay=n_delay,
rewiring_connectivity=rewiring_connectivity,
dampening_factor=dampening_factor, in_neuron_sign=in_neuron_sign,
rec_neuron_sign=rec_neuron_sign,
injected_noise_current=injected_noise_current,
V0=V0, custom_mode=custom_mode, w_adj=w_adj, task=task)
if tau_adaptation is None: raise ValueError("alpha parameter for adaptive bias must be set")
if beta is None: raise ValueError("beta parameter for adaptive bias must be set")
trainable_adapt = (trainable_adapt_mode>0)
#self.train_logtau = (trainable_adapt_mode==2)
if trainable_adapt_mode==2: # train log tau
self.logtau = tf.Variable(np.log(tau_adaptation), dtype=dtype, name="logTauAdaptation", trainable=True)
self.tau_adaptation = tf.exp(self.logtau)
elif trainable_adapt_mode==3:
self.exptau = tf.Variable(np.exp(-dt/tau_adaptation), dtype=dtype, name="expTauAdaptation", trainable=True)
self.tau_adaptation = -dt/tf.log(self.exptau)
elif trainable_adapt_mode==4:
self.exp50tau = tf.Variable(np.exp(-50*dt/tau_adaptation), dtype=dtype, name="exp50TauAdaptation", trainable=True)
self.tau_adaptation = -50*dt/tf.log(self.exp50tau)
else:
self.tau_adaptation = tf.Variable(tau_adaptation, dtype=dtype, name="TauAdaptation", trainable=trainable_adapt)
self.beta = tf.Variable(beta, dtype=dtype, name="Beta", trainable=False)
# self.decay_b = np.expand_dims(np.exp(-dt / tau_adaptation),axis=0)
# self.decay_b = tf.Variable(np.expand_dims(np.exp(-dt / tau_adaptation),axis=0),\
# dtype=dtype, name="rho", trainable=trainable_adapt)
self.tau_a_max = np.max(tau_adaptation) # constant!
# leaky past dependencies
self.kappa = tf.Variable(tf.zeros_like(self._decay), trainable=False, name='leaky_past') # dim (n_rec, )
self.W_back = tf.Variable(tf.zeros_like(self.W_rec), trainable=False, name='W_back')
self.Wpow = tf.Variable(tf.zeros_like(self.W_rec), trainable=False, name='Wpow')
self.Wab = tf.zeros_like(self.W_rec[:,:,0]) # will get overwritten in the main code before training starts
self.MDGLpp_mode = MDGLpp_mode
@property
def output_size(self):
return [self.n_rec, self.n_rec, self.n_rec, self.n_rec, self.n_rec]
@property
def state_size(self):
return ALIFStateTuple(v=self.n_rec,
z=self.n_rec,
b=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory),
vghost=self.n_rec)
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
b0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
vghost0 = v0
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return ALIFStateTuple(
v=v0,
z=z0,
b=b0,
i_future_buffer=i_buff0,
z_buffer=z_buff0,
vghost=vghost0
)
def einsum_bi_ijk_to_bjk_(self, x, W):
h_hat = einsum_bi_ijk_to_bjk(x, W)
return h_hat
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
with tf.name_scope('ALIFcall'):
if (self.custom_mode>0):
z_stop = tf.stop_gradient(state.z)
else:
z_stop = state.z
if self.custom_mode == 4:
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + \
custom_rec(state.z, self.W_rec, self.W_back)
elif self.custom_mode <= -1:
# This is to apply activation derivative approximation only to the s->z branch
# right before the weighted sum in gradient calculation
# Exact activation derivative calculation is still used for eligibility trace computation
v_scaled = state.v - (self.thr + state.b*self.beta*self.V0)
dampening = -0.1*tf.cast(self.custom_mode, tf.float32)
if self.MDGLpp_mode==1:
z_stop = rateFunction_1(v_scaled, dampening)
elif (self.MDGLpp_mode==4) or (self.MDGLpp_mode==5):
z_stop = rateFunction_5(v_scaled, dampening, self.psiT)
elif (self.MDGLpp_mode==2) or (self.MDGLpp_mode==3):
z_stop = actFunction(v_scaled, dampening)
if inputs.shape[-1] > self.n_in: # if truncation
inputs1 = inputs[:,:self.n_in]
inputs2 = tf.expand_dims(inputs[:,self.n_in:], axis=-1)
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs1, self.W_in) + \
inputs2*self.einsum_bi_ijk_to_bjk_(z_stop, self.W_rec) + (1-inputs2)*self.einsum_bi_ijk_to_bjk_(tf.stop_gradient(state.z), self.W_rec)
else: # no truncation
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + \
self.einsum_bi_ijk_to_bjk_(z_stop, self.W_rec)
else:
if inputs.shape[-1] > self.n_in: # if truncation
inputs1 = inputs[:,:self.n_in]
inputs2 = tf.expand_dims(inputs[:,self.n_in:], axis=-1)
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs1, self.W_in) + \
inputs2*einsum_bi_ijk_to_bjk(z_stop, self.W_rec) + (1-inputs2)*einsum_bi_ijk_to_bjk(tf.stop_gradient(state.z), self.W_rec)
else: # no truncation
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + \
einsum_bi_ijk_to_bjk(z_stop, self.W_rec)
self.decay_b = tf.exp(-1/tf.clip_by_value(self.tau_adaptation, 1, 3*self.tau_a_max))
new_b = self.decay_b * state.b + (1. - self.decay_b) * state.z
thr = self.thr + new_b * self.beta * self.V0
new_v, new_z, psi, new_vghost = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer, vghost=state.vghost,
decay=self._decay,
thr=thr)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = ALIFStateTuple(v=new_v,
z=new_z,
b=new_b,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer,
vghost=new_vghost)
return [new_z, new_v, thr, psi, new_vghost], new_state | 35,594 | 44.634615 | 844 | py |
ModProp | ModProp-main/bin/delayedXOR_task.py | '''
Code adapted for training an RNN using ModProp to perform a delayed XOR task.
The overall ModProp framework proposed is "communicating the credit information
via cell-type-specific neuromodulators and processing it at the receiving cells
via pre-determined temporal filtering taps."
Remarks:
    - If you also train with BPTT and three-factor learning (by changing FLAGS.custom_mode) across many runs,
      the results should reproduce the performance ordering between rules reported in the paper
    - Performance for each rule may fluctuate from run to run, so training should be repeated across many runs;
      the focus is on the performance trend across runs
    - Typically the worst run (or few runs) is ignored for every rule when plotting the learning curves
    - As mentioned, this is a proof-of-concept study and future work involves testing ModProp across
      a wide range of architectures; as such, there is no performance guarantee if some network
      parameters are changed (e.g. sparsity and thr)
    The current approximations are a proof of concept for the framework, and better approximations
    can be developed as part of future work
Code built on top of https://github.com/IGITUGraz/LSNN-official
with the following copyright message retained from the original code:
##
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bij_jk_to_bik
import os
from time import time
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from lsnn.toolbox.file_saver_dumper_no_h5py import save_file, load_file, get_storage_path_reference
from plotting_tools import *
from neuron_models import tf_cell_to_savable_dict, exp_convolve, ALIF, LIF, Gru, MDGL_output
from rewiring_tools_NP2 import weight_sampler, rewiring_optimizer_wrapper, rewiring_optimizer_wrapper_NP
from lsnn.toolbox.tensorflow_utils import tf_downsample
import json
FLAGS = tf.app.flags.FLAGS
##
tf.app.flags.DEFINE_string('comment', '', 'comment to retrieve the stored results')
# tf.app.flags.DEFINE_string('resume', '', 'path to the checkpoint of the form "results/script_name/2018_.../session"')
tf.app.flags.DEFINE_string('model', 'LSNN', 'Network model, do not change')
tf.app.flags.DEFINE_bool('save_data', False, 'whether to save simulation data in result folder')
tf.app.flags.DEFINE_bool('downsampled', False, 'Do not change')
##
tf.app.flags.DEFINE_integer('batch_size', 32, 'size of the minibatch')
tf.app.flags.DEFINE_integer('delay_window', 700, 'Delay Window')
tf.app.flags.DEFINE_integer('n_in', 2, 'number of input units, do not change')
tf.app.flags.DEFINE_float('n_multiplier', 1, 'multiplier for number of recurrent neurons')
tf.app.flags.DEFINE_integer('n_regular', 120, 'number of regular units in the recurrent layer')
tf.app.flags.DEFINE_integer('n_adaptive', 0, 'number of adaptive units in the recurrent layer, do not change')
tf.app.flags.DEFINE_integer('n_iter', 3000, 'number of training iterations')
tf.app.flags.DEFINE_integer('n_delay', 1, 'maximum synaptic delay, do not change')
tf.app.flags.DEFINE_integer('n_ref', 5, 'number of refractory steps, not used')
tf.app.flags.DEFINE_integer('lr_decay_every', 300, 'decay learning rate every lr_decay_every steps')
tf.app.flags.DEFINE_integer('print_every', 20, 'frequency of printing training progress')
tf.app.flags.DEFINE_float('custom_mode', -2.5, '0 if BPTT, 1 if three-factor; -x, for ModProp with mu=-0.1x')
tf.app.flags.DEFINE_integer('trainable_adapt_mode', 0, 'Do not change')
tf.app.flags.DEFINE_integer('MDGL_buflen', -1, 'Buffer length S for MDGL++; -1 for full length')
tf.app.flags.DEFINE_integer('MDGLpp_mode', 3, 'ModProp variants: 2) cell-specific feedback W, 3) type-specific W')
tf.app.flags.DEFINE_integer('ntype_EI', 1, 'Number of E types and I types, do not change')
##
tf.app.flags.DEFINE_float('beta', 1.8, 'Scaling constant of the adaptive threshold, not used')
tf.app.flags.DEFINE_float('tau_a', 700, 'Adaptation time constant mean, not used')
tf.app.flags.DEFINE_float('tau_a_range', 0, 'Adaptation time constant range, not used')
tf.app.flags.DEFINE_float('tau_v', 100, 'Membrane time constant of units, do not change')
tf.app.flags.DEFINE_float('thr', 0., 'Baseline threshold voltage, do not change')
tf.app.flags.DEFINE_float('learning_rate', 0.0005, 'Base learning rate')
tf.app.flags.DEFINE_float('lr_decay', 0.8, 'Decaying factor')
tf.app.flags.DEFINE_float('reg_l2', 5e-4, 'regularization coefficient l2')
tf.app.flags.DEFINE_float('rewiring_temperature', 0., 'regularization coefficient')
tf.app.flags.DEFINE_float('proportion_excitatory', 0.8, 'proportion of excitatory neurons, do not change')
tf.app.flags.DEFINE_float('in_high', 2.0, 'amplitude for 1')
##
tf.app.flags.DEFINE_bool('verbose', True, 'Print many info during training')
tf.app.flags.DEFINE_bool('neuron_sign', True,
"If rewiring is active, this will fix the sign of neurons (Dale's law)")
tf.app.flags.DEFINE_bool('crs_thr', False, 'Do not change')
tf.app.flags.DEFINE_float('rewiring_connectivity', 0.99, 'max connectivity limit in the network, do not change')
tf.app.flags.DEFINE_float('wout_connectivity', 0.99, 'similar to above but for output weights, do not change')
tf.app.flags.DEFINE_float('l1', 1e-2, 'l1 regularization used in rewiring (irrelevant without rewiring)')
tf.app.flags.DEFINE_float('dampening_factor', 0.3, 'Parameter necessary to approximate the spike derivative, not used')
# Analog values are fed to only a single neuron
# The following arguments should be fixed
FLAGS.crs_thr=False
FLAGS.downsampled = False
FLAGS.trainable_adapt_mode = 0
FLAGS.n_in = 2
FLAGS.n_adaptive = 0
FLAGS.n_delay = 1
FLAGS.ntype_EI = 1
FLAGS.tau_v = 100
FLAGS.model = 'LSNN'
FLAGS.rewiring_connectivity = 0.99
FLAGS.wout_connectivity = 0.99
FLAGS.thr = 0
FLAGS.proportion_excitatory = 0.8
assert (FLAGS.MDGLpp_mode==2) or (FLAGS.MDGLpp_mode==3), 'FLAGS.MDGLpp_mode must be 2 or 3'
assert (FLAGS.custom_mode <=2), 'FLAGS.custom_mode must be at most 2'
FLAGS.n_regular = int(FLAGS.n_regular*FLAGS.n_multiplier)
FLAGS.n_adaptive = int(FLAGS.n_adaptive*FLAGS.n_multiplier)
n_unit = FLAGS.n_regular + FLAGS.n_adaptive
dt = 1. # Time step is by default 1 ms
fix_window = 0
cue_window = 100
delay_window = FLAGS.delay_window
T = tr_len = fix_window+2*cue_window+delay_window
batch_size = FLAGS.batch_size # the batch size
n_output_symbols = 2 # two classes for now
if FLAGS.trainable_adapt_mode < 0:
FLAGS.beta = 0.0
if FLAGS.trainable_adapt_mode == -2:
FLAGS.tau_v = 0.01
# Define the flag object as a dictionary for saving purposes
_, storage_path, flag_dict = get_storage_path_reference(__file__, FLAGS, './results/', flags=False,
comment=len(FLAGS.comment) > 0)
if FLAGS.save_data:
os.makedirs(storage_path, exist_ok=True)
save_file(flag_dict, storage_path, 'flag', 'json')
print('saving data to: ' + storage_path)
print(json.dumps(flag_dict, indent=4))
# Sign of the neurons
if 0 < FLAGS.rewiring_connectivity and FLAGS.neuron_sign:
n_excitatory_in = int(FLAGS.proportion_excitatory * FLAGS.n_in) + 1
n_inhibitory_in = FLAGS.n_in - n_excitatory_in
in_neuron_sign = np.concatenate([-np.ones(n_inhibitory_in), np.ones(n_excitatory_in)])
np.random.shuffle(in_neuron_sign)
n_excitatory = int(FLAGS.proportion_excitatory * (n_unit)) + 1
n_inhibitory = n_unit - n_excitatory
rec_neuron_sign = np.concatenate([-np.ones(n_inhibitory), np.ones(n_excitatory)])
else:
if not (FLAGS.neuron_sign == False): print(
'WARNING: Neuron sign is set to None without rewiring but sign is requested')
in_neuron_sign = None
rec_neuron_sign = None
# Define the network
tau_v = FLAGS.tau_v
w_adj_dict = None
wout_adj = None
if FLAGS.model == 'LSNN':
# We set beta == 0 to some of the neurons. Those neurons then behave like LIF neurons (without adaptation).
# And this is how we achieve a mixture of LIF and ALIF neurons in the LSNN model.
beta = np.concatenate([np.zeros(FLAGS.n_regular), np.ones(FLAGS.n_adaptive) * FLAGS.beta])
tau_a_array=np.random.uniform(low=FLAGS.tau_a-FLAGS.tau_a_range, high=FLAGS.tau_a+FLAGS.tau_a_range, size=(n_unit,))
#tau_a_array = np.random.normal(FLAGS.tau_a, FLAGS.tau_a_var, size=(FLAGS.n_regular+FLAGS.n_adaptive,))
cell = ALIF(n_in=FLAGS.n_in, n_rec=n_unit, tau=tau_v, n_delay=FLAGS.n_delay,
n_refractory=FLAGS.n_ref, dt=dt, tau_adaptation=tau_a_array, beta=beta, thr=FLAGS.thr,
rewiring_connectivity=FLAGS.rewiring_connectivity,
in_neuron_sign=in_neuron_sign, rec_neuron_sign=rec_neuron_sign,
dampening_factor=FLAGS.dampening_factor, custom_mode=FLAGS.custom_mode, trainable_adapt_mode=FLAGS.trainable_adapt_mode,
w_adj=w_adj_dict, MDGLpp_mode=FLAGS.MDGLpp_mode, task='dlyXOR'
)
elif FLAGS.model == 'Gru': # Not used
cell = Gru(n_in=FLAGS.n_in, n_rec=n_unit,
rewiring_connectivity=FLAGS.rewiring_connectivity,
in_neuron_sign=in_neuron_sign, rec_neuron_sign=rec_neuron_sign, custom_mode=FLAGS.custom_mode,
w_adj=w_adj_dict
)
else:
raise NotImplementedError("Unknown model: " + FLAGS.model)
cell.psiT = tf.Variable(np.zeros((FLAGS.batch_size, n_unit)), trainable=False, name='psiT', dtype=tf.float32)
# cell.psiT = tf.Variable(np.zeros((1, n_unit)), trainable=False, name='psiT', dtype=tf.float32)
def get_data_dict():
"""
    Generate the feed dictionary used when running a tensorflow op.
    i1 and i2: two analog input streams; each carries a Gaussian bump during its cue window
    when the corresponding cue bit is 1, and stays at a low baseline otherwise
"""
# Initialize target and input cue matrices
target_num = -1*np.ones((FLAGS.batch_size,))
# cue received by i1 and i2
cue_batch = np.random.randint(0,2,size=(FLAGS.batch_size,2))
input_stack=np.zeros((FLAGS.batch_size,tr_len,FLAGS.n_in))
# Get spike encoding and target for each trial
def get_input_stack(cue): # spikes per example
# i4_spike = np.random.poisson(0.01, (tr_len,10))
in_high = FLAGS.in_high
in_low = 0.02 #in_high/2 #0.02
gauss_std = cue_window/2/2
time_steps = np.linspace(-int(cue_window/2),int(cue_window/2),cue_window)
gauss_in = in_high*np.expand_dims(np.exp(-(time_steps**2)/2/gauss_std**2), axis=-1) + in_low
i1_spike = in_low*np.ones((tr_len,1))
i2_spike = in_low*np.ones((tr_len,1))
# cue b4 delay
tstamps = np.array(range(fix_window, fix_window+cue_window))
if cue[0]==1:
i1_spike[tstamps,:] = gauss_in #in_high
# cue after delay
tstamps = np.array(range(tr_len-cue_window, tr_len))
if cue[1]==1:
i2_spike[tstamps,:] = gauss_in #in_high
input_stack = np.concatenate((i1_spike,i2_spike),1)
target_dir=int(cue[0]==cue[1]) # 1:match, 0:mismatch
return input_stack, target_dir
# loop through trials across batches
for tr in range(len(cue_batch)):
cue_num = cue_batch[tr,:]
input_stack[tr,:,:], target_num[tr] = get_input_stack(cue_num)
# add some noise
input_stack += np.random.normal(0.0, 0.01, size=input_stack.shape)
if (FLAGS.MDGL_buflen > 0) and (FLAGS.custom_mode < 0):
midT = tr_len - FLAGS.MDGL_buflen
inputs2 = np.zeros((FLAGS.batch_size, tr_len, 1))
inputs2[:,midT:] = 1 # select richer grad propagation after midT
input_stack = np.concatenate((input_stack, inputs2), axis=2)
# transform target one hot from batch x classes to batch x time x classes
data_dict = {inputs: input_stack, targets: target_num}
return data_dict, cue_batch
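# --- Illustrative sketch (added for exposition; not used by the training script) ---
# The label produced by get_input_stack is simply whether the two cues match:
# target = 1 for (0, 0) and (1, 1), target = 0 for (0, 1) and (1, 0). The helper below
# enumerates the four cue combinations as a stand-alone check.
def _delayed_xor_truth_table():
    table = {}
    for c0 in (0, 1):
        for c1 in (0, 1):
            table[(c0, c1)] = int(c0 == c1)  # 1: match, 0: mismatch
    return table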
# Generate input
if (FLAGS.custom_mode > 1) and (FLAGS.model == 'Gru'):
inputs = tf.placeholder(dtype=tf.float32, shape=(None, None, FLAGS.n_in*2),
name='InputSpikes') # MAIN input spike placeholder
elif (FLAGS.MDGL_buflen > 0) and (FLAGS.custom_mode<0):
inputs = tf.placeholder(dtype=tf.float32, shape=(FLAGS.batch_size, None, FLAGS.n_in+1),
name='InputSpikes')
else:
inputs = tf.placeholder(dtype=tf.float32, shape=(None, None, FLAGS.n_in),
name='InputSpikes') # MAIN input spike placeholder
targets = tf.placeholder(dtype=tf.int64, shape=(None,),
name='Targets') # Lists of target characters of the recall task
# create outputs and states
outputs, states = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
final_state = states[-1]
if FLAGS.model == 'LSNN':
z, v, b, psi, _ = outputs
else:
z, psi = outputs
z_regular = z[:, :, :FLAGS.n_regular]
z_adaptive = z[:, :, FLAGS.n_regular:]
with tf.name_scope('ClassificationLoss'):
#psp_decay = np.exp(-dt / FLAGS.tau_v) # output layer psp decay, chose value between 15 and 30ms as for tau_v
psp = z #exp_convolve(z, decay=psp_decay)
n_neurons = z.get_shape()[2]
# Define the readout weights
if (0 < FLAGS.wout_connectivity):
w_out, w_out_sign, w_out_var, _ = weight_sampler(n_unit, n_output_symbols,
FLAGS.wout_connectivity,
neuron_sign=rec_neuron_sign)
else:
w_out = tf.get_variable(name='out_weight', shape=[n_neurons, n_output_symbols], dtype=tf.float32)
b_out = tf.get_variable(name='out_bias', shape=[n_output_symbols], initializer=tf.zeros_initializer(), dtype=tf.float32)
# Define Wab
Wrec = cell.W_rec[:,:,0]
if (FLAGS.MDGLpp_mode == 3):
n_per_type_I = int(n_inhibitory/FLAGS.ntype_EI)
n_per_type_E = int(n_excitatory/FLAGS.ntype_EI)
if n_inhibitory % FLAGS.ntype_EI:
inh_idx = list(range(0,n_inhibitory,n_per_type_I)[:-1])
else:
inh_idx = list(range(0,n_inhibitory,n_per_type_I))
if n_excitatory % FLAGS.ntype_EI:
exc_idx = list(range(n_inhibitory,n_unit,n_per_type_E)[:-1])
else:
exc_idx = list(range(n_inhibitory,n_unit,n_per_type_E))
exc_idx.append(n_unit)
tp_idx_ = np.concatenate((np.array(inh_idx), np.array(exc_idx)))
tp_idx = np.stack((tp_idx_[:-1], tp_idx_[1:]),axis=1)
n_type = len(tp_idx)
for ii in range(n_type):
for jj in range(n_type):
W_block = Wrec[tp_idx[ii][0]:tp_idx[ii][1],tp_idx[jj][0]:tp_idx[jj][1]]
Wav = tf.reduce_mean(W_block)
if jj==0: # new row
th_row = Wav * tf.ones_like(W_block)
else:
th_row = tf.concat([th_row, Wav*tf.ones_like(W_block)], axis=1)
if jj==(n_type-1): # finished a row
if ii==0:
th_ = th_row
else:
th_ = tf.concat([th_, th_row], axis=0)
cell.Wab = th_
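# --- Illustrative sketch (added for exposition; not used by the training script) ---
# The loop above replaces every cell-type block of W_rec with the mean of that block,
# so that ModProp can feed back type-averaged rather than cell-specific weights.
# The NumPy helper below performs the same block averaging on an arbitrary matrix;
# the boundary convention (e.g. [0, n_inhibitory, n_unit]) is an assumption of this sketch.
def _block_average_sketch(W, boundaries):
    import numpy as np
    W_avg = np.empty_like(W, dtype=float)
    for a0, a1 in zip(boundaries[:-1], boundaries[1:]):
        for b0, b1 in zip(boundaries[:-1], boundaries[1:]):
            W_avg[a0:a1, b0:b1] = W[a0:a1, b0:b1].mean()
    return W_avg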
# Define the loss function
if (FLAGS.custom_mode>=2) and (FLAGS.model != 'Gru'): # customized MDGL grad
if FLAGS.custom_mode == 2:
Wab = tf.transpose(cell.W_rec[:,:,0]) #jp
elif (FLAGS.custom_mode == 3):
        Wab = tf.transpose(cell.Wab)
out = MDGL_output(z, w_out, Wab, psi, cell._decay) + b_out
else:
out = einsum_bij_jk_to_bik(z, w_out) + b_out
Y_predict = out[:, -1, :]
loss_recall = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets, logits=Y_predict))
# Define the accuracy
Y_predict_num = tf.argmax(Y_predict, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(targets, Y_predict_num), dtype=tf.float32))
# Target regularization
with tf.name_scope('RegularizationLoss'):
# # Firing rate regularization
av = tf.reduce_mean(z, axis=(0, 1)) / dt
# # regularization_f0 = FLAGS.reg_rate / 1000
# # loss_regularization = tf.reduce_sum(tf.square(av - regularization_f0)) * FLAGS.reg
reg_l2 = FLAGS.reg_l2*(tf.nn.l2_loss(cell.w_in_val)+tf.nn.l2_loss(cell.w_rec_val)+tf.nn.l2_loss(w_out))
loss_regularization = reg_l2
# Aggregate the losses
with tf.name_scope('OptimizationScheme'):
global_step = tf.Variable(0, dtype=tf.int32, trainable=False)
learning_rate = tf.Variable(FLAGS.learning_rate, dtype=tf.float32, trainable=False)
decay_learning_rate_op = tf.assign(learning_rate, learning_rate * FLAGS.lr_decay) # Op to decay learning rate
loss = loss_recall +loss_regularization
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
if 0 < FLAGS.rewiring_connectivity:
mProp_tuple = (FLAGS.MDGLpp_mode==3 and FLAGS.custom_mode<0, cell)
train_step = rewiring_optimizer_wrapper(optimizer, loss, learning_rate, FLAGS.l1, FLAGS.rewiring_temperature,
FLAGS.rewiring_connectivity,
global_step=global_step,
var_list=tf.trainable_variables(), mProp_tuple=mProp_tuple)
else:
train_step = optimizer.minimize(loss=loss, global_step=global_step)
# Real-time plotting
# saver = tf.train.Saver()
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# if FLAGS.resume:
# saver.restore(sess, FLAGS.resume)
# print("Model restored.")
# Store some results across iterations
test_loss_list = []
# test_loss_with_reg_list = []
test_error_list = []
#tau_delay_list = []
#taub_list = []
training_time_list = []
time_to_ref_list = []
# Dictionaries of tensorflow ops to be evaluated simultaneously by a session
results_tensors = {'loss': loss,
'loss_recall': loss_recall,
'accuracy': accuracy,
'av': av,
'learning_rate': learning_rate,
'w_in_val': cell.W_in,
'w_rec_val': cell.w_rec_val,
'w_out': w_out,
}
if FLAGS.model == 'LSNN':
results_tensors['b_out'] = b_out
plot_result_tensors = {'inputs': inputs,
'z': z,
'psp': psp,
'Y_predict': Y_predict,
'z_regular': z_regular,
'z_adaptive': z_adaptive,
'targets': targets}
if FLAGS.model == 'LSNN':
plot_result_tensors['b_con'] = b
t_train = 0
for k_iter in range(FLAGS.n_iter):
# Decaying learning rate
if k_iter > 0 and np.mod(k_iter, FLAGS.lr_decay_every) == 0:
old_lr = sess.run(learning_rate)
new_lr = sess.run(decay_learning_rate_op)
print('Decaying learning rate: {:.2g} -> {:.2g}'.format(old_lr, new_lr))
# Print some values to monitor convergence
if np.mod(k_iter, FLAGS.print_every) == 0:
val_dict, input_stack = get_data_dict()
results_values, plot_results_values = sess.run([results_tensors, plot_result_tensors], feed_dict=val_dict)
# Storage of the results
# test_loss_with_reg_list.append(results_values['loss_reg'])
test_loss_list.append(results_values['loss_recall'])
test_error_list.append(results_values['accuracy'])
# if FLAGS.model == 'LSNN':
# taub_list.append(sess.run(cell.tau_adaptation))
# else:
# taub_list.append(-99)
training_time_list.append(t_train)
print(
'''Iteration {}, validation accuracy {:.3g} '''
.format(k_iter, test_error_list[-1],))
def get_stats(v):
if np.size(v) == 0:
                return np.nan, np.nan, np.nan, np.nan, np.nan, np.nan
min_val = np.min(v)
max_val = np.max(v)
k_min = np.sum(v == min_val)
k_max = np.sum(v == max_val)
return np.min(v), np.max(v), np.mean(v), np.std(v), k_min, k_max
firing_rate_stats = get_stats(results_values['av'] * 1000) # no *1000 for rate
# some connectivity statistics
rewired_ref_list = ['w_in_val', 'w_rec_val', 'w_out']
non_zeros = [np.sum(results_values[ref] != 0) for ref in rewired_ref_list]
sizes = [np.size(results_values[ref]) for ref in rewired_ref_list]
empirical_connectivity = np.sum(non_zeros) / np.sum(sizes)
empirical_connectivities = [nz / size for nz, size in zip(non_zeros, sizes)]
if FLAGS.verbose:
print('''
firing rate (Hz) min {:.0f} ({}) \t max {:.0f} ({}) \t average {:.0f} +- std {:.0f} (over neurons)
connectivity (total {:.3g})\t W_in {:.3g} \t W_rec {:.2g} \t\t w_out {:.2g}
number of non zero weights \t W_in {}/{} \t W_rec {}/{} \t w_out {}/{}
classification loss {:.2g}
learning rate {:.2g} \t training op. time {:.2g}s
'''.format(
firing_rate_stats[0], firing_rate_stats[4], firing_rate_stats[1], firing_rate_stats[5],
firing_rate_stats[2], firing_rate_stats[3],
empirical_connectivity,
empirical_connectivities[0], empirical_connectivities[1], empirical_connectivities[2],
non_zeros[0], sizes[0],
non_zeros[1], sizes[1],
non_zeros[2], sizes[2],
results_values['loss_recall'],
results_values['learning_rate'], t_train,
))
# Save files result
if FLAGS.save_data:
results = {
'error': test_error_list[-1],
'loss': test_loss_list[-1],
'error_list': test_error_list,
'loss_list': test_loss_list,
'time_to_ref': time_to_ref_list,
'training_time': training_time_list,
#'tau_delay_list': tau_delay_list,
#'taub': taub_list[-1],
'flags': flag_dict,
}
save_file(results, storage_path, 'results', file_type='json')
# train
t0 = time()
train_dict, input_stack = get_data_dict()
final_state_value, _ = sess.run([final_state, train_step], feed_dict=train_dict)
t_train = time() - t0
# if FLAGS.interactive_plot:
# update_mnist_plot(ax_list, fig, plt, cell, FLAGS, plot_results_values)
# del sess | 24,338 | 45.715931 | 844 | py |
ModProp | ModProp-main/bin/plot_curves.py | # -*- coding: utf-8 -*-
"""
Code for plotting the learning curves of saved runs
"""
import sys
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from file_saver_dumper_no_h5py import save_file, load_file, get_storage_path_reference
import json
import os
## Setup
SMALL_SIZE = 12
MEDIUM_SIZE = 14
BIGGER_SIZE = 16
M = 15 # moving avg parameter, odd number
# Paths
results_path = './results/delayedXOR_task/'
file_name = 'results'
## Plot results
def movingmean(data_set, periods=3):
data_set = np.array(data_set)
if periods > 1:
weights = np.ones(periods) / periods
return np.convolve(data_set, weights, mode='valid')
else:
return data_set
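# --- Illustrative check (added for exposition; not used by the script) ---
# movingmean with periods=3 averages each window of three consecutive points and, because
# np.convolve is used in 'valid' mode, shortens the series by periods - 1 samples.
assert np.allclose(movingmean([1., 2., 3., 4.], periods=3), [2., 3.])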
def iter_loss_acc(results_path, file_name, M, comment):
all_f = os.listdir(results_path)
flist = []
for f in range(len(all_f)):
if comment in all_f[f]:
flist.append(all_f[f])
if len(flist) > 0:
plot_len = -1
for f in range(len(flist)):
file_path = results_path + flist[f]
results_ = load_file(file_path,file_name,file_type='json')
if f==0:
loss = np.expand_dims(movingmean(results_['loss_list'][0:plot_len] ,M),axis=0)
else:
trial_loss = np.expand_dims(movingmean(results_['loss_list'][0:plot_len] ,M),axis=0)
loss = np.concatenate((loss, trial_loss), axis=0)
# remove the worst run
loss_auc = np.sum(loss, axis=1)
max_ind = np.argmax(loss_auc)
loss = np.delete(loss, obj=max_ind, axis=0)
# remove the best run
loss_auc = np.sum(loss, axis=1)
min_ind = np.argmin(loss_auc)
loss = np.delete(loss, obj=min_ind, axis=0)
mean_loss = np.mean(loss, axis=0)
std_loss = np.std(loss, axis=0,ddof=1)
iterlist = np.arange(M, M+loss.shape[1])
else: # didn't fetch any files
iterlist=np.empty(1000)
iterlist[:]=np.nan
mean_loss=np.empty(1000)
mean_loss[:]=np.nan
std_loss=np.empty(1000)
std_loss[:]=np.nan
return iterlist, mean_loss, std_loss
comment_list = [['ModProp_Wab', 'c', (0.75, 0.9, 0.9),'ModProp_Wab'] ]
sim_list = ['_lr0.0005', '_lr0.001']
samp_len = 50
fig0 = plt.figure()
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)
for ii in range(len(comment_list)):
bestMeas = np.Inf
for sim_mode in sim_list:
comm1_iterlist, mean1_comm, std1_comm = iter_loss_acc(results_path, file_name, M, comment_list[ii][0] + sim_mode)
if np.mean(mean1_comm[-samp_len:]) < bestMeas: # take the best curve across different hyperparameters explored
comm_iterlist, mean_comm, std_comm = (comm1_iterlist, mean1_comm, std1_comm)
bestMeas = np.mean(mean1_comm[-samp_len:])
plt.plot(comm_iterlist, mean_comm, color=comment_list[ii][1],label=comment_list[ii][3])
plt.fill_between(comm_iterlist, mean_comm-std_comm, mean_comm+std_comm,color=comment_list[ii][2])
plt.legend();
plt.xlabel('Training Iterations (x20)')
plt.ylabel('Loss');
plt.title('Delayed XOR')
plt.ylim([0.0, 0.7]) | 3,600 | 33.961165 | 121 | py |
ModProp | ModProp-main/lsnn/spiking_models.py | """
The Clear BSD License
Copyright (c) 2019 the LSNN team, institute for theoretical computer science, TU Graz
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of LSNN nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from distutils.version import LooseVersion
import datetime
from collections import OrderedDict
from collections import namedtuple
import numpy as np
import numpy.random as rd
import tensorflow as tf
from tensorflow.python.framework import function
from tensorflow.python.framework.ops import Tensor
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
from tensorflow.python.ops.variables import Variable, RefVariable
else:
print("Using tensorflow version older then 1.11 -> skipping RefVariable storing")
from tensorflow.python.ops.variables import Variable
from lsnn.toolbox.rewiring_tools import weight_sampler
from lsnn.toolbox.tensorflow_einsums.einsum_re_written import einsum_bi_ijk_to_bjk
from lsnn.toolbox.tensorflow_utils import tf_roll
from time import time
Cell = tf.contrib.rnn.BasicRNNCell
def placeholder_container_for_rnn_state(cell_state_size, dtype, batch_size, name='TupleStateHolder'):
with tf.name_scope(name):
default_dict = cell_state_size._asdict()
placeholder_dict = OrderedDict({})
for k, v in default_dict.items():
if np.shape(v) == ():
v = [v]
shape = np.concatenate([[batch_size], v])
placeholder_dict[k] = tf.placeholder(shape=shape, dtype=dtype, name=k)
placeholder_tuple = cell_state_size.__class__(**placeholder_dict)
return placeholder_tuple
def feed_dict_with_placeholder_container(dict_to_update, state_holder, state_value, batch_selection=None):
if state_value is None:
return dict_to_update
assert state_holder.__class__ == state_value.__class__, 'Should have the same class, got {} and {}'.format(
state_holder.__class__, state_value.__class__)
for k, v in state_value._asdict().items():
if batch_selection is None:
dict_to_update.update({state_holder._asdict()[k]: v})
else:
dict_to_update.update({state_holder._asdict()[k]: v[batch_selection]})
return dict_to_update
#################################
# Spike function
#################################
@tf.custom_gradient
def SpikeFunction(v_scaled, dampening_factor):
z_ = tf.greater(v_scaled, 0.)
z_ = tf.cast(z_, dtype=tf.float32)
def grad(dy):
dE_dz = dy
dz_dv_scaled = tf.maximum(1 - tf.abs(v_scaled), 0)
dz_dv_scaled *= dampening_factor
dE_dv_scaled = dE_dz * dz_dv_scaled
return [dE_dv_scaled,
tf.zeros_like(dampening_factor)]
return tf.identity(z_, name="SpikeFunction"), grad
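# --- Illustrative sketch (added for exposition; not part of the model) ---
# The custom gradient above replaces the non-existent derivative of the step function
# with a dampened triangular window around the threshold crossing:
# dz/dv_scaled = dampening_factor * max(1 - |v_scaled|, 0). The NumPy function below
# evaluates that pseudo-derivative; the default value is the one used elsewhere in this file.
def _pseudo_derivative_sketch(v_scaled, dampening_factor=0.3):
    import numpy as np
    v_scaled = np.asarray(v_scaled, dtype=float)
    return dampening_factor * np.maximum(1.0 - np.abs(v_scaled), 0.0)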
def weight_matrix_with_delay_dimension(w, d, n_delay):
"""
Generate the tensor of shape n_in x n_out x n_delay that represents the synaptic weights with the right delays.
:param w: synaptic weight value, float tensor of shape (n_in x n_out)
:param d: delay number, int tensor of shape (n_in x n_out)
:param n_delay: number of possible delays
:return:
"""
with tf.name_scope('WeightDelayer'):
w_d_list = []
for kd in range(n_delay):
mask = tf.equal(d, kd)
w_d = tf.where(condition=mask, x=w, y=tf.zeros_like(w))
w_d_list.append(w_d)
delay_axis = len(d.shape)
WD = tf.stack(w_d_list, axis=delay_axis)
return WD
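# --- Illustrative sketch (added for exposition; not part of the model) ---
# The helper above spreads an (n_in x n_out) weight matrix over a third delay axis, so
# that WD[i, j, d] equals w[i, j] when d matches the delay of synapse (i, j) and is zero
# otherwise. A NumPy restatement for a 2x2 example; the numbers are assumptions.
def _delay_expansion_sketch():
    import numpy as np
    w = np.array([[0.5, -0.2], [0.1, 0.3]])
    d = np.array([[0, 1], [1, 0]])           # per-synapse delay index
    n_delay = 2
    WD = np.zeros(w.shape + (n_delay,))
    for kd in range(n_delay):
        WD[:, :, kd] = np.where(d == kd, w, 0.0)
    return WD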
# PSP on output layer
def exp_convolve(tensor, decay): # tensor shape (trial, time, neuron)
with tf.name_scope('ExpConvolve'):
assert tensor.dtype in [tf.float16, tf.float32, tf.float64]
tensor_time_major = tf.transpose(tensor, perm=[1, 0, 2])
initializer = tf.zeros_like(tensor_time_major[0])
filtered_tensor = tf.scan(lambda a, x: a * decay + (1 - decay) * x, tensor_time_major, initializer=initializer)
filtered_tensor = tf.transpose(filtered_tensor, perm=[1, 0, 2])
return filtered_tensor
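# --- Illustrative sketch (added for exposition; not part of the model) ---
# exp_convolve applies the first-order low-pass filter y[t] = decay * y[t-1] + (1 - decay) * x[t]
# along the time axis, initialised at zero. The NumPy loop below computes the same filter
# for a (time, neuron) array and is only a restatement for clarity.
def _exp_convolve_sketch(x, decay):
    import numpy as np
    x = np.asarray(x, dtype=float)
    y = np.zeros_like(x)
    acc = np.zeros(x.shape[1])
    for t in range(x.shape[0]):
        acc = acc * decay + (1 - decay) * x[t]
        y[t] = acc
    return y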
LIFStateTuple = namedtuple('LIFStateTuple', ('v', 'z', 'i_future_buffer', 'z_buffer'))
def tf_cell_to_savable_dict(cell, sess, supplement={}):
"""
    Useful function to return a python/numpy object from one of the tensorflow cell objects defined here.
    The idea is simply that variables and Tensors given as attributes of the object will be replaced by their numpy values evaluated on the current tensorflow session.
    :param cell: tensorflow cell object
    :param sess: tensorflow session
    :param supplement: additional entries to store alongside the cell attributes
:return:
"""
dict_to_save = {}
dict_to_save['cell_type'] = str(cell.__class__)
time_stamp = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
dict_to_save['time_stamp'] = time_stamp
dict_to_save.update(supplement)
tftypes = [Variable, Tensor]
if LooseVersion(tf.__version__) >= LooseVersion("1.11"):
tftypes.append(RefVariable)
for k, v in cell.__dict__.items():
if k == 'self':
pass
elif type(v) in tftypes:
dict_to_save[k] = sess.run(v)
elif type(v) in [bool, int, float, np.int64, np.ndarray]:
dict_to_save[k] = v
else:
print('WARNING: attribute of key {} and value {} has type {}, recoding it as string.'.format(k, v, type(v)))
dict_to_save[k] = str(v)
return dict_to_save
class LIF(Cell):
def __init__(self, n_in, n_rec, tau=20., thr=0.03,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1, rewiring_connectivity=-1,
in_neuron_sign=None, rec_neuron_sign=None,
dampening_factor=0.3,
injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
:param reset: method of resetting membrane potential after spike thr-> by fixed threshold amount, zero-> to zero
"""
if np.isscalar(tau): tau = tf.ones(n_rec, dtype=dtype) * np.mean(tau)
if np.isscalar(thr): thr = tf.ones(n_rec, dtype=dtype) * np.mean(thr)
tau = tf.cast(tau, dtype=dtype)
dt = tf.cast(dt, dtype=dtype)
self.dampening_factor = dampening_factor
# Parameters
self.n_delay = n_delay
self.n_refractory = n_refractory
self.dt = dt
self.n_in = n_in
self.n_rec = n_rec
self.data_type = dtype
self._num_units = self.n_rec
self.tau = tf.Variable(tau, dtype=dtype, name="Tau", trainable=False)
self._decay = tf.exp(-dt / tau)
self.thr = tf.Variable(thr, dtype=dtype, name="Threshold", trainable=False)
self.V0 = V0
self.injected_noise_current = injected_noise_current
self.rewiring_connectivity = rewiring_connectivity
self.in_neuron_sign = in_neuron_sign
self.rec_neuron_sign = rec_neuron_sign
with tf.variable_scope('InputWeights'):
# Input weights
if 0 < rewiring_connectivity < 1:
self.w_in_val, self.w_in_sign, self.w_in_var, _ = weight_sampler(n_in, n_rec, rewiring_connectivity,
neuron_sign=in_neuron_sign)
else:
self.w_in_var = tf.Variable(rd.randn(n_in, n_rec) / np.sqrt(n_in), dtype=dtype, name="InputWeight")
self.w_in_val = self.w_in_var
self.w_in_val = self.V0 * self.w_in_val
self.w_in_delay = tf.Variable(rd.randint(self.n_delay, size=n_in * n_rec).reshape(n_in, n_rec),
dtype=tf.int64, name="InDelays", trainable=False)
self.W_in = weight_matrix_with_delay_dimension(self.w_in_val, self.w_in_delay, self.n_delay)
with tf.variable_scope('RecWeights'):
if 0 < rewiring_connectivity < 1:
self.w_rec_val, self.w_rec_sign, self.w_rec_var, _ = weight_sampler(n_rec, n_rec,
rewiring_connectivity,
neuron_sign=rec_neuron_sign)
else:
if rec_neuron_sign is not None or in_neuron_sign is not None:
raise NotImplementedError('Neuron sign requested but this is only implemented with rewiring')
self.w_rec_var = Variable(rd.randn(n_rec, n_rec) / np.sqrt(n_rec), dtype=dtype,
name='RecurrentWeight')
self.w_rec_val = self.w_rec_var
recurrent_disconnect_mask = np.diag(np.ones(n_rec, dtype=bool))
self.w_rec_val = self.w_rec_val * self.V0
            self.w_rec_val = tf.where(recurrent_disconnect_mask, tf.zeros_like(self.w_rec_val),
                                      self.w_rec_val)  # Disconnect autapse
self.w_rec_delay = tf.Variable(rd.randint(self.n_delay, size=n_rec * n_rec).reshape(n_rec, n_rec),
dtype=tf.int64, name="RecDelays", trainable=False)
self.W_rec = weight_matrix_with_delay_dimension(self.w_rec_val, self.w_rec_delay, self.n_delay)
@property
def state_size(self):
return LIFStateTuple(v=self.n_rec,
z=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
@property
def output_size(self):
return self.n_rec
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return LIFStateTuple(
v=v0,
z=z0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = LIFStateTuple(v=new_v,
z=new_z,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return new_z, new_state
def LIF_dynamic(self, v, z, z_buffer, i_future_buffer, thr=None, decay=None, n_refractory=None, add_current=0.):
"""
        Function that generates the next spike and voltage tensors for a given cell state.
:param v
:param z
:param z_buffer:
:param i_future_buffer:
:param thr:
:param decay:
:param n_refractory:
:param add_current:
:return:
"""
if self.injected_noise_current > 0:
add_current = tf.random_normal(shape=z.shape, stddev=self.injected_noise_current)
with tf.name_scope('LIFdynamic'):
if thr is None: thr = self.thr
if decay is None: decay = self._decay
if n_refractory is None: n_refractory = self.n_refractory
i_t = i_future_buffer[:, :, 0] + add_current
I_reset = z * thr * self.dt
new_v = decay * v + (1 - decay) * i_t - I_reset
# Spike generation
            v_scaled = (new_v - thr) / thr
# new_z = differentiable_spikes(v_scaled=v_scaled)
new_z = SpikeFunction(v_scaled, self.dampening_factor)
if n_refractory > 0:
is_ref = tf.greater(tf.reduce_max(z_buffer[:, :, -n_refractory:], axis=2), 0)
new_z = tf.where(is_ref, tf.zeros_like(new_z), new_z)
new_z = new_z * 1 / self.dt
return new_v, new_z
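# --- Illustrative sketch (added for exposition; not part of the model) ---
# A generic spiking LIF step in the spirit of LIF_dynamic above: integrate the input with
# leak, subtract the reset current of neurons that just spiked, threshold, and silence
# neurons that spiked within the last n_refractory steps. The formulation below thresholds
# the updated potential and omits the 1/thr scaling; shapes and constants are assumptions.
def _spiking_lif_step_sketch(v, z, z_buffer, i_t, thr=0.03, decay=0.95, dt=1.0, n_refractory=2):
    import numpy as np
    new_v = decay * v + (1 - decay) * i_t - z * thr * dt   # leaky integration + reset
    new_z = (new_v > thr).astype(float)                    # hard threshold
    if n_refractory > 0:
        is_ref = z_buffer[:, -n_refractory:].max(axis=1) > 0
        new_z = np.where(is_ref, 0.0, new_z)
    return new_v, new_z / dt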
ALIFStateTuple = namedtuple('ALIFState', (
'z',
'v',
'b',
'i_future_buffer',
'z_buffer'))
class ALIF(LIF):
def __init__(self, n_in, n_rec, tau=20, thr=0.01,
dt=1., n_refractory=0, dtype=tf.float32, n_delay=1,
tau_adaptation=200., beta=1.6,
rewiring_connectivity=-1, dampening_factor=0.3,
in_neuron_sign=None, rec_neuron_sign=None, injected_noise_current=0.,
V0=1.):
"""
Tensorflow cell object that simulates a LIF neuron with an approximation of the spike derivatives.
:param n_in: number of input neurons
:param n_rec: number of recurrent neurons
:param tau: membrane time constant
:param thr: threshold voltage
:param dt: time step of the simulation
:param n_refractory: number of refractory time steps
:param dtype: data type of the cell tensors
        :param n_delay: number of synaptic delays; the delay range goes from 1 to n_delay time steps
:param tau_adaptation: adaptation time constant for the threshold voltage
        :param beta: amplitude of adaptation
:param rewiring_connectivity: number of non-zero synapses in weight matrices (at initialization)
:param in_neuron_sign: vector of +1, -1 to specify input neuron signs
        :param rec_neuron_sign: same for recurrent neurons
:param injected_noise_current: amplitude of current noise
:param V0: to choose voltage unit, specify the value of V0=1 Volt in the desired unit (example V0=1000 to set voltage in millivolts)
"""
super(ALIF, self).__init__(n_in=n_in, n_rec=n_rec, tau=tau, thr=thr, dt=dt, n_refractory=n_refractory,
dtype=dtype, n_delay=n_delay,
rewiring_connectivity=rewiring_connectivity,
dampening_factor=dampening_factor, in_neuron_sign=in_neuron_sign,
rec_neuron_sign=rec_neuron_sign,
injected_noise_current=injected_noise_current,
V0=V0)
if tau_adaptation is None: raise ValueError("alpha parameter for adaptive bias must be set")
if beta is None: raise ValueError("beta parameter for adaptive bias must be set")
self.tau_adaptation = tf.Variable(tau_adaptation, dtype=dtype, name="TauAdaptation", trainable=False)
self.beta = tf.Variable(beta, dtype=dtype, name="Beta", trainable=False)
self.decay_b = np.exp(-dt / tau_adaptation)
@property
def output_size(self):
return [self.n_rec, self.n_rec, self.n_rec]
@property
def state_size(self):
return ALIFStateTuple(v=self.n_rec,
z=self.n_rec,
b=self.n_rec,
i_future_buffer=(self.n_rec, self.n_delay),
z_buffer=(self.n_rec, self.n_refractory))
def zero_state(self, batch_size, dtype, n_rec=None):
if n_rec is None: n_rec = self.n_rec
v0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
z0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
b0 = tf.zeros(shape=(batch_size, n_rec), dtype=dtype)
i_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_delay), dtype=dtype)
z_buff0 = tf.zeros(shape=(batch_size, n_rec, self.n_refractory), dtype=dtype)
return ALIFStateTuple(
v=v0,
z=z0,
b=b0,
i_future_buffer=i_buff0,
z_buffer=z_buff0
)
def __call__(self, inputs, state, scope=None, dtype=tf.float32):
with tf.name_scope('ALIFcall'):
i_future_buffer = state.i_future_buffer + einsum_bi_ijk_to_bjk(inputs, self.W_in) + einsum_bi_ijk_to_bjk(
state.z, self.W_rec)
new_b = self.decay_b * state.b + (1. - self.decay_b) * state.z
thr = self.thr + new_b * self.beta * self.V0
new_v, new_z = self.LIF_dynamic(
v=state.v,
z=state.z,
z_buffer=state.z_buffer,
i_future_buffer=i_future_buffer,
decay=self._decay,
thr=thr)
new_z_buffer = tf_roll(state.z_buffer, new_z, axis=2)
new_i_future_buffer = tf_roll(i_future_buffer, axis=2)
new_state = ALIFStateTuple(v=new_v,
z=new_z,
b=new_b,
i_future_buffer=new_i_future_buffer,
z_buffer=new_z_buffer)
return [new_z, new_v, thr], new_state
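# --- Illustrative sketch (added for exposition; not part of the model) ---
# The adaptive threshold in ALIF.__call__ raises the firing threshold after every spike and
# lets it relax back with time constant tau_adaptation:
# b <- rho * b + (1 - rho) * z with rho = exp(-dt / tau_adaptation), and thr_t = thr + beta * b * V0.
# The trace below shows the threshold decaying back after a short burst; the numbers are
# assumptions made only for this sketch.
def _adaptive_threshold_trace_sketch():
    import numpy as np
    dt, tau_adaptation, beta, thr0, V0 = 1.0, 200.0, 1.6, 0.01, 1.0
    rho = np.exp(-dt / tau_adaptation)
    spikes = np.zeros(50)
    spikes[5:10] = 1.0                      # a short burst of spikes
    b, trace = 0.0, []
    for z in spikes:
        b = rho * b + (1 - rho) * z
        trace.append(thr0 + beta * b * V0)
    return np.array(trace)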
def static_rnn_with_gradient(cell, inputs, state, loss_function, T, verbose=True):
batch_size = tf.shape(inputs)[0]
thr_list = []
state_list = []
z_list = []
v_list = []
if verbose: print('Building forward Graph...', end=' ')
t0 = time()
for t in range(T):
outputs, state = cell(inputs[:, t, :], state)
z, v, thr = outputs
z_list.append(z)
v_list.append(v)
thr_list.append(thr)
state_list.append(state)
zs = tf.stack(z_list, axis=1)
vs = tf.stack(v_list, axis=1)
thrs = tf.stack(thr_list, axis=1)
loss = loss_function(zs)
de_dz_partial = tf.gradients(loss, zs)[0]
if de_dz_partial is None:
de_dz_partial = tf.zeros_like(zs)
print('Warning: Partial de_dz is None')
print('Done in {:.2f}s'.format(time() - t0))
def namedtuple_to_list(state):
return list(state._asdict().values())
zero_state_as_list = cell.zero_state(batch_size, tf.float32)
de_dstate = namedtuple_to_list(cell.zero_state(batch_size, dtype=tf.float32))
g_list = []
if verbose: print('Building backward Graph...', end=' ')
t0 = time()
for t in np.arange(T)[::-1]:
# gradient from next state
if t < T - 1:
state = namedtuple_to_list(state_list[t])
next_state = namedtuple_to_list(state_list[t + 1])
de_dstate = tf.gradients(ys=next_state, xs=state, grad_ys=de_dstate)
for k_var, de_dvar in enumerate(de_dstate):
if de_dvar is None:
de_dstate[k_var] = tf.zeros_like(zero_state_as_list[k_var])
print('Warning: var {} at time {} is None'.format(k_var, t))
# add the partial derivative due to current error
de_dstate[0] = de_dstate[0] + de_dz_partial[:, t]
g_list.append(de_dstate[0])
g_list = list(reversed(g_list))
gs = tf.stack(g_list, axis=1)
print('Done in {:.2f}s'.format(time() - t0))
return zs, vs, thrs, gs, state_list[-1]
| 21,033 | 39.922179 | 844 | py |
ModProp | ModProp-main/lsnn/__init__.py | 0 | 0 | 0 | py |