repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---
SLOPpy | SLOPpy-main/SLOPpy/subroutines/constants.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division # no more "zero" integer division bugs!:P
import numpy as np # array
# radians, degrees conversions etc.
pi = 4.*np.arctan(1.)
dpi = 2.*pi
deg2rad = pi/180.
rad2deg = 180./pi
# various
TOLERANCE = np.finfo(np.float64(1.0)).eps
d2s = 86400. # seconds in a day = 24h = 86400 s
d2m = 1440. # min in a day = 1440. min
# masses conversions
Msmer = 6.0236e6 # Msun to Mmer
Mmers = 1./Msmer # Mmer to Msun
Msven = 4.08523719e5 # Msun to Mven
Mvens = 1./Msven # Mven to Msun
Msear = 332946.0487 # Msun to Mear
Mears = 1./Msear # Mear to Msun
Msmar = 3.09870359e6 # Msun to Mmar
Mmars = 1./Msmar # Mmar to Msun
Msjup = 1.047348644e3 # Msun to Mjup
Mjups = 1./Msjup # Mjup to Msun
Mssat = 3.4979018e3 # Msun to Msat
Msats = 1./Mssat # Msat to Msun
Msura = 2.290298e4 # Msun to Mura
Muras = 1./Msura # Mura to Msun
Msnep = 1.941226e4 # Msun to Mnep
Mneps = 1./Msnep # Mnep to Msun
Mejup = Mears * Msjup # Mear to Mjup
Mjear = Mjups * Msear # Mjup to Mear
# masses of Solar System objects
Msun = 1.9884e30 # Sun mass in kg
Mmer = Msun*Mmers # Mercury mass in kg
Mven = Msun*Mvens # Venus mass in kg
Mear = 5.9722e24 # Earth mass in kg
Mmar = Msun*Mmars # Mars mass in kg
Mjup = Msun*Mjups # Jupiter mass in kg
Msat = Msun*Msats # Saturn mass in kg
Mura = Msun*Muras # Uranus mass in kg
Mnep = Msun*Mneps # Neptune mass in kg
# radii of Solar System objects
Rsun = 696000. # Sun radius in km
Rmer = 2439.7 # Mercury radius in km
Rven = 6051.8 # Venus radius in km
Rear = 6378.1366 # Earth radius in km
Rmar = 3396.19 # Mars radius in km
Rjup = 71492. # Jupiter radius in km
Rsat = 60268. # Saturn radius in km
Rura = 25559. # Uranus radius in km
Rnep = 24764. # Neptune radius in km
Rplu = 1195. # Pluto radius in km
#
Rsjup = Rsun/Rjup # Rsun to Rjup
Rjups = Rjup/Rsun # Rjup to Rsun
Rsear = Rsun/Rear # Rsun to Rear
Rears = Rear/Rsun # Rear to Rsun
Rsnep = Rsun/Rnep # Rsun to Rnep
Rneps = Rnep/Rsun # Rnep to Rsun
#
Rejup = Rear/Rjup # Rearth to Rjupiter
Rjear = Rjup/Rear # Rjupiter to Rearth
# astronomical constants
AU = 149597870700. #Astronomical Unit in meters
kappa = 0.01720209895 # Gaussian gravitational constant
Giau = kappa*kappa # G [AU^3/Msun/d^2]
Gsi = 6.67428e-11 # Gravitational constant in SI units [m^3/kg/s^2]
Gaumjd = Gsi*d2s*d2s*Mjup/(AU**3) # G in [AU,Mjup,day]
speed = 299792458. # speed of light (c) in [m/s]
speedaud = speed*d2s/AU # speed of light in [AU/d]
pc2AU = 206264.806
# others
RsunAU = (Rsun*1.e3)/AU #Sun radius in AU
RjupAU = (Rjup*1.e3)/AU #Jupiter radius in AU
MJD = 2400000.5 # MJD ref time to convert to JD
speed_of_light = speed
sigma2fwhm = 2. * np.sqrt(2. * np.log(2))
speed_of_light_km = speed / 1000. | 2,781 | 28.595745 | 73 | py |
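# Usage sketch (not part of the original file): a few of the conversions defined
# in constants.py above, assuming SLOPpy is installed so that the module can be
# imported; the values quoted in the comments are approximate.
from SLOPpy.subroutines import constants as cst

def _example_constants():
    mass_mear = 0.5 * cst.Mjear              # 0.5 Jupiter masses -> ~159 Earth masses
    v_over_c = 15.2e3 / cst.speed_of_light   # a 15.2 km/s shift as a fraction of c
    fwhm_pix = 2.5 * cst.sigma2fwhm          # Gaussian sigma of 2.5 pixels -> FWHM in pixels
    return mass_mear, v_over_c, fwhm_pix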
SLOPpy | SLOPpy-main/SLOPpy/subroutines/shortcuts.py | from __future__ import print_function, division
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.common import *
def retrieve_observations(output_name, night, observations,
use_refraction=True, use_telluric=True, use_interstellar=True,
use_telluric_spline= False):
# config_in['output'], night, lists['observations']
""" Retrieving the observations"""
try:
sky_corrected = load_from_cpickle('skycorrected_fibA', output_name, night)
input_data = load_from_cpickle('input_dataset_fibA', output_name, night)
for obs in observations:
input_data[obs]['e2ds'] = sky_corrected[obs]['e2ds'].copy()
print(" Sky contamination correction: applied ")
except:
input_data = load_from_cpickle('input_dataset_fibA', output_name, night)
try:
""" Retrieving the telluric correction"""
telluric = load_from_cpickle('telluric', output_name, night)
correct_telluric = True
except:
correct_telluric = False
if use_telluric:
print(" No telluric correction available - is this expected? ")
""" Retrieval of refraction correction, if present """
try:
try:
refraction = load_from_cpickle('refraction_update', output_name, night)
correct_refraction = True
except:
refraction = load_from_cpickle('refraction', output_name, night)
correct_refraction = True
except:
correct_refraction = False
if use_refraction:
print(" No refraction correction available - is this expected? ")
""" Retrieval of refraction correction, if present """
try:
interstellar = load_from_cpickle('interstellar_lines', output_name, night)
correct_interstellar = True
except:
correct_interstellar = False
if use_interstellar:
print(" No interstellar lines correction available - is this expected? ")
for obs in observations:
""" Spectra are collected for variations of differential refraction,
if the correction has been computed before"""
if correct_refraction and use_refraction:
try:
input_data[obs]['e2ds'] /= refraction[obs]['fit_e2ds']
input_data[obs]['e2ds_err'] /= refraction[obs]['fit_e2ds']
except:
input_data[obs]['e2ds'] /= refraction[obs]['polyfit_e2ds']
input_data[obs]['e2ds_err'] /= refraction[obs]['polyfit_e2ds']
if correct_telluric and use_telluric:
if use_telluric_spline:
input_data[obs]['e2ds'] /= telluric[obs]['spline']
input_data[obs]['e2ds_err'] /= telluric[obs]['spline']
else:
input_data[obs]['e2ds'] /= telluric[obs]['spectrum']
input_data[obs]['e2ds_err'] /= telluric[obs]['spectrum']
if correct_interstellar and use_interstellar:
input_data[obs]['e2ds'] /= interstellar[obs]['correction']
input_data[obs]['e2ds_err'] /= interstellar[obs]['correction']
if correct_refraction and use_refraction:
print(" Differential refraction correction: applied ")
if correct_telluric and use_telluric:
print(" Telluric correction: applied ")
if correct_interstellar and use_interstellar:
print(" Interstellar lines correction: applied ")
print()
return input_data
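# Usage sketch (not part of the original module): how retrieve_observations is
# typically called, following the comment inside the function above; config_in,
# night and lists are assumed to be the standard SLOPpy configuration objects.
def _example_retrieve_observations(config_in, night, lists):
    input_data = retrieve_observations(config_in['output'], night,
                                       lists['observations'],
                                       use_refraction=True,
                                       use_telluric=True,
                                       use_interstellar=True)
    # each observation is a dictionary with the (corrected) e2ds fluxes and errors
    return {obs: input_data[obs]['e2ds'] for obs in lists['observations']}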
def compute_rescaling(wave, flux, wavelength_range):
wl_med = np.average(wavelength_range)
wl_dif = wavelength_range[1]-wl_med
sel = np.where(np.abs(wave-wl_med) < wl_dif)
return np.median(flux[sel])
def perform_rescaling(wave, flux, ferr, wavelength_range):
factor = compute_rescaling(wave, flux, wavelength_range)
# flux_out = flux / factor
# ferr_out = ferr / factor
return factor, flux / factor, ferr / factor
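# Minimal sketch (not part of the original module): normalise a spectrum by the
# median flux inside a reference wavelength range; wavelength_range is read as
# [min, max] and the selection is |wave - mid| < half-width. The range used here
# is an arbitrary illustration (around the Na doublet).
def _example_perform_rescaling(wave, flux, ferr):
    factor, flux_rescaled, ferr_rescaled = perform_rescaling(
        wave, flux, ferr, wavelength_range=[5880.0, 5910.0])
    return factor, flux_rescaled, ferr_rescaled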
def replace_values(vals, threshold, replacement=None):
if replacement is None:
replacement = np.median(vals)
null = (vals <= threshold)
vals[null] = replacement
return vals, null
def replace_values_errors(vals, errs, threshold, replacement=None, error_replacement=None):
if replacement is None:
replacement = np.median(vals)
if error_replacement is None:
error_replacement = np.median(errs)*10.0
null = (vals <= threshold)
vals[null] = replacement
errs[null] = error_replacement
return vals, errs, null
def replace_values_errors_with_interpolation_1d(vals,
errs=None,
less_than=None,
greater_than=None,
force_positive=False,
error_replacement=None,
sigma_iter=1):
"""
Replace outliers in the input array and the associated arrors array (if provided)
Written as a fix to out-of-boundaries rebinning
At least one between less_than or greater_than must be provided
:param vals: input array to be fixed
:param errs: error array associated to input array
:param less_than: lower threshold for considering a value as outlier.
:param greater_than: upper threshold for considering a value as outlier.
:param error_replacement: optional, error to be associated to the outlier.
if not provided, ten times the error of the interpolated points is taken
:param sigma_iter: optional, if provided it will consider less_than/greater_than as sigma multiplicators
:return: vals, errs, null
"""
if errs is None:
errt = vals*0.0
else:
errt = errs
if sigma_iter <= 1:
use_sigma = False
else:
use_sigma = True
if force_positive:
null = (vals <= 0.00000000001)
else:
null = (~np.isfinite(vals))
for ii in range(0, sigma_iter):
if use_sigma:
median = np.median(vals)
sigma = np.std(vals)
if less_than:
threshold_less = median - sigma*less_than
if greater_than:
threshold_more = median + sigma*greater_than
else:
if less_than:
threshold_less = less_than
if greater_than:
threshold_more = greater_than
if less_than and greater_than:
ind_sel = np.where((vals < threshold_less) | (vals > threshold_more) | null)[0]
elif less_than:
ind_sel = np.where((vals < threshold_less) | null)[0]
elif greater_than:
ind_sel = np.where((vals > threshold_more) | null)[0]
elif force_positive:
ind_sel = np.where((vals < 0.00001) | null)[0]
else:
raise ValueError('Provide at least one condition')
null[ind_sel] = True
i_e = 0
for jj in ind_sel:
if jj < i_e: continue
i_s = jj
i_e = jj
while i_s in ind_sel:
i_s -= 1
if i_s < 0:
break
while i_e in ind_sel:
i_e += 1
if i_e >= vals.size:
break
if i_s < 0 and i_e >= vals.size:
raise ValueError('Invalid input, something must be wrong in the input array')
elif i_s < 0:
vals[:i_e] = vals[i_e]
errt[:i_e] = errt[i_e]*10.
elif i_e >= vals.size:
vals[i_s:] = vals[i_s]
errt[i_s:] = errt[i_s]*10.
else:
vals[i_s+1:i_e] = (vals[i_e]+vals[i_s])/2.
errt[i_s+1:i_e] = (errt[i_e]+errt[i_s])/2. * 10.  # inflated error for the interpolated values
if errs is None:
return vals, null
else:
if error_replacement is not None:
errs[ind_sel] = error_replacement
else:
errs[ind_sel] = errt[ind_sel]
return vals, errs, null
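# Minimal sketch (not part of the original module): flag every value below 0.1 as
# an outlier and replace it by interpolating across the gap; the errors of the
# replaced points are inflated by the routine.
def _example_replace_with_interpolation():
    vals = np.array([1.0, 1.1, 0.0, 0.0, 1.2, 1.3])
    errs = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    vals_fixed, errs_fixed, null = replace_values_errors_with_interpolation_1d(
        vals, errs, less_than=0.1)
    return vals_fixed, errs_fixed, null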
def replace_values_errors_with_interpolation_2d(vals,
errs=None,
less_than=None,
greater_than=None,
force_positive=False,
error_replacement=None,
sigma_iter=1,
axis=0):
if errs is None:
errt = vals*0.0
else:
errt = errs
null = (~np.isfinite(vals))
if axis ==0:
for n_order in range(0, np.size(vals[:,0])):
vals[n_order, :], errt[n_order,:], null[n_order, :] = replace_values_errors_with_interpolation_1d(
vals[n_order, :],
errt[n_order, :],
less_than,
greater_than,
force_positive,
error_replacement,
sigma_iter)
else:
for n_order in range(0, np.size(vals[0,:])):
vals[:, n_order], errt[:, n_order], null[:, n_order] = replace_values_errors_with_interpolation_1d(
vals[:, n_order],
errt[:, n_order],
less_than,
greater_than,
force_positive,
error_replacement,
sigma_iter)
if errs is None:
return vals, null
else:
return vals, errs, null
def compute_spline(wave_in, flux_in, knots_step, knot_order=3, use_null=None, use_selection=None):
""" Sometimes the interpolation range is just outside the spline evaluation range. The spline procedure will
fail even if the affected point are at the very extremes of the spectral range, i.e., points that would not be
inin the analysis anyway. We trick the spline procedure by adding fake points
"""
if use_null is not None:
selection = (~use_null)
elif use_selection is not None:
selection = use_selection
else:
selection = (wave_in > 0.000)
len_wave = np.sum(selection) + 6
wave = np.empty(len_wave, dtype=np.double)
spec = np.empty(len_wave, dtype=np.double)
wave[:3] = wave_in[0] - np.linspace(1.0, 0.5, 3)
wave[3:-3] = wave_in[selection]
wave[-3:] = wave_in[-1] + np.linspace(0.5, 1.0, 3)
spec[:3] = flux_in[selection][0]
spec[3:-3] = flux_in[selection]
spec[-3:] = flux_in[selection][-1]
""" computing the spline approximation of the rescaled master out"""
""" picking the number of knots """
nknots = (np.amax(wave) - np.amin(wave)) /knots_step
""" picking the indices of the knots"""
idx_knots = (np.arange(1, len(wave) - 1, (len(wave) - 2.) / nknots)).astype('int')
""" passing from indices to knots values """
spline_knots = wave[idx_knots]
spline_coeff = sci_int.splrep(wave, spec, task=-1, k=knot_order, t=spline_knots)
spline_eval = sci_int.splev(wave_in, spline_coeff)
return spline_eval, spline_coeff, spline_knots
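# Minimal sketch (not part of the original module): fit a cubic spline with one
# knot every 50 wavelength units (same units as wave) to a rescaled master-out
# spectrum and keep only the evaluation on the original grid.
def _example_compute_spline(wave, flux_rescaled):
    spline_eval, spline_coeff, spline_knots = compute_spline(
        wave, flux_rescaled, knots_step=50.0, knot_order=3)
    return spline_eval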
def f_telluric_rescaling_factor(x, x_range, tel1, tel2):
return np.sum((tel1-(1.+x[0]*x_range)-(tel2-1.)*x[1])**2)
def find_telluric_rescaling_factor(tel1, tel2):
fit_selection = (tel2 < 1.0)
x_range = np.arange(0.0, len(tel1), 1.0)/len(tel1)
x_start = np.zeros(2)
x_start[1] = np.median((tel1[fit_selection]-1.)/(tel2[fit_selection]-1.))
x_result = sci_optimize.minimize(f_telluric_rescaling_factor,
x_start,
args=(x_range, tel1, tel2),
method='Nelder-Mead',
options=dict(maxiter=10000))
return x_result['x'][1], x_result['x'][0], x_result['success']
def f_telluric_xslope(x, x_range, tel1, tel2, std):
return np.sum((tel1-(1.+x*x_range))**2/std**2)
def f_telluric_factor(x, tel1, tel2, std):
print(x, np.sum(((tel1-1.)-(tel2-1.)*x)**2/std**2))
return np.sum(((tel1-1.)-(tel2-1.)*x)**2/std**2)
def find_telluric_rescaling_factor_2steps(tel1, tel2):
fit_xslope = (tel2 > 0.99999)
fit_factor = (tel2 < 0.99999)
std_all = np.std(tel1)
x_range = np.arange(0.0, len(tel1), 1.0)/len(tel1)
x_start = 0.000
x_result = sci_optimize.minimize(f_telluric_xslope,
x_start,
args=(x_range[fit_xslope], tel1[fit_xslope], tel2[fit_xslope], std_all),
method='Nelder-Mead',
options=dict(maxiter=10000))
x_xslope = x_result['x'][0]
x_start = np.median((tel1[fit_factor]-1.-x_range[fit_factor]*x_xslope)/(tel2[fit_factor]-1.))
print('---> ', x_start)
x_result = sci_optimize.minimize(f_telluric_factor,
x_start,
args=(tel1[fit_factor]-x_range[fit_factor]*x_xslope, tel2[fit_factor], std_all),
method='Nelder-Mead',
options=dict(maxiter=10000))
x_factor = x_result['x'][0]
return x_factor, x_xslope, x_result['success']
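# Minimal sketch (not part of the original module): the two-step routine above
# models the observed telluric spectrum tel1 as a linear baseline plus a rescaled
# template telluric spectrum tel2,
#   tel1 ~ 1 + xslope*x + factor*(tel2 - 1),
# where the slope is estimated on line-free pixels and the factor on absorbed
# pixels. Synthetic example with a known factor of 0.8:
def _example_find_telluric_rescaling_factor_2steps():
    x = np.arange(0.0, 1000.0, 1.0) / 1000.0
    tel2 = 1.0 - 0.05 * np.exp(-0.5 * ((x - 0.5) / 0.01) ** 2)  # template telluric line
    tel1 = 1.0 + 0.001 * x + 0.8 * (tel2 - 1.0)                 # "observed" telluric line
    factor, xslope, success = find_telluric_rescaling_factor_2steps(tel1, tel2)
    return factor, xslope, success   # factor ~ 0.8, xslope ~ 0.001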
def write_molecfit_v1_par(filename_par, filename_data, filename_output, filename_include, molecfit_dict, observing_dict):
""" The observing_dict must contains these keywords:
'MJD': Observing date in years or MJD in days
'UTC': UTC in s
'ELEVATION': Telescope altitude angle in deg
'HUMIDITY': Humidity in %
'PRESSURE': Pressure in hPa
'TEMPERATURE_EN': Ambient temperature in deg C
'TEMPERATURE_M1': Mirror temperature in deg C
'GEOELEV': Elevation above sea level in m (default is Paranal: 2635m)
'GEOLONG': Longitude
'GEOLAT': Latitude
"""
fileout = open(filename_par, 'w')
fileout.write("### Driver for MOLECFIT\n")
# user working directory only important for REFLEX workflow and GUI
# not used by molecfit itself.
fileout.write("user_workdir:./\n")
## INPUT DATA
# Data file name (path relative to the current directory or absolute path)
fileout.write("filename: " + filename_data + "\n")
# ASCII list of files to be corrected for telluric absorption using the
# transmission curve derived from the input reference file (path of list and
# listed files relative to the current directory or absolute path; default: "none")
fileout.write("listname: none\n")
# Type of input spectrum -- 1 = transmission (default); 0 = emission
fileout.write("trans: 1\n")
# Names of the file columns (table) or extensions (image) containing:
# Wavelength Flux Flux_Err Mask
# - Flux_Err and/or Mask can be avoided by writing 'NULL'
# - 'NULL' is required for Wavelength if it is given by header keywords
# - parameter list: col_lam, col_flux, col_dflux, and col_mask
fileout.write("columns: Wavelength Flux NULL NULL\n")
# Default error relative to mean for the case that the error column is missing
fileout.write("default_error: 0.001\n")
# Multiplicative factor to convert wavelength to micron
# (e.g. nm -> wlgtomicron = 1e-3)
fileout.write("wlgtomicron: 0.0001\n")
# Wavelengths in vacuum (= vac) or air (= air)
fileout.write("vac_air: air\n")
# TODO: input from configuration file for molecfit installation path
# ASCII or FITS table for wavelength ranges in micron to be fitted
# (path relative to the current directory or absolute path; default: "none")
fileout.write("wrange_include: " + filename_include + "\n")
# ASCII or FITS table for wavelength ranges in micron to be excluded from the
# fit (path relative to the current directory or absolute path; default: "none")
# wrange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_w.dat
# ASCII or FITS table for pixel ranges to be excluded from the fit
# (path relative to the current directory or absolute path; default: "none")
# prange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_p.dat
## RESULTS
# Directory for output files (path relative to the current directory or absolute path)
fileout.write("output_dir:./\n")
# Name for output files
# (supplemented by "_fit" or "_tac" as well as ".asc", ".atm", ".fits",
# ".par, ".ps", and ".res")
fileout.write("output_name: "+ filename_output + "\n")
# Plot creation: gnuplot is used to create control plots
# W - screen output only (incorporating wxt terminal in gnuplot)
# X - screen output only (incorporating x11 terminal in gnuplot)
# P - postscript file labelled '<output_name>.ps', stored in <output_dir>
# combinations possible, i.e. WP, WX, XP, WXP (however, keep the order!)
# all other input: no plot creation is performed
fileout.write("plot_creation: none\n")
# Create plots for individual fit ranges? -- 1 = yes; 0 = no
fileout.write("plot_range: 0\n")
## FIT PRECISION
# Relative chi2 convergence criterion
fileout.write("ftol: " + molecfit_dict['ftol'] + "\n")
# Relative parameter convergence criterion
fileout.write("xtol: " + molecfit_dict['xtol'] + "\n")
## MOLECULAR COLUMNS
# List of molecules to be included in the model
# (default: 'H2O', N_val: nmolec)
molecules_list = "list_molec:"
for mol in molecfit_dict['molecules']:
molecules_list += " " + mol
fileout.write(molecules_list + "\n")
# Fit flags for molecules -- 1 = yes; 0 = no (N_val: nmolec)
fileout.write("fit_molec: 1 1\n")
# Values of molecular columns, expressed relatively to the input ATM profile
# columns (N_val: nmolec) [1 = 100%]
fileout.write("relcol: 1.0 1.0\n")
## BACKGROUND AND CONTINUUM
# Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only) to
# flux unit of observed spectrum:
# 0: phot/(s*m^2*mum*as^2) [no conversion]
# 1: W/(m^2*mum*as^2)
# 2: erg/(s*cm^2*A*as^2)
# 3: mJy/as^2
# For other units the conversion factor has to be considered as constant term
# of the continuum fit.
fileout.write("flux_unit: 0\n")
# Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only)
fileout.write("fit_back: 0\n")
# Initial value for telescope background fit (range: [0,1])
fileout.write("telback: 0.1\n")
# Polynomial fit of continuum --> degree: cont_n
fileout.write("fit_cont: 1\n")
# Degree of coefficients for continuum fit
fileout.write("cont_n: {0:1.0f}".format(molecfit_dict['cont_n']) + "\n")
# Initial constant term for continuum fit (valid for all fit ranges)
# (emission spectrum: about 1 for correct flux_unit)
fileout.write("cont_const: {0:1.0f}".format(molecfit_dict['cont_const']) + "\n")
## WAVELENGTH SOLUTION
# Refinement of wavelength solution using a polynomial of degree wlc_n
fileout.write("fit_wlc: 1\n")
# Polynomial degree of the refined wavelength solution
fileout.write("wlc_n: {0:1.0f}".format(molecfit_dict['wlc_n']) + "\n")
# Initial constant term for wavelength correction (shift relative to half
# wavelength range)
fileout.write("wlc_const: {0:1.0f}".format(molecfit_dict['wlc_const']) + "\n")
## RESOLUTION
# Fit resolution by boxcar -- 1 = yes; 0 = no
fileout.write("fit_res_box: 0\n")
# Initial value for FWHM of boxcar relative to slit width (>= 0. and <= 2.)
fileout.write("relres_box: 0.0\n")
# Voigt profile approximation instead of independent Gaussian and Lorentzian
# kernels? -- 1 = yes; 0 = no
fileout.write("kernmode: 0\n")
# Fit resolution by Gaussian -- 1 = yes; 0 = no
fileout.write("fit_res_gauss: 1\n")
# Initial value for FWHM of Gaussian in pixels
fileout.write("res_gauss: {0:3.1f}".format(molecfit_dict['res_gauss']) + "\n")
# Fit resolution by Lorentzian -- 1 = yes; 0 = no
fileout.write("fit_res_lorentz: 0\n")
# Initial value for FWHM of Lorentzian in pixels
fileout.write("res_lorentz: 0.0\n")
# Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
fileout.write("kernfac: {0:3.0f}".format(molecfit_dict['kernfac']) + "\n")
# Variable kernel (linear increase with wavelength)? -- 1 = yes; 0 = no
fileout.write("varkern: 0\n")
# ASCII file for kernel elements (one per line; normalisation not required)
# instead of synthetic kernel consisting of boxcar, Gaussian, and Lorentzian
# components (path relative to the current directory or absolute path; default: "none\n")
fileout.write("kernel_file: none\n")
## AMBIENT PARAMETERS
# If the input data file contains a suitable FITS header, the keyword names of
# the following parameters will be read, but the corresponding values will not
# be used. The reading of parameter values from this file can be forced by
# setting keywords to NONE.
# Observing date in years or MJD in days
# changed the format (from {0:13.5f}) in order to not have molecfit crash
#fileout.write("obsdate: {0:13.5f}".format(observing_dict['MJD']) + "\n")
fileout.write("obsdate: {0:13.50}".format(observing_dict['MJD']) + "\n")
fileout.write("obsdate_key: NONE\n")
# UTC in s
fileout.write("utc: {0:8.0f}".format(observing_dict['UTC']) + "\n")
fileout.write("utc_key: NONE\n")
# Telescope altitude angle in deg
fileout.write("telalt: {0:13.5f}".format(observing_dict['ELEVATION']) + "\n")
fileout.write("telalt_key: NONE\n")
# Humidity in %
fileout.write("rhum: {0:13.5f}".format(observing_dict['HUMIDITY']) + "\n")
fileout.write("rhum_key: NONE\n")
# Pressure in hPa
fileout.write("pres: {0:5.1f}".format(observing_dict['PRESSURE']) + "\n")
fileout.write("pres_key: NONE\n")
# Ambient temperature in deg C
fileout.write("temp: {0:4.1f}".format(observing_dict['TEMPERATURE_EN']) + "\n")
fileout.write("temp_key: NONE\n")
# Mirror temperature in deg C
fileout.write("m1temp: {0:4.1f}".format(observing_dict['TEMPERATURE_M1']) + "\n")
fileout.write("m1temp_key: NONE\n")
# Elevation above sea level in m (default is Paranal: 2635m)
fileout.write("geoelev: {0:4.0f}".format(observing_dict['GEOELEV']) + "\n")
fileout.write("geoelev_key: NONE\n")
# Longitude (default is Paranal: -70.4051)
fileout.write("longitude: {0:9.4f}".format(observing_dict['GEOLONG']) + "\n")
fileout.write("longitude_key: NONE\n")
# Latitude (default is Paranal: -24.6276)
fileout.write("latitude: {0:9.4f}".format(observing_dict['GEOLAT']) + "\n")
fileout.write("latitude_key: NONE\n")
## INSTRUMENTAL PARAMETERS
# Slit width in arcsec (taken from FITS header if present)
fileout.write("slitw: {0:3.1f}".format(molecfit_dict['slitwidth']) + "\n")
fileout.write("slitw_key: NONE\n")
# Pixel scale in arcsec (taken from this file only)
fileout.write("pixsc: {0:4.2f}".format(molecfit_dict["pixelscale"]) + "\n")
fileout.write("pixsc_key: NONE\n")
## ATMOSPHERIC PROFILES
# Reference atmospheric profile
fileout.write("ref_atm: equ.atm\n")
# Specific GDAS-like input profile (P[hPa] HGT[m] T[K] RELHUM[%]) (path
# relative to the installation directory or absolute path). In the case of "none", no GDAS
# profiles will be considered. The default "auto" performs an automatic
# retrieval.
fileout.write("gdas_dir: data/profiles/grib\n")
fileout.write("gdas_prof: auto\n")
# Grid of layer heights for merging ref_atm and GDAS profile. Fixed grid = 1
# (default) and natural grid = 0.
fileout.write("layers: 0\n")
# Upper mixing height in km (default: 5) for considering data of a local meteo
# station. If emix is below geoelev, rhum, pres, and temp are not used for
# modifying the corresponding profiles.
fileout.write("emix: 5.0\n")
# PWV value in mm for the input water vapour profile. The merged profile
# composed of ref_atm, GDAS, and local meteo data will be scaled to this value
# if pwv > 0 (default: -1 -> no scaling).
fileout.write("pwv: -1.\n")
# internal GUI specific parameter
fileout.write("clean_mflux: 1\n")
fileout.write("end\n")
fileout.close()
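# Minimal sketch (not part of the original module): the dictionaries expected by
# write_molecfit_v1_par, with illustrative values only; the keys are the ones
# actually read by the function above, and two molecules are used because the
# v1 writer hard-codes fit_molec/relcol for two species.
def _example_write_molecfit_v1_par():
    molecfit_dict = {
        'ftol': "1e-9", 'xtol': "1e-9",          # written verbatim in the .par file
        'molecules': ['H2O', 'O2'],
        'cont_n': 3, 'cont_const': 1,
        'wlc_n': 2, 'wlc_const': 0,
        'res_gauss': 4.8, 'kernfac': 15,
        'slitwidth': 1.0, 'pixelscale': 0.16,
    }
    observing_dict = {
        'MJD': 58849.5, 'UTC': 43200.,
        'ELEVATION': 55.0, 'HUMIDITY': 25.0, 'PRESSURE': 770.0,
        'TEMPERATURE_EN': 12.0, 'TEMPERATURE_M1': 11.5,
        'GEOELEV': 2387., 'GEOLONG': -17.889, 'GEOLAT': 28.754,
    }
    write_molecfit_v1_par('molecfit_example.par', 'input_spectrum.fits',
                          'output_example', 'wave_include.dat',
                          molecfit_dict, observing_dict)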
def write_molecfit_par(filename_par, wave_include, molecfit_dict, observing_dict):
""" The observing_dict must contains these keywords:
'MJD': Observing date in years or MJD in days
'UTC': UTC in s
'ELEVATION': Telescope altitude angle in deg
'HUMIDITY': Humidity in %
'PRESSURE': Pressure in hPa
'TEMPERATURE_EN': Ambient temperature in deg C
'TEMPERATURE_M1': Mirror temperature in deg C
'GEOELEV': Elevation above sea level in m (default is Paranal: 2635m)
'GEOLONG': Longitude
'GEOLAT': Latitude
"""
fileout = open(filename_par, 'w')
# File: molecfitConfigFiles/model.rc
#
# Note: This configuration file has been automatically
# generated by the esorex (v3.13.5) program.
#
# Date: 25-Jun-2022 16:52:38
#
#
# --USE_ONLY_INPUT_PRIMARY_DATA
# Value=TRUE implies that only the fits primary contains the input science flux
# data.
# Value=FALSE implies that the fits extensions also contains input science
# flux data.
fileout.write('USE_ONLY_INPUT_PRIMARY_DATA=FALSE\n')
# --USE_DATA_EXTENSION_AS_DFLUX
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# errors of the science flux data (DFLUX). A value of 0 implies that there is
# no DFLUX.
fileout.write('USE_DATA_EXTENSION_AS_DFLUX=0\n')
# --USE_DATA_EXTENSION_AS_MASK
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# mask associated with the science flux data. A value of 0 implies that there
# is no mask data.
fileout.write('USE_DATA_EXTENSION_AS_MASK=0\n')
# --USE_INPUT_KERNEL
# If TRUE, use the kernel library if it is provided.
fileout.write('USE_INPUT_KERNEL=TRUE\n')
# --MODEL_MAPPING_KERNEL
# Mapping 'STD_MODEL/SCIENCE' - 'MODEL_KERNEL_LIBRARY' [string with ext_number
# comma separated (int)] :
# If set to NULL, check if the TAG[MODEL_MAPPING_KERNEL] FITS BINTABLE values
# is provided.
# The FITS BINTABLE have to one column [KERNEL_LIBRARY_EXT].
fileout.write('MODEL_MAPPING_KERNEL=NULL\n')
## MOLECULAR COLUMNS
# --LIST_MOLEC
# List of molecules to be included in the model. Represented as a comma
# separated
# string of molecule names, e.g. "H2O,CO2,O3".
# If set to NULL, the input TAG[MOLECULES] FITS BINTABLE values have to be
# provided
# where the FITS BINTABLE specified contains the three columns:
# LIST_MOLEC; FIT_MOLEC; and REL_COL.
molecules_list = 'LIST_MOLEC="'
# --FIT_MOLEC
# List of flags that specify which of the listed molecules are to be fitted for.
# Flag=1 implies yes. Flag=0 implies no. Represented as a string of comma
# separated
# integers in the same order as the listed molecules. For example: if
# LIST_MOLEC="H2O,CO2,O3", then
# FIT_MOLEC="1,0,1" implies that only H2O and O3 should be fitted for.
# If set to NULL, the input TAG[MOLECULES] FITS BINTABLE values have to be
# provided where the FITS
# BINTABLE specified contains the three columns: LIST_MOLEC; FIT_MOLEC; and
# REL_COL.
molecules_flag = 'FIT_MOLEC="'
# --REL_COL
# List of the intial values of fitting of the molecular columns expressed
# relatively to the input
# ATM profile columns. Represented as a comma separated list of doubles in
# the same order as the
# listed molecules. For example, if LIST_MOLEC="H2O,CO2,O3", then
# REL_COL="1.0,1.2,0.8"
# implies that H2O, CO2 and O3 have initial relative values of 1.0, 1.2 and
# 0.8 respectively.
# If set to NULL, the input TAG[MOLECULES] FITS BINTABLE values have to be
# provided where the FITS
# BINTABLE specified contains the three columns: LIST_MOLEC; FIT_MOLEC; and
# REL_COL.
molecules_rel = 'REL_COL="'
for mol in molecfit_dict['molecules']:
molecules_list += mol
molecules_list += ','
molecules_flag += '1,'
molecules_rel += '1.0,'
molecules_list = molecules_list[:-1] + '"'
molecules_flag = molecules_flag[:-1] + '"'
molecules_rel = molecules_rel[:-1] + '"'
fileout.write(molecules_list + '\n')
fileout.write(molecules_flag + '\n')
fileout.write(molecules_rel + '\n')
# --WAVE_INCLUDE
# Wavelength ranges to be included. Represented as a string of comma separated
# doubles in pairs
# specifying the start and end wavelengths of a range. The wavelength units
# are always in microns.
# For example a KMOS sample data in the range of 1.11um to 1.67um may have
# WAVE_INCLUDE="1.773,1.78633,1.79098,1.80434,1.187691,1.189937" to represent
# three inclusion regions:
# [1.773,1.78633], [1.79098,1.80434] and [1.187691,1.189937].
# If set to NULL, molecfit will check if the TAG[WAVE_INCLUDE] FITS BINTABLE
# values is provided where
# the FITS BINTABLE specified has the two columns: LOWER_LIMIT; and
# UPPER_LIMIT.
fileout.write('WAVE_INCLUDE='+wave_include+'\n')
# --WAVE_EXCLUDE
# Wavelength ranges to be excluded. Represented as a string of comma separated
# doubles in pairs
# specifying the start and end wavelengths of a range. The wavelength units
# are always in microns.
# as the input science data. For example a KMOS sample data in the range of
# 1.11um to 1.67um may have
# WAVE_EXCLUDE="1.773,1.78633,1.79098,1.80434,1.187691,1.189937" to represent
# three exclusion regions:
# [1.773,1.78633], [1.79098,1.80434] and [1.187691,1.189937].
# If set to NULL, molecfit will check if the TAG[WAVE_EXCLUDE] FITS BINTABLE
# values is provided where
# the FITS BINTABLE specified has the two columns: LOWER_LIMIT; and
# UPPER_LIMIT.
fileout.write('WAVE_EXCLUDE=NULL\n')
# --PIXEL_EXCLUDE
# Pixel ranges to be excluded. Represented as a string of comma separated
# integers in pairs specifying the
# start and end pixel of a range. For example:
# PIXEL_EXCLUDE="54,128,512,514,1020,1024" represents three
# exclusion regions: [54,128], [512,514] and [1020,1024].
# If set to NULL, molecfit will check if the TAG[PIXEL_EXCLUDE] FITS BINTABLE
# values is provided where the
# FITS BINTABLE specified has the two columns: LOWER_LIMIT; and UPPER_LIMIT.
fileout.write('PIXEL_EXCLUDE=NULL\n')
# --TELLURICCORR_PATH
# Installation directory.
fileout.write('TELLURICCORR_PATH=TELLURICCORR_PARAMETER_DEFAULT\n')
# --TELLURICCORR_DATA_PATH
# Data directory.
fileout.write('TELLURICCORR_DATA_PATH=TELLURICCORR_PARAMETER_DEFAULT\n')
# --TMP_PATH
# Temporary directory.
fileout.write('TMP_PATH=TELLURICCORR_PARAMETER_DEFAULT\n')
# --SILENT_EXTERNAL_BINS
# Silent the output of the external binaries.
fileout.write('SILENT_EXTERNAL_BINS=TRUE\n')
# --TRANSMISSION
# Type of input spectrum : 0 = Emission(radiance); 1 = Transmission.
fileout.write('TRANSMISSION=TRUE\n')
# --COLUMN_LAMBDA
# Wavelength column ('NULL' can be used if the file is an image and that
# the data are in the primary
# (data are given by the FITS header keywords [CRVAL1=wave_ini, CD1_1=step])
# If CD1_1 is absent, then the DEPRECATED CDELT1 keyword will be used.
fileout.write('COLUMN_LAMBDA=lambda\n')
# --COLUMN_FLUX
# Flux column.
fileout.write('COLUMN_FLUX=flux\n')
# --COLUMN_DFLUX
# Flux error column (Avoided by writing 'NULL') : 1-sigma error on the flux.
fileout.write('COLUMN_DFLUX=NULL\n')
# --COLUMN_MASK
# Mask column (Avoided by writing 'NULL') : Indicates if a pixel is invalid.
fileout.write('COLUMN_MASK=NULL\n')
# --DEFAULT_ERROR
# Default error relative to mean for the case that the error column
# is not provided.
fileout.write('DEFAULT_ERROR=0.01\n')
# --WLG_TO_MICRON
# Multiplicative factor applied to the wavelength to express it in micron.
# E.g.: if wavelength is given in nm, the value should be 0.001.
fileout.write('WLG_TO_MICRON=1.\n')
# --WAVELENGTH_FRAME
# Wavelength in vacuum = 'VAC'.
# Wavelength in air with the observatory reference frame = 'AIR'.
# Wavelength in vacuum with another reference frame = 'VAC_RV'.
# (typically the sun or the barycenter of the solar system).
# In the latter case, the radial velocity of the observatory relative
# to the external reference frame must be provided in the parameter obs_RV.
fileout.write('WAVELENGTH_FRAME=AIR\n')
# TODO should I convert everything in vacuum or air??
# --OBS_ERF_RV_KEY
# The radial velocity of the observatory in km/s
# relative to the external reference frame;
# It is positive if the distance between the science target and the Earth
# increases along the line-of-sight to the science target.
# It must be provided if MF_PARAMETERS_WAVELENGTH_FRAME = 'VAC_RV'.
fileout.write('OBS_ERF_RV_KEY=NONE\n')
# --OBS_ERF_RV_VALUE
# If OBS_ERF_RV_KEYWORD=='NONE' take this value.
fileout.write('OBS_ERF_RV_VALUE=0.0\n')
# --CLEAN_MODEL_FLUX
# Set model flux to 0 for non-fitted pixels.
fileout.write('CLEAN_MODEL_FLUX=FALSE\n')
# --FTOL
# Relative chi-square convergence criterion.
# FTOL=1e-10
try:
fileout.write('FTOL={0:.12f}\n'.format(molecfit_dict['ftol']))
except ValueError:
fileout.write("FTOL=" + molecfit_dict['ftol'] + "\n")
# --XTOL
# Relative parameter convergence criterion.
# XTOL=1e-10
try:
fileout.write('XTOL={0:.12f}\n'.format(molecfit_dict['xtol']))
# Relative chi2 convergence criterion
except ValueError:
fileout.write("XTOL=" + molecfit_dict['xtol'] + "\n")
# --FLUX_UNIT
# Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only)
# to flux unit of observed spectrum:
# 0: phot / (s * m^2 * mum * as^2) [no conversion]
# 1: W / ( m^2 * mum * as^2)
# 2: erg / (s * cm^2 * A * as^2)
# 3: mJy / ( as^2)
# For other units, the conversion factor has to be considered
# as constant term of the continuum fit.
fileout.write('FLUX_UNIT=0\n')
# --FIT_TELESCOPE_BACKGROUND
# Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only).
fileout.write('FIT_TELESCOPE_BACKGROUND=TRUE\n')
#todo check this, it was zero in previous file format
# --TELESCOPE_BACKGROUND_CONST
# Initial value for telescope background fit.
fileout.write('TELESCOPE_BACKGROUND_CONST=0.1\n')
# --FIT_CONTINUUM
# Comma deliminated string of flags (1=true, 0=false) for fitting continuum in
# specific regions.
# If set to NULL, check if the TAG[WAVE_INCLUDE] points to a FITS BINTABLE
# with column CONT_FIT_FLAG provided.
fileout.write('FIT_CONTINUUM=1\n')
# --CONTINUUM_N
# Polynomial order for continuum model for each region. Presented as a comma
# deliminated string.
# If set to NULL, check if the TAG[WAVE_INCLUDE] points to a FITS BINTABLE
# with column CONT_POLY_ORDER provided.
fileout.write('CONTINUUM_N={0:1.0f}\n'.format(molecfit_dict['cont_n']))
# --CONTINUUM_CONST
# Initial constant term for continuum fit (valid for all fit ranges)
# [emission spectrum: about 1 for correct flux_unit].
fileout.write('CONTINUUM_CONST={0:10f}\n'.format(molecfit_dict['cont_const']))
# --FIT_WLC
# Flags for including regions in wavelength corrections.
fileout.write('FIT_WLC=1\n')
# --WLC_N
# Polynomial degree of the refined wavelength solution.
fileout.write('WLC_N={0:1.0f}\n'.format(molecfit_dict['wlc_n']))
# --WLC_CONST
# Initial constant term for wavelength adjustment
# (shift relative to half wavelength range).
fileout.write('WLC_CONST={0:10f}\n'.format(molecfit_dict['wlc_const']))
# --FIT_RES_BOX
# Fit resolution by Boxcar LSF.
fileout.write('FIT_RES_BOX=FALSE\n')
# --RES_BOX
# Initial value for FWHM of Boxcar rel. to slit width
# at the centre of the spectrum.
fileout.write('RES_BOX=0.0\n')
# --FIT_RES_GAUSS
# Fit resolution by Gaussian.
fileout.write('FIT_RES_GAUSS=TRUE\n')
# --RES_GAUSS
# Initial value for FWHM of the Gaussian in pixels
# at the centre of the spectrum.
fileout.write('RES_GAUSS={0:10f}\n'.format(molecfit_dict['res_gauss']))
# --FIT_RES_LORENTZ
# Fit resolution by Lorentzian.
fileout.write('FIT_RES_LORENTZ=FALSE\n')
# --RES_LORENTZ
# Initial value for FWHM of the Lorentz in pixels
# at the centre of the spectrum.
fileout.write('RES_LORENTZ=0.0\n')
# --KERNMODE
# Voigtian profile approximation instead of independent Gaussian and
# Lorentzian?.
fileout.write('KERNMODE=FALSE\n')
# --KERNFAC
# Size of Voigtian/Gaussian/Lorentzian kernel in FWHM.
fileout.write('KERNFAC={0:10f}\n'.format(molecfit_dict['kernfac']))
# --VARKERN
# Does the kernel size increase linearly with wavelength?.
fileout.write('VARKERN=TRUE\n')
#TODO: check why it was FALSE in the previous iteration
# --OBSERVING_DATE_KEYWORD
# Observing date in years or MJD in days (not string).
fileout.write('OBSERVING_DATE_KEYWORD=NONE\n')
# --OBSERVING_DATE_VALUE
# If OBSERVING_DATE_KEYWORD=='NONE' take this value.
fileout.write('OBSERVING_DATE_VALUE={0:30f}\n'.format(observing_dict['MJD']))
# --UTC_KEYWORD
# UTC in s.
fileout.write('UTC_KEYWORD=NONE\n')
# --UTC_VALUE
# If UTC_KEYWORD=='NONE' take this value.
fileout.write('UTC_VALUE={0:30f}\n'.format(observing_dict['UTC']))
# --TELESCOPE_ANGLE_KEYWORD
# Telescope altitude angle in deg.
fileout.write('TELESCOPE_ANGLE_KEYWORD=NONE\n')
# --TELESCOPE_ANGLE_VALUE
# If TELESCOPE_ANGLE_KEYWORD=='NONE' take this value.
fileout.write('TELESCOPE_ANGLE_VALUE={0:30f}\n'.format(observing_dict['ELEVATION']))
# --RELATIVE_HUMIDITY_KEYWORD
# Relative humidity in %.
fileout.write('RELATIVE_HUMIDITY_KEYWORD=NONE\n')
# --RELATIVE_HUMIDITY_VALUE
# If RELATIVE_HUMIDITY_KEYWORD=='NONE' take this value.
fileout.write('RELATIVE_HUMIDITY_VALUE={0:30f}\n'.format(observing_dict['HUMIDITY']))
# --PRESSURE_KEYWORD
# Pressure in hPa.
fileout.write('PRESSURE_KEYWORD=NONE\n')
# --PRESSURE_VALUE
# If PRESSURE_KEYWORD=='NONE' take this value.
fileout.write('PRESSURE_VALUE={0:5.1f}\n'.format(observing_dict['PRESSURE']))
# --TEMPERATURE_KEYWORD
# Ambient temperature in deg C.
fileout.write('TEMPERATURE_KEYWORD=NONE\n')
# --TEMPERATURE_VALUE
# If TEMPERATURE_KEYWORD=='NONE' take this value.
fileout.write('TEMPERATURE_VALUE={0:4.1f}\n'.format(observing_dict['TEMPERATURE_EN']))
# --MIRROR_TEMPERATURE_KEYWORD
# Mirror temperature in deg C.
fileout.write('MIRROR_TEMPERATURE_KEYWORD=NONE\n')
# --MIRROR_TEMPERATURE_VALUE
# If MIRROR_TEMPERATURE_KEYWORD=='NONE' take this value.
fileout.write('MIRROR_TEMPERATURE_VALUE={0:4.1f}\n'.format(observing_dict['TEMPERATURE_M1']))
# --ELEVATION_KEYWORD
# Elevation above sea level in m (default is Paranal: 2635. m).
fileout.write('ELEVATION_KEYWORD=NONE\n')
# --ELEVATION_VALUE
# If ELEVATION_KEYWORD=='NONE' take this value.
fileout.write('ELEVATION_VALUE={0:30f}\n'.format(observing_dict['GEOELEV']))
# --LONGITUDE_KEYWORD
# Longitude (default is Paranal: -70.4051 deg).
fileout.write('LONGITUDE_KEYWORD=NONE\n')
# --LONGITUDE_VALUE
# If LONGITUDE_KEYWORD=='NONE' take this value.
fileout.write('LONGITUDE_VALUE={0:9.4f}\n'.format(observing_dict['GEOLONG']))
# --LATITUDE_KEYWORD
# Latitude (default is Paranal: -24.6276 deg).
fileout.write('LATITUDE_KEYWORD=NONE\n')
# --LATITUDE_VALUE
# If LATITUDE_KEYWORD=='NONE' take this value.
fileout.write('LATITUDE_VALUE={0:9.4f}\n'.format(observing_dict['GEOLAT']))
# --SLIT_WIDTH_KEYWORD
# Slit width in arcsec (taken from FITS header if present).
fileout.write('SLIT_WIDTH_KEYWORD=NONE\n')
# --SLIT_WIDTH_VALUE
# If SLIT_WIDTH_KEYWORD=='NONE' take this value.
fileout.write('SLIT_WIDTH_VALUE={0:3.1f}\n'.format(molecfit_dict['slitwidth']))
# --PIX_SCALE_KEYWORD
# Pixel scale in arcsec (taken from this file only).
fileout.write('PIX_SCALE_KEYWORD=NONE\n')
# --PIX_SCALE_VALUE
# If PIX_SCALE_KEYWORD=='NONE' take this value.
fileout.write('PIX_SCALE_VALUE={0:4.12}\n'.format(molecfit_dict['pixelscale']))
# --REFERENCE_ATMOSPHERIC
# Reference atmospheric profile. Possible values:
# - equ_atm (default; equatorial atmosphere, valid for Paranal);
# - tro_atm (tropical atmosphere);
# - std_atm (standard atmosphere);
# - Other file located in :
# ({TELLURICCORR_DATA_PATH}/profiles/mipas/).
fileout.write('REFERENCE_ATMOSPHERIC=equ.fits\n')
# --GDAS_PROFILE
# Specific GDAS-like input profile (P[hPa] HGT[m] T[K] RELHUM[%])
# (if starts with /, absolute path, otherwise path relative to basedir).
# In the case of 'none', no GDAS profiles will be considered.
# The value 'auto' performs an automatic retrieval.
fileout.write('GDAS_PROFILE=auto\n')
# --LAYERS
# Grid of layer heights for merging ref_atm and GDAS profile.
# Fixed grid = CPL_TRUE and natural grid = CPL_FALSE.
fileout.write('LAYERS=TRUE\n')
# --EMIX
# Upper mixing height in km for considering data of a local meteo station.
# If emix is below geoelev, rhum, pres, and temp are not used
# for modifying the corresponding profiles.
fileout.write('EMIX=5.0\n')
# --PWV
# PWV value in mm for the input water vapor profile.
# The merged profile composed of ref_atm, GDAS, and local meteo data
# will be scaled to this value if pwv > 0 (default: -1 -> no scaling).
fileout.write('PWV=-1.0\n')
# --LNFL_LINE_DB
# File name of the line list (must be stored in the directory :
# ({TELLURICCORR_DATA_PATH}/hitran/).
fileout.write('LNFL_LINE_DB=aer_v_'+molecfit_dict['aer_version']+'\n')
# --LNFL_LINE_DB_FORMAT
# Format of the line file: gives the length in terms of characters per line.
fileout.write('LNFL_LINE_DB_FORMAT=100.0\n')
# --LBLRTM_ICNTNM
# Continua and Rayleigh extinction [0,1,2,3,4,5].
fileout.write('LBLRTM_ICNTNM=5\n')
# --LBLRTM_IAERSL
# Aerosols [0,1].
fileout.write('LBLRTM_IAERSL=0\n')
# --LBLRTM_MPTS
# Number of optical depth values.
fileout.write('LBLRTM_MPTS=5\n')
# --LBLRTM_NPTS
# Number of values for each panel.
fileout.write('LBLRTM_NPTS=5\n')
# --LBLRTM_V1
# Beginning wavenumber value for the calculation.
fileout.write('LBLRTM_V1=1.9\n')
# --LBLRTM_V2
# Ending wavenumber value for the calculation.
fileout.write('LBLRTM_V2=2.4\n')
# --LBLRTM_SAMPLE
# Number of sample points per mean halfwidth [between 1 to 4, default=4].
fileout.write('LBLRTM_SAMPLE=4\n')
# --LBLRTM_ALFAL0
# Average collision broadened halfwidth [cm-1/atm].
fileout.write('LBLRTM_ALFAL0=0.0\n')
# --LBLRTM_AVMASS
# Average molecular mass [amu] for Doppler halfwidth.
fileout.write('LBLRTM_AVMASS=0.0\n')
# --LBLRTM_DPTMIN
# Minimum molecular optical depth below which lines will be rejected.
fileout.write('LBLRTM_DPTMIN=0.0002\n')
# --LBLRTM_DPTFAC
# Factor multiplying molecular continuum optical depth.
fileout.write('LBLRTM_DPTFAC=0.001\n')
# --LBLRTM_TBOUND
# Temperature of boundary [K].
fileout.write('LBLRTM_TBOUND=0.0\n')
# --LBLRTM_SREMIS1
# Emissivity coefficient 1.
fileout.write('LBLRTM_SREMIS1=0.0\n')
# --LBLRTM_SREMIS2
# Emissivity coefficient 2.
fileout.write('LBLRTM_SREMIS2=0.0\n')
# --LBLRTM_SREMIS3
# Emissivity coefficient 3.
fileout.write('LBLRTM_SREMIS3=0.0\n')
# --LBLRTM_SRREFL1
# Reflectivity coefficient 1.
fileout.write('LBLRTM_SRREFL1=0.0\n')
# --LBLRTM_SRREFL2
# Reflectivity coefficient 2.
fileout.write('LBLRTM_SRREFL2=0.0\n')
# --LBLRTM_SRREFL3
# Reflectivity coefficient 3.
fileout.write('LBLRTM_SRREFL3=0.0\n')
# --LBLRTM_MODEL
# Atmospheric profile [0,1,2,3,4,5,6].
fileout.write('LBLRTM_MODEL=0\n')
# --LBLRTM_ITYPE
# Type of path [1,2,3].
fileout.write('LBLRTM_ITYPE=3\n')
# --LBLRTM_NOZERO
# Zeroing of small amounts of absorbers [0,1].
fileout.write('LBLRTM_NOZERO=0\n')
# --LBLRTM_NOPRNT
# Do not print output? [0,1].
fileout.write('LBLRTM_NOPRNT=0\n')
# --LBLRTM_IPUNCH
# Write out layer data to TAPE7 [0,1].
fileout.write('LBLRTM_IPUNCH=0\n')
# --LBLRTM_RE
# Radius of earth [km].
fileout.write('LBLRTM_RE=0.0\n')
# --LBLRTM_HSPACE
# Altitude definition for space [km].
fileout.write('LBLRTM_HSPACE=120.0\n')
# --LBLRTM_H2
# Upper height limit [km].
fileout.write('LBLRTM_H2=0.0\n')
# --LBLRTM_RANGE
# Length of a straight path from H1 to H2 [km].
fileout.write('LBLRTM_RANGE=0.0\n')
# --LBLRTM_BETA
# Earth centered angle from H1 to H2 [degrees].
fileout.write('LBLRTM_BETA=0.0\n')
# --LBLRTM_LEN
# Path length [0,1].
fileout.write('LBLRTM_LEN=0\n')
# --LBLRTM_HOBS
# Height of observer.
fileout.write('LBLRTM_HOBS=0.0\n')
# --LBLRTM_AVTRAT
# Maximum Voigt width ratio across a layer.
fileout.write('LBLRTM_AVTRAT=2.0\n')
# --LBLRTM_TDIFF1
# Maximum layer temperature difference at ALTD1 [K].
fileout.write('LBLRTM_TDIFF1=5.0\n')
# --LBLRTM_TDIFF2
# Maximum layer temperature difference at ALTD2 [K].
fileout.write('LBLRTM_TDIFF2=8.0\n')
# --LBLRTM_ALTD1
# Altitude of TDIFF1 [km].
fileout.write('LBLRTM_ALTD1=0.0\n')
# --LBLRTM_ALTD2
# Altitude of TDIFF2 [km].
fileout.write('LBLRTM_ALTD2=0.0\n')
# --LBLRTM_DELV
# Number of wavenumbers [cm-1] per major division.
fileout.write('LBLRTM_DELV=1.0\n')
# --EXPERT_MODE
# If set to true, will check if TAG[INIT_FIT_PARAMETERS] points to a fits file
# with a bintable of parameter values to use as initial values for the
# fitting process.
fileout.write('EXPERT_MODE=FALSE\n')
# --CHIP_EXTENSIONS
# Flag that determines if image extensions are to be treated as independent
# science data to be fitted for independently or as CHIP specific subranges
# of a single observation to be fitted for as a single combined spectrum.
# Value = TRUE implies to treat as CHIPS to be combined. Value = FALSE
# implies
# to treat as independent. [FALSE].
fileout.write('CHIP_EXTENSIONS=FALSE\n')
#
# End of file
fileout.close()
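# Minimal sketch (not part of the original module): build the wave_include string
# expected by write_molecfit_par above from a list of (min, max) ranges in
# Angstrom; molecfit wants comma-separated pairs in microns.
def _example_wave_include(ranges_angstrom):
    pairs = []
    for w_min, w_max in ranges_angstrom:
        pairs.append('{0:.6f},{1:.6f}'.format(w_min * 1e-4, w_max * 1e-4))
    return ','.join(pairs)
# _example_wave_include([(5880., 5910.), (6550., 6580.)]) -> "0.588000,0.591000,0.655000,0.658000"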
def write_molecfit_input_spectrum(wave, flux, filename):
#writing the molecfit input fits files
good=range(len(wave))
for i in range(5):
medFlux=np.median(flux[good])
madFlux=np.median(np.abs(flux[good]-medFlux))
good=np.where(flux>(medFlux-5*madFlux))[0]
dflux=np.std(flux[good])
dflux=dflux*np.sqrt(flux)#to scale the error to the signal assuming poissonian noise
#To create a table from scratch, we need to define columns first, by constructing the Column objects and their data.
#Suppose we have two columns, the first containing strings, and the second containing floating point numbers:
col1 = fits.Column(name='lambda', format='E', array=wave*1e-4)
col2 = fits.Column(name='flux', format='E', array=flux)
col3 = fits.Column(name='dflux', format='E', array=dflux)
col4 = fits.Column(name='mask', format='I', array=np.zeros(len(wave))) # same length as the other columns; 0 = valid pixel
#Now, create a new binary table HDU object by using the BinTableHDU.from_columns() function:
#hdu = fits.BinTableHDU.from_columns(cols)
hdu = fits.BinTableHDU.from_columns([col1, col2, col3, col4])
#The data structure used to represent FITS tables is called a FITS_rec and is derived from the numpy.recarray interface.
#When creating a new table HDU the individual column arrays will be assembled into a single FITS_rec array.
#Now you may write this new table HDU directly to a FITS file like so:
hdu.writeto(filename, overwrite=True)
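# Usage sketch (not part of the original module): write a synthetic 1D spectrum
# (wavelength in Angstrom, converted to micron inside the routine) to the FITS
# table expected by molecfit. The numbers are purely illustrative.
def _example_write_molecfit_input_spectrum():
    wave = np.linspace(5880.0, 5920.0, 2000)            # Angstrom
    flux = np.random.normal(1000.0, 10.0, wave.size)    # synthetic counts
    write_molecfit_input_spectrum(wave, flux, 'molecfit_input_example.fits')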
def write_calctrans_par(filename_par):
fileout = open(filename_par, 'w')
# File: /archive/molecfitRootDataDir/calctrans.rc
#
# Note: This configuration file has been automatically
# generated by the esorex (v3.13.3) program.
#
# Date: 17-Jan-2021 18:30:01
#
#
# --USE_ONLY_INPUT_PRIMARY_DATA
# Value=TRUE implies that only the fits primary contains the input science flux
# data.
# Value=FALSE implies that the fits extensions also contains input science
# flux data.
fileout.write('USE_ONLY_INPUT_PRIMARY_DATA=FALSE\n')
# --USE_DATA_EXTENSION_AS_DFLUX
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# errors of the science flux data (DFLUX). A value of 0 implies that there is
# no DFLUX.
fileout.write('USE_DATA_EXTENSION_AS_DFLUX=0\n')
# --USE_DATA_EXTENSION_AS_MASK
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# mask associated with the science flux data. A value of 0 implies that there
# is no mask data.
fileout.write('USE_DATA_EXTENSION_AS_MASK=0\n')
# --USE_INPUT_KERNEL
# If TRUE, use the kernel library if it is provided.
fileout.write('USE_INPUT_KERNEL=TRUE\n')
# --CALCTRANS_MAPPING_KERNEL
# Mapping 'SCIENCE' - 'CALCTRANS_KERNEL_LIBRARY' [string with ext_number comma
# separated (int)] :
# If set to NULL, check if the TAG[CALCTRANS_MAPPING_KERNEL] FITS BINTABLE
# values is provided.
# The FITS BINTABLE have to one column [KERNEL_LIBRARY_EXT].
fileout.write('CALCTRANS_MAPPING_KERNEL=NULL\n')
# --MAPPING_ATMOSPHERIC
# Mapping 'SCIENCE' - 'ATM_PARAMETERS' input [string with ext_number comma
# separated (int)] :
# If set to NULL, check if the TAG[MAPPING_ATMOSPHERIC] FITS BINTABLE value
# is provided.
# The FITS BINTABLE have to one column [ATM_PARAMETERS_EXT].
fileout.write('MAPPING_ATMOSPHERIC=0,1\n')
# --MAPPING_CONVOLVE
# Mapping 'LBLRTM_RESULTS' - 'TELLURIC_CORR' output [string with ext_number
# comma separated (int)] :
# If set to NULL, check if the TAG[MAPPING_CONVOLVE] FITS BINTABLE value is
# provided.
# The FITS BINTABLE have to one column [LBLRTM_RESULTS_EXT].
fileout.write('MAPPING_CONVOLVE=0,1\n')
# --CHIP_EXTENSIONS
# Flag that determines if image extensions are to be treated as independant
# science data to be fitted for independently or as CHIP specific subranges
# of a single observation to be fitted for as a single combined spectrum.
# Value = TRUE implies to treat as CHIPS to be combined. Value = FALSE
# implies
# to treat as independent. [FALSE].
fileout.write('CHIP_EXTENSIONS=FALSE\n')
#
# End of file
fileout.close()
def write_correct_par(filename_par):
fileout = open(filename_par, 'w')
# File: /archive/molecfitRootDataDir/correct.rc
#
# Note: This configuration file has been automatically
# generated by the esorex (v3.13.3) program.
#
# Date: 17-Jan-2021 18:30:12
#
#
# --USE_ONLY_INPUT_PRIMARY_DATA
# Value=TRUE implies that only the fits primary contains the input science flux
# data.
# Value=FALSE implies that the fits extensions also contains input science
# flux data.
fileout.write('USE_ONLY_INPUT_PRIMARY_DATA=FALSE\n')
# --USE_DATA_EXTENSION_AS_DFLUX
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# errors of the science flux data (DFLUX). A value of 0 implies that there is
# no DFLUX.
fileout.write('USE_DATA_EXTENSION_AS_DFLUX=0\n')
# --USE_DATA_EXTENSION_AS_MASK
# Only valid if USE_ONLY_INPUT_PRIMARY_DATA=TRUE. The fits extension index that
# contains the
# mask associated with the science flux data. A value of 0 implies that there
# is no mask data.
fileout.write('USE_DATA_EXTENSION_AS_MASK=0\n')
# --SUPPRESS_EXTENSION
# Suppress arbitrary filename extension : TRUE (apply) or FALSE (don't apply).
fileout.write('SUPPRESS_EXTENSION=FALSE\n')
# --MAPPING_CORRECT
# Mapping 'SCIENCE' - 'TELLURIC_CORR' [string with ext_number comma separated
# (int)] :
# If set to NULL, check if the TAG[MAPPING_CORRECT] FITS BINTABLE value is
# provided.
# The FITS BINTABLE have to one column [TELLURIC_CORR_EXT].
fileout.write('MAPPING_CORRECT=0,1\n')
# --CHIP_EXTENSIONS
# Flag that determines if image extensions are to be treated as independant
# science data to be fitted for independently or as CHIP specific subranges
# of a single observation to be fitted for as a single combined spectrum.
# Value = TRUE implies to treat as CHIPS to be combined. Value = FALSE
# implies
# to treat as independent. [FALSE].
fileout.write('CHIP_EXTENSIONS=FALSE\n')
#
# End of file
fileout.close()
| 53,742 | 35.684642 | 124 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/fit_subroutines.py | from __future__ import print_function, division
import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import curve_fit
from sklearn import linear_model, datasets
def berv_telluric_curvefit(xdata, p0, p1, p2):
return xdata[0] * p0 + xdata[1] * p1 + p2
def berv_linear_curve_fit(airmass, berv, logi_array, sigi_array, n_axis):
C = []
pams = np.zeros(3)-0.1
ltel = np.empty(n_axis)
shift = np.empty(n_axis)
zero = np.empty(n_axis)
airmass_zero = 0. #np.average(airmass)
berv_zero = 0. #np.average(berv)
for ii in range(0, n_axis):
popt, pcov = curve_fit(berv_telluric_curvefit,
[airmass-airmass_zero, berv-berv_zero],
logi_array[:, ii],
p0=pams,
sigma = sigi_array[:, ii],
bounds=([-np.inf, -np.inf, -np.inf], [0.000, np.inf, np.inf]))
ltel[ii] = popt[0]
shift[ii] = popt[1]
zero[ii] = popt[2]
return ltel, shift, zero
def berv_linear_lstsq(airmass, berv, logi_array):
A = np.c_[airmass, berv, np.ones(logi_array.shape[0])]
C, _, _, _ = lstsq(A, logi_array) # coefficients
return C[0], C[1], C[2]
def airmass_telluric_curvefit(xdata, p0, p1):
return xdata * p0 + p1
def airmass_linear_curve_fit_ransac(airmass, logi_array, sigi_array, n_axis):
pams = np.zeros(2)
ltel = np.empty(n_axis)
zero = np.empty(n_axis)
airmass_zero = np.average(airmass)
airmass_reshape = (airmass-airmass_zero).reshape(-1,1)
ransac = linear_model.RANSACRegressor()
for ii in range(0, n_axis):
y_zero = np.average(logi_array[:, ii])
ransac.fit(airmass_reshape, logi_array[:, ii]-y_zero)
ltel[ii] = ransac.estimator_.coef_[0]
zero[ii] = ransac.estimator_.intercept_ + y_zero
return ltel, zero
def airmass_linear_curve_fit(airmass, logi_array, sigi_array, n_axis):
C = []
pams = np.zeros(2)
ltel = np.empty(n_axis)
zero = np.empty(n_axis)
airmass_zero = np.average(airmass)
for ii in range(0, n_axis):
y_zero = np.average(logi_array[:, ii])
popt, pcov = curve_fit(airmass_telluric_curvefit,
airmass-airmass_zero,
logi_array[:, ii]-y_zero,
p0=pams,
sigma = sigi_array[:, ii],
bounds=([-np.inf, -np.inf], [np.inf, np.inf]))
ltel[ii] = popt[0]
zero[ii] = popt[1]
return ltel, zero
def berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, n_axis):
C = []
pams = np.zeros(3)
ltel = np.empty(n_axis)
shift = np.empty(n_axis)
zero = np.empty(n_axis)
airmass_zero = np.average(airmass)
berv_zero = np.average(berv)
for ii in range(0, n_axis):
popt, pcov = curve_fit(berv_telluric_curvefit,
[airmass-airmass_zero, berv-berv_zero],
logi_array[:, ii],
p0=pams,
sigma = sigi_array[:, ii],
bounds=([-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]))
ltel[ii] = popt[0]
shift[ii] = popt[1]
zero[ii] = popt[2]
return ltel, shift, zero
def airmass_linear_lstsq(airmass, logi_array):
A = np.c_[airmass, np.ones(logi_array.shape[0])]
C, _, _, _ = lstsq(A, logi_array) # coefficients
return C[0], C[1]
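# Minimal sketch (not part of the original module): the lstsq helpers above fit,
# for every pixel at once, a straight line of the (log) flux versus airmass, the
# expected behaviour of telluric absorption; the returned arrays are the slope
# and intercept per pixel. Synthetic example with known coefficients:
def _example_airmass_linear_lstsq():
    airmass = np.array([1.1, 1.3, 1.6, 2.0])
    true_slope, true_zero = -0.05, 0.2
    logi_array = true_slope * airmass[:, None] + true_zero + np.zeros((4, 10))
    slope, zero = airmass_linear_lstsq(airmass, logi_array)
    return slope, zero   # each element should be close to -0.05 and 0.2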
| 3,592 | 27.975806 | 94 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/smooth_subroutines.py | from __future__ import print_function, division
import numpy as np
def smooth(x, window_len=5, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) at both ends so that transient parts are minimized
in the beginning and end part of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
t = np.arange(-2, 2, 0.1)
x = np.sin(t) + np.random.randn(len(t))*0.1
y=smooth(x)
see also:
numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
NOTE: length(output) != length(input); to correct this: return y[(window_len//2-1):-(window_len//2)] instead of just y.
# taken from here:
http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len < 3:
return x
if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
raise ValueError("Window can be only one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
if window == 'flat': # moving average
w = np.ones(window_len, 'd')
else:
w = getattr(np, window)(window_len)
y = np.convolve(w / w.sum(), s, mode='valid')
#return y
if np.size(x)==np.size(y):
return y
else:
return y[(window_len//2-1):-(window_len//2)]
| 2,109 | 31.461538 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/object_parameters.py | from __future__ import print_function, division
import numpy as np
import SLOPpy.subroutines.kepler_exo as kp
__all__ = ["StarParameters", "PlanetParameters"]
G_grav = 6.67428e-11 # Gravitational Constants in SI system [m^3/kg/s^2]
M_sun = 1.9884e30 # Value from TRADES
class StarParameters:
"""
This class is just a stub with basic properties, to be expanded in the future
E.g.: stellar mass, radius, activity level, etc.
"""
def __init__(self, mass=None, radius=None):
self.kind = 'star'
self.mass = mass
self.radius = radius
class PlanetParameters:
"""
This class is just a stub with basic properties, to be expanded in the future
E.g.: give a planetary mass and convert it to expected K, and vice versa
"""
def __init__(self):
#array of shape (2), error should be stored here
self.kind = 'planet'
self.star_mass = None
self.star_radius = None
self.reference_epoch = None
self.reference_transit = None
self.period = None
self.phase = None
self.eccentricity = None
self.omega = None
self.RV_semiamplitude = None
self.RVplanet_semiamplitude = None
self.radius = None # planet radius in stellar units
self.area = None
self.mass = None # planet mass (useless for now)
self.inclination = None #orbital inclination
self.impact_parameter = None
self.semimajor_axis = None
self.gamma = None # RV zero point (including systematic RV of the star)
def put_reference_epoch(self, Tref):
self.reference_epoch = Tref
def put_gamma(self, gamma):
self.gamma = gamma
def put_star(self, star):
self.star_mass = star.mass
self.star_radius = star.radius
def put_RVparameters(self, P, K, Tc, e, o, degree=False):
self.period = P
self.RV_semiamplitude = K
self.reference_transit = Tc
self.eccentricity = e
# Angles must be in radians
if degree:
self.omega = o/180. * np.pi
else:
self.omega = o
self.phase = kp.kepler_Tc2phase_Tref(self.period,
self.reference_transit,
self.eccentricity,
self.omega)
def put_Transitparameters(self, Rp, i, b, a, degree=True):
# degree=True by default because inclination is always expressed in degrees
self.radius = Rp
self.inclination = i
self.impact_parameter = b
self.semimajor_axis = a
def put_RVplanet(self, Kplanet):
self.RVplanet_semiamplitude = Kplanet
def get_RV_kms(self, bjd):
return self.get_RV_ms(bjd)/1000.000
def get_RV_ms(self, bjd):
return kp.kepler_RV_T0P(bjd-self.reference_epoch, self.phase, self.period,
self.RV_semiamplitude, self.eccentricity, self.omega)
def get_RVplanet_kms(self, bjd):
return self.get_RVplanet_ms(bjd)/1000.000
def get_RVplanet_ms(self, bjd):
return (-1.)*kp.kepler_RV_T0P(bjd-self.reference_epoch, self.phase, self.period,
self.RVplanet_semiamplitude, self.eccentricity, self.omega)
| 3,322 | 30.647619 | 91 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/bayesian_emcee.py | from SLOPpy.subroutines.common import *
import os
from SLOPpy.subroutines.mcmc_fit_functions import *
from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
#from SLOPpy.subroutines.interpol import interpolate1d_grid_nocheck
from multiprocessing import Pool
import emcee
import time
# define theta pams
def define_theta_array(model_case,line_iter_dict, planet_dict, n_jitter, allow_emission=False):
pams_dict = {} # dictionary containing the index of a given parameter
    pams_list = []  # list with the parameter names, ordered according to their index
boundaries = np.empty([0, 2]) # boundaries for MCMC / nested sampling
theta_start = np.empty(0) # starting point for MCMC
lines_center = np.empty(0) # laboratory wavelength of spectral lines
pam_index = 0 # keep track of the number of variables
for line_key, line_val in line_iter_dict['lines'].items():
pam_name = line_key + '_contrast'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
if allow_emission:
boundaries = np.append(boundaries, [[-0.20, 0.20]], axis=0)
else:
boundaries = np.append(boundaries, [[0.00, 0.20]], axis=0)
theta_start = np.append(theta_start, 0.010)
pam_index += 1
lines_center = np.append(lines_center, line_val)
""" skip the inclusion of FWHM as a free parameter for each line
if the shared FWHM is selected
"""
if model_case in [0, 1, 2, 3, 10, 11, 14, 20, 21, 24]:
# if not line_iter_dict['fit_parameters']['shared_fwhm']:
pam_name = line_key + '_fwhm (km/s)'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 100.00]], axis=0)
theta_start = np.append(theta_start, 5.0)
pam_index += 1
# if line_iter_dict['fit_parameters']['fixed_separation']: continue
# if not line_iter_dict['fit_parameters']['lines_shift']: continue
if model_case in [0, 2, 10, 12, 20, 22]:
pam_name = line_key + '_winds (km/s)'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-25.00, 25.00]], axis=0)
theta_start = np.append(theta_start, 0.00)
pam_index += 1
if model_case in [12, 13, 15, 22, 23, 25]:
# if line_iter_dict['fit_parameters']['shared_fwhm']:
pam_name = 'shared_fwhm (km/s)'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.000, 100.00]], axis=0)
theta_start = np.append(theta_start, 5.000)
pam_index += 1
if model_case in [11, 13, 21, 23]:
# if line_iter_dict['fit_parameters']['fixed_separation'] and line_iter_dict['fit_parameters']['lines_shift']:
pam_name = 'shared_winds (km/s)'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-25.0, 25.0]], axis=0)
theta_start = np.append(theta_start, 0.000)
pam_index += 1
if model_case in [0, 1, 10, 11, 12, 13, 14, 15]:
pams_dict['rp_factor'] = pam_index
pams_list.append('rp_factor')
boundaries = np.append(boundaries, [[0.5, 2.0]], axis=0)
theta_start = np.append(theta_start, 1.0)
pam_index += 1
pams_dict['K_planet (km/s)'] = pam_index
pams_list.append('K_planet (km/s)')
#boundaries = np.append(boundaries,
# [[-300., planet_dict['RV_semiamplitude']
# [0] + 300.]],
# axis=0)
boundaries = np.append(boundaries,
[[planet_dict['RV_semiamplitude'][0] - 75.,
planet_dict['RV_semiamplitude'][0] + 75.]],
axis=0)
'''
boundaries = np.append(boundaries,
[[0.,
200.]],
axis=0)
'''
theta_start = np.append(
theta_start, planet_dict['RV_semiamplitude'][0] / 1000.0)
pam_index += 1
for i_j in range(0,n_jitter):
pam_name = 'jitter_' + repr(i_j)
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[10**(-12), 0.05]], axis=0)
theta_start = np.append(theta_start, 10**(-11))
pam_index += 1
return lines_center, pams_dict, pams_list, boundaries, theta_start
def emcee_lines_fit_functions(model_case,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin):
os.environ["OMP_NUM_THREADS"] = "1"
#""" Avoid starting values out of boundaries """
#for nd in range(0, ndim):
# sel = (point_star[:,nd] <= boundaries[nd,0]) | (point_star[:,nd] >= boundaries[nd,1])
# point_star[sel,nd] = theta_start[nd]
"""
print(np.shape(wave_append))
print(np.shape(flux_append))
print(np.shape(ferr_append))
print(np.shape(nobs_append))
print(np.shape(clv_rm_radius))
print(np.shape(clv_rm_grid))
print(np.shape(rvsys_PRF2ORF_append))
print(np.shape(planet_RVsinusoid_append))
print(np.shape(lines_center))
"""
model_dictionaries = {
0: logprob_case00,
1: logprob_case01,
2: logprob_case02,
3: logprob_case03,
10: logprob_case10,
11: logprob_case11,
12: logprob_case12,
13: logprob_case13,
14: logprob_case14,
15: logprob_case15,
20: logprob_case20,
21: logprob_case21,
22: logprob_case22,
23: logprob_case23,
24: logprob_case24,
25: logprob_case25,
}
logprob = model_dictionaries[model_case]
try:
from pyde.de import DiffEvol
use_pyde = True
except ImportError:
        print(' Warning: PyDE is not installed, using a random initialization point')
use_pyde = False
if ngen <= 1 : use_pyde = False
""" R_p is fixed to 1.0 """
if model_case in [2, 3, 20, 21, 22, 23, 24, 25]:
clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
args_input = (boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict)
else:
args_input = (boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict)
if use_pyde:
start = time.time()
with Pool() as pool:
de = DiffEvol(
logprob,
boundaries,
nwalkers,
maximize=True,
pool=pool,
args=args_input)
de.optimize(ngen)
end = time.time()
print("PyDE global optimization took {0:.1f} seconds".format( end - start))
theta_start = np.median(de.population, axis=0)
point_start = de.population
else:
point_start = theta_start + 1e-4 * np.abs(np.random.randn(nwalkers, ndim))
point_start[0, :] = theta_start
start = time.time()
with Pool() as pool:
sampler = emcee.EnsembleSampler(nwalkers,
ndim,
logprob,
args=args_input,
pool=pool)
population, prob, state = sampler.run_mcmc(point_start,
nsteps,
thin=nthin,
progress=True)
end = time.time()
print()
print("emcee MCMC optimization took {0:.1f} seconds".format(end - start))
return population, sampler.chain, sampler.lnprobability, point_start
def return_model(model_case,
theta,
wave_meshgrid,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index):
ndim = len(theta)
boundaries = np.empty([ndim, 2])
boundaries[:,0] = theta - 1.
boundaries[:,1] = theta + 1.
transmission_spec = np.ones(np.shape(wave_meshgrid))
transmission_spec_err = np.ones(np.shape(wave_meshgrid))
model_dictionaries = {
0: logprob_case00,
1: logprob_case01,
2: logprob_case02,
3: logprob_case03,
10: logprob_case10,
11: logprob_case11,
12: logprob_case12,
13: logprob_case13,
14: logprob_case14,
15: logprob_case15,
20: logprob_case20,
21: logprob_case21,
22: logprob_case22,
23: logprob_case23,
24: logprob_case24,
25: logprob_case25,
}
logprob = model_dictionaries[model_case]
if model_case in [2, 3, 20, 21, 22, 23, 24, 25]:
clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
lines_model, _, lines_array, planet_K, planet_R, jitter = logprob(
theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
{},
return_models=True)
else:
lines_model, clv_model, lines_array, planet_K, planet_R, jitter = logprob(
theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
{},
return_models=True)
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter
def emcee_flatten_median(population, sampler_chain, sampler_lnprobability, nburnin, nthin, nwalkers):
flat_chain = emcee_flatchain(sampler_chain, nburnin, nthin)
flat_lnprob, _ = emcee_flatlnprob(
sampler_lnprobability, nburnin, nthin, population, nwalkers)
lnprob_med = compute_value_sigma(flat_lnprob)
chain_med = compute_value_sigma(flat_chain)
chain_MAP, lnprob_MAP = pick_MAP_parameters(flat_chain, flat_lnprob)
#n_samplings, n_pams = np.shape(flat_chain)
return flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP
def emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim):
print()
    print(' LN posterior: {0:12f} {1:12f} {2:12f} (15-84 p) MAP: {3:12f}'.format(
        lnprob_med[0], lnprob_med[2], lnprob_med[1], lnprob_MAP))
BIC = -2.0 * lnprob_med[0] + np.log(ndata) * ndim
AIC = -2.0 * lnprob_med[0] + 2.0 * ndim
AICc = AIC + (2.0 + 2.0 * ndim) * ndim / (ndata - ndim - 1.0)
print()
print(' Median BIC = {}'.format(BIC))
print(' Median AIC = {}'.format(AIC))
print(' Median AICc = {}'.format(AICc))
BIC_map = -2.0 * lnprob_MAP + np.log(ndata) * ndim
AIC_map = -2.0 * lnprob_MAP + 2.0 * ndim
    AICc_map = AIC_map + (2.0 + 2.0 * ndim) * ndim / (ndata - ndim - 1.0)
print()
print(' MAP BIC = {}'.format(BIC_map))
print(' MAP AIC = {}'.format(AIC_map))
print(' MAP AICc = {}'.format(AICc_map))
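# Quick numeric sketch (arbitrary numbers): with a median log-posterior of -100,
# 1000 data points and 5 free parameters, the call below prints BIC ~ 234.5 and
# AIC = 210 (AICc slightly larger).
def _sketch_BIC_AIC():
    lnprob_med = np.asarray([-100.0, 1.0, 1.0])   # median, upper and lower uncertainty
    emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP=-99.0, ndata=1000, ndim=5)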
def emcee_burnin_check(chain, nburnin, nthin, nwalkers=False):
nburn = int(nburnin / nthin)
modified = False
if not nwalkers:
_, d, _ = np.shape(chain)
else:
v1, v2 = np.shape(chain)
if v1 == nwalkers:
d = v2
else:
d = v1
if nburn >= d * 0.9:
nburn = int(d / 4)
modified = True
return nburn, modified
def emcee_flatchain(chain, nburnin, nthin):
"""flattening of the emcee chains with removal of burn-in"""
nburn, _ = emcee_burnin_check(chain, nburnin, nthin)
s = chain[:, nburn:, :].shape
return chain[:, nburn:, :].reshape(s[0] * s[1], s[2])
def emcee_flatlnprob(lnprob, nburnin, nthin, population, nwalkers):
nburn, _ = emcee_burnin_check(lnprob, nburnin, nthin, nwalkers)
v1, v2 = np.shape(lnprob)
if v1 == nwalkers:
s = lnprob[:, nburn:].shape
return lnprob[:, nburn:].reshape(s[0] * s[1]), lnprob.T
else:
s = lnprob[nburn:, :].shape
return lnprob[nburn:, :].reshape(s[0] * s[1]), lnprob
def GelmanRubin_v2(sampler_chain):
"""
:param chain_T:
:return:
"""
"""
from http://joergdietrich.github.io/emcee-convergence.html
"""
ssq = np.var(sampler_chain, axis=1, ddof=1)
W = np.mean(ssq, axis=0)
theta_b = np.mean(sampler_chain, axis=1)
theta_bb = np.mean(theta_b, axis=0)
m = sampler_chain.shape[0] * 1.0
n = sampler_chain.shape[1] * 1.0
B = n / (m - 1) * np.sum((theta_bb - theta_b) ** 2, axis=0)
var_theta = (n - 1) / n * W + 1 / n * B
Rhat = np.sqrt(var_theta / W)
return Rhat
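# Self-check sketch: for independent Gaussian draws arranged as an emcee-like
# chain of shape (nwalkers, nsteps, ndim), the statistic should be close to 1.
def _sketch_gelman_rubin():
    fake_chain = np.random.randn(32, 1000, 3)
    return GelmanRubin_v2(fake_chain)   # values ~1.0 indicate convergence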
def compute_value_sigma(samples):
if np.size(np.shape(samples)) == 1:
sample_med = np.zeros(3)
#sample_tmp = np.percentile(samples, [15.865, 50, 84.135], axis=0)
sample_tmp = np.percentile(samples[np.isfinite(samples)], [15.865, 50, 84.135], axis=0)
sample_med[0] = sample_tmp[1]
sample_med[1] = sample_tmp[2] - sample_tmp[1]
sample_med[2] = sample_tmp[1] - sample_tmp[0]
elif np.size(np.shape(samples)) == 2:
#sample_med = np.asarray(list(map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
#zip(*np.percentile(samples, [15.865, 50, 84.135], axis=0)))))
sample_med = np.zeros((samples.shape[1],3))
for k in range(samples.shape[1]):
ttt = samples[:,k]
sample_tmp = np.percentile(ttt[np.isfinite(ttt)], [15.865, 50, 84.135], axis=0)
sample_med[k,0] = sample_tmp[1]
sample_med[k,1] = sample_tmp[2] - sample_tmp[1]
sample_med[k,2] = sample_tmp[1] - sample_tmp[0]
else:
print('ERROR!!! ')
return None
return sample_med
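# Sketch: percentile summary of a unit-Gaussian sample; the returned triplet is
# (median, upper uncertainty, lower uncertainty), here roughly (0, 1, 1).
def _sketch_compute_value_sigma():
    samples = np.random.randn(100000)
    return compute_value_sigma(samples)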
def pick_MAP_parameters(samples, lnprob):
indmax = np.argmax(lnprob)
if np.size(np.shape(samples)) == 1:
return samples[indmax], lnprob[indmax]
elif np.size(np.shape(samples)) == 2:
return samples[indmax, :], lnprob[indmax]
else:
print('ERROR!!! ')
return None
| 15,492 | 32.827511 | 118 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/clv_rm_subroutines.py | import numpy as np
from SLOPpy.subroutines.rebin_subroutines import *
def clv_rm_correction_factor_computation(clv_rm_modelling, wave, step, rv_shift, obs):
ancillary = {}
ancillary['norm_convolved_shifted'] = \
rebin_1d_to_1d(clv_rm_modelling['common']['wave'],
clv_rm_modelling['common']['step'],
clv_rm_modelling['common']['norm_convolved'],
wave,
step,
rv_shift=rv_shift,
preserve_flux=False)
ancillary['stellar_spectra_convolved_shifted'] = \
rebin_1d_to_1d(clv_rm_modelling['common']['wave'],
clv_rm_modelling['common']['step'],
clv_rm_modelling[obs]['stellar_spectra_convolved'],
wave,
step,
rv_shift=rv_shift,
preserve_flux=False)
wavelength_exclusion = \
(wave <= clv_rm_modelling['common']['wave'][0] + 1) | \
(wave >= clv_rm_modelling['common']['wave'][-1] - 1)
wavelength_selection = \
(wave > clv_rm_modelling['common']['wave'][0] + 1) & \
        (wave < clv_rm_modelling['common']['wave'][-1] - 1)
ancillary['norm_convolved_shifted'][wavelength_exclusion] = \
np.amax(ancillary['norm_convolved_shifted'][wavelength_selection])
ancillary['stellar_spectra_convolved_shifted'][wavelength_exclusion] = \
np.amax(ancillary['stellar_spectra_convolved_shifted'][wavelength_selection])
ancillary['correction'] = ancillary['stellar_spectra_convolved_shifted'] / ancillary['norm_convolved_shifted']
return ancillary['correction'], ancillary
| 1,724 | 40.071429 | 114 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/mcmc_fit_functions.py | import numpy as np
from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
from SLOPpy.subroutines.constants import *
def compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array):
""" computing the spectral shift in RV """
rv_shift = planet_K * planet_RVsinusoid
line_model = np.ones(np.shape(wave_meshgrid), dtype= np.double)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
sigma = line_array[2] / sigma2fwhm * line_array[0] / speed_of_light_km
line_shifted = line_array[0] + (rv_shift + line_array[3]) * line_array[0] / speed_of_light_km
line_model -= line_array[1] * np.exp(-(1./(2*sigma**2))*(wave_meshgrid-line_shifted)**2)
return line_model
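# Minimal sketch (placeholder numbers): a single Gaussian absorption line with
# 1% contrast and 10 km/s FWHM at the Na D2 wavelength, evaluated at an orbital
# phase where the planetary RV term is 0.5 * K_planet.
def _sketch_compute_single_line():
    wave = np.linspace(5888., 5893., 200)        # wavelength grid [A]
    line_array = [5889.951, 0.01, 10.0, 0.0]     # center, contrast, FWHM, winds
    return compute_single_line(wave, 0.5, 150.0, line_array)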
def compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines):
""" computing the spectral shift in RV """
rv_shift = planet_K * planet_RVsinusoid
lines_model = np.ones(np.shape(wave_meshgrid), dtype= np.double)
for ii in range(0, n_lines):
sigma = lines_array[2,ii] / sigma2fwhm * lines_array[0,ii] / speed_of_light_km
line_shifted = lines_array[0,ii] + (rv_shift + lines_array[3,ii]) * lines_array[0,ii] / speed_of_light_km
lines_model -= lines_array[1,ii] * np.exp(-(1./(2*sigma**2))*(wave_meshgrid-line_shifted)**2)
return lines_model
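# Sketch (placeholder numbers): the Na I doublet with shared FWHM and no winds;
# lines_array follows the [wavelength, contrast, FWHM, winds] layout used above.
def _sketch_compute_multiple_lines():
    wave = np.linspace(5885., 5900., 500)
    lines_array = np.empty([4, 2])
    lines_array[:, 0] = [5889.951, 0.010, 10.0, 0.0]    # Na D2
    lines_array[:, 1] = [5895.924, 0.008, 10.0, 0.0]    # Na D1
    return compute_multiple_lines(wave, 0.5, 150.0, lines_array, 2)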
""" case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift """
def logprob_case00(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
line_array = [lines_center[0], theta[0], theta[1], theta[2]]
line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)
flux_res = transmission_spec / clv_model / line_model - 1.
ferr_res = transmission_spec_err / clv_model / line_model
if return_models:
lines_array = np.empty([4, 1])
lines_array[:, 0] = line_array
return line_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': line_array[2],
'winds': line_array[3],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" adding the jitter to error esitamets """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 1: only one spectral line, no winds """
def logprob_case01(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
line_array = [lines_center[0], theta[0], theta[1], 0.000]
line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)
flux_res = transmission_spec / clv_model / line_model - 1.
ferr_res = transmission_spec_err / clv_model / line_model
if return_models:
lines_array = np.empty([4, 1])
lines_array[:,0] = line_array
return line_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': line_array[2],
'winds': line_array[3],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 2: only one spectral line, no planetary radius dependance """
def logprob_case02(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
"""
planet_K = theta[-2-i_j]
line_array = [lines_center[0], theta[0], theta[1], theta[2]]
line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)
flux_res = transmission_spec / clv_model / line_model - 1.
ferr_res = transmission_spec_err / clv_model / line_model
if return_models:
lines_array = np.empty([4, 1])
lines_array[:,0] = line_array
return line_model, clv_model, lines_array, planet_K, 1.00000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': line_array[2],
'winds': line_array[3],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 3: only one spectral line, no winds and no planetary radius dependance """
def logprob_case03(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
"""
planet_K = theta[-2-i_j]
line_array = [lines_center[0], theta[0], theta[1], 0.000]
line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)
flux_res = transmission_spec / clv_model / line_model - 1.
ferr_res = transmission_spec_err / clv_model / line_model
if return_models:
lines_array = np.empty([4, 1])
lines_array[:,0] = line_array
return line_model, clv_model, lines_array, planet_K, 1.00000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': line_array[2],
'winds': line_array[3],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 10: more than one spectral lines, all line parameters are free and independent """
def logprob_case10(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = theta[i_pams]
i_pams += 1
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 11: more than one spectral lines, all lines are affected by the same wind """
def logprob_case11(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = theta[-4-i_j]
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 12: more than one spectral lines, all lines have same FWHM """
def logprob_case12(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-4-i_j]
lines_array[3, ii] = theta[i_pams]
i_pams += 1
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 13: more than one spectral lines, all lines are affected by the same wind and have same FWHM """
def logprob_case13(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-5-i_j]
lines_array[3, ii] = theta[-4-i_j]
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 14: more than one spectral lines, no winds """
def logprob_case14(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 15: more than one spectral lines, no winds, all lines have same FWHM """
def logprob_case15(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
planet_R = theta[-3-i_j]
""" computing interpolated model spectrum
"""
clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-4-i_j]
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': planet_R,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 20: more than one spectral lines, no Rp dependance, all line parameters are free and independent """
def logprob_case20(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = theta[i_pams]
i_pams += 1
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.00000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 21: more than one spectral lines, no Rp dependance, all lines are affected by the same wind """
def logprob_case21(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = theta[-3-i_j]
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 22: more than one spectral lines, no Rp dependance, all lines have same FWHM """
def logprob_case22(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-3-i_j]
lines_array[3, ii] = theta[i_pams]
i_pams += 1
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 23: more than one spectral lines, no Rp dependance, all lines are affected by the same wind and have same FWHM """
def logprob_case23(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-4-i_j]
lines_array[3, ii] = theta[-3-i_j]
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 24: more than one spectral lines, no Rp dependance, no winds """
def logprob_case24(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 25: more than one spectral lines, no Rp dependance, no winds, all lines have same FWHM """
def logprob_case25(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
        n_jitter = int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-3-i_j]
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
| 48,048 | 33.345247 | 135 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/kepler_exo.py | import numpy as np
from scipy.optimize import fsolve
import SLOPpy.subroutines.constants as constants
# +
# NAME:
# exofast_keplereq
# PURPOSE:
# Solve Kepler's Equation
# DESCRIPTION:
# Solve Kepler's Equation. Method by S. Mikkola (1987) Celestial
# Mechanics, 40 , 329-334.
# result from Mikkola then used as starting value for
# Newton-Raphson iteration to extend the applicability of this
# function to higher eccentricities
__all__ = ["kepler_K1",
"kepler_RV",
"kepler_Tc2phase_Tref",
"kepler_phase2Tc_Tref",
"get_planet_mass",
"kepler_true_anomaly_orbital_distance"]
def kepler_E(M_in, ec):
E = 0.0
E0 = 0.0
M = np.atleast_1d(M_in)
ecc = np.asarray(ec, dtype=np.double)
eccanom = np.zeros(np.size(M), dtype=np.double)
for ii in range(0, np.size(M)):
# -np.pi < M < np.pi
mx = M[ii]
if mx > np.pi:
mx = mx % (2. * np.pi)
if mx > np.pi:
mx = mx - (2. * np.pi)
if mx <= -np.pi:
mx = mx % (2. * np.pi)
if mx < -np.pi:
mx += (2. * np.pi)
if ecc < 1e-10:
eccanom[ii] = mx
else:
# equation 9a
aux = 4.0 * ecc + 0.50
alpha = (1.0 - ecc) / aux
beta = mx / (2.0 * aux)
# equation 9b
## the actual equation 9b is much much slower, but gives the same
## answer (probably because more refinement necessary)
aux = np.sqrt(beta * beta + alpha * alpha * alpha)
z = beta + aux
if z < 0.:
z = beta - aux
z = z ** (1. / 3.)
if abs(z) < 1e-8:
s0 = 0.
else:
s0 = z - alpha / z
s1 = s0 - (0.078 * s0 ** 5) / ((1.) + ecc)
e0 = mx + ecc * (3. * s1 - 4. * s1 ** 3.)
se0 = np.sin(e0)
ce0 = np.cos(e0)
f = e0 - ecc * se0 - mx
f1 = (1.0) - ecc * ce0
f2 = ecc * se0
f3 = ecc * ce0
u1 = -f / f1
u2 = -f / (f1 + 0.5 * f2 * u1)
u3 = -f / (f1 + 0.5 * f2 * u2 + (1. / 6.) * f3 * u2 ** 2.)
u4 = -f / (f1 + 0.5 * f2 * u3 + (1. / 6.) * f3 * u3 ** 2 - (1. / 24.) * f2 * u3 ** 3)
ecan_tmp = e0 + u4
if ecan_tmp >= 2. * np.pi:
ecan_tmp = ecan_tmp - 2. * np.pi
if ecan_tmp < 0.:
ecan_tmp = ecan_tmp + 2. * np.pi
## Now get more precise solution using Newton Raphson method
## for those times when the Kepler equation is not yet solved
## to better than 1e-10
## (modification J. Wilms)
if mx < 0.:
mx = mx + 2. * np.pi
## calculate the differences
diff = abs(ecan_tmp - ecc * np.sin(ecan_tmp) - mx)
if diff > abs(diff - 2 * np.pi):
diff = abs(diff - 2 * np.pi)
thresh1 = 1e-8
thresh2 = 10000
countt = 0
while (diff > thresh1 and countt < thresh2):
## E-e sinE-M
fe = (ecan_tmp - ecc * np.sin(ecan_tmp) - mx) % (2 * np.pi)
## f' = 1-e*cosE
fs = (1. - ecc * np.cos(ecan_tmp)) % (2 * np.pi)
oldval = ecan_tmp
ecan_tmp = (oldval - fe / fs)
diff = abs(oldval - ecan_tmp)
countt += 1
## range reduction
            # the modulus must be taken with respect to 2*pi, hence the parentheses
            if ecan_tmp >= 2. * np.pi:
                ecan_tmp = ecan_tmp % (2. * np.pi)
            if ecan_tmp < 0.:
                ecan_tmp = ecan_tmp % (2. * np.pi)
eccanom[ii] = ecan_tmp
return eccanom
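# Self-check sketch: the returned eccentric anomalies should satisfy Kepler's
# equation E - e*sin(E) = M to within the solver tolerance (~1e-8).
def _sketch_kepler_E():
    M = np.linspace(0.1, 6.0, 25)       # mean anomalies [rad]
    E = kepler_E(M, 0.3)                # eccentric anomalies for e = 0.3
    return np.max(np.abs(E - 0.3 * np.sin(E) - M))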
def kepler_K1(m_star1, m_star2, period, i, e0):
""" Computes the radial velocity semi-amplitude of the primary star
:param m_star1: mass of the primary, in Solar mass units
:param m_star2: mass of the secondary/planet, in Solar mass units
:param period: orbital period of star2, in [d]
:param i: orbital inclination of star2 wrt the observer (0=face on), in [deg]
:param e0: orbital eccentricity of star2
:return: k1, the observed radial velocity semi-amplitude of the primary, in [m s^-1]
"""
# period must be given in days, conversion factor to seconds are included in the routine
# constants.Gsi: Gravitational constant in SI system [m^3 kg^-1 s^-2]
# constants.Msun: Sun mass in SI system [kg]
    # 86400. (= constants.d2s): seconds in a day
return (2. * np.pi * constants.Gsi * constants.Msun / 86400.) ** (1. / 3.) \
* (np.sin(i * np.pi / 180.0) / np.sqrt(1. - e0 ** 2.)) * period ** (-1. / 3.) \
* (m_star2 * (m_star1 + m_star2) ** (-2. / 3.))
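# Sanity-check sketch: a Jupiter-mass planet on Jupiter's ~4333 d orbit around
# a 1 Msun star, seen edge-on on a circular orbit, should induce a stellar
# reflex semi-amplitude of roughly 12.5 m/s.
def _sketch_kepler_K1():
    return kepler_K1(1.0, constants.Mjups, 4332.8, 90.0, 0.0)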
def kepler_RV(BJD, TPeri, Period, gamma, K, e0, omega0):
# omega = argument of pericenter
# Mean Anomaly
#
MeAn = 2. * np.pi * (1. + (BJD - TPeri) / Period % 1.)
if abs(e0) < 1e-3:
TrAn = np.asarray(MeAn, dtype=np.double)
e = np.asarray(0., dtype=np.double)
omega = np.asarray(0., dtype=np.double)
else:
if e0 < 0.:
e = np.asarray(-e0, dtype=np.double)
omega = np.asarray(omega0, dtype=np.double) + np.pi
else:
e = np.asarray(e0, dtype=np.double)
omega = np.asarray(omega0, dtype=np.double)
# Eccentric Anomaly
EccAn = kepler_E(MeAn, e)
TrAn = 2. * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(EccAn / 2.0))
rv = K * (np.cos(TrAn + omega) + e * np.cos(omega)) + gamma
return rv
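# Illustrative usage sketch (not part of the original module; all numbers are
# placeholders): the stellar RV curve sampled over one orbital period. Note that
# the argument of pericenter omega0 must be given in radians.
def _example_kepler_RV_curve():
    bjd = np.linspace(2456000.0, 2456010.0, 100)
    # Tperi = 2456000.0, P = 10 d, gamma = 0 m/s, K = 50 m/s, e = 0.1, omega = pi/2
    return kepler_RV(bjd, 2456000.0, 10.0, 0.0, 50.0, 0.1, np.pi / 2.)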
def kepler_RV_T0P(BJD0, phase, Period, K, e0, omega0):
# BJD0 is given as BJD-T0, where T0 is arbitrarily defined by the user
# Tperi_ is substituted by _phase_, which is the phase of the orbit where
# BJD0+T0+phase*Period = Tperi
# omega = argument of pericenter
#
omega = np.asarray(omega0, dtype=np.double)
e = np.asarray(e0, dtype=np.double)
MeAn = 2. * np.pi * (1. + ((BJD0 / Period) + (phase - omega0) / (2 * np.pi)) % 1.)
if abs(e0) < 1e-3:
TrAn = np.asarray(MeAn, dtype=np.double)
e = np.asarray(0., dtype=np.double)
else:
if e0 < 0.:
e = -1 * e
omega += np.pi
# Eccentric Anomaly
EccAn = kepler_E(MeAn, e)
TrAn = 2. * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(EccAn / 2.0))
rv = K * (np.cos(TrAn + omega) + e * np.cos(omega))
return rv
def kepler_true_anomaly_orbital_distance(BJD0, Tcent0, Period, e0, omega0, a_sm):
# BJD0 is given as BJD-T0, where T0 is arbitrarily defined by the user
# Tperi_ is substituted by _phase_, which is the phase of the orbit where
# BJD0+T0+phase*Period = Tperi
# omega = argument of pericenter
phase = kepler_Tc2phase_Tref(Period, Tcent0, e0, omega0)
omega = np.asarray(omega0, dtype=np.double)
e = np.asarray(e0, dtype=np.double)
MeAn = 2. * np.pi * (1. + ((BJD0 / Period) + (phase - omega0) / (2 * np.pi)) % 1.)
if abs(e0) < 1e-3:
TrAn = np.asarray(MeAn, dtype=np.double)
e = np.asarray(0., dtype=np.double)
r_orb = a_sm
else:
if e0 < 0.:
e = -1 * e
omega += np.pi
# Eccentric Anomaly
EccAn = kepler_E(MeAn, e)
TrAn = 2. * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(EccAn / 2.0))
r_orb = a_sm * (1. - e ** 2) / (1. + e * np.cos(TrAn))
return TrAn, r_orb
def kepler_phase2Tc_Tref(Period, phase, e0, omega0):
# The closest Tcent after Tref is given back
TrAn = np.pi / 2 - omega0
EccAn = 2. * np.arctan(np.sqrt((1.0 - e0) / (1.0 + e0)) * np.tan(TrAn / 2.0))
MeAn = EccAn - e0 * np.sin(EccAn)
return (MeAn - phase + omega0) / (2 * np.pi) * Period % Period
def kepler_Tc2phase_Tref(Period, Tcent, e0, omega0):
    # Returns the orbital phase corresponding to the closest Tcent after Tref
TrAn = np.pi / 2 - omega0
EccAn = 2. * np.arctan(np.sqrt((1.0 - e0) / (1.0 + e0)) * np.tan(TrAn / 2.0))
MeAn = EccAn - e0 * np.sin(EccAn)
return (omega0 + MeAn - Tcent / Period * 2 * np.pi) % (2 * np.pi)
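# Illustrative consistency check (not part of the original module; all numbers
# are placeholders): converting a transit time to the internal phase parameter
# and back recovers Tcent modulo one orbital period.
def _example_phase_Tc_roundtrip():
    period, tcent, ecc, omega = 3.5, 1.2, 0.05, np.pi / 3.
    phase = kepler_Tc2phase_Tref(period, tcent, ecc, omega)
    return kepler_phase2Tc_Tref(period, phase, ecc, omega)  # ~= tcent % period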
def f_get_mass(m_star2, m_star1, period, e0, k1):
""" Computes the difference between the input radial velocity semi-amplitude
of the primary star and the value corresponding to the provided orbital parameters.
Supporting function to get_planet_mass subroutine
:param m_star2: mass of the secondary/planet, in Solar mass units
:param m_star1: mass of the primary, in Solar mass units
:param period: orbital period of star2, in [d]
:param e0: orbital eccentricity of star2
:param k1: observed RV semi-amplitude of the primary
:return: the difference between the observed and theoretical RV semi-amplitude of the primary, in [m s^-1]
"""
    # period must be given in days; the conversion factor to seconds is included in the routine
    # constants.Gsi: Gravitational constant in SI system [m^3 kg^-1 s^-2]
    # constants.Msun: Sun mass in SI system [kg]
    # 86400. (= constants.d2s): seconds in a day
# M_star1, M_star2 in solar masses
# P in days -> Period is converted in seconds in the routine
# inclination assumed to be 90 degrees
# Gravitational constant in SI system [in m^3 kg^-1 s^-2]
# output in m/s
return k1 \
- ((2. * np.pi * constants.Gsi * constants.Msun / 86400.0) ** (1. / 3.)
* (1. / np.sqrt(1. - e0 ** 2.))
* period ** (-1. / 3.)
* (m_star2 * (m_star1 + m_star2) ** (-2. / 3.)))
def get_approximate_mass(period, k1, e0, m_star1):
""" Return the approximate mass of the planet in Solar mass units, in the assumption that M_planet << M_star
:param period: orbital period of star2, in [d]
:param k1: observed RV semi-amplitude of the primary
:param e0: orbital eccentricity of star2
:param m_star1: mass of the primary, in Solar mass units
:return: mass of the planet, in Solar mass units
"""
return k1 / ((2. * np.pi * constants.Gsi * constants.Msun / 86400.0) ** (1. / 3.)
* (1. / np.sqrt(1. - e0 ** 2.))
* period ** (-1. / 3.)
* (m_star1 ** (-2. / 3.)))
def get_planet_mass(P, K, e, Mstar, approximation_limit=30.):
n = np.size(K)
if n == 1:
M_approx = min(get_approximate_mass(P, K, e, Mstar), 2*constants.Msear)
return fsolve(f_get_mass, M_approx, args=(Mstar, P, e, K))
M_approx = get_approximate_mass(P, K, e, Mstar)
if np.average(M_approx) > approximation_limit/constants.Msear:
print('Computing exact mass of the planet (average approximate mass larger than {0:3.1f} Me)'.format(approximation_limit))
M_init = np.average(M_approx)
for i in range(0, n):
M_approx[i] = fsolve(f_get_mass, np.average(M_init), args=(Mstar[i], P[i], e[i], K[i]))
else:
print('Computing planetary mass under the approximation M_planet << M_star (threshold at {0:3.1f} Me)'.format(approximation_limit))
return M_approx
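# Illustrative usage sketch (not part of the original module; the orbital
# parameters are placeholders): the planetary mass in Solar mass units for a
# 3.5-day orbit with K = 200 m/s around a 1 Msun star (of the order of a Jupiter
# mass); multiply by constants.Msjup or constants.Msear to change units.
def _example_get_planet_mass():
    return get_planet_mass(3.5, 200.0, 0.0, 1.0)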
| 11,085 | 34.993506 | 139 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/common.py | import sys
import os
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import Normalize
import numpy as np
import scipy.stats as sci_stats
import scipy.optimize as sci_optimize
import scipy.interpolate as sci_int
from astropy.io import fits
import json
import warnings
import pygtc
import re
# Use ordered dictionaries for the observations
from collections import OrderedDict
"""
List of exceptions
"""
class MissingFileException(Exception):
pass
class MissingKeywordException(Exception):
pass
class OutOfRangeException(Exception):
pass
def difference_utc2tdb(jd):
### jd date sec leap diff
# 2441317.5 1972-01-01T00:00:00 2272060800 10 42.184
# 2441499.5 1972-07-01T00:00:00 2287785600 11 43.184
# 2441683.5 1973-01-01T00:00:00 2303683200 12 44.184
# 2442048.5 1974-01-01T00:00:00 2335219200 13 45.184
# 2442413.5 1975-01-01T00:00:00 2366755200 14 46.184
# 2442778.5 1976-01-01T00:00:00 2398291200 15 47.184
# 2443144.5 1977-01-01T00:00:00 2429913600 16 48.184
# 2443509.5 1978-01-01T00:00:00 2461449600 17 49.184
# 2443874.5 1979-01-01T00:00:00 2492985600 18 50.184
# 2444239.5 1980-01-01T00:00:00 2524521600 19 51.184
# 2444786.5 1981-07-01T00:00:00 2571782400 20 52.184
# 2445151.5 1982-07-01T00:00:00 2603318400 21 53.184
# 2445516.5 1983-07-01T00:00:00 2634854400 22 54.184
# 2446247.5 1985-07-01T00:00:00 2698012800 23 55.184
# 2447161.5 1988-01-01T00:00:00 2776982400 24 56.184
# 2447892.5 1990-01-01T00:00:00 2840140800 25 57.184
# 2448257.5 1991-01-01T00:00:00 2871676800 26 58.184
# 2448804.5 1992-07-01T00:00:00 2918937600 27 59.184
# 2449169.5 1993-07-01T00:00:00 2950473600 28 60.184
# 2449534.5 1994-07-01T00:00:00 2982009600 29 61.184
# 2450083.5 1996-01-01T00:00:00 3029443200 30 62.184
# 2450630.5 1997-07-01T00:00:00 3076704000 31 63.184
# 2451179.5 1999-01-01T00:00:00 3124137600 32 64.184
# 2453736.5 2006-01-01T00:00:00 3345062400 33 65.184
# 2454832.5 2009-01-01T00:00:00 3439756800 34 66.184
# 2456109.5 2012-07-01T00:00:00 3550089600 35 67.184
# 2457204.5 2015-07-01T00:00:00 3644697600 36 68.184
# 2457754.5 2017-01-01T00:00:00 3692217600 37 69.184
jd_table = np.asarray([2441317.5, 2441499.5, 2441683.5, 2442048.5, 2442413.5, 2442778.5, 2443144.5, 2443509.5, 2443874.5, 2444239.5,
2444786.5, 2445151.5, 2445516.5, 2446247.5, 2447161.5, 2447892.5, 2448257.5, 2448804.5, 2449169.5, 2449534.5,
2450083.5, 2450630.5, 2451179.5, 2453736.5, 2454832.5, 2456109.5, 2457204.5, 2457754.5])
df_table = np.asarray([42.184, 43.184, 44.184, 45.184, 46.184, 47.184, 48.184, 49.184, 50.184, 51.184,
52.184, 53.184, 54.184, 55.184, 56.184, 57.184, 58.184, 59.184, 60.184, 61.184,
62.184, 63.184, 64.184, 65.184, 66.184, 67.184, 68.184, 69.184])/86400.
return df_table[(jd_table-jd<0)][-1] | 3,093 | 42.577465 | 136 | py |
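# Illustrative usage sketch (not part of the original module): converting a
# Julian Date expressed in UTC to TDB by adding the leap-second dependent
# offset (in days) returned by difference_utc2tdb.
def _example_utc2tdb(jd_utc=2457000.5):
    return jd_utc + difference_utc2tdb(jd_utc)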
SLOPpy | SLOPpy-main/SLOPpy/subroutines/__init__.py | 0 | 0 | 0 | py |
|
SLOPpy | SLOPpy-main/SLOPpy/subroutines/math_functions.py | import numpy as np
from scipy.optimize import curve_fit
def interpolate1d_grid_nocheck(val, arr_1, arr_2):
# using enumerate() + next() to find index of
    # first element just greater than val
res = next(x for x, v in enumerate(arr_1) if v > val)
interp_out = (val-arr_1[res-1])/(arr_1[res]-arr_1[res-1])*(arr_2[res,:]-arr_2[res-1,:]) + arr_2[res-1,:]
return interp_out
def interpolate2d_grid_nocheck(val, arr_1, arr_2):
# using enumerate() + next() to find index of
# first element just greater than 0.6
res = next(x for x, v in enumerate(arr_1) if v > val)
interp_out = (val-arr_1[res-1])/(arr_1[res]-arr_1[res-1])*(arr_2[res,:,:]-arr_2[res-1,:,:]) + arr_2[res-1,:,:]
return interp_out
def first_derivative(x_arr, y_arr):
n_points = len(x_arr)
derivative = np.zeros(n_points, dtype=np.double)
derivative[1:-1] = (y_arr[2:] - y_arr[:-2]) / (x_arr[2:] - x_arr[:-2])
derivative[0] = derivative[1]
derivative[-1] = derivative[-2]
return derivative
| 1,020 | 35.464286 | 116 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/eso_skycalc_cli.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.io_subroutines import get_filename
def get_eso_sckycalc_harps(obs_ref, wave_range, ra, dec, night, output):
# https://www.eso.org/observing/etc/doc/skycalc/helpskycalccli.html
time_tag = obs_ref[6:25]
wave_range_nm = wave_range/10.
wdelta = 0.00075
input_filename = get_filename('skycalc_input_' + repr(wave_range_nm[0]) + '_' + repr(wave_range_nm[1]),
output, night, extension=".JSON")
alman_filename = get_filename('skycalc_alman_' + time_tag + '_' + repr(wave_range_nm[1]),
output, night, extension=".JSON")
output_filename = get_filename('skycalc_output_' + repr(wave_range_nm[0]) + '_' + repr(wave_range_nm[1]) + time_tag,
output, night, extension=".fits")
if os.path.isfile(output_filename):
        skycalc_hdu = fits.open(output_filename)
data = skycalc_hdu[1].data
skycalc_hdu.close()
return data.field(0) * 10., \
np.ones(len(data.field(0))) * wdelta, \
data.field(14), \
np.ones(len(data.field(0)))
if not os.path.isfile(input_filename):
input_pams = {
"pwv_mode": "pwv",
"incl_moon": "N", # No moon contamination
"incl_starlight": "N", # No starlight
"incl_zodiacal": "N", # No zodiacal light
"incl_loweratm": "Y",
"incl_upperatm": "Y",
"incl_airglow": "N", # No airglow
"incl_therm": "N",
"vacair": "air", # compute in the air
"wmin": wave_range[0],
"wmax": wave_range[1],
"wgrid_mode": "fixed_wavelength_step",
"wdelta": wdelta,
"wres": 20000,
"lsf_type": "Gaussian", # Gaussian lsf
"lsf_gauss_fwhm": 5.5,
}
with open(input_filename, 'w') as outfile:
json.dump(input_pams, outfile)
if not os.path.isfile(alman_filename):
almanac_pams = {
"ra": ra,
"dec": dec,
"date": time_tag,
"observatory": "lasilla"
}
with open(alman_filename, 'w') as outfile:
json.dump(almanac_pams, outfile)
os.system('skycalc_cli'
+ ' --in ' + input_filename
+ ' --alm ' + alman_filename
+ ' --out ' + output_filename)
skycalc_hdu = fits.open(output_filename)
data = skycalc_hdu[1].data
skycalc_hdu.close()
return data.field(0) * 10., \
np.ones(len(data.field(0))) * wdelta, \
data.field(14), \
np.ones(len(data.field(0)))
| 2,761 | 33.098765 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/io_subroutines.py | from __future__ import print_function, division
import numpy as np
try:
import cPickle as pickle
except:
import pickle
import os
from os import path
import oyaml as yaml
from SLOPpy.subroutines.object_parameters import StarParameters, PlanetParameters
from SLOPpy.config_default import *
__all__ = ["save_to_cpickle",
"load_from_cpickle",
"delete_cpickle",
"check_existence_cpickle",
"get_filename",
"load_yaml_file",
"pars_input",
"yaml_parser",
"get_filelists",
"from_config_get_nights",
"from_config_get_instrument",
"from_config_get_system",
"from_config_get_pipeline",
"from_config_get_planet",
"from_config_get_star",
"from_config_get_clv_rm",
"from_config_refraction",
"from_config_get_interstellar_lines",
"from_config_get_transmission_lightcurve",
"from_config_get_transmission",
"from_config_get_molecfit",
"from_config_get_transmission_mcmc",
"from_config_get_spectral_lines",
"from_config_get_interactive_plots",
"from_config_get_pca_parameters",
"from_config_get_fullspectrum_parameters"]
accepted_extensions = ['.yaml', '.yml', '.conf', '.config', '.input', ]
def save_to_cpickle(fname, dictionary, output, night='', lines='', it_string=''):
output_file = get_filename(fname, output, night, lines, it_string)
pickle.dump(dictionary, open(output_file, "wb"))
def load_from_cpickle(fname, output, night='', lines='', it_string=''):
output_file = get_filename(fname, output, night, lines, it_string)
return pickle.load(open(output_file, "rb"))
def delete_cpickle(fname, output, night='', lines='', it_string=''):
output_file = get_filename(fname, output, night, lines, it_string)
os.remove(output_file)
def check_existence_cpickle(fname, output, night='', lines='', it_string=''):
output_file = get_filename(fname, output, night, lines, it_string)
return path.isfile(output_file)
def get_filename(fname, output, night, lines='', it_string='', extension=".p"):
str_lines = output
for str_input in [lines, night, fname, it_string]:
if len(str_input) > 0:
str_lines += '_' + str_input
return str_lines + extension
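# Illustrative usage sketch (not part of the original module; the tags below are
# placeholders): the pickle file name is built by joining the non-empty tags
# with underscores, e.g. 'OutputName_night01_telluric.p'.
def _example_get_filename():
    return get_filename('telluric', 'OutputName', 'night01')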
def load_yaml_file(file_conf):
# shortcut for jupyter notebook plots
config_in = yaml_parser(file_conf)
return pars_input(config_in)
def yaml_parser(file_conf):
stream = open(file_conf, 'r')
try:
config_in = yaml.load(stream, Loader=yaml.FullLoader)
except AttributeError:
config_in = yaml.load(stream)
print(' Consider updating YAML')
except:
print(' Some error happened while reading the configuration file')
quit()
if 'output' not in config_in:
for extension in accepted_extensions:
if file_conf.find(extension) > 0:
output_name = file_conf.replace(extension, "")
continue
config_in['output'] = output_name
return config_in
def pars_input(config_in):
config_in['system'] = {}
if 'settings' not in config_in:
config_in['settings'] = config_default['settings'].copy()
else:
for key, key_val in config_default['settings'].items():
if key not in config_in['settings']:
config_in['settings'][key] = key_val
for instrument in config_in['instruments']:
for key, key_val in config_default['instruments'].items():
if key not in config_in['instruments'][instrument]:
config_in['instruments'][instrument][key] = key_val
""" create the refraction dictionary if not listed under the instrument section"""
if 'refraction' not in config_in['instruments'][instrument]:
config_in['instruments'][instrument]['refraction'] = {}
""" when the refractions parameters are not explicitely specified in this section, they are either inherited
from the top level dictionary or copied from the default dictionary """
for key, key_val in config_default['refraction'].items():
if key not in config_in['instruments'][instrument]['refraction']:
try:
config_in['instruments'][instrument]['refraction'][key] = config_in['refraction'][key]
except:
config_in['instruments'][instrument]['refraction'][key] = key_val
if 'master-out' not in config_in:
config_in['master-out'] = config_default['master-out'].copy()
else:
for key, key_val in config_default['master-out'].items():
if key not in config_in['master-out']:
config_in['master-out'][key] = key_val
if 'shared' not in config_in['instruments']:
if 'wavelength_step' not in config_in['master-out']:
config_in['instruments']['shared'] = {
'wavelength_step': 0.0100
}
else:
config_in['instruments']['shared'] = {
'wavelength_step': config_in['master-out']['wavelength_step']
}
if 'molecfit' not in config_in:
config_in['molecfit'] = config_default['molecfit'].copy()
else:
for key, key_val in config_default['molecfit'].items():
if key not in config_in['molecfit']:
config_in['molecfit'][key] = key_val
for night in config_in['nights']:
instrument = config_in['nights'][night]['instrument']
""" keywords are inherited from the instrument dictionary, when not explicitely specified"""
for key in copy_from_instrument:
if key not in config_in['nights'][night]:
config_in['nights'][night][key] = config_in['instruments'][instrument][key]
if 'refraction' not in config_in['nights'][night]:
config_in['nights'][night]['refraction'] = config_in['instruments'][instrument]['refraction'].copy()
else:
for key, key_val in config_in['instruments'][instrument]['refraction'].items():
if key not in config_in['nights'][night]['refraction']:
config_in['nights'][night]['refraction'][key] = key_val
#if 'master_out_method' not in config_in['nights'][night]:
# config_in['nights'][night]['master_out_method'] = None
if config_in['nights'][night]['use_analytical_rvs'] and 'RV_semiamplitude' not in config_in['star']:
print(" Missing RV_semiamplitude keyword for the star, the value will be computed from the RVs ")
config_in['nights'][night]['use_analytical_rvs'] = False
""" OLD approach to compute the RV of the planet, left here because it may be useful in the future
try:
_dict_star = {'mass': None, 'radius': None, 'gamma': None}
for key in config_in['star']:
_dict_star[key] = config_in['star'][key]
config_in['system']['star'] = StarParameters(
mass=_dict_star['mass'],
radius=_dict_star['radius'])
config_in['system']['common'] = {'degree': False, 'n_planets': 0, 'planets_list': []}
for key in config_in['planets']['common']:
config_in['system']['common'][key] = config_in['planets']['common'][key]
for key in config_in['planets']:
if key not in ['common']:
config_in['system'][key] = PlanetParameters()
config_in['system'][key].put_reference_epoch(config_in['system']['common']['Tref'])
config_in['system'][key].put_RVparameters(
P=config_in['planets'][key]['P'],
K=config_in['planets'][key]['K'],
f=config_in['planets'][key]['Tc'],
e=config_in['planets'][key]['e'],
o=config_in['planets'][key]['o'],
degree=config_in['system']['common']['degree'])
config_in['system'][key].put_RVplanet(config_in['planets'][key]['K_planet'])
config_in['system'][key].put_star(config_in['system']['star'])
config_in['system']['common']['n_planets'] += 1
config_in['system']['common']['planets_list'].extend([key])
except:
pass
"""
return config_in
def get_filelists(night_selected):
"""
:param night_selected: usually the night_dict[night] dictionary from the main program
:return:
"""
""" List files are supposed to be in the same directory of the yaml file,
NOT on the archive directory: in this way it is possible to try different
combinations of nights and files without making a mess in the archive """
files_list = np.atleast_1d(np.genfromtxt(night_selected['all'], dtype=str))
try:
files_transit_out = np.atleast_1d(np.genfromtxt(night_selected['out_transit'], dtype=str))
files_transit_in = np.atleast_1d(np.genfromtxt(night_selected['in_transit'], dtype=str))
files_transit_full = np.atleast_1d(np.genfromtxt(night_selected['full_transit'], dtype=str))
except (FileNotFoundError, IOError):
files_transit_out = None
files_transit_in = None
files_transit_full = None
try:
files_telluric = np.atleast_1d(np.genfromtxt(night_selected['telluric_list'], dtype=str))
except (FileNotFoundError, IOError):
files_telluric = None
if night_selected['telluric'] is not None:
files_star_telluric = np.atleast_1d(np.genfromtxt(night_selected['star_telluric'], dtype=str))
else:
files_star_telluric = None
return files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric
def from_config_get_nights(config_in):
"""
This subroutine creates a shortcut to the night list
:param config_in:
:return: dictionary
"""
return config_in['nights']
def from_config_get_instrument(config_in):
"""
This subroutine creates a shortcut to the instrument list
:param config_in:
:return: dictionary
"""
return config_in['instruments']
def from_config_refraction(config_in, night):
"""
    This subroutine creates a shortcut to the refraction dictionary of a given night
:param config_in:
:return: dictionary
"""
return config_in['nights'][night]['refraction']
def from_config_get_transmission_lightcurve(config_in):
"""
This subroutine creates a shortcut to the transmission_lightcurve dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['transmission_lightcurve']
except:
return config_in['transmission']
def from_config_get_transmission(config_in):
"""
    This subroutine creates a shortcut to the transmission dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['transmission']
except:
return config_in['transmission_lightcurve']
def from_config_get_system(config_in):
"""
This subroutine creates a shortcut to the system dictionary
:param config_in:
:return: dictionary
"""
return config_in['system']
def from_config_get_pipeline(config_in):
"""
This subroutine creates a shortcut to the pipeline parameters dictionary
:param config_in:
:return: dictionary
"""
return config_in['pipeline']
def from_config_get_planet(config_in):
"""
This subroutine creates a shortcut to the planet dictionary
:param config_in:
:return: dictionary
"""
return config_in['planet']
def from_config_get_star(config_in):
"""
    This subroutine creates a shortcut to the star dictionary
:param config_in:
:return: dictionary
"""
return config_in['star']
def from_config_get_clv_rm(config_in):
"""
    This subroutine creates a shortcut to the CLV_RM_correction dictionary
:param config_in:
:return: dictionary
"""
return config_in['CLV_RM_correction']
def from_config_get_interstellar_lines(config_in):
"""
    This subroutine creates a shortcut to the interstellar_lines dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['interstellar_lines']
except:
return None
def from_config_get_molecfit(config_in):
"""
    This subroutine creates a shortcut to the molecfit dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['molecfit']
except:
return None
def from_config_get_transmission_mcmc(config_in):
"""
    This subroutine creates a shortcut to the transmission_mcmc dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['transmission_mcmc']
except:
return None
def from_config_get_spectral_lines(config_in):
"""
    This subroutine creates a shortcut to the spectral_lines dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['spectral_lines']
except:
return None
def from_config_get_interactive_plots(config_in):
"""
    This subroutine creates a shortcut to the interactive_plots flag
:param config_in:
:return: dictionary
"""
try:
return config_in['interactive_plots']
except:
return False
def from_config_get_pca_parameters(config_in):
"""
    This subroutine creates a shortcut to the pca_parameters dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['pca_parameters']
except:
return {}
def from_config_get_fullspectrum_parameters(config_in):
"""
    This subroutine creates a shortcut to the full_spectrum dictionary
:param config_in:
:return: dictionary
"""
try:
return config_in['full_spectrum']
except:
return {} | 14,538 | 31.525727 | 116 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/spectral_subroutines.py | from __future__ import print_function, division
import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
""" Empty module left here for back-compatibility, as almost every module
is importing this one rather than the rebin_subroutines
Everything has been moved into the instruments folder for
easier handling of different instruments
"""
#from SLOPpy.instruments.HARPN_DRSv3 import *
#from SLOPpy.instruments.HARPS_DRSv3 import *
#from SLOPpy.instruments.PEPSI_reduced import *
#
#def get_calib_data(instrument, archive, file_rad, fiber='A', order_selection=None):
#
# if instrument =='HARPS-N':
# return HARPN_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# elif instrument =='HARPS':
# return HARPS_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# elif instrument =='PEPSI':
# return PEPSI_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# else:
# raise ValueError("Instrument not supported")
#
#
#def get_input_data(instrument, archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
#
# if instrument =='HARPS-N':
# return HARPN_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# elif instrument =='HARPS':
# return HARPS_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# elif instrument =='PEPSI':
# return PEPSI_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# else:
# raise ValueError("Instrument not supported")
| 1,883 | 46.1 | 151 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/PEPSI_reduced.py | import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines import constants
from SLOPpy.subroutines.common import *
from astropy.coordinates import SkyCoord
from astropy import units as u
def PEPSI_get_instrument_keywords():
properties = {
# DRS-specific keywords
'time_stamp': 'mid_exposure',
'time_standard': 'TDB',
# Observatory-specific keywords
'geoelev': 3221.0, # meters
'longitude' : -7.325938, # Tel geo longitude (+=East) (deg)
        'latitude' : 32.7013083, # Tel geo latitude (+=North) (deg)
# Instrument-specific keyword
# The following are the input values used by Molecfit, taken from Allart+2017
# for convenience, all the default values are listed here instead of being scattered into the code
'molecfit': {
'default_wstep': 0.01000, # default wavelength step size for the input stellar spectra
'molecules': ['H2O', 'O2'],
'ftol': 1e-9,
'xtol': 1e-9,
'cont_const': 1.0, # a0, This value differs from Allart+2017 since we are using normalized spectra
'cont_n': 3, # n_cont, Degree of coefficients for continuum fit
'wlc_n': 2, # n_lambda, Polynomial degree of the refined wavelength solution
'wlc_const': 0.0, # b0, Initial constant term for wavelength correction (shift relative to half wavelength range)
'res_gauss': 4.8, # omega_Gaussian, Initial value for FWHM of Gaussian in pixels
'kernfac': 15, #kernel_size, Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
'slitwidth': 1.00, # in arcseconds
'pixelscale': 0.16,
}
}
return properties
def PEPSI_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
""" There are no calibration files from PEPSI, so this subroutine will
        provide the dictionary required by SLOPpy to work properly.
        "fiber" and "order_selection" variables are kept for consistency with the
        main code, but they are not applicable here.
"""
calib_dict = {}
# file from which to extract the relevant keywords - it could be a science
# frame as well
pepsi_fits = fits.open(archive+'/'+file_rad)
data_fits = pepsi_fits[1].data['Fun']
# Blaze file - if only blaze-corrected files are available, set it equal to 1.
calib_dict['n_pixels'] = len(data_fits)
calib_dict['n_orders'] = 1
calib_dict['blaze'] = np.ones((calib_dict['n_orders'], calib_dict['n_pixels']))
return calib_dict
def PEPSI_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
"""_summary_
Returns:
_type_: _description_
"""
""" PEPSI delivers calibrated, rebinned spectra only in a specific format
so many entry in the dictionary will be empty.
Given the simplicity of the format, this subroutines can be used as
template for other instruments
"fiber", "skip_ccf", "skip_s1d", "order_selection" variables are kept
for consistency with the main code, but they do not have applicability
"""
input_dict = {'mask': mask, 'header':{}}
input_s1d = {'header':{}}
properties = PEPSI_get_instrument_keywords()
pepsi_fits = fits.open(archive+'/'+file_rad)
input_dict['header']['e2ds'] = pepsi_fits[0].header
#Arg = file_fits[1].data['Arg']
#Fun = file_fits[1].data['Fun']
#Var = file_fits[1].data['Var']
#Mask = file_fits[1].data['Mask']
input_dict['n_pixels'] = pepsi_fits[1].header['NAXIS2']
input_dict['n_orders'] = 1
# Not sure if these keywords are required anywhere
#input_dict['DPR_CATG'] = 'SCIENCE'
#input_dict['DPR_TYPE'] = ??
input_dict['BERV'] = pepsi_fits[0].header['SSBVEL'] / 1000. # in km/s
input_dict['RVC'] = pepsi_fits[0].header['RADVEL'] / 1000.
# RV of the star, it must be provided but it can be bypassed
input_dict['EXPTIME'] = pepsi_fits[0].header['EXPTIME']
    # BJD provided at mid-exposure, no need to check for it
input_dict['BJD'] = pepsi_fits[0].header['JD-TDB']
input_dict['MJD'] = pepsi_fits[0].header['JD-OBS'] - constants.MJD
input_dict['AIRMASS'] = pepsi_fits[0].header['AIRMASS']
input_dict['UTC'] = (input_dict['MJD'] - int(input_dict['MJD'])) * 86400.
input_dict['HUMIDITY'] = pepsi_fits[0].header['LBTH'] # Relative humidity in % for GEOELEV.
input_dict['PRESSURE'] = pepsi_fits[0].header['LBTP']
input_dict['TEMPERATURE_EN'] = pepsi_fits[0].header['LBTT'] #Ambient temperature in C for GEOELEV
input_dict['TEMPERATURE_M1'] = pepsi_fits[0].header['LBTT'] #Temperature of primary mirror M1 in C (for emission spectra only)
input_dict['ELEVATION'] = np.arcsin(1./input_dict['AIRMASS']) * (180./np.pi)
input_dict['GEOELEV'] = properties['geoelev']
input_dict['GEOLONG'] = properties['longitude']
input_dict['GEOLAT'] = properties['latitude']
input_dict['molecfit'] = properties['molecfit']
    skycoord = SkyCoord(pepsi_fits[0].header['RA'], pepsi_fits[0].header['DEC'], unit=(u.hourangle, u.deg))
    input_dict['RA'] = skycoord.ra.degree
    input_dict['DEC'] = skycoord.dec.degree
    # Not sure if these values are required
#input_dict['BLAZE_file'] = None
#input_dict['CCD_SIGDET'] = None
#input_dict['CCD_GAIN'] = None
# getting data
"""
NOTE: PEPSI provides 1D spectra in the stellar reference frame,
but SLOPpy requires 2D spectra (order by order) in the observer reference
frame. Thus:
1) we shift the wavelength from the stellar to the observer reference frame
2) we transform the array into (1,n_pixels) shaped arrays
An empty array as required by SLOPpy would have the shape
np.empty([n_orders, n_pixels])
"""
wave_stellar = pepsi_fits[1].data['Arg']
rvshift = pepsi_fits[0].header['SSTVEL'] / 1000.
input_dict['wave_size'] = pepsi_fits[1].header['NAXIS2']
input_dict['wave'] = np.reshape(shift_wavelength_array(wave_stellar, rvshift), (1, input_dict['wave_size']))
input_dict['e2ds'] = np.reshape(pepsi_fits[1].data['Fun'], (1, input_dict['wave_size']))
input_dict['e2ds_err'] = np.reshape(pepsi_fits[1].data['Var'], (1, input_dict['wave_size']))
""" PEPSI spectra are normalized to unity, but SLOPpy is expecting to have spectra in absolute counts
absolute counts mean that a larger step size will have a larger number of counts given the same
flux density at a specific wavelength.
PEPSI spectra have been resampled on a non-linear scale and than normalized, so not taking into account
the bin (or step) size would introduce a deformation during the rebinning phase
After several tests, I am forced to introduce a flag to force non-preservation of flux at every
rebinning step across the code
"""
input_dict['absolute_flux'] = False
input_dict['step'] = np.zeros_like(input_dict['wave'])
input_dict['step'][0,1:-1] = (input_dict['wave'][0,2:] - input_dict['wave'][0,:-2])/2.
input_dict['step'][0,0] = input_dict['step'][0,1]
input_dict['step'][0,-1] = input_dict['step'][0,-2]
    # order selection is always equal to the first - and unique - order
    input_dict['orders'] = [0]
pepsi_fits.close()
return input_dict,input_s1d
| 7,474 | 39.188172 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/HARPS_DRSv3.py | from __future__ import print_function, division
from SLOPpy.instruments.common_DRSv3 import *
def HARPSv3_get_instrument_keywords():
""" These definitions applt to DRS version 3.x """
keywords = {
'header_rvc': 'HIERARCH ESO DRS CCF RVC',
'header_berv': 'HIERARCH ESO DRS BERV',
'header_bjd': 'HIERARCH ESO DRS BJD',
'header_mjd': 'MJD-OBS', # MJD in days. This parameter is required for the retrieval of GDAS data
'header_blaze': 'HIERARCH ESO DRS BLAZE FILE',
'header_ccd': 'HIERARCH ESO DRS CCD SIGDET',
'header_conad': 'HIERARCH ESO DRS CCD CONAD',
'header_dpr_catg': 'HIERARCH ESO DPR CATG',
'header_dpr_type': 'HIERARCH ESO DPR TYPE',
'header_deg_ll': 'HIERARCH ESO DRS CAL TH DEG LL',
'header_coeff_ll': 'HIERARCH ESO DRS CAL TH COEFF LL',
'airmass_alt_start': 'HIERARCH ESO TEL AIRM START',
'airmass_alt_end': 'HIERARCH ESO TEL AIRM END',
## Telescope altitude is computed using the middle values obtained from airmass
'humidity':'HIERARCH ESO TEL AMBI RHUM', # Relative humidity in % for GEOELEV.
#'pressure_start' : 'HIERARCH ESO TEL AMBI PRES START',
#'pressure_end': 'HIERARCH ESO TEL AMBI PRES END',
'pressure':'HIERARCH ESO TEL AMBI PRES END',
'temperature_env': 'HIERARCH ESO TEL AMBI TEMP', #Ambient temperature in C for GEOELEV
'temperature_m1': 'HIERARCH ESO TEL TH M1 TEMP', # Temperature of primary mirror M1 in C (for emission spectra only)
}
properties = {
# DRS-specific keywords
'time_stamp': 'mid_exposure',
'time_standard': 'UTC',
# Observatory-specific keywords
'geoelev': 2400.0, # meters
'longitude' : -70.7345, # Tel geo longitude (+=East) (deg)
        'latitude' : -29.2584, # Tel geo latitude (+=North) (deg)
# Instrument-specific keyword
'n_orders_A': 72,
'n_orders_B': 71,
'orders_BtoA':
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, -1, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70],
        # after many experiments, I found out that the easiest and most robust way to define
        # the order correspondence between fiber A and B is just to write it down
'red_ccd':
[ 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71],
'blue_ccd':
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44],
'full_ccd':
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71],
# The following are the input values used by Molecfit, taken from Allart+2017
# for convenience, all the default values are listed here instead of being scattered into the code
'molecfit': {
'default_wstep': 0.01000, # default wavelength step size for the input stellar spectra
'molecules': ['H2O', 'O2'],
'ftol': 1e-9,
'xtol': 1e-9,
'cont_const': 1.0, # a0, This value differs from Allart+2017 since we are using normalized spectra
'cont_n': 3, # n_cont, Degree of coefficients for continuum fit
'wlc_n': 2, # n_lambda, Polynomial degree of the refined wavelength solution
'wlc_const': 0.0, # b0, Initial constant term for wavelength correction (shift relative to half wavelength range)
'res_gauss': 4.8, # omega_Gaussian, Initial value for FWHM of Gaussian in pixels
'kernfac': 15, #kernel_size, Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
'slitwidth': 1.00, # in arcseconds
'pixelscale': 0.16,
}
}
return keywords, properties
# Shortcut from the DRS-general to the instrument-specific subroutine
def HARPS_DRSv3_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
keywords, properties = HARPSv3_get_instrument_keywords()
return DRSv3_get_calib_data(archive, file_rad, keywords, properties, fiber=fiber, order_selection=order_selection)
# Shortcut from the DRS-general to the instrument-specific subroutine
def HARPS_DRSv3_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
keywords, properties = HARPSv3_get_instrument_keywords()
return DRSv3_get_input_data(archive, file_rad, keywords, properties, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
| 5,356 | 46.830357 | 162 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/HARPN_DRSv3.py | from __future__ import print_function, division
from SLOPpy.instruments.common_DRSv3 import *
def HARPNv3_get_instrument_keywords():
""" These definitions applt to DRS version 3.x """
keywords = {
'header_rvc': 'HIERARCH TNG DRS CCF RVC',
'header_berv': 'HIERARCH TNG DRS BERV',
'header_bjd': 'HIERARCH TNG DRS BJD',
'header_mjd': 'MJD-OBS', # MJD in days. This parameter is required for the retrieval of GDAS data
'header_blaze': 'HIERARCH TNG DRS BLAZE FILE',
'header_ccd': 'HIERARCH TNG DRS CCD SIGDET',
'header_conad': 'HIERARCH TNG DRS CCD CONAD',
'header_dpr_catg': 'HIERARCH TNG DPR CATG',
'header_dpr_type': 'HIERARCH TNG DPR TYPE',
'header_deg_ll': 'HIERARCH TNG DRS CAL TH DEG LL',
'header_coeff_ll': 'HIERARCH TNG DRS CAL TH COEFF LL',
'airmass_alt_start': 'HIERARCH TNG TEL AIRM START',
'airmass_alt_end': 'HIERARCH TNG TEL AIRM END',
## Telescope altitude is computed using the middle values obtained from airmass
'humidity':'HIERARCH TNG METEO HUMIDITY', # Relative humidity in % for GEOELEV.
'pressure':'HIERARCH TNG METEO PRESSURE',
'temperature_env': 'HIERARCH TNG METEO TEMP10M', #Ambient temperature in C for GEOELEV
'temperature_m1': 'HIERARCH TNG M1 CH1TEMP', # Temperature of primary mirror M1 in C (for emission spectra only)
}
properties = {
# DRS-specific keywords
'time_stamp': 'mid_exposure',
'time_standard': 'UTC',
# Observatory-specific keywords
'geoelev': 2387.2, # meters
'longitude' : -17.889, # Tel geo longitude (+=East) (deg)
        'latitude' : 28.754, # Tel geo latitude (+=North) (deg)
# Instrument-specific keyword
'n_orders_A': 69,
'n_orders_B': 69,
'orders_BtoA':
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68],
        # after many experiments, I found out that the easiest and most robust way to define
        # the order correspondence between fiber A and B is just to write it down
'red_ccd':
[ 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68],
'blue_ccd':
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41],
'full_ccd':
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68],
# The following are the input values used by Molecfit, taken from Allart+2017
# for convenience, all the default values are listed here instead of being scattered into the code
'molecfit': {
'default_wstep': 0.01000, # default wavelength step size for the input stellar spectra
'molecules': ['H2O', 'O2'],
'ftol': "1e-9",
'xtol': "1e-9",
'cont_const': 1.0, # a0, This value differs from Allart+2017 since we are using normalized spectra
'cont_n': 3, # n_cont, Degree of coefficients for continuum fit
'wlc_n': 2, # n_lambda, Polynomial degree of the refined wavelength solution
'wlc_const': 0.0, # b0, Initial constant term for wavelength correction (shift relative to half wavelength range)
'res_gauss': 4.8, # omega_Gaussian, Initial value for FWHM of Gaussian in pixels
'kernfac': 15, #kernel_size, Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
'slitwidth': 1.00, # in arcseconds
'pixelscale': 0.16,
}
}
return keywords, properties
# Shortcut from the DRS-general to the instrument-specific subroutine
def HARPN_DRSv3_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
keywords, properties = HARPNv3_get_instrument_keywords()
return DRSv3_get_calib_data(archive, file_rad, keywords, properties, fiber=fiber, order_selection=order_selection)
# Shortcut from the DRS-general to the instrument-specific subroutine
def HARPN_DRSv3_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
keywords, properties = HARPNv3_get_instrument_keywords()
return DRSv3_get_input_data(archive, file_rad, keywords, properties, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
| 5,057 | 46.716981 | 162 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/common_DRSv3.py | import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.common import *
def DRSv3_map_orders_AB(properties, order_selection):
n_orders_A = properties['n_orders_A']
map_orders_A_full = np.arange(0, n_orders_A, dtype=np.int16)
map_orders_BtoA = np.asarray(properties['orders_BtoA'])
map_orders_A = []
map_orders_B = []
#if order_selection == 'red_ccd':
# working_A_full = map_orders_A_full[properties['red_ccd']]
# working_BtoA = map_orders_BtoA[properties['red_ccd']]
if order_selection in ['red_ccd', 'blue_ccd', 'full_ccd']:
working_A_full = map_orders_A_full[properties[order_selection]]
working_BtoA = map_orders_BtoA[properties[order_selection]]
elif order_selection:
working_A_full = map_orders_A_full[order_selection]
working_BtoA = map_orders_BtoA[order_selection]
else:
working_A_full = map_orders_A_full
working_BtoA = map_orders_BtoA
for order_A, order_B in zip(working_A_full, working_BtoA):
if order_B < 0: continue
map_orders_A.extend([order_A])
map_orders_B.extend([order_B])
return map_orders_A, map_orders_B
def DRSv3_give_back_selected_orders(properties, fiber, order_selection):
map_orders_A, map_orders_B = DRSv3_map_orders_AB(properties, order_selection)
if fiber != 'A':
return map_orders_B
else:
return map_orders_A
def DRSv3_get_calib_data(archive, file_rad, keywords, properties, fiber='A', order_selection=None):
calib_dict = {}
selected_orders = DRSv3_give_back_selected_orders(properties, fiber, order_selection)
map_orders_A, map_orders_B = DRSv3_map_orders_AB(properties, order_selection)
if fiber=='A':
calib_dict['fibAB_orders_match'] = map_orders_A - np.min(selected_orders)
""" The first selected order could not have a match in fiber B, so we need to renumber from the first order of
the input selection, not from the first order that had a match """
else:
calib_dict['fibAB_orders_match'] = map_orders_B - np.min(map_orders_B)
""" Only the orders with a match in fiber A are read in the first place, so we can safely rescale with respect
to the number of the first order in the matched list """
e2ds_fits = fits.open(archive+'/'+file_rad+'_e2ds_'+fiber+'.fits')
if e2ds_fits[0].header[keywords['header_dpr_catg']] != 'SCIENCE':
return
try:
blaze_file = e2ds_fits[0].header[keywords['header_blaze']]
blaze_fits = fits.open(archive + '/' + blaze_file)
except:
blaze_file = e2ds_fits[0].header[keywords['header_blaze']].replace(':', '-')
blaze_fits = fits.open(archive + '/' + blaze_file)
# getting blaze file
calib_dict['blaze'] = blaze_fits[0].data[selected_orders, :]
# getting lamp file
try:
lamp_fits = fits.open(archive + '/' + blaze_file[:29] + '_lamp_' + fiber + '.fits')
calib_dict['lamp'] = lamp_fits[0].data[selected_orders, :]
lamp_fits.close()
except:
print("lamp files not available, sky correction will not be performed")
calib_dict['n_pixels'] = blaze_fits[0].header['NAXIS1']
calib_dict['n_orders'] = len(selected_orders)
blaze_fits.close()
return calib_dict
def DRSv3_get_input_data(archive, file_rad, keywords, properties, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
input_dict = {'mask': mask, 'header':{}}
input_s1d = {'header':{}}
selected_orders = DRSv3_give_back_selected_orders(properties, fiber, order_selection)
if mask is None:
skip_ccf = True
e2ds_fits = fits.open(archive+'/'+file_rad+'_e2ds_'+fiber+'.fits')
input_dict['header']['e2ds'] = e2ds_fits[0].header
input_dict['n_pixels'] = e2ds_fits[0].header['NAXIS1']
input_dict['n_orders'] = len(selected_orders)
input_dict['DPR_CATG'] = e2ds_fits[0].header[keywords['header_dpr_catg']]
if input_dict['DPR_CATG'] != 'SCIENCE':
return
input_dict['DPR_TYPE'] = e2ds_fits[0].header[keywords['header_dpr_type']]
if not skip_s1d:
s1d_fits = fits.open(archive + '/' + file_rad + '_s1d_'+fiber+'.fits')
input_dict['header']['s1d'] = s1d_fits[0].header
input_s1d['header']['s1d'] = s1d_fits[0].header
temp_wave, temp_step = DRSv3_get_s1d_wave(s1d_fits)
sel_wave = (temp_wave >= 3879.99990) & (temp_wave <= 6900.0001)
input_s1d['flux'] = s1d_fits[0].data[sel_wave]
input_s1d['wave'] = temp_wave[sel_wave]
input_s1d['step'] = temp_step[sel_wave]
input_s1d['size'] = np.size(input_s1d['wave'])
s1d_fits.close()
if not skip_ccf:
ccf_fits = fits.open(archive+'/'+file_rad+'_ccf_'+mask+'_'+fiber+'.fits')
input_dict['RVC'] = ccf_fits[0].header[keywords['header_rvc']]
input_dict['header']['ccf'] = ccf_fits[0].header
try:
input_dict['BERV'] = e2ds_fits[0].header[keywords['header_berv']]
input_dict['EXPTIME'] = e2ds_fits[0].header['EXPTIME']
if properties['time_stamp'] == 'start_exposure':
input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']] + input_dict['EXPTIME']/86400.
input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']] + input_dict['EXPTIME']/86400.
elif properties['time_stamp'] == 'mid_exposure':
input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']]
input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']]
elif properties['time_stamp'] == 'end_exposure':
input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']] - input_dict['EXPTIME']/86400.
input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']] - input_dict['EXPTIME']/86400.
else:
print('*** please specify the relationship between epoch and exposure time - assuming mid-exposure epochs')
input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']]
input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']]
if properties['time_standard'] == 'UTC':
input_dict['BJD']+= difference_utc2tdb(input_dict['MJD']+2400000.5)
input_dict['LST'] = e2ds_fits[0].header['LST']
try:
input_dict['AIRMASS'] = e2ds_fits[0].header['AIRMASS']
except:
input_dict['AIRMASS'] = (e2ds_fits[0].header[keywords['airmass_alt_start']]
+ e2ds_fits[0].header[keywords['airmass_alt_end']])/2.
input_dict['UTC'] = (input_dict['MJD'] - int(input_dict['MJD'])) * 86400.
input_dict['HUMIDITY'] = e2ds_fits[0].header[keywords['humidity']]
input_dict['PRESSURE'] = e2ds_fits[0].header[keywords['pressure']]
input_dict['TEMPERATURE_EN'] = e2ds_fits[0].header[keywords['temperature_env']]
input_dict['TEMPERATURE_M1'] = e2ds_fits[0].header[keywords['temperature_m1']]
input_dict['ELEVATION'] = np.arcsin(1./input_dict['AIRMASS']) * (180./np.pi)
input_dict['GEOELEV'] = properties['geoelev']
input_dict['GEOLONG'] = properties['longitude']
input_dict['GEOLAT'] = properties['latitude']
input_dict['molecfit'] = properties['molecfit']
try:
try:
input_dict['RA'] = e2ds_fits[0].header['RA-DEG']
input_dict['DEC'] = e2ds_fits[0].header['DEC-DEG']
except:
input_dict['RA'] = e2ds_fits[0].header['RA-RAD'] * 180.00 / np.pi
input_dict['DEC'] = e2ds_fits[0].header['DEC-RAD'] * 180.00 / np.pi # weird choice of using DEC in hours
except:
input_dict['RA'] = e2ds_fits[0].header['RA']
input_dict['DEC'] = e2ds_fits[0].header['DEC']
except:
print('Keyword error in prepare_dataset - check the FITS header of your files')
quit()
pass
input_dict['BLAZE_file'] = e2ds_fits[0].header[keywords['header_blaze']]
input_dict['CCD_SIGDET'] = e2ds_fits[0].header[keywords['header_ccd']]
input_dict['CCD_GAIN'] = e2ds_fits[0].header[keywords['header_conad']]
# getting data
input_dict['e2ds'] = e2ds_fits[0].data[selected_orders, :]
input_dict['e2ds_err'] = np.sqrt(np.abs(input_dict['e2ds']))
temp_wave, temp_step = DRSv3_get_e2ds_wave(e2ds_fits, keywords['header_deg_ll'], keywords['header_coeff_ll'])
input_dict['wave'] = temp_wave[selected_orders, :]
input_dict['step'] = temp_step[selected_orders, :]
input_dict['orders'] = len(selected_orders)
input_dict['wave_size'] = e2ds_fits[0].header['NAXIS1']
e2ds_fits.close()
if not skip_ccf:
ccf_fits.close()
return input_dict,input_s1d
def DRSv3_get_s1d_wave(s1d_fits):
return np.arange(0, s1d_fits[0].header['NAXIS1'], 1.)*s1d_fits[0].header['CDELT1'] + s1d_fits[0].header['CRVAL1'], \
np.ones(s1d_fits[0].header['NAXIS1'])*s1d_fits[0].header['CDELT1']
def DRSv3_get_e2ds_wave(e2ds_fits, header_deg_ll, header_coeff_ll, order=None):
e2ds_o = e2ds_fits[0].header['NAXIS2']
e2ds_w = e2ds_fits[0].header['NAXIS1']
e2ds_wave = np.zeros([e2ds_o, e2ds_w], dtype=np.double)
e2ds_step = np.zeros([e2ds_o, e2ds_w], dtype=np.double)
d = e2ds_fits[0].header[header_deg_ll]
x = np.arange(0, e2ds_w, 1.)
for n in range(0, e2ds_o):
for i in range(d, -1, -1):
a_sel = i + n*(1+d)
a_coeff = e2ds_fits[0].header[header_coeff_ll+repr(a_sel)]
if i == d:
y_w = a_coeff
y_s = i*a_coeff
else:
y_w = y_w*x + a_coeff
if i > 0: y_s = y_s*x + i*a_coeff
e2ds_wave[n, :] = y_w
e2ds_step[n, :] = y_s
if order is None:
return e2ds_wave, e2ds_step
else:
return e2ds_wave[order, :], e2ds_step[order, :]
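# Illustrative cross-check (not part of the original module): the per-order
# wavelength solution reconstructed above is a polynomial in pixel index,
# lambda_n(x) = sum_i a_(i + n*(deg+1)) * x**i, so the Horner loop can be
# verified against numpy.polyval as sketched here for a single order.
def _example_e2ds_wave_polyval(e2ds_fits, header_deg_ll, header_coeff_ll, order=0):
    deg = e2ds_fits[0].header[header_deg_ll]
    n_pix = e2ds_fits[0].header['NAXIS1']
    coeff = [e2ds_fits[0].header[header_coeff_ll + repr(i + order * (1 + deg))]
             for i in range(deg, -1, -1)]  # highest degree first for polyval
    return np.polyval(coeff, np.arange(0, n_pix, 1.))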
| 12,657 | 40.366013 | 135 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/get_data.py | from __future__ import print_function, division
import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
from SLOPpy.instruments.HARPN_DRSv3 import *
from SLOPpy.instruments.HARPS_DRSv3 import *
from SLOPpy.instruments.PEPSI_reduced import *
def get_calib_data(instrument, archive, file_rad, fiber='A', order_selection=None):
if instrument =='HARPS-N':
return HARPN_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
elif instrument =='HARPS':
return HARPS_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
elif instrument in ['PEPSI', 'PEPSI_red', 'PEPSI_blue']:
return PEPSI_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
else:
raise ValueError("Instrument not supported")
def get_input_data(instrument, archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
if instrument =='HARPS-N':
return HARPN_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
elif instrument =='HARPS':
return HARPS_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
elif instrument in ['PEPSI', 'PEPSI_red', 'PEPSI_blue']:
return PEPSI_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
else:
raise ValueError("Instrument not supported")
| 1,669 | 51.1875 | 150 | py |
BehaviorTree.CPP | BehaviorTree.CPP-master/convert_v3_to_v4.py | #!/usr/bin/env python3
"""Converts BehaviorTree.CPP V3 compatible tree xml files to V4 format.
"""
import argparse
import copy
import logging
import sys
import typing
import xml.etree.ElementTree as ET
logger = logging.getLogger(__name__)
def strtobool(val: typing.Union[str, int, bool]) -> bool:
"""``distutils.util.strtobool`` equivalent, since it will be deprecated.
origin: https://stackoverflow.com/a/715468/17094594
"""
return str(val).lower() in ("yes", "true", "t", "1")
# see ``XMLParser::Pimpl::createNodeFromXML`` for all underscores
SCRIPT_DIRECTIVES = [
"_successIf",
"_failureIf",
"_skipIf",
"_while",
"_onSuccess",
"_onFailure",
"_onHalted",
"_post",
]
def convert_single_node(node: ET.Element) -> None:
"""converts a leaf node from V3 to V4.
Args:
node (ET.Element): the node to convert.
"""
if node.tag == "root":
node.attrib["BTCPP_format"] = "4"
def convert_no_warn(node_type: str, v3_name: str, v4_name: str):
if node.tag == v3_name:
node.tag = v4_name
elif (
(node.tag == node_type)
and ("ID" in node.attrib)
and (node.attrib["ID"] == v3_name)
):
node.attrib["ID"] = v3_name
original_attrib = copy.copy(node.attrib)
convert_no_warn("Control", "SequenceStar", "SequenceWithMemory")
if node.tag == "SubTree":
logger.info(
"SubTree is now deprecated, auto converting to V4 SubTree"
" (formerly known as SubTreePlus)"
)
for key, val in original_attrib.items():
if key == "__shared_blackboard" and strtobool(val):
logger.warning(
"__shared_blackboard for subtree is deprecated"
", using _autoremap instead."
" Some behavior may change!"
)
node.attrib.pop(key)
node.attrib["_autoremap"] = "1"
elif key == "ID":
pass
else:
node.attrib[key] = f"{{{val}}}"
elif node.tag == "SubTreePlus":
node.tag = "SubTree"
for key, val in original_attrib.items():
if key == "__autoremap":
node.attrib.pop(key)
node.attrib["_autoremap"] = val
for key in node.attrib:
if key in SCRIPT_DIRECTIVES:
logging.error(
"node %s%s has port %s, this is reserved for scripts in V4."
" Please edit the node before converting to V4.",
node.tag,
f" with ID {node.attrib['ID']}" if "ID" in node.attrib else "",
key,
)
def convert_all_nodes(root_node: ET.Element) -> None:
"""recursively converts all nodes inside a root node.
Args:
root_node (ET.Element): the root node to start the conversion.
"""
def recurse(base_node: ET.Element) -> None:
convert_single_node(base_node)
for node in base_node:
recurse(node)
recurse(root_node)
def convert_stream(in_stream: typing.TextIO, out_stream: typing.TextIO):
"""Converts the behavior tree V3 xml from in_file to V4, and writes to out_file.
Args:
in_stream (typing.TextIO): The input file stream.
out_stream (typing.TextIO): The output file stream.
"""
class CommentedTreeBuilder(ET.TreeBuilder):
"""Class for preserving comments in xml
see: https://stackoverflow.com/a/34324359/17094594
"""
def comment(self, text):
self.start(ET.Comment, {})
self.data(text)
self.end(ET.Comment)
element_tree = ET.parse(in_stream, ET.XMLParser(target=CommentedTreeBuilder()))
convert_all_nodes(element_tree.getroot())
element_tree.write(out_stream, encoding="unicode", xml_declaration=True)
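# A minimal usage sketch for ``convert_stream`` (comments only; the file and
# variable names below are illustrative, not part of this module):
#
#   import io
#   with open("tree_v3.xml") as src, open("tree_v4.xml", "w") as dst:
#       convert_stream(src, dst)
#
#   # or, converting an in-memory V3 xml string:
#   out = io.StringIO()
#   convert_stream(io.StringIO(xml_v3_string), out)
#   xml_v4_string = out.getvalue()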
def main():
"""the main function when used in cli mode"""
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-i",
"--in_file",
type=argparse.FileType("r"),
help="The file to convert from (v3). If absent, reads xml string from stdin.",
)
parser.add_argument(
"-o",
"--out_file",
nargs="?",
type=argparse.FileType("w"),
default=sys.stdout,
help="The file to write the converted xml (V4)."
" Prints to stdout if not specified.",
)
class ArgsType(typing.NamedTuple):
"""Dummy class to provide type hinting to arguments parsed with argparse"""
in_file: typing.Optional[typing.TextIO]
out_file: typing.TextIO
args: ArgsType = parser.parse_args()
if args.in_file is None:
if not sys.stdin.isatty():
args.in_file = sys.stdin
else:
logging.error(
"The input file was not specified, nor a stdin stream was detected."
)
sys.exit(1)
convert_stream(args.in_file, args.out_file)
if __name__ == "__main__":
main() | 5,144 | 28.739884 | 86 | py |
A4000_coeffs | A4000_coeffs-master/__init__.py | import numpy as np
import scipy.interpolate as si
import numpy.ma as ma
import math
import os
module_dir = os.path.dirname(os.path.abspath(__file__))
class R_curves:
def __init__(self, bands):
self.A1_splines = {}
self.A2_splines = {}
for band in bands:
self.A1_splines[band] = []
self.A2_splines[band] = []
for R in np.arange(2.1, 5.6, 0.1):
with open("{0:s}/{1:d}_curves.out".format(module_dir, int(R*10)),
'r') as f:
first_line = f.readline().split()
columns_required_keys = []
columns_required_values = []
columns_required_keys.append("Teff")
columns_required_values.append(first_line.index("Teff")-1)
for band in bands:
columns_required_keys.append("A_{}_1".format(band))
columns_required_values.append(
first_line.index("A_{}_1".format(band))-1)
columns_required_keys.append("A_{}_2".format(band))
columns_required_values.append(
first_line.index("A_{}_2".format(band))-1)
R_file = ma.masked_invalid(np.genfromtxt("{0:s}/{1:d}_curves.out".
format(module_dir, int(R*10)),
usecols=columns_required_values)[::-1])
Teff_col = columns_required_keys.index("Teff")
Teff_data = np.log10(ma.filled(R_file[:, Teff_col],
fill_value=50000))
for band in bands:
u_col = columns_required_keys.index("A_{}_2".format(band))
v_col = columns_required_keys.index("A_{}_1".format(band))
self.A2_splines[band].append(si.UnivariateSpline(
ma.masked_array(Teff_data, mask=R_file[:, u_col].mask).
compressed(), R_file[:, u_col].compressed(), k=1))
self.A1_splines[band].append(si.UnivariateSpline(
ma.masked_array(Teff_data, mask=R_file[:, v_col].mask).
compressed(), R_file[:, v_col].compressed(), k=1))
class R_set:
def __init__(self, bands):
self.bands = bands
self.R_set = R_curves(self.bands)
def A_X(self, band, log_Teff, A_4000, R_5495=3.1):
R_index = int(np.rint((R_5495-2.1)/0.1))
return (self.R_set.A2_splines[band][R_index](log_Teff) * A_4000*A_4000
+ self.R_set.A1_splines[band][R_index](log_Teff) * A_4000)
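# A hypothetical usage sketch (comments only): the band names and numbers below
# are illustrative and assume the packaged ``*_curves.out`` tables provide those
# bands.
#
#   rset = R_set(["G", "V"])
#   a_g = rset.A_X("G", log_Teff=np.log10(5800.0), A_4000=1.0, R_5495=3.1)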
| 2,556 | 34.027397 | 78 | py |
EPSANet | EPSANet-master/main.py | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from loss import CrossEntropyLabelSmooth
import models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='epsanet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: epsanet50)')
parser.add_argument('--data', metavar='DIR',default='/path/dataset',
help='path to dataset')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
                    help='number of data loading workers (default: 10)')
parser.add_argument('--epochs', default=120, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
                    metavar='N', help='print frequency (default: 100)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', default=False, dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training.')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--action', default='', type=str,
help='other information.')
best_prec1 = 0
best_prec5 = 0
best_epoch = 0
def main():
    global args, best_prec1, best_prec5, best_epoch
args = parser.parse_args()
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
args.distributed = args.world_size > 1
if args.distributed:
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.gpu is not None:
model = model.cuda(args.gpu)
elif args.distributed:
model.cuda()
model = torch.nn.parallel.DistributedDataParallel(model)
else:
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
print(model)
# get the number of models parameters
print('Number of models parameters: {}'.format(
sum([p.data.nelement() for p in model.parameters()])))
# define loss function (criterion) and optimizer
criterion = CrossEntropyLabelSmooth(num_classes=1000, epsilon=0.1)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
m = time.time()
_, _ = validate(val_loader, model, criterion)
n = time.time()
print((n - m) / 3600)
return
directory = "runs/%s/" % (args.arch + '_' + args.action)
if not os.path.exists(directory):
os.makedirs(directory)
Loss_plot = {}
train_prec1_plot = {}
train_prec5_plot = {}
val_prec1_plot = {}
val_prec5_plot = {}
for epoch in range(args.start_epoch, args.epochs):
start_time = time.time()
if args.distributed:
train_sampler.set_epoch(epoch)
adjust_learning_rate(optimizer, epoch)
# train for one epoch
# train(train_loader, model, criterion, optimizer, epoch)
loss_temp, train_prec1_temp, train_prec5_temp = train(train_loader, model, criterion, optimizer, epoch)
Loss_plot[epoch] = loss_temp
train_prec1_plot[epoch] = train_prec1_temp
train_prec5_plot[epoch] = train_prec5_temp
# evaluate on validation set
# prec1 = validate(val_loader, model, criterion)
prec1, prec5 = validate(val_loader, model, criterion)
val_prec1_plot[epoch] = prec1
val_prec5_plot[epoch] = prec5
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
}, is_best)
if is_best:
best_epoch = epoch + 1
best_prec5 = prec5
print(' * BestPrec so far@1 {top1:.3f} @5 {top5:.3f} in epoch {best_epoch}'.format(top1=best_prec1,
top5=best_prec5,
best_epoch=best_epoch))
data_save(directory + 'Loss_plot.txt', Loss_plot)
data_save(directory + 'train_prec1.txt', train_prec1_plot)
data_save(directory + 'train_prec5.txt', train_prec5_plot)
data_save(directory + 'val_prec1.txt', val_prec1_plot)
data_save(directory + 'val_prec5.txt', val_prec5_plot)
end_time = time.time()
time_value = (end_time - start_time) / 3600
print("-" * 80)
print(time_value)
print("-" * 80)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
losses_batch = {}
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if args.gpu is not None:
input = input.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
directory = "runs/%s/" % (args.arch + '_' + args.action)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, directory + 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
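# Worked example of the schedule above with the default --lr 0.1:
#   epochs 0-29 -> lr = 0.1,  30-59 -> 0.01,  60-89 -> 0.001,  90-119 -> 0.0001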
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
            # reshape instead of view: the sliced tensor may be non-contiguous
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def data_save(root, file):
if not os.path.exists(root):
os.mknod(root)
file_temp = open(root, 'r')
lines = file_temp.readlines()
if not lines:
epoch = -1
else:
epoch = lines[-1][:lines[-1].index(' ')]
epoch = int(epoch)
file_temp.close()
file_temp = open(root, 'a')
for line in file:
if line > epoch:
file_temp.write(str(line) + " " + str(file[line]) + '\n')
file_temp.close()
if __name__ == '__main__':
main()
| 15,032 | 34.707838 | 114 | py |
EPSANet | EPSANet-master/loss.py | import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class CrossEntropyLabelSmooth(nn.Module):
"""Cross entropy loss with label smoothing regularizer.
Reference:
Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
Equation: y = (1 - epsilon) * y + epsilon / K.
Args:
num_classes (int): number of classes.
epsilon (float): weight.
"""
def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
super(CrossEntropyLabelSmooth, self).__init__()
self.num_classes = num_classes
self.epsilon = epsilon
self.use_gpu = use_gpu
self.logsoftmax = nn.LogSoftmax(dim=1)
def forward(self, inputs, targets):
"""
Args:
inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
targets: ground truth labels with shape (num_classes)
"""
log_probs = self.logsoftmax(inputs)
targets = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
if self.use_gpu: targets = targets.cuda()
targets = (1 - self.epsilon) * targets + self.epsilon / self.num_classes
loss = (- targets * log_probs).mean(0).sum()
return loss | 1,320 | 36.742857 | 91 | py |
EPSANet | EPSANet-master/models/SE_weight_module.py |
import torch.nn as nn
class SEWeightModule(nn.Module):
def __init__(self, channels, reduction=16):
super(SEWeightModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
out = self.avg_pool(x)
out = self.fc1(out)
out = self.relu(out)
out = self.fc2(out)
weight = self.sigmoid(out)
return weight | 651 | 30.047619 | 85 | py |
EPSANet | EPSANet-master/models/epsanet.py | import torch
import torch.nn as nn
import math
from .SE_weight_module import SEWeightModule
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
"""standard convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=False)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class PSAModule(nn.Module):
def __init__(self, inplans, planes, conv_kernels=[3, 5, 7, 9], stride=1, conv_groups=[1, 4, 8, 16]):
super(PSAModule, self).__init__()
self.conv_1 = conv(inplans, planes//4, kernel_size=conv_kernels[0], padding=conv_kernels[0]//2,
stride=stride, groups=conv_groups[0])
self.conv_2 = conv(inplans, planes//4, kernel_size=conv_kernels[1], padding=conv_kernels[1]//2,
stride=stride, groups=conv_groups[1])
self.conv_3 = conv(inplans, planes//4, kernel_size=conv_kernels[2], padding=conv_kernels[2]//2,
stride=stride, groups=conv_groups[2])
self.conv_4 = conv(inplans, planes//4, kernel_size=conv_kernels[3], padding=conv_kernels[3]//2,
stride=stride, groups=conv_groups[3])
self.se = SEWeightModule(planes // 4)
self.split_channel = planes // 4
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
batch_size = x.shape[0]
x1 = self.conv_1(x)
x2 = self.conv_2(x)
x3 = self.conv_3(x)
x4 = self.conv_4(x)
feats = torch.cat((x1, x2, x3, x4), dim=1)
feats = feats.view(batch_size, 4, self.split_channel, feats.shape[2], feats.shape[3])
x1_se = self.se(x1)
x2_se = self.se(x2)
x3_se = self.se(x3)
x4_se = self.se(x4)
x_se = torch.cat((x1_se, x2_se, x3_se, x4_se), dim=1)
attention_vectors = x_se.view(batch_size, 4, self.split_channel, 1, 1)
attention_vectors = self.softmax(attention_vectors)
feats_weight = feats * attention_vectors
for i in range(4):
x_se_weight_fp = feats_weight[:, i, :, :]
if i == 0:
out = x_se_weight_fp
else:
out = torch.cat((x_se_weight_fp, out), 1)
return out
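# Shape sketch for PSAModule (comments only; the batch and spatial sizes are
# illustrative): with inplans=planes=256, stride=1 and input x of shape
# [N, 256, 56, 56], each of the four grouped convolutions yields [N, 64, 56, 56];
# the SE weights have shape [N, 64, 1, 1] per branch, and the re-weighted branches
# are concatenated back into an output of shape [N, 256, 56, 56].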
class EPSABlock(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, conv_kernels=[3, 5, 7, 9],
conv_groups=[1, 4, 8, 16]):
super(EPSABlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, planes)
self.bn1 = norm_layer(planes)
self.conv2 = PSAModule(planes, planes, stride=stride, conv_kernels=conv_kernels, conv_groups=conv_groups)
self.bn2 = norm_layer(planes)
self.conv3 = conv1x1(planes, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class EPSANet(nn.Module):
def __init__(self,block, layers, num_classes=1000):
super(EPSANet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layers(block, 64, layers[0], stride=1)
self.layer2 = self._make_layers(block, 128, layers[1], stride=2)
self.layer3 = self._make_layers(block, 256, layers[2], stride=2)
self.layer4 = self._make_layers(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layers(self, block, planes, num_blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, num_blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def epsanet50():
model = EPSANet(EPSABlock, [3, 4, 6, 3], num_classes=1000)
return model
def epsanet101():
model = EPSANet(EPSABlock, [3, 4, 23, 3], num_classes=1000)
return model
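# A minimal usage sketch (comments only; shapes assume ImageNet-sized inputs):
#
#   model = epsanet50()
#   logits = model(torch.randn(1, 3, 224, 224))  # -> tensor of shape [1, 1000]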
| 6,122 | 35.664671 | 113 | py |
EPSANet | EPSANet-master/models/__init__.py | from .epsanet import *
| 23 | 11 | 22 | py |
trieste-develop | trieste-develop/setup.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from setuptools import find_packages, setup
with open("README.md", "r") as file:
long_description = file.read()
setup(
name="trieste",
version=Path("trieste/VERSION").read_text().strip(),
author="The Trieste contributors",
author_email="[email protected]",
description="A Bayesian optimization research toolbox built on TensorFlow",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/secondmind-labs/trieste",
packages=find_packages(include=("trieste*",)),
package_data={
"trieste": ["py.typed", "VERSION"],
},
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
python_requires="~=3.7",
install_requires=[
"absl-py",
"dill!=0.3.6",
"gpflow>=2.8.1",
"gpflux>=0.4.2",
"numpy",
"tensorflow>=2.5; platform_system!='Darwin' or platform_machine!='arm64'",
"tensorflow-macos>=2.5; platform_system=='Darwin' and platform_machine=='arm64'",
"tensorflow-probability>=0.13",
"greenlet>=1.1.0",
],
extras_require={
"plotting": ["seaborn", "plotly"],
"qhsri": ["pymoo", "cvxpy"],
},
)
| 1,939 | 33.642857 | 89 | py |
trieste-develop | trieste-develop/trieste/bayesian_optimizer.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the :class:`BayesianOptimizer` class, used to perform Bayesian optimization.
"""
from __future__ import annotations
import copy
import traceback
import warnings
from collections import Counter
from dataclasses import dataclass
from pathlib import Path
from typing import (
Any,
Callable,
ClassVar,
Dict,
Generic,
Mapping,
MutableMapping,
Optional,
TypeVar,
cast,
overload,
)
import absl
import dill
import numpy as np
import tensorflow as tf
from scipy.spatial.distance import pdist
from .acquisition.multi_objective import non_dominated
try:
import pandas as pd
import seaborn as sns
except ModuleNotFoundError:
pd = None
sns = None
from . import logging
from .acquisition.rule import TURBO, AcquisitionRule, EfficientGlobalOptimization
from .data import Dataset
from .models import SupportsCovarianceWithTopFidelity, TrainableProbabilisticModel
from .observer import OBJECTIVE, Observer
from .space import SearchSpace
from .types import State, Tag, TensorType
from .utils import Err, Ok, Result, Timer
StateType = TypeVar("StateType")
""" Unbound type variable. """
SearchSpaceType = TypeVar("SearchSpaceType", bound=SearchSpace)
""" Type variable bound to :class:`SearchSpace`. """
TrainableProbabilisticModelType = TypeVar(
"TrainableProbabilisticModelType", bound=TrainableProbabilisticModel, contravariant=True
)
""" Contravariant type variable bound to :class:`TrainableProbabilisticModel`. """
EarlyStopCallback = Callable[
[Mapping[Tag, Dataset], Mapping[Tag, TrainableProbabilisticModelType], Optional[StateType]],
bool,
]
""" Early stop callback type, generic in the model and state types. """
@dataclass(frozen=True)
class Record(Generic[StateType]):
"""Container to record the state of each step of the optimization process."""
datasets: Mapping[Tag, Dataset]
""" The known data from the observer. """
models: Mapping[Tag, TrainableProbabilisticModel]
""" The models over the :attr:`datasets`. """
acquisition_state: StateType | None
""" The acquisition state. """
@property
def dataset(self) -> Dataset:
"""The dataset when there is just one dataset."""
if len(self.datasets) == 1:
return next(iter(self.datasets.values()))
else:
raise ValueError(f"Expected a single dataset, found {len(self.datasets)}")
@property
def model(self) -> TrainableProbabilisticModel:
"""The model when there is just one dataset."""
if len(self.models) == 1:
return next(iter(self.models.values()))
else:
raise ValueError(f"Expected a single model, found {len(self.models)}")
def save(self, path: Path | str) -> FrozenRecord[StateType]:
"""Save the record to disk. Will overwrite any existing file at the same path."""
Path(path).parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as f:
dill.dump(self, f, dill.HIGHEST_PROTOCOL)
return FrozenRecord(Path(path))
@dataclass(frozen=True)
class FrozenRecord(Generic[StateType]):
"""
A Record container saved on disk.
Note that records are saved via pickling and are therefore neither portable nor secure.
Only open frozen records generated on the same system.
"""
path: Path
""" The path to the pickled Record. """
def load(self) -> Record[StateType]:
"""Load the record into memory."""
with open(self.path, "rb") as f:
return dill.load(f)
@property
def datasets(self) -> Mapping[Tag, Dataset]:
"""The known data from the observer."""
return self.load().datasets
@property
def models(self) -> Mapping[Tag, TrainableProbabilisticModel]:
"""The models over the :attr:`datasets`."""
return self.load().models
@property
def acquisition_state(self) -> StateType | None:
"""The acquisition state."""
return self.load().acquisition_state
@property
def dataset(self) -> Dataset:
"""The dataset when there is just one dataset."""
return self.load().dataset
@property
def model(self) -> TrainableProbabilisticModel:
"""The model when there is just one dataset."""
return self.load().model
# this should be a generic NamedTuple, but mypy doesn't support them
# https://github.com/python/mypy/issues/685
@dataclass(frozen=True)
class OptimizationResult(Generic[StateType]):
"""The final result, and the historical data of the optimization process."""
final_result: Result[Record[StateType]]
"""
The final result of the optimization process. This contains either a :class:`Record` or an
exception.
"""
history: list[Record[StateType] | FrozenRecord[StateType]]
r"""
The history of the :class:`Record`\ s from each step of the optimization process. These
:class:`Record`\ s are created at the *start* of each loop, and as such will never
include the :attr:`final_result`. The records may be either in memory or on disk.
"""
@staticmethod
def step_filename(step: int, num_steps: int) -> str:
"""Default filename for saved optimization steps."""
return f"step.{step:0{len(str(num_steps - 1))}d}.pickle"
STEP_GLOB: ClassVar[str] = "step.*.pickle"
RESULTS_FILENAME: ClassVar[str] = "results.pickle"
def astuple(
self,
) -> tuple[Result[Record[StateType]], list[Record[StateType] | FrozenRecord[StateType]]]:
"""
**Note:** In contrast to the standard library function :func:`dataclasses.astuple`, this
method does *not* deepcopy instance attributes.
:return: The :attr:`final_result` and :attr:`history` as a 2-tuple.
"""
return self.final_result, self.history
@property
def is_ok(self) -> bool:
"""`True` if the final result contains a :class:`Record`."""
return self.final_result.is_ok
@property
def is_err(self) -> bool:
"""`True` if the final result contains an exception."""
return self.final_result.is_err
def try_get_final_datasets(self) -> Mapping[Tag, Dataset]:
"""
Convenience method to attempt to get the final data.
:return: The final data, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
"""
return self.final_result.unwrap().datasets
def try_get_final_dataset(self) -> Dataset:
"""
Convenience method to attempt to get the final data for a single dataset run.
:return: The final data, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
:raise ValueError: If the optimization was not a single dataset run.
"""
datasets = self.try_get_final_datasets()
if len(datasets) == 1:
return next(iter(datasets.values()))
else:
raise ValueError(f"Expected a single dataset, found {len(datasets)}")
def try_get_optimal_point(self) -> tuple[TensorType, TensorType, TensorType]:
"""
Convenience method to attempt to get the optimal point for a single dataset,
single objective run.
:return: Tuple of the optimal query point, observation and its index.
"""
dataset = self.try_get_final_dataset()
if tf.rank(dataset.observations) != 2 or dataset.observations.shape[1] != 1:
raise ValueError("Expected a single objective")
if tf.reduce_any(
[
isinstance(model, SupportsCovarianceWithTopFidelity)
for model in self.try_get_final_models()
]
):
raise ValueError("Expected single fidelity models")
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
return dataset.query_points[arg_min_idx], dataset.observations[arg_min_idx], arg_min_idx
def try_get_final_models(self) -> Mapping[Tag, TrainableProbabilisticModel]:
"""
Convenience method to attempt to get the final models.
:return: The final models, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
"""
return self.final_result.unwrap().models
def try_get_final_model(self) -> TrainableProbabilisticModel:
"""
Convenience method to attempt to get the final model for a single model run.
:return: The final model, if the optimization completed successfully.
:raise Exception: If an exception occurred during optimization.
:raise ValueError: If the optimization was not a single model run.
"""
models = self.try_get_final_models()
if len(models) == 1:
return next(iter(models.values()))
else:
raise ValueError(f"Expected single model, found {len(models)}")
@property
def loaded_history(self) -> list[Record[StateType]]:
"""The history of the optimization process loaded into memory."""
return [record if isinstance(record, Record) else record.load() for record in self.history]
def save_result(self, path: Path | str) -> None:
"""Save the final result to disk. Will overwrite any existing file at the same path."""
Path(path).parent.mkdir(exist_ok=True, parents=True)
with open(path, "wb") as f:
dill.dump(self.final_result, f, dill.HIGHEST_PROTOCOL)
def save(self, base_path: Path | str) -> None:
"""Save the optimization result to disk. Will overwrite existing files at the same path."""
path = Path(base_path)
num_steps = len(self.history)
self.save_result(path / self.RESULTS_FILENAME)
for i, record in enumerate(self.loaded_history):
record_path = path / self.step_filename(i, num_steps)
record.save(record_path)
@classmethod
def from_path(cls, base_path: Path | str) -> OptimizationResult[StateType]:
"""Load a previously saved OptimizationResult."""
try:
with open(Path(base_path) / cls.RESULTS_FILENAME, "rb") as f:
result = dill.load(f)
except FileNotFoundError as e:
result = Err(e)
history: list[Record[StateType] | FrozenRecord[StateType]] = [
FrozenRecord(file) for file in sorted(Path(base_path).glob(cls.STEP_GLOB))
]
return cls(result, history)
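# A small round-trip sketch for OptimizationResult persistence (comments only;
# the directory name is illustrative):
#
#   result.save("bo_run")  # writes results.pickle plus one step.*.pickle per record
#   restored = OptimizationResult.from_path("bo_run")
#   best_x, best_y, _ = restored.try_get_optimal_point()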
class BayesianOptimizer(Generic[SearchSpaceType]):
"""
This class performs Bayesian optimization, the data-efficient optimization of an expensive
black-box *objective function* over some *search space*. Since we may not have access to the
objective function itself, we speak instead of an *observer* that observes it.
"""
def __init__(self, observer: Observer, search_space: SearchSpaceType):
"""
:param observer: The observer of the objective function.
:param search_space: The space over which to search. Must be a
:class:`~trieste.space.SearchSpace`.
"""
self._observer = observer
self._search_space = search_space
def __repr__(self) -> str:
""""""
return f"BayesianOptimizer({self._observer!r}, {self._search_space!r})"
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModel],
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModel, object]
] = None,
start_step: int = 0,
) -> OptimizationResult[None]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, object]
] = None,
start_step: int = 0,
# this should really be OptimizationResult[None], but tf.Tensor is untyped so the type
# checker can't differentiate between TensorType and State[S | None, TensorType], and
# the return types clash. object is close enough to None that object will do.
) -> OptimizationResult[object]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, object]
] = None,
start_step: int = 0,
) -> OptimizationResult[object]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None = None,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, StateType]
] = None,
start_step: int = 0,
) -> OptimizationResult[StateType]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None = None,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, StateType]
] = None,
start_step: int = 0,
) -> OptimizationResult[StateType]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Dataset,
models: TrainableProbabilisticModel,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModel, object]
] = None,
start_step: int = 0,
) -> OptimizationResult[None]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, object]
] = None,
start_step: int = 0,
) -> OptimizationResult[object]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, object]
] = None,
start_step: int = 0,
) -> OptimizationResult[object]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None = None,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, StateType]
] = None,
start_step: int = 0,
) -> OptimizationResult[StateType]:
...
@overload
def optimize(
self,
num_steps: int,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None = None,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, StateType]
] = None,
start_step: int = 0,
) -> OptimizationResult[StateType]:
...
def optimize(
self,
num_steps: int,
datasets: Mapping[Tag, Dataset] | Dataset,
models: Mapping[Tag, TrainableProbabilisticModelType] | TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
TensorType | State[StateType | None, TensorType],
SearchSpaceType,
TrainableProbabilisticModelType,
]
| None = None,
acquisition_state: StateType | None = None,
*,
track_state: bool = True,
track_path: Optional[Path | str] = None,
fit_model: bool = True,
fit_initial_model: bool = True,
early_stop_callback: Optional[
EarlyStopCallback[TrainableProbabilisticModelType, StateType]
] = None,
start_step: int = 0,
) -> OptimizationResult[StateType] | OptimizationResult[None]:
"""
Attempt to find the minimizer of the ``observer`` in the ``search_space`` (both specified at
:meth:`__init__`). This is the central implementation of the Bayesian optimization loop.
For each step in ``num_steps``, this method:
- Finds the next points with which to query the ``observer`` using the
``acquisition_rule``'s :meth:`acquire` method, passing it the ``search_space``,
``datasets``, ``models``, and current acquisition state.
- Queries the ``observer`` *once* at those points.
- Updates the datasets and models with the data from the ``observer``.
If any errors are raised during the optimization loop, this method will catch and return
them instead and print a message (using `absl` at level `absl.logging.ERROR`).
If ``track_state`` is enabled, then in addition to the final result, the history of the
optimization process will also be returned. If ``track_path`` is also set, then
the history and final result will be saved to disk rather than all being kept in memory.
**Type hints:**
- The ``acquisition_rule`` must use the same type of
:class:`~trieste.space.SearchSpace` as specified in :meth:`__init__`.
- The ``acquisition_state`` must be of the type expected by the ``acquisition_rule``.
Any acquisition state in the optimization result will also be of this type.
:param num_steps: The number of optimization steps to run.
:param datasets: The known observer query points and observations for each tag.
:param models: The model to use for each :class:`~trieste.data.Dataset` in
``datasets``.
:param acquisition_rule: The acquisition rule, which defines how to search for a new point
on each optimization step. Defaults to
:class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
arguments. Note that if the default is used, this implies the tags must be
`OBJECTIVE`, the search space can be any :class:`~trieste.space.SearchSpace`, and the
acquisition state returned in the :class:`OptimizationResult` will be `None`.
:param acquisition_state: The acquisition state to use on the first optimization step.
This argument allows the caller to restore the optimization process from an existing
:class:`Record`.
:param track_state: If `True`, this method saves the optimization state at the start of each
step. Models and acquisition state are copied using `copy.deepcopy`.
:param track_path: If set, the optimization state is saved to disk at this path,
rather than being copied in memory.
:param fit_model: If `False` then we never fit the model during BO (e.g. if we
are using a rule that doesn't rely on the models and don't want to waste computation).
:param fit_initial_model: If `False` then we assume that the initial models have
already been optimized on the datasets and so do not require optimization before
the first optimization step.
:param early_stop_callback: An optional callback that is evaluated with the current
datasets, models and optimization state before every optimization step. If this
returns `True` then the optimization loop is terminated early.
:param start_step: The step number to start with. This number is removed from ``num_steps``
and is useful for restarting previous computations.
:return: An :class:`OptimizationResult`. The :attr:`final_result` element contains either
the final optimization data, models and acquisition state, or, if an exception was
raised while executing the optimization loop, it contains the exception raised. In
either case, the :attr:`history` element is the history of the data, models and
acquisition state at the *start* of each optimization step (up to and including any step
that fails to complete). The history will never include the final optimization result.
:raise ValueError: If any of the following are true:
- ``num_steps`` is negative.
- the keys in ``datasets`` and ``models`` do not match
- ``datasets`` or ``models`` are empty
- the default `acquisition_rule` is used and the tags are not `OBJECTIVE`.
"""
if isinstance(datasets, Dataset):
datasets = {OBJECTIVE: datasets}
models = {OBJECTIVE: models} # type: ignore[dict-item]
# reassure the type checker that everything is tagged
datasets = cast(Dict[Tag, Dataset], datasets)
models = cast(Dict[Tag, TrainableProbabilisticModelType], models)
if num_steps < 0:
raise ValueError(f"num_steps must be at least 0, got {num_steps}")
if datasets.keys() != models.keys():
raise ValueError(
f"datasets and models should contain the same keys. Got {datasets.keys()} and"
f" {models.keys()} respectively."
)
if not datasets:
raise ValueError("dicts of datasets and models must be populated.")
if fit_model and isinstance(acquisition_rule, TURBO):
warnings.warn(
"""
Are you sure you want to keep fitting the global model even though you
are using TURBO which has only local models? This is a waste of computation.
Consider setting 'fit_model'='False'.
"""
)
if acquisition_rule is None:
if datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"Default acquisition rule EfficientGlobalOptimization requires tag"
f" {OBJECTIVE!r}, got keys {datasets.keys()}"
)
acquisition_rule = EfficientGlobalOptimization[
SearchSpaceType, TrainableProbabilisticModelType
]()
history: list[FrozenRecord[StateType] | Record[StateType]] = []
query_plot_dfs: dict[int, pd.DataFrame] = {}
observation_plot_dfs = observation_plot_init(datasets)
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=0):
write_summary_init(
self._observer,
self._search_space,
acquisition_rule,
datasets,
models,
num_steps,
)
for step in range(start_step + 1, num_steps + 1):
logging.set_step_number(step)
if early_stop_callback and early_stop_callback(datasets, models, acquisition_state):
tf.print("Optimization terminated early", output_stream=absl.logging.INFO)
break
try:
if track_state:
try:
if track_path is None:
datasets_copy = copy.deepcopy(datasets)
models_copy = copy.deepcopy(models)
acquisition_state_copy = copy.deepcopy(acquisition_state)
record = Record(datasets_copy, models_copy, acquisition_state_copy)
history.append(record)
else:
track_path = Path(track_path)
record = Record(datasets, models, acquisition_state)
file_name = OptimizationResult.step_filename(step, num_steps)
history.append(record.save(track_path / file_name))
except Exception as e:
raise NotImplementedError(
"Failed to save the optimization state. Some models do not support "
"deecopying or serialization and cannot be saved. "
"(This is particularly common for deep neural network models, though "
"some of the model wrappers accept a model closure as a workaround.) "
"For these models, the `track_state`` argument of the "
":meth:`~trieste.bayesian_optimizer.BayesianOptimizer.optimize` method "
"should be set to `False`. This means that only the final model "
"will be available."
) from e
if step == 1 and fit_model and fit_initial_model:
with Timer() as initial_model_fitting_timer:
for tag, model in models.items():
dataset = datasets[tag]
model.update(dataset)
model.optimize(dataset)
if summary_writer:
logging.set_step_number(0)
with summary_writer.as_default(step=0):
write_summary_initial_model_fit(
datasets, models, initial_model_fitting_timer
)
logging.set_step_number(step)
with Timer() as total_step_wallclock_timer:
with Timer() as query_point_generation_timer:
points_or_stateful = acquisition_rule.acquire(
self._search_space, models, datasets=datasets
)
if callable(points_or_stateful):
acquisition_state, query_points = points_or_stateful(acquisition_state)
else:
query_points = points_or_stateful
observer_output = self._observer(query_points)
tagged_output = (
observer_output
if isinstance(observer_output, Mapping)
else {OBJECTIVE: observer_output}
)
datasets = {tag: datasets[tag] + tagged_output[tag] for tag in tagged_output}
with Timer() as model_fitting_timer:
if fit_model:
for tag, model in models.items():
dataset = datasets[tag]
model.update(dataset)
model.optimize(dataset)
if summary_writer:
with summary_writer.as_default(step=step):
write_summary_observations(
datasets,
models,
tagged_output,
model_fitting_timer,
observation_plot_dfs,
)
write_summary_query_points(
datasets,
models,
self._search_space,
query_points,
query_point_generation_timer,
query_plot_dfs,
)
logging.scalar("wallclock/step", total_step_wallclock_timer.time)
except Exception as error: # pylint: disable=broad-except
tf.print(
f"\nOptimization failed at step {step}, encountered error with traceback:"
f"\n{traceback.format_exc()}"
f"\nTerminating optimization and returning the optimization history. You may "
f"be able to use the history to restart the process from a previous successful "
f"optimization step.\n",
output_stream=absl.logging.ERROR,
)
if isinstance(error, MemoryError):
tf.print(
"\nOne possible cause of memory errors is trying to evaluate acquisition "
"\nfunctions over large datasets, e.g. when initializing optimizers. "
"\nYou may be able to word around this by splitting up the evaluation "
"\nusing split_acquisition_function or split_acquisition_function_calls.",
output_stream=absl.logging.ERROR,
)
result = OptimizationResult(Err(error), history)
if track_state and track_path is not None:
result.save_result(Path(track_path) / OptimizationResult.RESULTS_FILENAME)
return result
tf.print("Optimization completed without errors", output_stream=absl.logging.INFO)
record = Record(datasets, models, acquisition_state)
result = OptimizationResult(Ok(record), history)
if track_state and track_path is not None:
result.save_result(Path(track_path) / OptimizationResult.RESULTS_FILENAME)
return result
def continue_optimization(
self,
num_steps: int,
optimization_result: OptimizationResult[StateType],
*args: Any,
**kwargs: Any,
) -> OptimizationResult[StateType]:
"""
Continue a previous optimization that either failed, was terminated early, or which
you simply wish to run for more steps.
:param num_steps: The total number of optimization steps, including any that have already
been run.
:param optimization_result: The optimization result from which to extract the datasets,
models and acquisition state. If the result was successful then the final result is
used; otherwise the last record in the history is used. The size of the history
is used to determine how many more steps are required.
:param args: Any more positional arguments to pass on to optimize.
:param kwargs: Any more keyword arguments to pass on to optimize.
:return: An :class:`OptimizationResult`. The history will contain both the history from
`optimization_result` (including the `final_result` if that was successful) and
any new records.
"""
history: list[Record[StateType] | FrozenRecord[StateType]] = []
history.extend(optimization_result.history)
if optimization_result.final_result.is_ok:
history.append(optimization_result.final_result.unwrap())
if not history:
raise ValueError("Cannot continue from empty optimization result")
result = self.optimize( # type: ignore[call-overload]
num_steps,
history[-1].datasets,
history[-1].models,
*args,
acquisition_state=history[-1].acquisition_state,
**kwargs,
start_step=len(history) - 1,
)
result.history[:1] = history
return result
def write_summary_init(
observer: Observer,
search_space: SearchSpace,
acquisition_rule: AcquisitionRule[
TensorType | State[StateType | None, TensorType],
SearchSpaceType,
TrainableProbabilisticModelType,
],
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModel],
num_steps: int,
) -> None:
"""Write initial BO loop TensorBoard summary."""
devices = tf.config.list_logical_devices()
logging.text(
"metadata",
f"Observer: `{observer}`\n\n"
f"Number of steps: `{num_steps}`\n\n"
f"Number of initial points: "
f"`{dict((k, len(v)) for k, v in datasets.items())}`\n\n"
f"Search Space: `{search_space}`\n\n"
f"Acquisition rule:\n\n {acquisition_rule}\n\n"
f"Models:\n\n {models}\n\n"
f"Available devices: `{dict(Counter(d.device_type for d in devices))}`",
)
def write_summary_initial_model_fit(
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModel],
model_fitting_timer: Timer,
) -> None:
"""Write TensorBoard summary for the model fitting to the initial data."""
for tag, model in models.items():
with tf.name_scope(f"{tag}.model"):
model.log(datasets[tag])
logging.scalar(
"wallclock/model_fitting",
model_fitting_timer.time,
)
def observation_plot_init(
datasets: Mapping[Tag, Dataset],
) -> dict[Tag, pd.DataFrame]:
"""Initialise query point pairplot dataframes with initial observations.
Also logs warnings if pairplot dependencies are not installed."""
observation_plot_dfs: dict[Tag, pd.DataFrame] = {}
if logging.get_tensorboard_writer():
seaborn_warning = False
if logging.include_summary("query_points/_pairplot") and not (pd and sns):
seaborn_warning = True
for tag in datasets:
if logging.include_summary(f"{tag}.observations/_pairplot"):
output_dim = tf.shape(datasets[tag].observations)[-1]
if output_dim >= 2:
if not (pd and sns):
seaborn_warning = True
else:
columns = [f"x{i}" for i in range(output_dim)]
observation_plot_dfs[tag] = pd.DataFrame(
datasets[tag].observations, columns=columns
).applymap(float)
observation_plot_dfs[tag]["observations"] = "initial"
if seaborn_warning:
tf.print(
"\nPairplot TensorBoard summaries require seaborn to be installed."
"\nOne way to do this is to install 'trieste[plotting]'.",
output_stream=absl.logging.INFO,
)
return observation_plot_dfs
def write_summary_observations(
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModel],
tagged_output: Mapping[Tag, TensorType],
model_fitting_timer: Timer,
observation_plot_dfs: MutableMapping[Tag, pd.DataFrame],
) -> None:
"""Write TensorBoard summary for the current step observations."""
for tag in datasets:
with tf.name_scope(f"{tag}.model"):
models[tag].log(datasets[tag])
output_dim = tf.shape(tagged_output[tag].observations)[-1]
for i in tf.range(output_dim):
suffix = f"[{i}]" if output_dim > 1 else ""
if tf.size(tagged_output[tag].observations) > 0:
logging.histogram(
f"{tag}.observation{suffix}/new_observations",
tagged_output[tag].observations[..., i],
)
logging.scalar(
f"{tag}.observation{suffix}/best_new_observation",
np.min(tagged_output[tag].observations[..., i]),
)
if tf.size(datasets[tag].observations) > 0:
logging.scalar(
f"{tag}.observation{suffix}/best_overall",
np.min(datasets[tag].observations[..., i]),
)
if logging.include_summary(f"{tag}.observations/_pairplot") and (
pd and sns and output_dim >= 2
):
columns = [f"x{i}" for i in range(output_dim)]
observation_new_df = pd.DataFrame(
tagged_output[tag].observations, columns=columns
).applymap(float)
observation_new_df["observations"] = "new"
observation_plot_df = pd.concat(
(observation_plot_dfs.get(tag), observation_new_df),
copy=False,
ignore_index=True,
)
hue_order = ["initial", "old", "new"]
palette = {"initial": "tab:green", "old": "tab:green", "new": "tab:orange"}
markers = {"initial": "X", "old": "o", "new": "o"}
# assume that any OBJECTIVE- or single-tagged multi-output dataset => multi-objective
# more complex scenarios (e.g. constrained data) need to be plotted by the acq function
if len(datasets) > 1 and tag != OBJECTIVE:
observation_plot_df["observation type"] = observation_plot_df.apply(
lambda x: x["observations"],
axis=1,
)
else:
observation_plot_df["pareto"] = non_dominated(datasets[tag].observations)[1]
observation_plot_df["observation type"] = observation_plot_df.apply(
lambda x: x["observations"] + x["pareto"] * " (non-dominated)",
axis=1,
)
hue_order += [hue + " (non-dominated)" for hue in hue_order]
palette.update(
{
"initial (non-dominated)": "tab:purple",
"old (non-dominated)": "tab:purple",
"new (non-dominated)": "tab:red",
}
)
markers.update(
{
"initial (non-dominated)": "X",
"old (non-dominated)": "o",
"new (non-dominated)": "o",
}
)
pairplot = sns.pairplot(
observation_plot_df,
vars=columns,
hue="observation type",
hue_order=hue_order,
palette=palette,
markers=markers,
)
logging.pyplot(f"{tag}.observations/_pairplot", pairplot.fig)
observation_plot_df.loc[
observation_plot_df["observations"] == "new", "observations"
] = "old"
observation_plot_dfs[tag] = observation_plot_df
logging.scalar(
"wallclock/model_fitting",
model_fitting_timer.time,
)
def write_summary_query_points(
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModel],
search_space: SearchSpace,
query_points: TensorType,
query_point_generation_timer: Timer,
query_plot_dfs: MutableMapping[int, pd.DataFrame],
) -> None:
"""Write TensorBoard summary for the current step query points."""
if tf.rank(query_points) == 2:
for i in tf.range(tf.shape(query_points)[1]):
if len(query_points) == 1:
logging.scalar(f"query_point/[{i}]", float(query_points[0, i]))
else:
logging.histogram(f"query_points/[{i}]", query_points[:, i])
logging.histogram("query_points/euclidean_distances", lambda: pdist(query_points))
if pd and sns and logging.include_summary("query_points/_pairplot"):
columns = [f"x{i}" for i in range(tf.shape(query_points)[1])]
qp_preds = query_points
for tag in datasets:
pred = models[tag].predict(query_points)[0]
qp_preds = tf.concat([qp_preds, tf.cast(pred, query_points.dtype)], 1)
output_dim = tf.shape(pred)[-1]
for i in range(output_dim):
columns.append(f"{tag}{i if (output_dim > 1) else ''} predicted")
query_new_df = pd.DataFrame(qp_preds, columns=columns).applymap(float)
query_new_df["query points"] = "new"
query_plot_df = pd.concat(
(query_plot_dfs.get(0), query_new_df), copy=False, ignore_index=True
)
pairplot = sns.pairplot(
query_plot_df, hue="query points", hue_order=["old", "new"], height=2.25
)
padding = 0.025 * (search_space.upper - search_space.lower)
upper_limits = search_space.upper + padding
lower_limits = search_space.lower - padding
for i in range(search_space.dimension):
pairplot.axes[0, i].set_xlim((lower_limits[i], upper_limits[i]))
pairplot.axes[i, 0].set_ylim((lower_limits[i], upper_limits[i]))
logging.pyplot("query_points/_pairplot", pairplot.fig)
query_plot_df["query points"] = "old"
query_plot_dfs[0] = query_plot_df
logging.scalar(
"wallclock/query_point_generation",
query_point_generation_timer.time,
)
def stop_at_minimum(
minimum: Optional[tf.Tensor] = None,
minimizers: Optional[tf.Tensor] = None,
minimum_atol: float = 0,
minimum_rtol: float = 0.05,
minimizers_atol: float = 0,
minimizers_rtol: float = 0.05,
objective_tag: Tag = OBJECTIVE,
minimum_step_number: Optional[int] = None,
) -> EarlyStopCallback[TrainableProbabilisticModel, object]:
"""
Generate an early stop function that terminates a BO loop when it gets close enough to the
given objective minimum and/or minimizer points.
:param minimum: Optional minimum to stop at, with shape [1].
:param minimizers: Optional minimizer points to stop at, with shape [N, D].
:param minimum_atol: Absolute tolerance for minimum.
:param minimum_rtol: Relative tolerance for minimum.
:param minimizers_atol: Absolute tolerance for minimizer point.
:param minimizers_rtol: Relative tolerance for minimizer point.
:param objective_tag: The tag for the objective data.
    :param minimum_step_number: The earliest step number at which the optimization may be stopped.
:return: An early stop function that terminates if we get close enough to both the minimum
and any of the minimizer points.
"""
def early_stop_callback(
datasets: Mapping[Tag, Dataset],
_models: Mapping[Tag, TrainableProbabilisticModel],
_acquisition_state: object,
) -> bool:
if minimum_step_number is not None and logging.get_step_number() < minimum_step_number:
return False
dataset = datasets[objective_tag]
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
if minimum is not None:
best_y = dataset.observations[arg_min_idx]
close_y = np.isclose(best_y, minimum, atol=minimum_atol, rtol=minimum_rtol)
if not tf.reduce_all(close_y):
return False
if minimizers is not None:
best_x = dataset.query_points[arg_min_idx]
close_x = np.isclose(best_x, minimizers, atol=minimizers_atol, rtol=minimizers_rtol)
if not tf.reduce_any(tf.reduce_all(close_x, axis=-1), axis=0):
return False
return True
return early_stop_callback
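# Illustrative sketch (not part of the library API): building an early stop callback that
# halts once the best observation is within 1% of a known minimum of 0.0 attained at
# (0.5, 0.5), but only after at least 5 steps. The values are arbitrary, and how the
# callback is wired into an optimization loop depends on the optimizer entry point in use;
# the keyword shown in the trailing comment is an assumption, not a documented argument.
def _example_stop_at_known_minimum() -> EarlyStopCallback[TrainableProbabilisticModel, object]:
    callback = stop_at_minimum(
        minimum=tf.constant([0.0], dtype=tf.float64),
        minimizers=tf.constant([[0.5, 0.5]], dtype=tf.float64),
        minimum_rtol=0.01,
        minimum_step_number=5,
    )
    # e.g. BayesianOptimizer(observer, search_space).optimize(
    #     num_steps, data, model, early_stop_callback=callback)  # assumed keyword name
    return callback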
| 46,324 | 40.39857 | 100 | py |
trieste-develop | trieste-develop/trieste/ask_tell_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the Ask/Tell API for users of Trieste who would like to
perform Bayesian Optimization with external control of the optimization loop.
"""
from __future__ import annotations
from copy import deepcopy
from typing import Dict, Generic, Mapping, TypeVar, cast, overload
try:
import pandas as pd
except ModuleNotFoundError:
pd = None
import warnings
from . import logging
from .acquisition.rule import TURBO, AcquisitionRule, EfficientGlobalOptimization
from .bayesian_optimizer import (
FrozenRecord,
OptimizationResult,
Record,
observation_plot_init,
write_summary_initial_model_fit,
write_summary_observations,
write_summary_query_points,
)
from .data import Dataset
from .models import TrainableProbabilisticModel
from .observer import OBJECTIVE
from .space import SearchSpace
from .types import State, Tag, TensorType
from .utils import Ok, Timer
StateType = TypeVar("StateType")
""" Unbound type variable. """
SearchSpaceType = TypeVar("SearchSpaceType", bound=SearchSpace)
""" Type variable bound to :class:`SearchSpace`. """
TrainableProbabilisticModelType = TypeVar(
"TrainableProbabilisticModelType", bound=TrainableProbabilisticModel, contravariant=True
)
""" Contravariant type variable bound to :class:`TrainableProbabilisticModel`. """
class AskTellOptimizer(Generic[SearchSpaceType, TrainableProbabilisticModelType]):
"""
This class provides Ask/Tell optimization interface. It is designed for those use cases
when control of the optimization loop by Trieste is impossible or not desirable.
For more details about the Bayesian Optimization routine, refer to :class:`BayesianOptimizer`.
"""
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
*,
fit_model: bool = True,
):
...
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
fit_model: bool = True,
):
...
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Mapping[Tag, Dataset],
models: Mapping[Tag, TrainableProbabilisticModelType],
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None,
*,
fit_model: bool = True,
):
...
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Dataset,
models: TrainableProbabilisticModelType,
*,
fit_model: bool = True,
):
...
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
TensorType, SearchSpaceType, TrainableProbabilisticModelType
],
*,
fit_model: bool = True,
):
...
@overload
def __init__(
self,
search_space: SearchSpaceType,
datasets: Dataset,
models: TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
],
acquisition_state: StateType | None = None,
*,
fit_model: bool = True,
):
...
def __init__(
self,
search_space: SearchSpaceType,
datasets: Mapping[Tag, Dataset] | Dataset,
models: Mapping[Tag, TrainableProbabilisticModelType] | TrainableProbabilisticModelType,
acquisition_rule: AcquisitionRule[
TensorType | State[StateType | None, TensorType],
SearchSpaceType,
TrainableProbabilisticModelType,
]
| None = None,
acquisition_state: StateType | None = None,
*,
fit_model: bool = True,
):
"""
:param search_space: The space over which to search for the next query point.
:param datasets: Already observed input-output pairs for each tag.
:param models: The model to use for each :class:`~trieste.data.Dataset` in
``datasets``.
:param acquisition_rule: The acquisition rule, which defines how to search for a new point
on each optimization step. Defaults to
:class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
arguments. Note that if the default is used, this implies the tags must be
`OBJECTIVE` and the search space can be any :class:`~trieste.space.SearchSpace`.
:param acquisition_state: The optional acquisition state for stateful acquisitions.
:param fit_model: If `True` (default), models passed in will be optimized on the given data.
If `False`, the models are assumed to be optimized already.
:raise ValueError: If any of the following are true:
- the keys in ``datasets`` and ``models`` do not match
- ``datasets`` or ``models`` are empty
- default acquisition is used but incompatible with other inputs
"""
self._search_space = search_space
self._acquisition_state = acquisition_state
if not datasets or not models:
raise ValueError("dicts of datasets and models must be populated.")
if isinstance(datasets, Dataset):
datasets = {OBJECTIVE: datasets}
models = {OBJECTIVE: models} # type: ignore[dict-item]
# reassure the type checker that everything is tagged
datasets = cast(Dict[Tag, Dataset], datasets)
models = cast(Dict[Tag, TrainableProbabilisticModelType], models)
if datasets.keys() != models.keys():
raise ValueError(
f"datasets and models should contain the same keys. Got {datasets.keys()} and"
f" {models.keys()} respectively."
)
self._datasets = datasets
self._models = models
self._query_plot_dfs: dict[int, pd.DataFrame] = {}
self._observation_plot_dfs = observation_plot_init(self._datasets)
if acquisition_rule is None:
if self._datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"Default acquisition rule EfficientGlobalOptimization requires tag"
f" {OBJECTIVE!r}, got keys {self._datasets.keys()}"
)
self._acquisition_rule = cast(
AcquisitionRule[TensorType, SearchSpaceType, TrainableProbabilisticModelType],
EfficientGlobalOptimization(),
)
else:
self._acquisition_rule = acquisition_rule
        if fit_model and isinstance(acquisition_rule, TURBO):
warnings.warn(
"""
Are you sure you want to keep fitting the global model even though you
are using TURBO which uses local models? This is a waste of computation.
"""
)
if fit_model:
with Timer() as initial_model_fitting_timer:
for tag, model in self._models.items():
dataset = datasets[tag]
model.update(dataset)
model.optimize(dataset)
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
write_summary_initial_model_fit(
self._datasets, self._models, initial_model_fitting_timer
)
def __repr__(self) -> str:
"""Print-friendly string representation"""
return f"""AskTellOptimizer({self._search_space!r}, {self._datasets!r},
{self._models!r}, {self._acquisition_rule!r}), "
{self._acquisition_state!r}"""
@property
def datasets(self) -> Mapping[Tag, Dataset]:
"""The current datasets."""
return self._datasets
@property
def dataset(self) -> Dataset:
"""The current dataset when there is just one dataset."""
if len(self.datasets) == 1:
return next(iter(self.datasets.values()))
else:
raise ValueError(f"Expected a single dataset, found {len(self.datasets)}")
@property
def models(self) -> Mapping[Tag, TrainableProbabilisticModelType]:
"""The current models."""
return self._models
@models.setter
def models(self, models: Mapping[Tag, TrainableProbabilisticModelType]) -> None:
"""Update the current models."""
if models.keys() != self.models.keys():
raise ValueError(
f"New models contain incorrect keys. Expected {self.models.keys()}, "
f"received {models.keys()}."
)
self._models = dict(models)
@property
def model(self) -> TrainableProbabilisticModel:
"""The current model when there is just one model."""
if len(self.models) == 1:
return next(iter(self.models.values()))
else:
raise ValueError(f"Expected a single model, found {len(self.models)}")
@model.setter
def model(self, model: TrainableProbabilisticModelType) -> None:
"""Update the current model, using the OBJECTIVE tag."""
if len(self.models) != 1:
raise ValueError(f"Expected a single model, found {len(self.models)}")
elif self.models.keys() != {OBJECTIVE}:
raise ValueError(
f"Expected a single model tagged OBJECTIVE, found {self.models.keys()}. "
"To update this, pass in a dictionary to the models property instead."
)
self._models = {OBJECTIVE: model}
@property
def acquisition_state(self) -> StateType | None:
"""The current acquisition state."""
return self._acquisition_state
@classmethod
def from_record(
cls,
record: Record[StateType] | FrozenRecord[StateType],
search_space: SearchSpaceType,
acquisition_rule: AcquisitionRule[
TensorType | State[StateType | None, TensorType],
SearchSpaceType,
TrainableProbabilisticModelType,
]
| None = None,
) -> AskTellOptimizer[SearchSpaceType, TrainableProbabilisticModelType]:
"""Creates new :class:`~AskTellOptimizer` instance from provided optimization state.
Model training isn't triggered upon creation of the instance.
:param record: Optimization state record.
:param search_space: The space over which to search for the next query point.
:param acquisition_rule: The acquisition rule, which defines how to search for a new point
on each optimization step. Defaults to
:class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
arguments.
:return: New instance of :class:`~AskTellOptimizer`.
"""
# we are recovering previously saved optimization state
# so the model was already trained
# thus there is no need to train it again
# type ignore below is due to the fact that overloads don't allow
# optional acquisition_rule along with acquisition_state
return cls(
search_space,
record.datasets,
cast(Mapping[Tag, TrainableProbabilisticModelType], record.models),
acquisition_rule=acquisition_rule, # type: ignore
acquisition_state=record.acquisition_state,
fit_model=False,
)
def to_record(self, copy: bool = True) -> Record[StateType]:
"""Collects the current state of the optimization, which includes datasets,
models and acquisition state (if applicable).
        :param copy: Whether to return a copy of the current state or the original. Copying
            is not supported for all model types. Note that if the original is returned,
            continuing the optimization will modify it.
:return: An optimization state record.
"""
try:
datasets_copy = deepcopy(self._datasets) if copy else self._datasets
models_copy = deepcopy(self._models) if copy else self._models
state_copy = deepcopy(self._acquisition_state) if copy else self._acquisition_state
except Exception as e:
raise NotImplementedError(
"Failed to copy the optimization state. Some models do not support "
"deecopying (this is particularly common for deep neural network models). "
"For these models, the `copy` argument of the `to_record` or `to_result` "
"methods should be set to `False`. This means that the returned state may be "
"modified by subsequent optimization."
) from e
return Record(datasets=datasets_copy, models=models_copy, acquisition_state=state_copy)
def to_result(self, copy: bool = True) -> OptimizationResult[StateType]:
"""Converts current state of the optimization
into a :class:`~trieste.data.OptimizationResult` object.
:param copy: Whether to return a copy of the current state or the original. Copying
is not supported for all model types. However, continuing the optimization will
modify the original state.
:return: A :class:`~trieste.data.OptimizationResult` object.
"""
record: Record[StateType] = self.to_record(copy=copy)
return OptimizationResult(Ok(record), [])
def ask(self) -> TensorType:
"""Suggests a point (or points in batch mode) to observe by optimizing the acquisition
function. If the acquisition is stateful, its state is saved.
:return: A :class:`TensorType` instance representing suggested point(s).
"""
# This trick deserves a comment to explain what's going on
# acquisition_rule.acquire can return different things:
# - when acquisition has no state attached, it returns just points
# - when acquisition has state, it returns a Callable
# which, when called, returns state and points
# so code below is needed to cater for both cases
with Timer() as query_point_generation_timer:
points_or_stateful = self._acquisition_rule.acquire(
self._search_space, self._models, datasets=self._datasets
)
if callable(points_or_stateful):
self._acquisition_state, query_points = points_or_stateful(self._acquisition_state)
else:
query_points = points_or_stateful
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
write_summary_query_points(
self._datasets,
self._models,
self._search_space,
query_points,
query_point_generation_timer,
self._query_plot_dfs,
)
return query_points
def tell(self, new_data: Mapping[Tag, Dataset] | Dataset) -> None:
"""Updates optimizer state with new data.
:param new_data: New observed data.
:raise ValueError: If keys in ``new_data`` do not match those in already built dataset.
"""
if isinstance(new_data, Dataset):
new_data = {OBJECTIVE: new_data}
if self._datasets.keys() != new_data.keys():
raise ValueError(
f"new_data keys {new_data.keys()} doesn't "
f"match dataset keys {self._datasets.keys()}"
)
for tag in self._datasets:
self._datasets[tag] += new_data[tag]
with Timer() as model_fitting_timer:
for tag, model in self._models.items():
dataset = self._datasets[tag]
model.update(dataset)
model.optimize(dataset)
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
write_summary_observations(
self._datasets,
self._models,
new_data,
model_fitting_timer,
self._observation_plot_dfs,
)
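# Illustrative sketch (not part of the library): a minimal Ask/Tell loop driven from user
# code. The observer is any callable mapping suggested query points to a Dataset of
# observations; how the points are evaluated (simulator, lab experiment, ...) is external
# to Trieste, and all arguments here are placeholders supplied by the caller.
def _example_ask_tell_loop(
    search_space: SearchSpace,
    initial_data: Dataset,
    model: TrainableProbabilisticModel,
    observer,  # a callable mapping query points (TensorType) to a Dataset of observations
    num_steps: int = 10,
) -> Dataset:
    ask_tell = AskTellOptimizer(search_space, initial_data, model)
    for _ in range(num_steps):
        query_points = ask_tell.ask()  # suggest the next point(s) to evaluate
        new_data = observer(query_points)  # evaluate them outside of Trieste
        ask_tell.tell(new_data)  # feed the observations back in
    # The loop state can also be snapshotted and later resumed:
    #     record = ask_tell.to_record(copy=False)
    #     resumed = AskTellOptimizer.from_record(record, search_space)
    return ask_tell.dataset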
| 17,459 | 37.886414 | 100 | py |
trieste-develop | trieste-develop/trieste/logging.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains logging utilities. """
from __future__ import annotations
import io
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, TypeVar, Union
import absl
import tensorflow as tf
from tensorflow.python.eager import context
from trieste.types import TensorType
if TYPE_CHECKING:
import matplotlib
SummaryFilter = Callable[[str], bool]
def default_summary_filter(name: str) -> bool:
"""Default summary filter: omits any names that start with _."""
return not (name.startswith("_") or "/_" in name)
_TENSORBOARD_WRITER: Optional[tf.summary.SummaryWriter] = None
_STEP_NUMBER: int = 0
_SUMMARY_FILTER: SummaryFilter = default_summary_filter
def set_tensorboard_writer(summary_writer: Optional[tf.summary.SummaryWriter]) -> None:
"""
Set a :class:`~tf.summary.SummaryWriter` instance to use for logging
to TensorBoard, or `None` to disable.
:param summary_writer: optional summary writer instance.
"""
global _TENSORBOARD_WRITER
_TENSORBOARD_WRITER = summary_writer
def get_tensorboard_writer() -> Optional[tf.summary.SummaryWriter]:
"""
Returns a :class:`~tf.summary.SummaryWriter` instance to use for logging
to TensorBoard, or `None`.
:return: optional summary writer instance.
"""
return _TENSORBOARD_WRITER
@contextmanager
def tensorboard_writer(summary_writer: Optional[tf.summary.SummaryWriter]) -> Iterator[None]:
"""
A context manager for setting or overriding a TensorBoard summary writer inside a code block.
:param summary_writer: optional summary writer instance.
"""
old_writer = get_tensorboard_writer()
set_tensorboard_writer(summary_writer)
yield
set_tensorboard_writer(old_writer)
def set_step_number(step_number: int) -> None:
"""
Set an optimization step number to use for logging purposes.
:param step_number: current step number
:raise ValueError: if step_number < 0
"""
    global _STEP_NUMBER
    if step_number < 0:
        raise ValueError(f"step_number must be non-negative, got {step_number}")
    _STEP_NUMBER = step_number
def get_step_number() -> int:
"""
Get the optimization step number used for logging purposes.
:return: current step number.
"""
return _STEP_NUMBER
@contextmanager
def step_number(step_number: int) -> Iterator[None]:
"""
A context manager for setting or overriding the optimization step number inside a code block.
:param step_number: current step number
"""
old_step_number = get_step_number()
set_step_number(step_number)
yield
set_step_number(old_step_number)
def set_summary_filter(summary_filter: SummaryFilter) -> None:
"""
Set a filter on summary names. The default is to only omit names that start with _.
:param summary_filter: new summary filter
"""
global _SUMMARY_FILTER
_SUMMARY_FILTER = summary_filter
def get_summary_filter() -> SummaryFilter:
"""
Get the current filter on summary names. The default is to only omit names that start with _.
:return: current summary filter.
"""
return _SUMMARY_FILTER
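# Illustrative sketch (not part of the library): a stricter filter that keeps the default
# behaviour (skipping names that start with "_") and additionally only logs summaries whose
# names contain "wallclock". The substring is arbitrary; activate the filter with
# set_summary_filter(_example_wallclock_only_filter).
def _example_wallclock_only_filter(name: str) -> bool:
    return default_summary_filter(name) and "wallclock" in name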
def get_current_name_scope() -> str:
"""Returns the full name scope. Copied from TF 2.5."""
ctx = context.context()
if ctx.executing_eagerly():
return ctx.scope_name.rstrip("/")
else:
return tf.compat.v1.get_default_graph().get_name_scope()
def include_summary(name: str) -> bool:
"""
Whether a summary name should be included.
    :param name: full summary name (including name scopes)
:return: whether the summary should be included.
"""
full_name = get_current_name_scope() + "/" + name
return _SUMMARY_FILTER(full_name)
T = TypeVar("T")
def evaluate_data(data: T | Callable[[], T]) -> T:
"""Return the passed in data, evaluating it if it's inside a closure."""
return data() if callable(data) else data
def histogram(name: str, data: TensorType | Callable[[], TensorType], **kwargs: Any) -> bool:
"""
Wrapper for tf.summary.histogram that first filters out unwanted summaries by name.
Accepts either data or closures that only get evaluated when logged.
"""
if include_summary(name):
try:
return tf.summary.histogram(name, evaluate_data(data), **kwargs)
except Exception as e:
tf.print(
f"Failed to write tensorboard histogram summary '{name}':\n\n{e}",
output_stream=absl.logging.INFO,
)
return False
def scalar(name: str, data: float | Callable[[], float], **kwargs: Any) -> bool:
"""
Wrapper for tf.summary.scalar that first filters out unwanted summaries by name.
Accepts either data or closures that only get evaluated when logged.
"""
if include_summary(name):
try:
return tf.summary.scalar(name, evaluate_data(data), **kwargs)
except Exception as e:
tf.print(
f"Failed to write tensorboard scalar summary '{name}':\n\n{e}",
output_stream=absl.logging.INFO,
)
return False
def text(name: str, data: str | Callable[[], str], **kwargs: Any) -> bool:
"""
Wrapper for tf.summary.text that first filters out unwanted summaries by name.
Accepts either data or closures that only get evaluated when logged.
"""
if include_summary(name):
try:
return tf.summary.text(name, evaluate_data(data), **kwargs)
except Exception as e:
tf.print(
f"Failed to write tensorboard text summary '{name}':\n\n{e}",
output_stream=absl.logging.INFO,
)
return False
def pyplot(
name: str, figure: Union["matplotlib.figure.Figure", Callable[[], "matplotlib.figure.Figure"]]
) -> bool:
"""
Utility function for passing a matplotlib figure to tf.summary.image.
Accepts either data or closures that only get evaluated when logged.
"""
if include_summary(name):
try:
figure = evaluate_data(figure)
with io.BytesIO() as buffer:
figure.savefig(buffer, dpi=150.0, format="png")
buffer.seek(0)
image = tf.image.decode_png(buffer.getvalue(), channels=4)
image = tf.expand_dims(image, 0)
return tf.summary.image(name, image)
except Exception as e:
tf.print(
f"Failed to write tensorboard image summary '{name}':\n\n{e}",
output_stream=absl.logging.INFO,
)
return False
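# Illustrative sketch (not part of the library): wiring these helpers together from user
# code. The log directory is arbitrary. Trieste's optimizers fetch the writer back via
# get_tensorboard_writer() and open it as the default TensorFlow writer before emitting
# summaries; the same pattern is used here.
def _example_logging_setup(logdir: str = "logs/example") -> None:
    writer = tf.summary.create_file_writer(logdir)
    set_tensorboard_writer(writer)
    set_step_number(0)
    active_writer = get_tensorboard_writer()
    if active_writer:
        with active_writer.as_default(step=get_step_number()):
            scalar("example/loss", 1.23)  # filtered by name, then written via tf.summary
            text("example/status", "initialised")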
| 7,102 | 30.153509 | 98 | py |
trieste-develop | trieste-develop/trieste/space.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains implementations of various types of search space. """
from __future__ import annotations
import operator
from abc import ABC, abstractmethod
from functools import reduce
from typing import Callable, Optional, Sequence, Tuple, TypeVar, Union, overload
import numpy as np
import scipy.optimize as spo
import tensorflow as tf
import tensorflow_probability as tfp
from .types import TensorType
SearchSpaceType = TypeVar("SearchSpaceType", bound="SearchSpace")
""" A type variable bound to :class:`SearchSpace`. """
DEFAULT_DTYPE: tf.DType = tf.float64
""" Default dtype to use when none is provided. """
class SampleTimeoutError(Exception):
"""Raised when sampling from a search space has timed out."""
class NonlinearConstraint(spo.NonlinearConstraint): # type: ignore[misc]
"""
A wrapper class for nonlinear constraints on variables. The constraints expression is of the
form::
lb <= fun(x) <= ub
:param fun: The function defining the nonlinear constraints; with input shape [..., D] and
output shape [..., 1], returning a scalar value for each input point.
:param lb: The lower bound of the constraint. Should be a scalar or of shape [1].
:param ub: The upper bound of the constraint. Should be a scalar or of shape [1].
:param keep_feasible: Keep the constraints feasible throughout optimization iterations if this
is `True`.
"""
def __init__(
self,
fun: Callable[[TensorType], TensorType],
lb: Sequence[float] | TensorType,
ub: Sequence[float] | TensorType,
keep_feasible: bool = False,
):
# Implement caching to avoid calling the constraint function multiple times to get value
# and gradient.
def _constraint_value_and_gradient(x: TensorType) -> Tuple[TensorType, TensorType]:
val, grad = tfp.math.value_and_gradient(fun, x)
tf.debugging.assert_shapes(
[(val, [..., 1])],
message="Nonlinear constraint only supports single output function.",
)
return tf.cast(val, dtype=x.dtype), tf.cast(grad, dtype=x.dtype)
cache_x: TensorType = tf.constant([])
cache_f: TensorType = tf.constant([])
cache_df_dx: TensorType = tf.constant([])
def val_fun(x: TensorType) -> TensorType:
nonlocal cache_x, cache_f, cache_df_dx
if not np.array_equal(x, cache_x):
cache_f, cache_df_dx = _constraint_value_and_gradient(x)
cache_x = x
return cache_f
def jac_fun(x: TensorType) -> TensorType:
nonlocal cache_x, cache_f, cache_df_dx
if not np.array_equal(x, cache_x):
cache_f, cache_df_dx = _constraint_value_and_gradient(x)
cache_x = x
return cache_df_dx
self._orig_fun = fun # Used for constraints equality check.
super().__init__(val_fun, lb, ub, jac=jac_fun, keep_feasible=keep_feasible)
def residual(self, points: TensorType) -> TensorType:
"""
Calculate the residuals between the constraint function and its lower/upper limits.
:param points: The points to calculate the residuals for, with shape [..., D].
:return: A tensor containing the lower and upper residual values with shape [..., 2].
"""
tf.debugging.assert_rank_at_least(points, 2)
non_d_axes = np.ones_like(points.shape)[:-1] # Avoid adding axes shape to static graph.
lb = tf.cast(tf.reshape(self.lb, (*non_d_axes, -1)), dtype=points.dtype)
ub = tf.cast(tf.reshape(self.ub, (*non_d_axes, -1)), dtype=points.dtype)
fval = self.fun(points)
        fval = tf.reshape(fval, (*points.shape[:-1], -1))  # At least 2D.
fval = tf.cast(fval, dtype=points.dtype)
values = [fval - lb, ub - fval]
values = tf.concat(values, axis=-1)
return values
def __repr__(self) -> str:
""""""
return f"""
        NonlinearConstraint({self.fun!r}, {self.lb!r}, {self.ub!r}, {self.keep_feasible!r})
"""
def __eq__(self, other: object) -> bool:
"""
:param other: A constraint.
:return: Whether the constraint is identical to this one.
"""
if not isinstance(other, NonlinearConstraint):
return False
return bool(
self._orig_fun == other._orig_fun
and tf.reduce_all(self.lb == other.lb)
and tf.reduce_all(self.ub == other.ub)
and self.keep_feasible == other.keep_feasible
)
class LinearConstraint(spo.LinearConstraint): # type: ignore[misc]
"""
A wrapper class for linear constraints on variables. The constraints expression is of the form::
lb <= A @ x <= ub
:param A: The matrix defining the linear constraints with shape [M, D], where M is the
number of constraints.
:param lb: The lower bound of the constraint. Should be a scalar or of shape [M].
:param ub: The upper bound of the constraint. Should be a scalar or of shape [M].
:param keep_feasible: Keep the constraints feasible throughout optimization iterations if this
is `True`.
"""
def __init__(
self,
A: TensorType,
lb: Sequence[float] | TensorType,
ub: Sequence[float] | TensorType,
keep_feasible: bool = False,
):
super().__init__(A, lb, ub, keep_feasible=keep_feasible)
def residual(self, points: TensorType) -> TensorType:
"""
Calculate the residuals between the constraint function and its lower/upper limits.
:param points: The points to calculate the residuals for, with shape [..., D].
:return: A tensor containing the lower and upper residual values with shape [..., M*2].
"""
tf.debugging.assert_rank_at_least(points, 2)
non_d_axes = np.ones_like(points.shape)[:-1] # Avoid adding axes shape to static graph.
lb = tf.cast(tf.reshape(self.lb, (*non_d_axes, -1)), dtype=points.dtype)
ub = tf.cast(tf.reshape(self.ub, (*non_d_axes, -1)), dtype=points.dtype)
A = tf.cast(self.A, dtype=points.dtype)
fval = tf.linalg.matmul(points, A, transpose_b=True)
        fval = tf.reshape(fval, (*points.shape[:-1], -1))  # At least 2D.
values = [fval - lb, ub - fval]
values = tf.concat(values, axis=-1)
return values
def __repr__(self) -> str:
""""""
return f"""
        LinearConstraint({self.A!r}, {self.lb!r}, {self.ub!r}, {self.keep_feasible!r})
"""
def __eq__(self, other: object) -> bool:
"""
:param other: A constraint.
:return: Whether the constraint is identical to this one.
"""
if not isinstance(other, LinearConstraint):
return False
return bool(
tf.reduce_all(self.A == other.A)
and tf.reduce_all(self.lb == other.lb)
and tf.reduce_all(self.ub == other.ub)
and tf.reduce_all(self.keep_feasible == other.keep_feasible)
)
Constraint = Union[LinearConstraint, NonlinearConstraint]
""" Type alias for constraints. """
class SearchSpace(ABC):
"""
A :class:`SearchSpace` represents the domain over which an objective function is optimized.
"""
@abstractmethod
def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
:param num_samples: The number of points to sample from this search space.
:param seed: Random seed for reproducibility.
:return: ``num_samples`` i.i.d. random points, sampled uniformly from this search space.
"""
def contains(self, value: TensorType) -> TensorType:
"""Method for checking membership.
:param value: A point or points to check for membership of this :class:`SearchSpace`.
:return: A boolean array showing membership for each point in value.
:raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
dimensionality points from this :class:`SearchSpace`.
"""
tf.debugging.assert_equal(
tf.rank(value) > 0 and tf.shape(value)[-1] == self.dimension,
True,
message=f"""
Dimensionality mismatch: space is {self.dimension}, value is {tf.shape(value)[-1]}
""",
)
return self._contains(value)
@abstractmethod
def _contains(self, value: TensorType) -> TensorType:
"""Space-specific implementation of membership. Can assume valid input shape.
:param value: A point or points to check for membership of this :class:`SearchSpace`.
:return: A boolean array showing membership for each point in value.
"""
def __contains__(self, value: TensorType) -> bool:
"""Method called by `in` operator. Doesn't support broadcasting as Python insists
on converting the result to a boolean.
:param value: A single point to check for membership of this :class:`SearchSpace`.
:return: `True` if ``value`` is a member of this search space, else `False`.
:raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
dimensionality from this :class:`SearchSpace`.
"""
tf.debugging.assert_equal(
tf.rank(value) == 1,
True,
message=f"""
Rank mismatch: expected 1, got {tf.rank(value)}. To get a tensor of boolean
membership values from a tensor of points, use `space.contains(value)`
rather than `value in space`.
""",
)
return self.contains(value)
@property
@abstractmethod
def dimension(self) -> TensorType:
"""The number of inputs in this search space."""
@property
@abstractmethod
def lower(self) -> TensorType:
"""The lowest value taken by each search space dimension."""
@property
@abstractmethod
def upper(self) -> TensorType:
"""The highest value taken by each search space dimension."""
@abstractmethod
def product(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType:
"""
:param other: A search space of the same type as this search space.
:return: The Cartesian product of this search space with the ``other``.
"""
@overload
def __mul__(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType:
...
@overload
def __mul__(self: SearchSpaceType, other: SearchSpace) -> SearchSpace: # type: ignore[misc]
# mypy complains that this is superfluous, but it seems to use it fine to infer
# that Box * Box = Box, while Box * Discrete = SearchSpace.
...
def __mul__(self, other: SearchSpace) -> SearchSpace:
"""
:param other: A search space.
:return: The Cartesian product of this search space with the ``other``.
If both spaces are of the same type then this calls the :meth:`product` method.
Otherwise, it generates a :class:`TaggedProductSearchSpace`.
"""
# If the search space has any constraints, always return a tagged product search space.
if not self.has_constraints and not other.has_constraints and isinstance(other, type(self)):
return self.product(other)
return TaggedProductSearchSpace((self, other))
def __pow__(self: SearchSpaceType, other: int) -> SearchSpaceType:
"""
Return the Cartesian product of ``other`` instances of this search space. For example, for
an exponent of `3`, and search space `s`, this is `s ** 3`, which is equivalent to
`s * s * s`.
:param other: The exponent, or number of instances of this search space to multiply
together. Must be strictly positive.
:return: The Cartesian product of ``other`` instances of this search space.
:raise tf.errors.InvalidArgumentError: If the exponent ``other`` is less than 1.
"""
tf.debugging.assert_positive(other, message="Exponent must be strictly positive")
return reduce(operator.mul, [self] * other)
def discretize(self, num_samples: int) -> DiscreteSearchSpace:
"""
:param num_samples: The number of points in the :class:`DiscreteSearchSpace`.
:return: A discrete search space consisting of ``num_samples`` points sampled uniformly from
this search space.
:raise NotImplementedError: If this :class:`SearchSpace` has constraints.
"""
if self.has_constraints: # Constraints are not supported.
raise NotImplementedError(
"Discretization is currently not supported in the presence of constraints."
)
return DiscreteSearchSpace(points=self.sample(num_samples))
@abstractmethod
def __eq__(self, other: object) -> bool:
"""
:param other: A search space.
:return: Whether the search space is identical to this one.
"""
@property
def constraints(self) -> Sequence[Constraint]:
"""The sequence of explicit constraints specified in this search space."""
return []
def constraints_residuals(self, points: TensorType) -> TensorType:
"""
Return residuals for all the constraints in this :class:`SearchSpace`.
:param points: The points to get the residuals for, with shape [..., D].
:return: A tensor of all the residuals with shape [..., C], where C is the total number of
constraints.
:raise NotImplementedError: If this :class:`SearchSpace` does not support constraints.
"""
raise NotImplementedError("Constraints are currently not supported for this search space.")
def is_feasible(self, points: TensorType) -> TensorType:
"""
Checks if points satisfy the explicit constraints of this :class:`SearchSpace`.
Note membership of the search space is not checked.
:param points: The points to check constraints feasibility for, with shape [..., D].
:return: A tensor of booleans. Returns `True` for each point if it is feasible in this
search space, else `False`.
:raise NotImplementedError: If this :class:`SearchSpace` has constraints.
"""
        # Everything is feasible in the absence of constraints. Must be overridden if there are
# constraints.
if self.has_constraints:
raise NotImplementedError("Feasibility check is not implemented for this search space.")
return tf.cast(tf.ones(points.shape[:-1]), dtype=bool)
@property
def has_constraints(self) -> bool:
"""Returns `True` if this search space has any explicit constraints specified."""
# By default assume there are no constraints; can be overridden by a subclass.
return False
class DiscreteSearchSpace(SearchSpace):
r"""
A discrete :class:`SearchSpace` representing a finite set of :math:`D`-dimensional points in
:math:`\mathbb{R}^D`.
For example:
>>> points = tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]])
>>> search_space = DiscreteSearchSpace(points)
>>> assert tf.constant([0.0, 0.4]) in search_space
>>> assert tf.constant([1.0, 0.5]) not in search_space
"""
def __init__(self, points: TensorType):
"""
:param points: The points that define the discrete space, with shape ('N', 'D').
:raise ValueError (or tf.errors.InvalidArgumentError): If ``points`` has an invalid shape.
"""
tf.debugging.assert_shapes([(points, ("N", "D"))])
self._points = points
self._dimension = tf.shape(self._points)[-1]
def __repr__(self) -> str:
""""""
return f"DiscreteSearchSpace({self._points!r})"
@property
def lower(self) -> TensorType:
"""The lowest value taken across all points by each search space dimension."""
return tf.reduce_min(self.points, -2)
@property
def upper(self) -> TensorType:
"""The highest value taken across all points by each search space dimension."""
return tf.reduce_max(self.points, -2)
@property
def points(self) -> TensorType:
"""All the points in this space."""
return self._points
@property
def dimension(self) -> TensorType:
"""The number of inputs in this search space."""
return self._dimension
def _contains(self, value: TensorType) -> TensorType:
comparison = tf.math.equal(self._points, tf.expand_dims(value, -2)) # [..., N, D]
return tf.reduce_any(tf.reduce_all(comparison, axis=-1), axis=-1) # [...]
def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
:param num_samples: The number of points to sample from this search space.
:param seed: Random seed for reproducibility.
:return: ``num_samples`` i.i.d. random points, sampled uniformly,
from this search space.
"""
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
if num_samples == 0:
return self.points[:0, :]
else:
sampled_indices = tf.random.categorical(
tf.ones((1, tf.shape(self.points)[0])), num_samples, seed=seed
)
return tf.gather(self.points, sampled_indices)[0, :, :] # [num_samples, D]
def product(self, other: DiscreteSearchSpace) -> DiscreteSearchSpace:
r"""
Return the Cartesian product of the two :class:`DiscreteSearchSpace`\ s. For example:
>>> sa = DiscreteSearchSpace(tf.constant([[0, 1], [2, 3]]))
>>> sb = DiscreteSearchSpace(tf.constant([[4, 5, 6], [7, 8, 9]]))
>>> (sa * sb).points.numpy()
array([[0, 1, 4, 5, 6],
[0, 1, 7, 8, 9],
[2, 3, 4, 5, 6],
[2, 3, 7, 8, 9]], dtype=int32)
:param other: A :class:`DiscreteSearchSpace` with :attr:`points` of the same dtype as this
search space.
:return: The Cartesian product of the two :class:`DiscreteSearchSpace`\ s.
:raise TypeError: If one :class:`DiscreteSearchSpace` has :attr:`points` of a different
dtype to the other.
"""
if self.points.dtype is not other.points.dtype:
return NotImplemented
tile_self = tf.tile(self.points[:, None], [1, len(other.points), 1])
tile_other = tf.tile(other.points[None], [len(self.points), 1, 1])
cartesian_product = tf.concat([tile_self, tile_other], axis=2)
product_space_dimension = self.points.shape[-1] + other.points.shape[-1]
return DiscreteSearchSpace(tf.reshape(cartesian_product, [-1, product_space_dimension]))
def __eq__(self, other: object) -> bool:
"""
:param other: A search space.
:return: Whether the search space is identical to this one.
"""
if not isinstance(other, DiscreteSearchSpace):
return NotImplemented
return bool(tf.reduce_all(tf.sort(self.points, 0) == tf.sort(other.points, 0)))
def __deepcopy__(self, memo: dict[int, object]) -> DiscreteSearchSpace:
return self
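# Illustrative sketch (not part of the library): membership checks on a small discrete
# space. ``contains`` broadcasts over a batch of points, whereas the ``in`` operator only
# accepts a single point. The points are arbitrary.
def _example_discrete_membership() -> TensorType:
    space = DiscreteSearchSpace(tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]]))
    batch = tf.constant([[0.0, 0.4], [1.0, 0.5]])
    return space.contains(batch)  # [True, False]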
class Box(SearchSpace):
r"""
Continuous :class:`SearchSpace` representing a :math:`D`-dimensional box in
:math:`\mathbb{R}^D`. Mathematically it is equivalent to the Cartesian product of :math:`D`
closed bounded intervals in :math:`\mathbb{R}`.
"""
@overload
def __init__(
self,
lower: Sequence[float],
upper: Sequence[float],
constraints: Optional[Sequence[Constraint]] = None,
ctol: float | TensorType = 1e-7,
):
...
@overload
def __init__(
self,
lower: TensorType,
upper: TensorType,
constraints: Optional[Sequence[Constraint]] = None,
ctol: float | TensorType = 1e-7,
):
...
def __init__(
self,
lower: Sequence[float] | TensorType,
upper: Sequence[float] | TensorType,
constraints: Optional[Sequence[Constraint]] = None,
ctol: float | TensorType = 1e-7,
):
r"""
If ``lower`` and ``upper`` are `Sequence`\ s of floats (such as lists or tuples),
they will be converted to tensors of dtype `DEFAULT_DTYPE`.
:param lower: The lower (inclusive) bounds of the box. Must have shape [D] for positive D,
and if a tensor, must have float type.
:param upper: The upper (inclusive) bounds of the box. Must have shape [D] for positive D,
and if a tensor, must have float type.
:param constraints: Sequence of explicit input constraints for this search space.
        :param ctol: Tolerance used to check constraint satisfaction.
:raise ValueError (or tf.errors.InvalidArgumentError): If any of the following are true:
- ``lower`` and ``upper`` have invalid shapes.
- ``lower`` and ``upper`` do not have the same floating point type.
- ``upper`` is not greater than ``lower`` across all dimensions.
"""
tf.debugging.assert_shapes([(lower, ["D"]), (upper, ["D"])])
tf.assert_rank(lower, 1)
tf.assert_rank(upper, 1)
tf.debugging.assert_non_negative(ctol, message="Tolerance must be non-negative")
if isinstance(lower, Sequence):
self._lower = tf.constant(lower, dtype=DEFAULT_DTYPE)
self._upper = tf.constant(upper, dtype=DEFAULT_DTYPE)
else:
self._lower = tf.convert_to_tensor(lower)
self._upper = tf.convert_to_tensor(upper)
tf.debugging.assert_same_float_dtype([self._lower, self._upper])
tf.debugging.assert_less(self._lower, self._upper)
self._dimension = tf.shape(self._upper)[-1]
if constraints is None:
self._constraints: Sequence[Constraint] = []
else:
self._constraints = constraints
self._ctol = ctol
def __repr__(self) -> str:
""""""
return f"Box({self._lower!r}, {self._upper!r}, {self._constraints!r}, {self._ctol!r})"
@property
def lower(self) -> tf.Tensor:
"""The lower bounds of the box."""
return self._lower
@property
def upper(self) -> tf.Tensor:
"""The upper bounds of the box."""
return self._upper
@property
def dimension(self) -> TensorType:
"""The number of inputs in this search space."""
return self._dimension
@property
def constraints(self) -> Sequence[Constraint]:
"""The sequence of explicit constraints specified in this search space."""
return self._constraints
def _contains(self, value: TensorType) -> TensorType:
"""
For each point in ``value``, return `True` if the point is a member of this search space,
else `False`. A point is a member if all of its coordinates lie in the closed intervals
bounded by the lower and upper bounds.
:param value: A point or points to check for membership of this :class:`SearchSpace`.
:return: A boolean array showing membership for each point in value.
"""
return tf.reduce_all(value >= self._lower, axis=-1) & tf.reduce_all(
value <= self._upper, axis=-1
)
def _sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
# Internal common method to sample randomly from the space.
dim = tf.shape(self._lower)[-1]
return tf.random.uniform(
(num_samples, dim),
minval=self._lower,
maxval=self._upper,
dtype=self._lower.dtype,
seed=seed,
)
def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
Sample randomly from the space.
:param num_samples: The number of points to sample from this search space.
:param seed: Random seed for reproducibility.
:return: ``num_samples`` i.i.d. random points, sampled uniformly,
from this search space with shape '[num_samples, D]' , where D is the search space
dimension.
"""
tf.debugging.assert_non_negative(num_samples)
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
return self._sample(num_samples, seed)
def _sample_halton(
self,
start: int,
num_samples: int,
seed: Optional[int] = None,
) -> TensorType:
# Internal common method to sample from the space using a Halton sequence.
tf.debugging.assert_non_negative(num_samples)
if num_samples == 0:
return tf.constant([])
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
dim = tf.shape(self._lower)[-1]
sequence_indices = tf.range(start=start, limit=start + num_samples, dtype=tf.int32)
return (self._upper - self._lower) * tfp.mcmc.sample_halton_sequence(
dim=dim, sequence_indices=sequence_indices, dtype=self._lower.dtype, seed=seed
) + self._lower
def sample_halton(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
Sample from the space using a Halton sequence. The resulting samples are guaranteed to be
diverse and are reproducible by using the same choice of ``seed``.
:param num_samples: The number of points to sample from this search space.
:param seed: Random seed for the halton sequence
:return: ``num_samples`` of points, using halton sequence with shape '[num_samples, D]' ,
where D is the search space dimension.
"""
return self._sample_halton(0, num_samples, seed)
def sample_sobol(self, num_samples: int, skip: Optional[int] = None) -> TensorType:
"""
Sample a diverse set from the space using a Sobol sequence.
If ``skip`` is specified, then the resulting samples are reproducible.
:param num_samples: The number of points to sample from this search space.
:param skip: The number of initial points of the Sobol sequence to skip
:return: ``num_samples`` of points, using sobol sequence with shape '[num_samples, D]' ,
where D is the search space dimension.
"""
tf.debugging.assert_non_negative(num_samples)
if num_samples == 0:
return tf.constant([])
if skip is None: # generate random skip
skip = tf.random.uniform([1], maxval=2**16, dtype=tf.int32)[0]
dim = tf.shape(self._lower)[-1]
return (self._upper - self._lower) * tf.math.sobol_sample(
dim=dim, num_results=num_samples, dtype=self._lower.dtype, skip=skip
) + self._lower
def _sample_feasible_loop(
self,
num_samples: int,
sampler: Callable[[], TensorType],
max_tries: int = 100,
) -> TensorType:
"""
Rejection sampling using provided callable. Try ``max_tries`` number of times to find
``num_samples`` feasible points.
:param num_samples: The number of feasible points to sample from this search space.
:param sampler: Callable to return samples. Called potentially multiple times.
:param max_tries: Maximum attempts to sample the requested number of points.
:return: ``num_samples`` feasible points sampled using ``sampler``.
:raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
sampled.
"""
xs = []
count = 0
tries = 0
while count < num_samples and tries < max_tries:
tries += 1
xi = sampler()
mask = self.is_feasible(xi)
xo = tf.boolean_mask(xi, mask)
xs.append(xo)
count += xo.shape[0]
if count < num_samples:
raise SampleTimeoutError(
f"""Failed to sample {num_samples} feasible point(s), even after {tries} attempts.
Sampled only {count} feasible point(s)."""
)
xs = tf.concat(xs, axis=0)[:num_samples]
return xs
def sample_feasible(
self, num_samples: int, seed: Optional[int] = None, max_tries: int = 100
) -> TensorType:
"""
Sample feasible points randomly from the space.
:param num_samples: The number of feasible points to sample from this search space.
:param seed: Random seed for reproducibility.
:param max_tries: Maximum attempts to sample the requested number of points.
:return: ``num_samples`` i.i.d. random points, sampled uniformly,
from this search space with shape '[num_samples, D]' , where D is the search space
dimension.
:raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
sampled.
"""
tf.debugging.assert_non_negative(num_samples)
# Without constraints or zero-num-samples use the normal sample method directly.
if not self.has_constraints or num_samples == 0:
return self.sample(num_samples, seed)
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
def _sampler() -> TensorType:
return self._sample(num_samples, seed)
return self._sample_feasible_loop(num_samples, _sampler, max_tries)
def sample_halton_feasible(
self, num_samples: int, seed: Optional[int] = None, max_tries: int = 100
) -> TensorType:
"""
Sample feasible points from the space using a Halton sequence. The resulting samples are
guaranteed to be diverse and are reproducible by using the same choice of ``seed``.
:param num_samples: The number of feasible points to sample from this search space.
:param seed: Random seed for the halton sequence
:param max_tries: Maximum attempts to sample the requested number of points.
:return: ``num_samples`` of points, using halton sequence with shape '[num_samples, D]' ,
where D is the search space dimension.
:raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
sampled.
"""
tf.debugging.assert_non_negative(num_samples)
# Without constraints or zero-num-samples use the normal sample method directly.
if not self.has_constraints or num_samples == 0:
return self.sample_halton(num_samples, seed)
start = 0
def _sampler() -> TensorType:
nonlocal start
# Global seed is set on every call in _sample_halton() so that we always sample from
# the same (randomised) sequence, and skip the relevant number of beginning samples.
samples = self._sample_halton(start, num_samples, seed)
start += num_samples
return samples
return self._sample_feasible_loop(num_samples, _sampler, max_tries)
def sample_sobol_feasible(
self, num_samples: int, skip: Optional[int] = None, max_tries: int = 100
) -> TensorType:
"""
Sample a diverse set of feasible points from the space using a Sobol sequence.
If ``skip`` is specified, then the resulting samples are reproducible.
:param num_samples: The number of feasible points to sample from this search space.
:param skip: The number of initial points of the Sobol sequence to skip
:param max_tries: Maximum attempts to sample the requested number of points.
:return: ``num_samples`` of points, using sobol sequence with shape '[num_samples, D]' ,
where D is the search space dimension.
:raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
sampled.
"""
tf.debugging.assert_non_negative(num_samples)
# Without constraints or zero-num-samples use the normal sample method directly.
if not self.has_constraints or num_samples == 0:
return self.sample_sobol(num_samples, skip)
if skip is None: # generate random skip
skip = tf.random.uniform([1], maxval=2**16, dtype=tf.int32)[0]
_skip: TensorType = skip # To keep mypy happy.
def _sampler() -> TensorType:
nonlocal _skip
samples = self.sample_sobol(num_samples, skip=_skip)
# Skip the relevant number of beginning samples from previous iterations.
_skip += num_samples
return samples
return self._sample_feasible_loop(num_samples, _sampler, max_tries)
def product(self, other: Box) -> Box:
r"""
Return the Cartesian product of the two :class:`Box`\ es (concatenating their respective
lower and upper bounds). For example:
>>> unit_interval = Box([0.0], [1.0])
>>> square_at_origin = Box([-2.0, -2.0], [2.0, 2.0])
>>> new_box = unit_interval * square_at_origin
>>> new_box.lower.numpy()
array([ 0., -2., -2.])
>>> new_box.upper.numpy()
array([1., 2., 2.])
:param other: A :class:`Box` with bounds of the same type as this :class:`Box`.
:return: The Cartesian product of the two :class:`Box`\ es.
:raise TypeError: If the bounds of one :class:`Box` have different dtypes to those of
the other :class:`Box`.
"""
if self.lower.dtype is not other.lower.dtype:
return NotImplemented
product_lower_bound = tf.concat([self._lower, other.lower], axis=-1)
product_upper_bound = tf.concat([self._upper, other.upper], axis=-1)
return Box(product_lower_bound, product_upper_bound)
def __eq__(self, other: object) -> bool:
"""
:param other: A search space.
:return: Whether the search space is identical to this one.
"""
if not isinstance(other, Box):
return NotImplemented
return bool(
tf.reduce_all(self.lower == other.lower)
and tf.reduce_all(self.upper == other.upper)
# Constraints match only if they are exactly the same (in the same order).
and self._constraints == other._constraints
)
def __deepcopy__(self, memo: dict[int, object]) -> Box:
return self
def constraints_residuals(self, points: TensorType) -> TensorType:
"""
Return residuals for all the constraints in this :class:`SearchSpace`.
:param points: The points to get the residuals for, with shape [..., D].
:return: A tensor of all the residuals with shape [..., C], where C is the total number of
constraints.
"""
residuals = [constraint.residual(points) for constraint in self._constraints]
residuals = tf.concat(residuals, axis=-1)
return residuals
def is_feasible(self, points: TensorType) -> TensorType:
"""
Checks if points satisfy the explicit constraints of this :class:`SearchSpace`.
Note membership of the search space is not checked.
:param points: The points to check constraints feasibility for, with shape [..., D].
:return: A tensor of booleans. Returns `True` for each point if it is feasible in this
search space, else `False`.
"""
return tf.math.reduce_all(self.constraints_residuals(points) >= -self._ctol, axis=-1)
@property
def has_constraints(self) -> bool:
"""Returns `True` if this search space has any explicit constraints specified."""
return len(self._constraints) > 0
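# Illustrative sketch (not part of the library): a unit square with the linear constraint
# 0 <= x0 + x1 <= 1 attached, from which only feasible points are drawn via rejection
# sampling. The seed and sample count are arbitrary.
def _example_constrained_box_sampling(num_samples: int = 50) -> TensorType:
    constraint = LinearConstraint(
        A=tf.constant([[1.0, 1.0]], dtype=tf.float64),
        lb=tf.constant([0.0], dtype=tf.float64),
        ub=tf.constant([1.0], dtype=tf.float64),
    )
    space = Box([0.0, 0.0], [1.0, 1.0], constraints=[constraint])
    samples = space.sample_feasible(num_samples, seed=42)
    assert bool(tf.reduce_all(space.is_feasible(samples)))
    return samples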
class TaggedProductSearchSpace(SearchSpace):
r"""
    Product :class:`SearchSpace` built as the Cartesian product of
    multiple :class:`SearchSpace` subspaces. This class provides functionality for
    accessing either the resulting combined search space or each individual subspace.
Note that this class assumes that individual points in product spaces are
represented with their inputs in the same order as specified when initializing
the space.
"""
def __init__(self, spaces: Sequence[SearchSpace], tags: Optional[Sequence[str]] = None):
r"""
Build a :class:`TaggedProductSearchSpace` from a list ``spaces`` of other spaces. If
``tags`` are provided then they form the identifiers of the subspaces, otherwise the
subspaces are labelled numerically.
:param spaces: A sequence of :class:`SearchSpace` objects representing the space's subspaces
:param tags: An optional list of tags giving the unique identifiers of
the space's subspaces.
:raise ValueError (or tf.errors.InvalidArgumentError): If ``spaces`` has a different
length to ``tags`` when ``tags`` is provided or if ``tags`` contains duplicates.
"""
number_of_subspaces = len(spaces)
if tags is None:
tags = [str(index) for index in range(number_of_subspaces)]
else:
number_of_tags = len(tags)
tf.debugging.assert_equal(
number_of_tags,
number_of_subspaces,
message=f"""
Number of tags must match number of subspaces but
received {number_of_tags} tags and {number_of_subspaces} subspaces.
""",
)
number_of_unique_tags = len(set(tags))
tf.debugging.assert_equal(
number_of_tags,
number_of_unique_tags,
message=f"Subspace names must be unique but received {tags}.",
)
self._spaces = dict(zip(tags, spaces))
subspace_sizes = [space.dimension for space in spaces]
self._subspace_sizes_by_tag = {
tag: subspace_size for tag, subspace_size in zip(tags, subspace_sizes)
}
self._subspace_starting_indices = dict(zip(tags, tf.cumsum(subspace_sizes, exclusive=True)))
self._dimension = tf.cast(tf.reduce_sum(subspace_sizes), dtype=tf.int32)
self._tags = tuple(tags) # avoid accidental modification by users
def __repr__(self) -> str:
""""""
return f"""TaggedProductSearchSpace(spaces =
{[self.get_subspace(tag) for tag in self.subspace_tags]},
tags = {self.subspace_tags})
"""
@property
def lower(self) -> TensorType:
"""The lowest values taken by each space dimension, concatenated across subspaces."""
lower_for_each_subspace = [self.get_subspace(tag).lower for tag in self.subspace_tags]
return (
tf.concat(lower_for_each_subspace, axis=-1)
if lower_for_each_subspace
else tf.constant([], dtype=DEFAULT_DTYPE)
)
@property
def upper(self) -> TensorType:
"""The highest values taken by each space dimension, concatenated across subspaces."""
upper_for_each_subspace = [self.get_subspace(tag).upper for tag in self.subspace_tags]
return (
tf.concat(upper_for_each_subspace, axis=-1)
if upper_for_each_subspace
else tf.constant([], dtype=DEFAULT_DTYPE)
)
@property
def subspace_tags(self) -> tuple[str, ...]:
"""Return the names of the subspaces contained in this product space."""
return self._tags
@property
def dimension(self) -> TensorType:
"""The number of inputs in this product search space."""
return self._dimension
def get_subspace(self, tag: str) -> SearchSpace:
"""
Return the domain of a particular subspace.
:param tag: The tag specifying the target subspace.
:return: Target subspace.
"""
tf.debugging.assert_equal(
tag in self.subspace_tags,
True,
message=f"""
Attempted to access a subspace that does not exist. This space only contains
subspaces with the tags {self.subspace_tags} but received {tag}.
""",
)
return self._spaces[tag]
def fix_subspace(self, tag: str, values: TensorType) -> TaggedProductSearchSpace:
"""
Return a new :class:`TaggedProductSearchSpace` with the specified subspace replaced with
a :class:`DiscreteSearchSpace` containing ``values`` as its points. This is useful if you
wish to restrict subspaces to sets of representative points.
:param tag: The tag specifying the target subspace.
        :param values: The values used to populate the new discrete subspace.
:return: New :class:`TaggedProductSearchSpace` with the specified subspace replaced with
a :class:`DiscreteSearchSpace` containing ``values`` as its points.
"""
new_spaces = [
self.get_subspace(t) if t != tag else DiscreteSearchSpace(points=values)
for t in self.subspace_tags
]
return TaggedProductSearchSpace(spaces=new_spaces, tags=self.subspace_tags)
def get_subspace_component(self, tag: str, values: TensorType) -> TensorType:
"""
Returns the components of ``values`` lying in a particular subspace.
:param tag: Subspace tag.
:param values: Points from the :class:`TaggedProductSearchSpace` of shape [N,Dprod].
:return: The sub-components of ``values`` lying in the specified subspace, of shape
[N, Dsub], where Dsub is the dimensionality of the specified subspace.
"""
starting_index_of_subspace = self._subspace_starting_indices[tag]
ending_index_of_subspace = starting_index_of_subspace + self._subspace_sizes_by_tag[tag]
return values[..., starting_index_of_subspace:ending_index_of_subspace]
def _contains(self, value: TensorType) -> TensorType:
"""
Return `True` if ``value`` is a member of this search space, else `False`. A point is a
        member if each of its subspace components lies in the corresponding subspace.
Recall that individual points in product spaces are represented with their inputs in the
same order as specified when initializing the space.
:param value: A point to check for membership of this :class:`SearchSpace`.
:return: `True` if ``value`` is a member of this search space, else `False`. May return a
scalar boolean `TensorType` instead of the `bool` itself.
:raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
dimensionality from the search space.
"""
in_each_subspace = [
self._spaces[tag].contains(self.get_subspace_component(tag, value))
for tag in self._tags
]
return tf.reduce_all(in_each_subspace, axis=0)
def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
Sample randomly from the space by sampling from each subspace
and concatenating the resulting samples.
:param num_samples: The number of points to sample from this search space.
:param seed: Optional tf.random seed.
:return: ``num_samples`` i.i.d. random points, sampled uniformly,
            from this search space with shape '[num_samples, D]', where D is the search space
dimension.
"""
tf.debugging.assert_non_negative(num_samples)
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
subspace_samples = [self._spaces[tag].sample(num_samples, seed=seed) for tag in self._tags]
return tf.concat(subspace_samples, -1)
def product(self, other: TaggedProductSearchSpace) -> TaggedProductSearchSpace:
r"""
Return the Cartesian product of the two :class:`TaggedProductSearchSpace`\ s,
building a tree of :class:`TaggedProductSearchSpace`\ s.
:param other: A search space of the same type as this search space.
:return: The Cartesian product of this search space with the ``other``.
"""
return TaggedProductSearchSpace(spaces=[self, other])
def __eq__(self, other: object) -> bool:
"""
:param other: A search space.
:return: Whether the search space is identical to this one.
"""
if not isinstance(other, TaggedProductSearchSpace):
return NotImplemented
return self._tags == other._tags and self._spaces == other._spaces
def __deepcopy__(self, memo: dict[int, object]) -> TaggedProductSearchSpace:
return self
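# A small sketch of combining subspaces with :class:`TaggedProductSearchSpace`. ``Box`` and
# ``DiscreteSearchSpace`` are the classes defined earlier in this module; the tags, bounds and
# discrete levels are arbitrary.
def _example_tagged_product_space() -> None:
    continuous = Box(
        tf.constant([0.0, 0.0], dtype=tf.float64), tf.constant([1.0, 1.0], dtype=tf.float64)
    )
    levels = DiscreteSearchSpace(tf.constant([[0.0], [1.0], [2.0]], dtype=tf.float64))
    space = TaggedProductSearchSpace([continuous, levels], tags=["position", "level"])
    samples = space.sample(3)  # shape [3, 3]: two continuous inputs plus the discrete level
    level_component = space.get_subspace_component("level", samples)  # shape [3, 1]
    del level_component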
| 45,403 | 41.040741 | 100 | py |
trieste-develop | trieste-develop/trieste/data.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains utilities for :class:`~trieste.observer.Observer` data. """
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Sequence
import tensorflow as tf
from trieste.types import TensorType
@dataclass(frozen=True)
class Dataset:
"""
Container for the query points and corresponding observations from an
:class:`~trieste.observer.Observer`.
"""
query_points: TensorType
""" The points at which the :class:`~trieste.observer.Observer` was queried. """
observations: TensorType
""" The observed output of the :class:`~trieste.observer.Observer` for each query point. """
def __post_init__(self) -> None:
"""
:raise ValueError (or InvalidArgumentError): If ``query_points`` or ``observations`` have \
rank less than two, or they have unequal shape in any but their last dimension.
"""
tf.debugging.assert_rank_at_least(self.query_points, 2)
tf.debugging.assert_rank_at_least(self.observations, 2)
if 0 in (self.query_points.shape[-1], self.observations.shape[-1]):
raise ValueError(
f"query_points and observations cannot have dimension 0, got shapes"
f" {self.query_points.shape} and {self.observations.shape}."
)
if (
self.query_points.shape[:-1] != self.observations.shape[:-1]
# can't check dynamic shapes, so trust that they're ok (if not, they'll fail later)
and None not in self.query_points.shape[:-1]
):
raise ValueError(
f"Leading shapes of query_points and observations must match. Got shapes"
f" {self.query_points.shape}, {self.observations.shape}."
)
def __add__(self, rhs: Dataset) -> Dataset:
r"""
Return the :class:`Dataset` whose query points are the result of concatenating the
`query_points` in each :class:`Dataset` along the zeroth axis, and the same for the
`observations`. For example:
>>> d1 = Dataset(
... tf.constant([[0.1, 0.2], [0.3, 0.4]]),
... tf.constant([[0.5, 0.6], [0.7, 0.8]])
... )
>>> d2 = Dataset(tf.constant([[0.9, 1.0]]), tf.constant([[1.1, 1.2]]))
>>> (d1 + d2).query_points
<tf.Tensor: shape=(3, 2), dtype=float32, numpy=
array([[0.1, 0.2],
[0.3, 0.4],
[0.9, 1. ]], dtype=float32)>
>>> (d1 + d2).observations
<tf.Tensor: shape=(3, 2), dtype=float32, numpy=
array([[0.5, 0.6],
[0.7, 0.8],
[1.1, 1.2]], dtype=float32)>
:param rhs: A :class:`Dataset` with the same shapes as this one, except in the zeroth
dimension, which can have any size.
:return: The result of concatenating the :class:`Dataset`\ s.
:raise InvalidArgumentError: If the shapes of the `query_points` in each :class:`Dataset`
differ in any but the zeroth dimension. The same applies for `observations`.
"""
return Dataset(
tf.concat([self.query_points, rhs.query_points], axis=0),
tf.concat([self.observations, rhs.observations], axis=0),
)
def __len__(self) -> tf.Tensor:
"""
:return: The number of query points, or equivalently the number of observations.
"""
return tf.shape(self.observations)[0]
def __deepcopy__(self, memo: dict[int, object]) -> Dataset:
return self
def astuple(self) -> tuple[TensorType, TensorType]:
"""
**Note:** Unlike the standard library function `dataclasses.astuple`, this method does
**not** deepcopy the attributes.
:return: A 2-tuple of the :attr:`query_points` and :attr:`observations`.
"""
return self.query_points, self.observations
def check_and_extract_fidelity_query_points(
query_points: TensorType, max_fidelity: Optional[int] = None
) -> tuple[TensorType, TensorType]:
"""Check whether the final column of a tensor is close enough to ints
to be reasonably considered to represent fidelities.
The final input column of multi-fidelity data should be a reference to
the fidelity of the query point. We cannot have mixed type tensors, but
    we can check that the final column values are suitably close to integers.
    :param query_points: Data to check the final column of.
    :raise ValueError: If there are not enough columns to be multifidelity data
:raise InvalidArgumentError: If any value in the final column is far from an integer
:return: Query points without fidelity column
and the fidelities of each of the query points
"""
# Check we have sufficient columns
if query_points.shape[-1] < 2:
raise ValueError(
"Query points do not have enough columns to be multifidelity,"
f" need at least 2, got {query_points.shape[1]}"
)
input_points = query_points[..., :-1]
fidelity_col = query_points[..., -1:]
# Check fidelity column values are close to ints
tf.debugging.assert_equal(
tf.round(fidelity_col),
fidelity_col,
message="Fidelity column should be float(int), but got a float that"
" was not close to an int",
)
# Check fidelity column values are non-negative
tf.debugging.assert_non_negative(fidelity_col, message="Fidelity must be non-negative")
if max_fidelity is not None:
max_input_fid = tf.reduce_max(fidelity_col)
max_fidelity_float = tf.cast(max_fidelity, dtype=query_points.dtype)
tf.debugging.assert_less_equal(
max_input_fid,
max_fidelity_float,
message=(
f"Model only supports fidelities up to {max_fidelity},"
f" but {max_input_fid} was passed"
),
)
return input_points, fidelity_col
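# Illustrative sketch of the helper above: the final column is the fidelity indicator, so a
# [2, 3] tensor is split into [2, 2] input points and a [2, 1] fidelity column. The values are
# arbitrary.
def _example_check_and_extract_fidelity() -> None:
    query_points = tf.constant([[0.1, 0.2, 0.0], [0.3, 0.4, 1.0]], dtype=tf.float64)
    inputs, fidelities = check_and_extract_fidelity_query_points(query_points, max_fidelity=1)
    # inputs has shape [2, 2]; fidelities has shape [2, 1] and holds 0.0 and 1.0
    del inputs, fidelities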
def split_dataset_by_fidelity(dataset: Dataset, num_fidelities: int) -> Sequence[Dataset]:
"""Split dataset into individual datasets without fidelity information
:param dataset: Dataset for which to split fidelities
    :param num_fidelities: Number of fidelities in the problem (not just the dataset)
:return: Ordered list of datasets with lowest fidelity at index 0 and highest at -1
"""
if num_fidelities < 1:
raise ValueError(f"Data must have 1 or more fidelities, got {num_fidelities}")
datasets = [get_dataset_for_fidelity(dataset, fidelity) for fidelity in range(num_fidelities)]
return datasets
def get_dataset_for_fidelity(dataset: Dataset, fidelity: int) -> Dataset:
"""Get a dataset with only the specified fidelity of data in
:param dataset: The dataset from which to extract the single fidelity data
:param fidelity: The fidelity to extract the data for
:return: Dataset with a single fidelity and no fidelity column
"""
input_points, fidelity_col = check_and_extract_fidelity_query_points(
dataset.query_points
) # [..., D], [..., 1]
mask = fidelity_col == fidelity # [..., ]
inds = tf.where(mask)[..., 0] # [..., ]
inputs_for_fidelity = tf.gather(input_points, inds, axis=0) # [..., D]
observations_for_fidelity = tf.gather(dataset.observations, inds, axis=0) # [..., 1]
return Dataset(query_points=inputs_for_fidelity, observations=observations_for_fidelity)
def add_fidelity_column(query_points: TensorType, fidelity: int) -> TensorType:
"""Add fidelity column to query_points without fidelity data
:param query_points: query points without fidelity to add fidelity column to
:param fidelity: fidelity to populate fidelity column with
:return: TensorType of query points with fidelity column added
"""
fidelity_col = tf.ones((tf.shape(query_points)[-2], 1), dtype=query_points.dtype) * fidelity
query_points_for_fidelity = tf.concat([query_points, fidelity_col], axis=-1)
return query_points_for_fidelity
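# A short sketch tying the fidelity helpers together: build a two-fidelity dataset, split it per
# fidelity, and attach a fidelity column to fresh query points. All numbers are illustrative.
def _example_fidelity_helpers() -> None:
    query_points = tf.constant([[0.1, 0.0], [0.2, 1.0], [0.3, 0.0]], dtype=tf.float64)
    observations = tf.constant([[1.0], [2.0], [3.0]], dtype=tf.float64)
    dataset = Dataset(query_points, observations)
    low_fidelity, high_fidelity = split_dataset_by_fidelity(dataset, num_fidelities=2)
    # low_fidelity holds the two fidelity-0 points, high_fidelity the single fidelity-1 point
    new_inputs = tf.constant([[0.4], [0.5]], dtype=tf.float64)
    new_queries = add_fidelity_column(new_inputs, fidelity=1)  # shape [2, 2]
    del low_fidelity, high_fidelity, new_queries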
| 8,597 | 41.147059 | 99 | py |
trieste-develop | trieste-develop/trieste/types.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains type aliases."""
from typing import Callable, Hashable, Tuple, TypeVar, Union
import tensorflow as tf
TensorType = Union[tf.Tensor, tf.Variable]
"""Type alias for tensor-like types."""
S = TypeVar("S")
"""Unbound type variable."""
T = TypeVar("T")
"""Unbound type variable."""
State = Callable[[S], Tuple[S, T]]
"""
A `State` produces a value of type `T`, given a state of type `S`, and in doing so can update the
state. If the state is updated, it is not updated in-place. Instead, a new state is created. This
is a referentially transparent alternative to mutable state.
"""
Tag = Hashable
"""Type alias for a tag used to label datasets and models."""
| 1,272 | 33.405405 | 97 | py |
trieste-develop | trieste-develop/trieste/version.py | # Copyright 2022 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module exposes the trieste version number."""
from pathlib import Path
BASE_PATH = Path(__file__).parents[0]
VERSION = BASE_PATH / "VERSION"
__version__ = Path(VERSION).read_text().strip()
| 787 | 36.52381 | 74 | py |
trieste-develop | trieste-develop/trieste/__init__.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The library root. See :mod:`~trieste.bayesian_optimizer` for the core optimizer, which requires
models (see :mod:`~trieste.models`), and data sets (see :mod:`~trieste.data`). The
:mod:`~trieste.acquisition` package provides a selection of acquisition algorithms and the
functionality to define your own. The :mod:`~trieste.ask_tell_optimization` package provides API
for Ask-Tell optimization and manual control of the optimization loop.
The :mod:`~trieste.objectives` package contains several popular objective functions,
useful for experimentation.
"""
from . import (
acquisition,
ask_tell_optimization,
bayesian_optimizer,
data,
models,
objectives,
observer,
space,
types,
utils,
)
from .version import __version__
| 1,351 | 36.555556 | 96 | py |
trieste-develop | trieste-develop/trieste/observer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Definitions and utilities for observers of objective functions. """
from __future__ import annotations
from typing import Callable, Mapping, Union
import tensorflow as tf
from typing_extensions import Final
from .data import Dataset
from .types import Tag, TensorType
SingleObserver = Callable[[TensorType], Dataset]
"""
Type alias for an observer of the objective function (that takes query points and returns an
unlabelled dataset).
"""
MultiObserver = Callable[[TensorType], Mapping[Tag, Dataset]]
"""
Type alias for an observer of the objective function (that takes query points and returns labelled
datasets).
"""
Observer = Union[SingleObserver, MultiObserver]
"""
Type alias for an observer, returning either labelled datasets or a single unlabelled dataset.
"""
OBJECTIVE: Final[Tag] = "OBJECTIVE"
"""
A tag typically used by acquisition rules to denote the data sets and models corresponding to the
optimization objective.
"""
def _is_finite(t: TensorType) -> TensorType:
return tf.logical_and(tf.math.is_finite(t), tf.logical_not(tf.math.is_nan(t)))
def filter_finite(query_points: TensorType, observations: TensorType) -> Dataset:
"""
:param query_points: A tensor of shape (N x M).
:param observations: A tensor of shape (N x 1).
:return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points`` and
``observations`` where the ``observations`` are finite numbers.
:raise ValueError or InvalidArgumentError: If ``query_points`` or ``observations`` have invalid
shapes.
"""
tf.debugging.assert_shapes([(observations, ("N", 1))])
mask = tf.reshape(_is_finite(observations), [-1])
return Dataset(tf.boolean_mask(query_points, mask), tf.boolean_mask(observations, mask))
def map_is_finite(query_points: TensorType, observations: TensorType) -> Dataset:
"""
:param query_points: A tensor.
:param observations: A tensor.
:return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points``,
along with the tensor result of mapping the elements of ``observations`` to: `1` if they are
a finite number, else `0`, with dtype `tf.uint8`.
:raise ValueError or InvalidArgumentError: If ``query_points`` and ``observations`` do not
satisfy the shape constraints of :class:`~trieste.data.Dataset`.
"""
return Dataset(query_points, tf.cast(_is_finite(observations), tf.uint8))
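# Illustrative sketch of the two filtering helpers above, applied to observations containing a
# NaN. All values are arbitrary.
def _example_finite_filtering() -> None:
    query_points = tf.constant([[0.0], [1.0]])
    observations = tf.constant([[1.5], [float("nan")]])
    finite_only = filter_finite(query_points, observations)  # keeps only the first row
    finite_flags = map_is_finite(query_points, observations)  # observations become [[1], [0]]
    del finite_only, finite_flags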
| 3,024 | 37.291139 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/single_objectives.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains toy objective functions, useful for experimentation. A number of them have been
taken from `this Virtual Library of Simulation Experiments
<https://web.archive.org/web/20211015101644/https://www.sfu.ca/~ssurjano/>`_ (:cite:`ssurjano2021`).
"""
from __future__ import annotations
import math
from dataclasses import dataclass
from math import pi
from typing import Callable, Sequence
import tensorflow as tf
from ..space import Box, Constraint, LinearConstraint, NonlinearConstraint
from ..types import TensorType
@dataclass(frozen=True)
class ObjectiveTestProblem:
"""
Convenience container class for synthetic objective test functions.
"""
name: str
"""The test function name"""
objective: Callable[[TensorType], TensorType]
"""The synthetic test function"""
search_space: Box
"""The (continuous) search space of the test function"""
@property
def dim(self) -> int:
"""The input dimensionality of the test function"""
return self.search_space.dimension
@property
def bounds(self) -> list[list[float]]:
"""The input space bounds of the test function"""
return [self.search_space.lower, self.search_space.upper]
@dataclass(frozen=True)
class SingleObjectiveTestProblem(ObjectiveTestProblem):
"""
Convenience container class for synthetic single-objective test functions,
including the global minimizers and minimum.
"""
minimizers: TensorType
"""The global minimizers of the test function."""
minimum: TensorType
"""The global minimum of the test function."""
def _branin_internals(x: TensorType, scale: TensorType, translate: TensorType) -> TensorType:
x0 = x[..., :1] * 15.0 - 5.0
x1 = x[..., 1:] * 15.0
b = 5.1 / (4 * math.pi**2)
c = 5 / math.pi
r = 6
s = 10
t = 1 / (8 * math.pi)
return scale * ((x1 - b * x0**2 + c * x0 - r) ** 2 + s * (1 - t) * tf.cos(x0) + translate)
def branin(x: TensorType) -> TensorType:
"""
The Branin-Hoo function over :math:`[0, 1]^2`. See
:cite:`Picheny2013` for details.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 2))])
return _branin_internals(x, 1, 10)
def scaled_branin(x: TensorType) -> TensorType:
"""
The Branin-Hoo function, rescaled to have zero mean and unit variance over :math:`[0, 1]^2`. See
:cite:`Picheny2013` for details.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 2))])
return _branin_internals(x, 1 / 51.95, -44.81)
_ORIGINAL_BRANIN_MINIMIZERS = tf.constant(
[[-math.pi, 12.275], [math.pi, 2.275], [9.42478, 2.475]], tf.float64
)
Branin = SingleObjectiveTestProblem(
name="Branin",
objective=branin,
search_space=Box([0.0], [1.0]) ** 2,
minimizers=(_ORIGINAL_BRANIN_MINIMIZERS + [5.0, 0.0]) / 15.0,
minimum=tf.constant([0.397887], tf.float64),
)
"""The Branin-Hoo function over :math:`[0, 1]^2`. See :cite:`Picheny2013` for details."""
ScaledBranin = SingleObjectiveTestProblem(
name="Scaled Branin",
objective=scaled_branin,
search_space=Branin.search_space,
minimizers=Branin.minimizers,
minimum=tf.constant([-1.047393], tf.float64),
)
"""The Branin-Hoo function, rescaled to have zero mean and unit variance over :math:`[0, 1]^2`. See
:cite:`Picheny2013` for details."""
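# A small sketch of how these problem containers are typically consumed: evaluate the stored
# objective at the stored minimizers and compare against the stored minimum.
def _example_evaluate_scaled_branin() -> None:
    values = ScaledBranin.objective(ScaledBranin.minimizers)  # shape [3, 1]
    # each entry is approximately equal to ScaledBranin.minimum
    samples = ScaledBranin.search_space.sample(10)
    sample_values = ScaledBranin.objective(samples)  # shape [10, 1]
    del values, sample_values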
def _scaled_branin_constraints() -> Sequence[Constraint]:
def _nlc_func0(x: TensorType) -> TensorType:
c0 = x[..., 0] - 0.2 - tf.sin(x[..., 1])
c0 = tf.expand_dims(c0, axis=-1)
return c0
def _nlc_func1(x: TensorType) -> TensorType:
c1 = x[..., 0] - tf.cos(x[..., 1])
c1 = tf.expand_dims(c1, axis=-1)
return c1
constraints: Sequence[Constraint] = [
LinearConstraint(
A=tf.constant([[-1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]),
lb=tf.constant([-0.4, 0.15, 0.2]),
ub=tf.constant([0.5, 0.9, 0.9]),
),
NonlinearConstraint(_nlc_func0, tf.constant(-1.0), tf.constant(0.0)),
NonlinearConstraint(_nlc_func1, tf.constant(-0.8), tf.constant(0.0)),
]
return constraints
ConstrainedScaledBranin = SingleObjectiveTestProblem(
name="Constrained Scaled Branin",
objective=scaled_branin,
search_space=Box(
Branin.search_space.lower,
Branin.search_space.upper,
constraints=_scaled_branin_constraints(),
),
minimizers=tf.constant([[0.16518, 0.66518]], tf.float64),
minimum=tf.constant([-0.99888], tf.float64),
)
"""The rescaled Branin-Hoo function with a combination of linear and nonlinear constraints on the
search space."""
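# Sketch of using the constrained variant: sample from the box, then keep only the points that
# satisfy the explicit constraints attached to the search space.
def _example_constrained_scaled_branin_feasibility() -> None:
    space = ConstrainedScaledBranin.search_space
    candidates = space.sample(100)
    feasible_points = tf.boolean_mask(candidates, space.is_feasible(candidates))
    feasible_values = ConstrainedScaledBranin.objective(feasible_points)
    del feasible_values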
def simple_quadratic(x: TensorType) -> TensorType:
"""
A trivial quadratic function over :math:`[0, 1]^2`. Useful for quick testing.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 2))])
return -tf.math.reduce_sum(x, axis=-1, keepdims=True) ** 2
SimpleQuadratic = SingleObjectiveTestProblem(
name="Simple Quadratic",
objective=simple_quadratic,
search_space=Branin.search_space,
minimizers=tf.constant([[1.0, 1.0]], tf.float64),
minimum=tf.constant([-4.0], tf.float64),
)
"""A trivial quadratic function over :math:`[0, 1]^2`. Useful for quick testing."""
def gramacy_lee(x: TensorType) -> TensorType:
"""
The Gramacy & Lee function, typically evaluated over :math:`[0.5, 2.5]`. See
:cite:`gramacy2012cases` for details.
:param x: Where to evaluate the function, with shape [..., 1].
:return: The function values, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 1))])
return tf.sin(10 * math.pi * x) / (2 * x) + (x - 1) ** 4
GramacyLee = SingleObjectiveTestProblem(
name="Gramacy & Lee",
objective=gramacy_lee,
search_space=Box([0.5], [2.5]),
minimizers=tf.constant([[0.548562]], tf.float64),
minimum=tf.constant([-0.869011], tf.float64),
)
"""The Gramacy & Lee function, typically evaluated over :math:`[0.5, 2.5]`. See
:cite:`gramacy2012cases` for details."""
def logarithmic_goldstein_price(x: TensorType) -> TensorType:
"""
A logarithmic form of the Goldstein-Price function, with zero mean and unit variance over
:math:`[0, 1]^2`. See :cite:`Picheny2013` for details.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 2))])
x0, x1 = tf.split(4 * x - 2, 2, axis=-1)
a = (x0 + x1 + 1) ** 2
b = 19 - 14 * x0 + 3 * x0**2 - 14 * x1 + 6 * x0 * x1 + 3 * x1**2
c = (2 * x0 - 3 * x1) ** 2
d = 18 - 32 * x0 + 12 * x0**2 + 48 * x1 - 36 * x0 * x1 + 27 * x1**2
return (1 / 2.427) * (tf.math.log((1 + a * b) * (30 + c * d)) - 8.693)
LogarithmicGoldsteinPrice = SingleObjectiveTestProblem(
name="Logarithmic Goldstein-Price",
objective=logarithmic_goldstein_price,
search_space=Box([0.0], [1.0]) ** 2,
minimizers=tf.constant([[0.5, 0.25]], tf.float64),
minimum=tf.constant([-3.12913], tf.float64),
)
"""A logarithmic form of the Goldstein-Price function, with zero mean and unit variance over
:math:`[0, 1]^2`. See :cite:`Picheny2013` for details."""
def hartmann_3(x: TensorType) -> TensorType:
"""
The Hartmann 3 test function over :math:`[0, 1]^3`. This function has 3 local
    minima and one global minimum. See https://www.sfu.ca/~ssurjano/hart3.html for details.
:param x: The points at which to evaluate the function, with shape [..., 3].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 3))])
a = [1.0, 1.2, 3.0, 3.2]
A = [[3.0, 10.0, 30.0], [0.1, 10.0, 35.0], [3.0, 10.0, 30.0], [0.1, 10.0, 35.0]]
P = [
[0.3689, 0.1170, 0.2673],
[0.4699, 0.4387, 0.7470],
[0.1091, 0.8732, 0.5547],
[0.0381, 0.5743, 0.8828],
]
inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, 1) - P) ** 2, -1)
return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True)
Hartmann3 = SingleObjectiveTestProblem(
name="Hartmann 3",
objective=hartmann_3,
search_space=Box([0.0], [1.0]) ** 3,
minimizers=tf.constant([[0.114614, 0.555649, 0.852547]], tf.float64),
minimum=tf.constant([-3.86278], tf.float64),
)
"""The Hartmann 3 test function over :math:`[0, 1]^3`. This function has 3 local
minima and one global minimum. See https://www.sfu.ca/~ssurjano/hart3.html for details."""
def shekel_4(x: TensorType) -> TensorType:
"""
The Shekel test function over :math:`[0, 1]^4`. This function has ten local
minima and a single global minimum. See https://www.sfu.ca/~ssurjano/shekel.html for details.
Note that we rescale the original problem, which is typically defined
over `[0, 10]^4`.
:param x: The points at which to evaluate the function, with shape [..., 4].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 4))])
y: TensorType = x * 10.0
beta = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
C = [
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6],
[4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
[4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6],
]
inner_sum = tf.reduce_sum((tf.expand_dims(y, -1) - C) ** 2, 1)
inner_sum += tf.cast(tf.transpose(beta), dtype=inner_sum.dtype)
return -tf.reduce_sum(inner_sum ** (-1), -1, keepdims=True)
Shekel4 = SingleObjectiveTestProblem(
name="Shekel 4",
objective=shekel_4,
search_space=Box([0.0], [1.0]) ** 4,
minimizers=tf.constant([[0.4, 0.4, 0.4, 0.4]], tf.float64),
minimum=tf.constant([-10.5363], tf.float64),
)
"""The Shekel test function over :math:`[0, 1]^4`. This function has ten local
minima and a single global minimum. See https://www.sfu.ca/~ssurjano/shekel.html for details.
Note that we rescale the original problem, which is typically defined
over `[0, 10]^4`."""
def levy(x: TensorType, d: int) -> TensorType:
"""
The Levy test function over :math:`[0, 1]^d`. This function has many local
minima and a single global minimum. See https://www.sfu.ca/~ssurjano/levy.html for details.
Note that we rescale the original problem, which is typically defined
over `[-10, 10]^d`, to be defined over a unit hypercube :math:`[0, 1]^d`.
:param x: The points at which to evaluate the function, with shape [..., d].
:param d: The dimension of the function.
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_greater_equal(d, 1)
tf.debugging.assert_shapes([(x, (..., d))])
w: TensorType = 1 + ((x * 20.0 - 10) - 1) / 4
term1 = tf.pow(tf.sin(pi * w[..., 0:1]), 2)
term3 = (w[..., -1:] - 1) ** 2 * (1 + tf.pow(tf.sin(2 * pi * w[..., -1:]), 2))
wi = w[..., 0:-1]
wi_sum = tf.reduce_sum(
(wi - 1) ** 2 * (1 + 10 * tf.pow(tf.sin(pi * wi + 1), 2)), axis=-1, keepdims=True
)
return term1 + wi_sum + term3
def levy_8(x: TensorType) -> TensorType:
"""
Convenience function for the 8-dimensional :func:`levy` function, with output
    normalised to the unit interval.
:param x: The points at which to evaluate the function, with shape [..., 8].
:return: The function values at ``x``, with shape [..., 1].
"""
return levy(x, d=8) / 450.0
Levy8 = SingleObjectiveTestProblem(
name="Levy 8",
objective=levy_8,
search_space=Box([0.0], [1.0]) ** 8,
minimizers=tf.constant([[11 / 20] * 8], tf.float64),
minimum=tf.constant([0], tf.float64),
)
"""Convenience function for the 8-dimensional :func:`levy` function.
Taken from https://www.sfu.ca/~ssurjano/levy.html"""
def rosenbrock(x: TensorType, d: int) -> TensorType:
"""
    The Rosenbrock function, also known as the Banana function, is a unimodal function;
    however, the minimum lies in a narrow valley. Even though this valley is
easy to find, convergence to the minimum is difficult. See
https://www.sfu.ca/~ssurjano/rosen.html for details. Inputs are rescaled to
be defined over a unit hypercube :math:`[0, 1]^d`.
:param x: The points at which to evaluate the function, with shape [..., d].
:param d: The dimension of the function.
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_greater_equal(d, 1)
tf.debugging.assert_shapes([(x, (..., d))])
y: TensorType = x * 15.0 - 5
unscaled_function = tf.reduce_sum(
(100.0 * (y[..., 1:] - y[..., :-1]) ** 2 + (1 - y[..., :-1]) ** 2), axis=-1, keepdims=True
)
return unscaled_function
def rosenbrock_4(x: TensorType) -> TensorType:
"""
    Convenience function for the 4-dimensional :func:`rosenbrock` function.
    It is rescaled to have zero mean and unit variance over :math:`[0, 1]^4`. See
:cite:`Picheny2013` for details.
:param x: The points at which to evaluate the function, with shape [..., 4].
:return: The function values at ``x``, with shape [..., 1].
"""
return (rosenbrock(x, d=4) - 3.827 * 1e5) / (3.755 * 1e5)
Rosenbrock4 = SingleObjectiveTestProblem(
name="Rosenbrock 4",
objective=rosenbrock_4,
search_space=Box([0.0], [1.0]) ** 4,
minimizers=tf.constant([[0.4] * 4], tf.float64),
minimum=tf.constant([-1.01917], tf.float64),
)
"""The Rosenbrock function, rescaled to have zero mean and unit variance over :math:`[0, 1]^4. See
:cite:`Picheny2013` for details.
This function (also known as the Banana function) is unimodal, however the minima
lies in a narrow valley."""
def ackley_5(x: TensorType) -> TensorType:
"""
The Ackley test function over :math:`[0, 1]^5`. This function has
    many local minima and one global minimum. See https://www.sfu.ca/~ssurjano/ackley.html
for details.
Note that we rescale the original problem, which is typically defined
over `[-32.768, 32.768]`.
:param x: The points at which to evaluate the function, with shape [..., 5].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 5))])
x = (x - 0.5) * (32.768 * 2.0)
exponent_1 = -0.2 * tf.math.sqrt((1 / 5.0) * tf.reduce_sum(x**2, -1))
exponent_2 = (1 / 5.0) * tf.reduce_sum(tf.math.cos(2.0 * math.pi * x), -1)
function = (
-20.0 * tf.math.exp(exponent_1)
- tf.math.exp(exponent_2)
+ 20.0
+ tf.cast(tf.math.exp(1.0), dtype=x.dtype)
)
return tf.expand_dims(function, -1)
Ackley5 = SingleObjectiveTestProblem(
name="Ackley 5",
objective=ackley_5,
search_space=Box([0.0], [1.0]) ** 5,
minimizers=tf.constant([[0.5, 0.5, 0.5, 0.5, 0.5]], tf.float64),
minimum=tf.constant([0.0], tf.float64),
)
"""The Ackley test function over :math:`[0, 1]^5`. This function has
many local minima and one global minimum. See https://www.sfu.ca/~ssurjano/ackley.html
for details.
Note that we rescale the original problem, which is typically defined
over `[-32.768, 32.768]`."""
def hartmann_6(x: TensorType) -> TensorType:
"""
The Hartmann 6 test function over :math:`[0, 1]^6`. This function has
    6 local minima and one global minimum. See https://www.sfu.ca/~ssurjano/hart6.html
for details.
:param x: The points at which to evaluate the function, with shape [..., 6].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes([(x, (..., 6))])
a = [1.0, 1.2, 3.0, 3.2]
A = [
[10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
[0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
[3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
[17.0, 8.0, 0.05, 10.0, 0.1, 14.0],
]
P = [
[0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
[0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
[0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.6650],
[0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381],
]
inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, 1) - P) ** 2, -1)
return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True)
Hartmann6 = SingleObjectiveTestProblem(
name="Hartmann 6",
objective=hartmann_6,
search_space=Box([0.0], [1.0]) ** 6,
minimizers=tf.constant([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]], tf.float64),
minimum=tf.constant([-3.32237], tf.float64),
)
"""The Hartmann 6 test function over :math:`[0, 1]^6`. This function has
6 local minima and one global minimum. See https://www.sfu.ca/~ssurjano/hart6.html
for details."""
def michalewicz(x: TensorType, d: int = 2, m: int = 10) -> TensorType:
"""
The Michalewicz function over :math:`[0, \\pi]` for all i=1,...,d. Dimensionality is determined
by the parameter ``d`` and it features steep ridges and drops. It has :math:`d!` local minima,
    and it is multimodal. The parameter ``m`` defines the steepness of the valleys and ridges; a
larger ``m`` leads to a more difficult search. The recommended value of ``m`` is 10. See
    https://www.sfu.ca/~ssurjano/michal.html for details.
:param x: The points at which to evaluate the function, with shape [..., d].
:param d: The dimension of the function.
:param m: The steepness of the valleys/ridges.
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_greater_equal(d, 1)
tf.debugging.assert_shapes([(x, (..., d))])
xi = tf.range(1, (d + 1), delta=1, dtype=x.dtype) * tf.pow(x, 2)
result = tf.reduce_sum(tf.sin(x) * tf.pow(tf.sin(xi / math.pi), 2 * m), axis=1, keepdims=True)
return -result
def michalewicz_2(x: TensorType) -> TensorType:
"""
Convenience function for the 2-dimensional :func:`michalewicz` function with steepness 10.
:param x: The points at which to evaluate the function, with shape [..., 2].
:return: The function values at ``x``, with shape [..., 1].
"""
return michalewicz(x, d=2)
def michalewicz_5(x: TensorType) -> TensorType:
"""
Convenience function for the 5-dimensional :func:`michalewicz` function with steepness 10.
:param x: The points at which to evaluate the function, with shape [..., 5].
:return: The function values at ``x``, with shape [..., 1].
"""
return michalewicz(x, d=5)
def michalewicz_10(x: TensorType) -> TensorType:
"""
Convenience function for the 10-dimensional :func:`michalewicz` function with steepness 10.
:param x: The points at which to evaluate the function, with shape [..., 10].
:return: The function values at ``x``, with shape [..., 1].
"""
return michalewicz(x, d=10)
Michalewicz2 = SingleObjectiveTestProblem(
name="Michalewicz 2",
objective=michalewicz_2,
search_space=Box([0.0], [pi]) ** 2,
minimizers=tf.constant([[2.202906, 1.570796]], tf.float64),
minimum=tf.constant([-1.8013034], tf.float64),
)
"""Convenience function for the 2-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
Michalewicz5 = SingleObjectiveTestProblem(
name="Michalewicz 5",
objective=michalewicz_5,
search_space=Box([0.0], [pi]) ** 5,
minimizers=tf.constant([[2.202906, 1.570796, 1.284992, 1.923058, 1.720470]], tf.float64),
minimum=tf.constant([-4.6876582], tf.float64),
)
"""Convenience function for the 5-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
Michalewicz10 = SingleObjectiveTestProblem(
name="Michalewicz 10",
objective=michalewicz_10,
search_space=Box([0.0], [pi]) ** 10,
minimizers=tf.constant(
[
[
2.202906,
1.570796,
1.284992,
1.923058,
1.720470,
1.570796,
1.454414,
1.756087,
1.655717,
1.570796,
]
],
tf.float64,
),
minimum=tf.constant([-9.6601517], tf.float64),
)
"""Convenience function for the 10-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
def trid(x: TensorType, d: int = 10) -> TensorType:
"""
The Trid function over :math:`[-d^2, d^2]` for all i=1,...,d. Dimensionality is determined
by the parameter ``d`` and it has a global minimum. This function has large variation in
output which makes it challenging for Bayesian optimisation with vanilla Gaussian processes
with non-stationary kernels. Models that can deal with non-stationarities, such as deep
Gaussian processes, can be useful for modelling these functions. See :cite:`hebbal2019bayesian`
and https://www.sfu.ca/~ssurjano/trid.html for details.
:param x: The points at which to evaluate the function, with shape [..., d].
:param d: Dimensionality.
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_greater_equal(d, 2)
tf.debugging.assert_shapes([(x, (..., d))])
result = tf.reduce_sum(tf.pow(x - 1, 2), 1, True) - tf.reduce_sum(x[:, 1:] * x[:, :-1], 1, True)
return result
def trid_10(x: TensorType) -> TensorType:
"""The Trid function with dimension 10.
:param x: The points at which to evaluate the function, with shape [..., 10].
:return: The function values at ``x``, with shape [..., 1].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
return trid(x, d=10)
Trid10 = SingleObjectiveTestProblem(
name="Trid 10",
objective=trid_10,
search_space=Box([-(10**2)], [10**2]) ** 10,
minimizers=tf.constant([[i * (10 + 1 - i) for i in range(1, 10 + 1)]], tf.float64),
minimum=tf.constant([-10 * (10 + 4) * (10 - 1) / 6], tf.float64),
)
"""The Trid function with dimension 10."""
| 23,895 | 35.819723 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/multifidelity_objectives.py | # Copyright 2023 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains synthetic multi-fidelity objective functions, useful for experimentation.
"""
from dataclasses import dataclass
import numpy as np
import tensorflow as tf
from ..space import Box, DiscreteSearchSpace, SearchSpace, TaggedProductSearchSpace
from ..types import TensorType
from .single_objectives import SingleObjectiveTestProblem
@dataclass(frozen=True)
class SingleObjectiveMultifidelityTestProblem(SingleObjectiveTestProblem):
num_fidelities: int
"""The number of fidelities of test function"""
fidelity_search_space: TaggedProductSearchSpace
"""The search space including fidelities"""
def linear_multifidelity(x: TensorType) -> TensorType:
x_input = x[..., :-1]
x_fidelity = x[..., -1:]
f = 0.5 * ((6.0 * x_input - 2.0) ** 2) * tf.math.sin(12.0 * x_input - 4.0) + 10.0 * (
x_input - 1.0
)
f = f + x_fidelity * (f - 20.0 * (x_input - 1.0))
return f
_LINEAR_MULTIFIDELITY_MINIMIZERS = {
2: tf.constant([[0.75724875]], tf.float64),
3: tf.constant([[0.76333767]], tf.float64),
5: tf.constant([[0.76801846]], tf.float64),
}
_LINEAR_MULTIFIDELITY_MINIMA = {
2: tf.constant([-6.020740055], tf.float64),
3: tf.constant([-6.634287061], tf.float64),
5: tf.constant([-7.933019704], tf.float64),
}
def _linear_multifidelity_search_space_builder(
n_fidelities: int, input_search_space: SearchSpace
) -> TaggedProductSearchSpace:
fidelity_search_space = DiscreteSearchSpace(np.arange(n_fidelities, dtype=float).reshape(-1, 1))
search_space = TaggedProductSearchSpace(
[input_search_space, fidelity_search_space], ["input", "fidelity"]
)
return search_space
Linear2Fidelity = SingleObjectiveMultifidelityTestProblem(
name="Linear 2 Fidelity",
objective=linear_multifidelity,
search_space=Box(np.zeros(1), np.ones(1)),
fidelity_search_space=_linear_multifidelity_search_space_builder(
2, Box(np.zeros(1), np.ones(1))
),
minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[2],
minimum=_LINEAR_MULTIFIDELITY_MINIMA[2],
num_fidelities=2,
)
Linear3Fidelity = SingleObjectiveMultifidelityTestProblem(
name="Linear 3 Fidelity",
objective=linear_multifidelity,
search_space=Box(np.zeros(1), np.ones(1)),
fidelity_search_space=_linear_multifidelity_search_space_builder(
3, Box(np.zeros(1), np.ones(1))
),
minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[3],
minimum=_LINEAR_MULTIFIDELITY_MINIMA[3],
num_fidelities=3,
)
Linear5Fidelity = SingleObjectiveMultifidelityTestProblem(
name="Linear 5 Fidelity",
objective=linear_multifidelity,
search_space=Box(np.zeros(1), np.ones(1)),
fidelity_search_space=_linear_multifidelity_search_space_builder(
5, Box(np.zeros(1), np.ones(1))
),
minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[5],
minimum=_LINEAR_MULTIFIDELITY_MINIMA[5],
num_fidelities=5,
)
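# Illustrative sketch: evaluate the linear multi-fidelity problem at its highest fidelity. The
# fidelity index is appended as the final input column, matching the convention used by the
# multi-fidelity data utilities; the query points themselves are arbitrary.
def _example_evaluate_linear_two_fidelity() -> None:
    inputs = tf.constant([[0.25], [0.75]], dtype=tf.float64)
    fidelities = tf.ones_like(inputs)  # fidelity 1 is the higher of the two fidelities
    observations = Linear2Fidelity.objective(tf.concat([inputs, fidelities], axis=-1))  # [2, 1]
    del observations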
| 3,507 | 31.785047 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains functions convenient for creating :class:`Observer` objects that return data
from objective functions, appropriately formatted for usage with the toolbox.
"""
from __future__ import annotations
from collections.abc import Callable
from typing import Optional, overload
from ..data import Dataset
from ..observer import MultiObserver, Observer, SingleObserver
from ..types import Tag, TensorType
@overload
def mk_observer(objective: Callable[[TensorType], TensorType]) -> SingleObserver:
...
@overload
def mk_observer(objective: Callable[[TensorType], TensorType], key: Tag) -> MultiObserver:
...
def mk_observer(
objective: Callable[[TensorType], TensorType], key: Optional[Tag] = None
) -> Observer:
"""
:param objective: An objective function designed to be used with a single data set and model.
:param key: An optional key to use to access the data from the observer result.
:return: An observer returning the data from ``objective``.
"""
if key is not None:
return lambda qp: {key: Dataset(qp, objective(qp))}
else:
return lambda qp: Dataset(qp, objective(qp))
def mk_multi_observer(**kwargs: Callable[[TensorType], TensorType]) -> MultiObserver:
"""
:param kwargs: Observation functions.
:return: An multi-observer returning the data from ``kwargs``.
"""
return lambda qp: {key: Dataset(qp, objective(qp)) for key, objective in kwargs.items()}
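# A short usage sketch: wrap a toy objective as an observer and query it. The quadratic below and
# the "OBJECTIVE" key are purely illustrative; TensorFlow is imported locally as it is not
# otherwise needed by this module.
def _example_mk_observer() -> None:
    import tensorflow as tf
    def toy_objective(x: TensorType) -> TensorType:
        return tf.reduce_sum(x**2, axis=-1, keepdims=True)
    observer = mk_observer(toy_objective)
    data = observer(tf.constant([[0.0, 0.0], [1.0, 2.0]]))  # a Dataset with two rows
    tagged_observer = mk_observer(toy_objective, key="OBJECTIVE")
    tagged_data = tagged_observer(tf.constant([[0.5, 0.5]]))  # {"OBJECTIVE": Dataset(...)}
    del data, tagged_data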
| 2,051 | 33.2 | 97 | py |
trieste-develop | trieste-develop/trieste/objectives/multi_objectives.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains synthetic multi-objective functions, useful for experimentation.
"""
from __future__ import annotations
import math
from dataclasses import dataclass
from functools import partial
import tensorflow as tf
from typing_extensions import Protocol
from ..space import Box
from ..types import TensorType
from .single_objectives import ObjectiveTestProblem
class GenParetoOptimalPoints(Protocol):
"""A Protocol representing a function that generates Pareto optimal points."""
def __call__(self, n: int, seed: int | None = None) -> TensorType:
"""
Generate `n` Pareto optimal points.
:param n: The number of pareto optimal points to be generated.
:param seed: An integer used to create a random seed for distributions that
used to generate pareto optimal points.
:return: The Pareto optimal points
"""
@dataclass(frozen=True)
class MultiObjectiveTestProblem(ObjectiveTestProblem):
"""
Convenience container class for synthetic multi-objective test functions, containing
a generator for the pareto optimal points, which can be used as a reference of performance
measure of certain multi-objective optimization algorithms.
"""
gen_pareto_optimal_points: GenParetoOptimalPoints
"""Function to generate Pareto optimal points, given the number of points and an optional
random number seed."""
def vlmop2(x: TensorType, d: int) -> TensorType:
"""
The VLMOP2 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., d].
:param d: The dimensionality of the synthetic function.
:return: The function values at ``x``, with shape [..., 2].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes(
[(x, (..., d))],
message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
)
transl = 1 / tf.sqrt(tf.cast(d, x.dtype))
y1 = 1 - tf.exp(-1 * tf.reduce_sum((x - transl) ** 2, axis=-1))
y2 = 1 - tf.exp(-1 * tf.reduce_sum((x + transl) ** 2, axis=-1))
return tf.stack([y1, y2], axis=-1)
def VLMOP2(input_dim: int) -> MultiObjectiveTestProblem:
"""
The VLMOP2 problem, typically evaluated over :math:`[-2, 2]^d`.
    The Pareto-optimal inputs lie on the segment x1 = ... = xd, with each coordinate in
    [-1/sqrt(d), 1/sqrt(d)].
    See :cite:`van1999multiobjective` and :cite:`fonseca1995multiobjective`
    (the latter for a discussion of the Pareto front property) for details.
:param input_dim: The input dimensionality of the synthetic function.
:return: The problem specification.
"""
def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
tf.debugging.assert_greater(n, 0)
transl = 1 / tf.sqrt(tf.cast(input_dim, tf.float64))
_x = tf.tile(tf.linspace([-transl], [transl], n), [1, input_dim])
return vlmop2(_x, input_dim)
return MultiObjectiveTestProblem(
name=f"VLMOP2({input_dim})",
objective=partial(vlmop2, d=input_dim),
search_space=Box([-2.0], [2.0]) ** input_dim,
gen_pareto_optimal_points=gen_pareto_optimal_points,
)
def dtlz_mkd(input_dim: int, num_objective: int) -> tuple[int, int, int]:
"""Return m/k/d values for dtlz synthetic functions."""
tf.debugging.assert_greater(input_dim, 0)
tf.debugging.assert_greater(num_objective, 0)
tf.debugging.assert_greater(
input_dim,
num_objective,
f"input dimension {input_dim}"
f" must be greater than function objective numbers {num_objective}",
)
M = num_objective
k = input_dim - M + 1
d = input_dim
return (M, k, d)
def dtlz1(x: TensorType, m: int, k: int, d: int) -> TensorType:
"""
The DTLZ1 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., d].
:param m: The objective numbers.
:param k: The input dimensionality for g.
:param d: The dimensionality of the synthetic function.
:return: The function values at ``x``, with shape [..., m].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes(
[(x, (..., d))],
message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
)
tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
def g(xM: TensorType) -> TensorType:
return 100 * (
k
+ tf.reduce_sum(
(xM - 0.5) ** 2 - tf.cos(20 * math.pi * (xM - 0.5)), axis=-1, keepdims=True
)
)
ta = tf.TensorArray(x.dtype, size=m)
for i in range(m):
xM = x[..., m - 1 :]
y = 1 + g(xM)
y *= 1 / 2 * tf.reduce_prod(x[..., : m - 1 - i], axis=-1, keepdims=True)
if i > 0:
y *= 1 - x[..., m - i - 1, tf.newaxis]
ta = ta.write(i, y)
return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
def DTLZ1(input_dim: int, num_objective: int) -> MultiObjectiveTestProblem:
"""
    The DTLZ1 problem, whose ideal Pareto front lies on a linear hyper-plane.
See :cite:`deb2002scalable` for details.
:param input_dim: The input dimensionality of the synthetic function.
:param num_objective: The number of objectives.
:return: The problem specification.
"""
M, k, d = dtlz_mkd(input_dim, num_objective)
def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
tf.debugging.assert_greater_equal(M, 2)
rnd = tf.random.uniform([n, M - 1], minval=0, maxval=1, seed=seed, dtype=tf.float64)
strnd = tf.sort(rnd, axis=-1)
strnd = tf.concat(
[tf.zeros([n, 1], dtype=tf.float64), strnd, tf.ones([n, 1], dtype=tf.float64)], axis=-1
)
return 0.5 * (strnd[..., 1:] - strnd[..., :-1])
return MultiObjectiveTestProblem(
name=f"DTLZ1({input_dim}, {num_objective})",
objective=partial(dtlz1, m=M, k=k, d=d),
search_space=Box([0.0], [1.0]) ** d,
gen_pareto_optimal_points=gen_pareto_optimal_points,
)
def dtlz2(x: TensorType, m: int, d: int) -> TensorType:
"""
The DTLZ2 synthetic function.
:param x: The points at which to evaluate the function, with shape [..., d].
:param m: The objective numbers.
:param d: The dimensionality of the synthetic function.
:return: The function values at ``x``, with shape [..., m].
:raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
"""
tf.debugging.assert_shapes(
[(x, (..., d))],
message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
)
tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
def g(xM: TensorType) -> TensorType:
z = (xM - 0.5) ** 2
return tf.reduce_sum(z, axis=-1, keepdims=True)
ta = tf.TensorArray(x.dtype, size=m)
for i in tf.range(m):
y = 1 + g(x[..., m - 1 :])
for j in tf.range(m - 1 - i):
y *= tf.cos(math.pi / 2 * x[..., j, tf.newaxis])
if i > 0:
y *= tf.sin(math.pi / 2 * x[..., m - 1 - i, tf.newaxis])
ta = ta.write(i, y)
return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
def DTLZ2(input_dim: int, num_objective: int) -> MultiObjectiveTestProblem:
"""
    The DTLZ2 problem, whose ideal Pareto front lies on (part of) a unit hyper-sphere.
See :cite:`deb2002scalable` for details.
:param input_dim: The input dimensionality of the synthetic function.
:param num_objective: The number of objectives.
:return: The problem specification.
"""
M, k, d = dtlz_mkd(input_dim, num_objective)
def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
tf.debugging.assert_greater_equal(M, 2)
rnd = tf.random.normal([n, M], seed=seed, dtype=tf.float64)
samples = tf.abs(rnd / tf.norm(rnd, axis=-1, keepdims=True))
return samples
return MultiObjectiveTestProblem(
name=f"DTLZ2({input_dim}, {num_objective})",
objective=partial(dtlz2, m=M, d=d),
search_space=Box([0.0], [1.0]) ** d,
gen_pareto_optimal_points=gen_pareto_optimal_points,
)
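# Illustrative sketch: instantiate a multi-objective problem, evaluate its objective, and draw
# reference points from its Pareto front generator. The sizes are arbitrary.
def _example_multi_objective_problem() -> None:
    problem = DTLZ2(input_dim=4, num_objective=2)
    query_points = problem.search_space.sample(5)
    objective_values = problem.objective(query_points)  # shape [5, 2]
    pareto_front = problem.gen_pareto_optimal_points(50, seed=0)  # shape [50, 2]
    del objective_values, pareto_front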
| 8,968 | 36.527197 | 99 | py |
trieste-develop | trieste-develop/trieste/objectives/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains examples of popular objective functions used in (Bayesian) optimization.
"""
from . import multi_objectives, utils
from .multi_objectives import DTLZ1, DTLZ2, VLMOP2, MultiObjectiveTestProblem
from .multifidelity_objectives import (
Linear2Fidelity,
Linear3Fidelity,
Linear5Fidelity,
SingleObjectiveMultifidelityTestProblem,
)
from .single_objectives import (
Ackley5,
Branin,
ConstrainedScaledBranin,
GramacyLee,
Hartmann3,
Hartmann6,
Levy8,
LogarithmicGoldsteinPrice,
Michalewicz2,
Michalewicz5,
Michalewicz10,
ObjectiveTestProblem,
Rosenbrock4,
ScaledBranin,
Shekel4,
SimpleQuadratic,
SingleObjectiveTestProblem,
Trid10,
)
from .utils import mk_multi_observer, mk_observer
| 1,380 | 27.770833 | 94 | py |
trieste-develop | trieste-develop/trieste/models/utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains auxiliary objects and functions that are used by multiple model types.
"""
from __future__ import annotations
import gpflow
import tensorflow as tf
from gpflow.utilities.traversal import _merge_leaf_components, leaf_components
from .. import logging
from ..data import Dataset
from .interfaces import ProbabilisticModel
def write_summary_data_based_metrics(
dataset: Dataset,
model: ProbabilisticModel,
prefix: str = "",
) -> None:
"""
Logging utility for writing TensorBoard summary of various metrics for model diagnostics.
:param dataset: The dataset to use for computing the metrics. All available data in the
dataset will be used.
:param model: The model to produce metrics for.
:param prefix: The prefix to add to "accuracy" category of model summaries.
"""
name = prefix + "accuracy"
predict = model.predict(dataset.query_points)
# basics
logging.histogram(f"{name}/predict_mean", predict[0])
logging.scalar(f"{name}/predict_mean__mean", tf.reduce_mean(predict[0]))
logging.histogram(f"{name}/predict_variance", predict[1])
logging.scalar(f"{name}/predict_variance__mean", tf.reduce_mean(predict[1]))
logging.histogram(f"{name}/observations", dataset.observations)
logging.scalar(f"{name}/observations_mean", tf.reduce_mean(dataset.observations))
logging.scalar(f"{name}/observations_variance", tf.math.reduce_variance(dataset.observations))
# accuracy metrics
diffs = tf.cast(dataset.observations, predict[0].dtype) - predict[0]
z_residuals = diffs / tf.math.sqrt(predict[1])
logging.histogram(f"{name}/absolute_error", tf.math.abs(diffs))
logging.histogram(f"{name}/z_residuals", z_residuals)
logging.scalar(f"{name}/root_mean_square_error", tf.math.sqrt(tf.reduce_mean(diffs**2)))
logging.scalar(f"{name}/mean_absolute_error", tf.reduce_mean(tf.math.abs(diffs)))
logging.scalar(f"{name}/z_residuals_std", tf.math.reduce_std(z_residuals))
# variance metrics
variance_error = predict[1] - diffs**2
logging.histogram(f"{name}/variance_error", variance_error)
logging.scalar(
f"{name}/root_mean_variance_error",
tf.math.sqrt(tf.reduce_mean(variance_error**2)),
)
def write_summary_kernel_parameters(kernel: gpflow.kernels.Kernel, prefix: str = "") -> None:
"""
Logging utility for writing TensorBoard summary of kernel parameters. Provides useful
diagnostics for models with a GPflow kernel. Only trainable parameters are logged.
:param kernel: The kernel to use for computing the metrics.
:param prefix: The prefix to add to "kernel" category of model summaries.
"""
components = _merge_leaf_components(leaf_components(kernel))
for k, v in components.items():
if v.trainable:
if tf.rank(v) == 0:
logging.scalar(f"{prefix}kernel.{k}", v)
elif tf.rank(v) == 1:
for i, vi in enumerate(v):
logging.scalar(f"{prefix}kernel.{k}[{i}]", vi)
def write_summary_likelihood_parameters(
likelihood: gpflow.likelihoods.Likelihood, prefix: str = ""
) -> None:
"""
Logging utility for writing TensorBoard summary of likelihood parameters. Provides useful
diagnostics for models with a GPflow likelihood. Only trainable parameters are logged.
:param likelihood: The likelihood to use for computing the metrics.
:param prefix: The prefix to add to "likelihood" category of model summaries.
"""
likelihood_components = _merge_leaf_components(leaf_components(likelihood))
for k, v in likelihood_components.items():
if v.trainable:
logging.scalar(f"{prefix}likelihood.{k}", v)
| 4,316 | 40.114286 | 98 | py |
trieste-develop | trieste-develop/trieste/models/interfaces.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Callable, Generic, Optional, TypeVar
import gpflow
import tensorflow as tf
from typing_extensions import Protocol, runtime_checkable
from ..data import Dataset
from ..types import TensorType
from ..utils import DEFAULTS
from ..utils.misc import get_variables
ProbabilisticModelType = TypeVar(
"ProbabilisticModelType", bound="ProbabilisticModel", contravariant=True
)
""" Contravariant type variable bound to :class:`~trieste.models.ProbabilisticModel`.
This is used to specify classes such as samplers and acquisition function builders that
take models as input parameters and might only support models with certain features. """
@runtime_checkable
class ProbabilisticModel(Protocol):
"""A probabilistic model.
NOTE: This and its subclasses are defined as Protocols rather than ABCs in order to allow
acquisition functions to depend on the intersection of different model types. As a result, it
is also possible to pass models to acquisition functions that don't explicitly inherit from
this class, as long as they implement all the necessary methods. This may change in future if
https://github.com/python/typing/issues/213 is implemented.
"""
@abstractmethod
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Return the mean and variance of the independent marginal distributions at each point in
``query_points``.
This is essentially a convenience method for :meth:`predict_joint`, where non-event
dimensions of ``query_points`` are all interpreted as broadcasting dimensions instead of
batch dimensions, and the covariance is squeezed to remove redundant nesting.
:param query_points: The points at which to make predictions, of shape [..., D].
:return: The mean and variance of the independent marginal distributions at each point in
``query_points``. For a predictive distribution with event shape E, the mean and
variance will both have shape [...] + E.
"""
raise NotImplementedError
@abstractmethod
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples from the independent marginal distributions at
``query_points``.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
raise NotImplementedError
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Return the mean and variance of the independent marginal distributions at each point in
``query_points`` for the observations, including noise contributions.
Note that this is not supported by all models.
:param query_points: The points at which to make predictions, of shape [..., D].
:return: The mean and variance of the independent marginal distributions at each point in
``query_points``. For a predictive distribution with event shape E, the mean and
variance will both have shape [...] + E.
"""
pass # (required so that mypy doesn't think this method is abstract)
raise NotImplementedError(
f"Model {self!r} does not support predicting observations, just the latent function"
)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model-specific information at a given optimization step.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
return
def get_module_with_variables(self, *dependencies: Any) -> tf.Module:
"""
Return a fresh module with the model's variables attached, which can then be extended
with methods and saved using tf.saved_model.
:param dependencies: Dependent objects whose variables should also be included.
"""
module = tf.Module()
module.saved_variables = get_variables(self)
for dependency in dependencies:
module.saved_variables += get_variables(dependency)
return module
@runtime_checkable
class TrainableProbabilisticModel(ProbabilisticModel, Protocol):
"""A trainable probabilistic model."""
@abstractmethod
def update(self, dataset: Dataset) -> None:
"""
Update the model given the specified ``dataset``. Does not train the model.
:param dataset: The data with which to update the model.
"""
raise NotImplementedError
@abstractmethod
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model objective with respect to (hyper)parameters given the specified
``dataset``.
:param dataset: The data with which to train the model.
"""
raise NotImplementedError
@runtime_checkable
class SupportsPredictJoint(ProbabilisticModel, Protocol):
"""A probabilistic model that supports predict_joint."""
@abstractmethod
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
:param query_points: The points at which to make predictions, of shape [..., B, D].
:return: The mean and covariance of the joint marginal distribution at each batch of points
in ``query_points``. For a predictive distribution with event shape E, the mean will
have shape [..., B] + E, and the covariance shape [...] + E + [B, B].
"""
raise NotImplementedError
@runtime_checkable
class SupportsGetKernel(ProbabilisticModel, Protocol):
"""A probabilistic model that supports get_kernel."""
@abstractmethod
def get_kernel(self) -> gpflow.kernels.Kernel:
"""
Return the kernel of the model.
:return: The kernel.
"""
raise NotImplementedError
@runtime_checkable
class TrainableSupportsGetKernel(TrainableProbabilisticModel, SupportsGetKernel, Protocol):
"""A trainable probabilistic model that supports get_kernel."""
pass
@runtime_checkable
class SupportsGetObservationNoise(ProbabilisticModel, Protocol):
"""A probabilistic model that supports get_observation_noise."""
@abstractmethod
def get_observation_noise(self) -> TensorType:
"""
Return the variance of observation noise.
:return: The observation noise.
"""
raise NotImplementedError
@runtime_checkable
class SupportsGetInternalData(ProbabilisticModel, Protocol):
"""A probabilistic model that stores and has access to its own training data."""
@abstractmethod
def get_internal_data(self) -> Dataset:
"""
Return the model's training data.
:return: The model's training data.
"""
raise NotImplementedError
@runtime_checkable
class SupportsGetMeanFunction(ProbabilisticModel, Protocol):
"""A probabilistic model that makes use of a mean function."""
@abstractmethod
def get_mean_function(self) -> Callable[[TensorType], TensorType]:
"""
Return the model's mean function, i.e. a parameterized function that can explain
coarse scale variations in the data, leaving just the residuals to be explained by
our model.
:return: The model's mean function.
"""
raise NotImplementedError
@runtime_checkable
class FastUpdateModel(ProbabilisticModel, Protocol):
"""A model with the ability to predict based on (possibly fantasized) supplementary data."""
@abstractmethod
def conditional_predict_f(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
Return the mean and variance of the independent marginal distributions at each point in
``query_points``, given an additional batch of (possibly fantasized) data.
:param query_points: The points at which to make predictions, of shape [M, D].
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:return: The mean and variance of the independent marginal distributions at each point in
``query_points``, with shape [..., L, M, M].
"""
raise NotImplementedError
@abstractmethod
def conditional_predict_joint(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
:param query_points: The points at which to make predictions, of shape [M, D].
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:return: The mean and covariance of the joint marginal distribution at each batch of points
in ``query_points``, with shape [..., L, M, M].
"""
raise NotImplementedError
@abstractmethod
def conditional_predict_f_sample(
self, query_points: TensorType, additional_data: Dataset, num_samples: int
) -> TensorType:
"""
Return ``num_samples`` samples from the independent marginal distributions at
``query_points``, given an additional batch of (possibly fantasized) data.
:param query_points: The points at which to sample, with shape [..., N, D].
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
raise NotImplementedError
def conditional_predict_y(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
Return the mean and variance of the independent marginal distributions at each point in
``query_points`` for the observations, including noise contributions, given an additional
batch of (possibly fantasized) data.
Note that this is not supported by all models.
:param query_points: The points at which to make predictions, of shape [M, D].
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:return: The mean and variance of the independent marginal distributions at each point in
``query_points``.
"""
raise NotImplementedError(
f"Model {self!r} does not support predicting observations, just the latent function"
)
@runtime_checkable
class HasTrajectorySampler(ProbabilisticModel, Protocol):
"""A probabilistic model that has an associated trajectory sampler."""
def trajectory_sampler(
self: ProbabilisticModelType,
) -> TrajectorySampler[ProbabilisticModelType]:
"""
Return a trajectory sampler that supports this model.
:return: The trajectory sampler.
"""
raise NotImplementedError
@runtime_checkable
class HasReparamSampler(ProbabilisticModel, Protocol):
"""A probabilistic model that has an associated reparametrization sampler."""
def reparam_sampler(
self: ProbabilisticModelType, num_samples: int
) -> ReparametrizationSampler[ProbabilisticModelType]:
"""
Return a reparametrization sampler providing `num_samples` samples.
:param num_samples: The desired number of samples.
:return: The reparametrization sampler.
"""
raise NotImplementedError
@runtime_checkable
class SupportsReparamSamplerObservationNoise(
HasReparamSampler, SupportsGetObservationNoise, Protocol
):
"""A model that supports both reparam_sampler and get_observation_noise."""
pass
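# --- Hedged illustration (not part of the library source) ---
# Intersection protocols such as the one above let a helper require several capabilities at
# once; the function below is an assumption used purely for illustration.
def _noise_scaled_samples(
    model: SupportsReparamSamplerObservationNoise, at: TensorType, sample_size: int = 10
) -> TensorType:
    sampler = model.reparam_sampler(sample_size)
    noise = model.get_observation_noise()
    return sampler.sample(at) / tf.sqrt(noise)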
class ModelStack(ProbabilisticModel, Generic[ProbabilisticModelType]):
r"""
A :class:`ModelStack` is a wrapper around a number of :class:`ProbabilisticModel`\ s of type
:class:`ProbabilisticModelType`. It combines the outputs of each model for predictions and
sampling.
**Note:** Only supports vector outputs (i.e. with event shape [E]). Outputs for any two models
are assumed independent. Each model may itself be single- or multi-output, and any one
multi-output model may have dependence between its outputs. When we speak of *event size* in
this class, we mean the output dimension for a given :class:`ProbabilisticModel`,
whether that is the :class:`ModelStack` itself, or one of the subsidiary
:class:`ProbabilisticModel`\ s within the :class:`ModelStack`. Of course, the event
size for a :class:`ModelStack` will be the sum of the event sizes of each subsidiary model.
"""
def __init__(
self,
model_with_event_size: tuple[ProbabilisticModelType, int],
*models_with_event_sizes: tuple[ProbabilisticModelType, int],
):
r"""
The order of individual models specified at :meth:`__init__` determines the order of the
:class:`ModelStack` output dimensions.
:param model_with_event_size: The first model, and the size of its output events.
**Note:** This is a separate parameter to ``models_with_event_sizes`` simply so that the
method signature requires at least one model. It is not treated specially.
:param \*models_with_event_sizes: The other models, and sizes of their output events.
"""
self._models, self._event_sizes = zip(*(model_with_event_size,) + models_with_event_sizes)
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
r"""
:param query_points: The points at which to make predictions, of shape [..., D].
:return: The predictions from all the wrapped models, concatenated along the event axis in
the same order as they appear in :meth:`__init__`. If the wrapped models have predictive
distributions with event shapes [:math:`E_i`], the mean and variance will both have
shape [..., :math:`\sum_i E_i`].
"""
means, vars_ = zip(*[model.predict(query_points) for model in self._models])
return tf.concat(means, axis=-1), tf.concat(vars_, axis=-1)
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
r"""
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples from all the wrapped models, concatenated along the event axis. For
wrapped models with predictive distributions with event shapes [:math:`E_i`], this has
shape [..., S, N, :math:`\sum_i E_i`], where S is the number of samples.
"""
samples = [model.sample(query_points, num_samples) for model in self._models]
return tf.concat(samples, axis=-1)
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
r"""
:param query_points: The points at which to make predictions, of shape [..., D].
:return: The predictions from all the wrapped models, concatenated along the event axis in
the same order as they appear in :meth:`__init__`. If the wrapped models have predictive
distributions with event shapes [:math:`E_i`], the mean and variance will both have
shape [..., :math:`\sum_i E_i`].
:raise NotImplementedError: If any of the models don't implement predict_y.
"""
means, vars_ = zip(*[model.predict_y(query_points) for model in self._models])
return tf.concat(means, axis=-1), tf.concat(vars_, axis=-1)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model-specific information at a given optimization step.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
for i, model in enumerate(self._models):
with tf.name_scope(f"{i}"):
model.log(dataset)
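# --- Hedged illustration (not part of the library source) ---
# A sketch of how ModelStack concatenates outputs along the event axis. _ToyModel is an
# assumption for illustration and only implements the methods that ModelStack.predict and
# ModelStack.sample actually call.
class _ToyModel:
    def __init__(self, offset: float):
        self._offset = offset

    def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
        mean = tf.reduce_sum(query_points, axis=-1, keepdims=True) + self._offset
        return mean, tf.ones_like(mean)

    def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
        mean, var = self.predict(query_points)
        eps = tf.random.normal(
            tf.concat([[num_samples], tf.shape(mean)], axis=0), dtype=mean.dtype
        )
        return mean[None, ...] + tf.sqrt(var)[None, ...] * eps


def _model_stack_demo() -> None:
    stack = ModelStack((_ToyModel(0.0), 1), (_ToyModel(1.0), 1))
    xs = tf.random.uniform([4, 3], dtype=tf.float64)
    mean, var = stack.predict(xs)  # both [4, 2]: one output column per wrapped model
    samples = stack.sample(xs, 5)  # [5, 4, 2]
    tf.debugging.assert_shapes([(mean, (4, 2)), (var, (4, 2)), (samples, (5, 4, 2))])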
class TrainableModelStack(ModelStack[TrainableProbabilisticModel], TrainableProbabilisticModel):
r"""
A :class:`TrainableModelStack` is a wrapper around a number of
:class:`TrainableProbabilisticModel`\ s.
It delegates training data to each model for updates and optimization.
    When we speak of *event size* in this class, we mean the output dimension for a given
    :class:`TrainableProbabilisticModel`, whether that is the :class:`TrainableModelStack`
    itself, or one of the subsidiary :class:`TrainableProbabilisticModel`\ s within the
    :class:`TrainableModelStack`.
Of course, the event size for a :class:`TrainableModelStack` will be the sum of the
event sizes of each subsidiary model.
"""
def update(self, dataset: Dataset) -> None:
"""
Update all the wrapped models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` along the event axis according to the
event sizes specified at :meth:`__init__`.
:param dataset: The query points and observations for *all* the wrapped models.
"""
observations = tf.split(dataset.observations, self._event_sizes, axis=-1)
for model, obs in zip(self._models, observations):
model.update(Dataset(dataset.query_points, obs))
def optimize(self, dataset: Dataset) -> None:
"""
Optimize all the wrapped models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` along the event axis according to the
event sizes specified at :meth:`__init__`.
:param dataset: The query points and observations for *all* the wrapped models.
"""
observations = tf.split(dataset.observations, self._event_sizes, axis=-1)
for model, obs in zip(self._models, observations):
model.optimize(Dataset(dataset.query_points, obs))
class HasReparamSamplerModelStack(ModelStack[HasReparamSampler], HasReparamSampler):
r"""
    A :class:`HasReparamSamplerModelStack` is a wrapper around a number of
    :class:`HasReparamSampler`\ s.
    It provides a :meth:`reparam_sampler` method only if all the submodel samplers
    are of the same type.
"""
def reparam_sampler(self, num_samples: int) -> ReparametrizationSampler[HasReparamSampler]:
"""
Return a reparameterization sampler providing `num_samples` samples across
all the models in the model stack. This is currently only implemented for
stacks made from models that have a :class:`BatchReparametrizationSampler`
as their reparameterization sampler.
:param num_samples: The desired number of samples.
:return: The reparametrization sampler.
:raise NotImplementedError: If the models in the stack do not share the
same :meth:`reparam_sampler`.
"""
samplers = [model.reparam_sampler(num_samples) for model in self._models]
unique_sampler_types = set(type(sampler) for sampler in samplers)
if len(unique_sampler_types) == 1:
# currently assume that all sampler constructors look the same
shared_sampler_type = type(samplers[0])
return shared_sampler_type(num_samples, self)
else:
raise NotImplementedError(
f"""
Reparameterization sampling is only currently supported for model
stacks built from models that use the same reparameterization sampler,
however, received samplers of types {unique_sampler_types}.
"""
)
class PredictJointModelStack(ModelStack[SupportsPredictJoint], SupportsPredictJoint):
r"""
A :class:`PredictJointModelStack` is a wrapper around a number of
:class:`SupportsPredictJoint`\ s.
It delegates :meth:`predict_joint` to each model.
"""
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
r"""
:param query_points: The points at which to make predictions, of shape [..., B, D].
:return: The predictions from all the wrapped models, concatenated along the event axis in
the same order as they appear in :meth:`__init__`. If the wrapped models have predictive
distributions with event shapes [:math:`E_i`], the mean will have shape
[..., B, :math:`\sum_i E_i`], and the covariance shape
[..., :math:`\sum_i E_i`, B, B].
"""
means, covs = zip(*[model.predict_joint(query_points) for model in self._models])
return tf.concat(means, axis=-1), tf.concat(covs, axis=-3)
# It's useful, though a bit ugly, to define the stack constructors for some model type combinations
class TrainableSupportsPredictJoint(TrainableProbabilisticModel, SupportsPredictJoint, Protocol):
"""A model that is both trainable and supports predict_joint."""
pass
class TrainablePredictJointModelStack(
TrainableModelStack, PredictJointModelStack, ModelStack[TrainableSupportsPredictJoint]
):
"""A stack of models that are both trainable and support predict_joint."""
pass
class TrainableSupportsPredictJointHasReparamSampler(
TrainableSupportsPredictJoint, HasReparamSampler, Protocol
):
"""A model that is trainable, supports predict_joint and has a reparameterization sampler."""
pass
class TrainablePredictJointReparamModelStack(
TrainablePredictJointModelStack,
HasReparamSamplerModelStack,
ModelStack[TrainableSupportsPredictJointHasReparamSampler],
):
"""A stack of models that are both trainable and support predict_joint."""
pass
class ReparametrizationSampler(ABC, Generic[ProbabilisticModelType]):
r"""
These samplers employ the *reparameterization trick* to draw samples from a
:class:`ProbabilisticModel`\ 's predictive distribution across a discrete set of
points. See :cite:`wilson2018maximizing` for details.
"""
def __init__(self, sample_size: int, model: ProbabilisticModelType):
r"""
Note that our :class:`TrainableModelStack` currently assumes that
all :class:`ReparametrizationSampler` constructors have **only** these inputs
and so will not work with more complicated constructors.
:param sample_size: The desired number of samples.
:param model: The model to sample from.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive.
"""
tf.debugging.assert_positive(sample_size)
self._sample_size = sample_size
self._model = model
self._initialized = tf.Variable(False) # Keep track of when we need to resample
def __repr__(self) -> str:
""""""
return f"{self.__class__.__name__}({self._sample_size!r}, {self._model!r})"
@abstractmethod
def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
"""
:param at: Where to sample the predictive distribution, with shape `[..., 1, D]`, for points
of dimension `D`.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:return: The samples, of shape `[..., S, B, L]`, where `S` is the `sample_size`, `B` is
the number of points per batch, and `L` is the number of latent model dimensions.
"""
raise NotImplementedError
def reset_sampler(self) -> None:
"""
Reset the sampler so that new samples are drawn at the next :meth:`sample` call.
"""
self._initialized.assign(False)
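# --- Hedged illustration (not part of the library source) ---
# A minimal concrete ReparametrizationSampler, assuming independent Gaussian marginals and
# float64 models: a standard-normal epsilon is frozen on the first call so that repeated
# calls with the same `at` return the same samples (the reparameterization trick).
class _IndependentReparamSampler(ReparametrizationSampler[ProbabilisticModel]):
    def __init__(self, sample_size: int, model: ProbabilisticModel):
        super().__init__(sample_size, model)
        self._eps = tf.Variable(
            tf.ones([sample_size, 0], dtype=tf.float64), shape=[sample_size, None]
        )

    def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
        mean, var = self._model.predict(at[..., None, :, :])  # [..., 1, 1, L]
        if not self._initialized:
            self._eps.assign(
                tf.random.normal([self._sample_size, tf.shape(mean)[-1]], dtype=tf.float64)
            )
            self._initialized.assign(True)
        # broadcast the frozen epsilon against the marginal mean and variance: [..., S, 1, L]
        return mean + tf.sqrt(var + jitter) * tf.cast(self._eps[:, None, :], var.dtype)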
TrajectoryFunction = Callable[[TensorType], TensorType]
"""
Type alias for trajectory functions. These have similar behaviour to an :const:`AcquisitionFunction`
but have additional sampling properties and support multiple model outputs.
A :const:`TrajectoryFunction` evaluates a batch of `B` samples, each across different sets
of `N` query points (of dimension `D`) i.e. takes input of shape `[N, B, D]` and returns
shape `[N, B, L]`, where `L` is the number of outputs of the model. Note that we require the `L`
dimension to be present, even if there is only one output.
A key property of these trajectory functions is that the same sample draw is evaluated
for all queries. This property is known as consistency.
"""
class TrajectoryFunctionClass(ABC):
"""
    A :class:`TrajectoryFunctionClass` is a trajectory function represented using a class
rather than as a standalone function. Using a class to represent a trajectory function
makes it easier to update and resample without having to retrace the function.
"""
@abstractmethod
def __call__(self, x: TensorType) -> TensorType:
"""Call trajectory function."""
class TrajectorySampler(ABC, Generic[ProbabilisticModelType]):
r"""
This class builds functions that approximate a trajectory sampled from an
underlying :class:`ProbabilisticModel`.
Unlike the :class:`ReparametrizationSampler`, a :class:`TrajectorySampler` provides
consistent samples (i.e ensuring that the same sample draw is used for all evaluations
of a particular trajectory function).
"""
def __init__(self, model: ProbabilisticModelType):
"""
:param model: The model to sample from.
"""
self._model = model
def __repr__(self) -> str:
""""""
return f"{self.__class__.__name__}({self._model!r})"
@abstractmethod
def get_trajectory(self) -> TrajectoryFunction:
"""
Sample a batch of `B` trajectories. Note that the batch size `B` is determined
by the first call of the :const:`TrajectoryFunction`. To change the batch size
of a :const:`TrajectoryFunction` after initialization, you must
recall :meth:`get_trajectory`.
:return: A trajectory function representing an approximate trajectory
from the model, taking an input of shape `[N, B, D]` and returning shape `[N, B, L]`,
where `L` is the number of outputs of the model.
"""
raise NotImplementedError
def resample_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
A :const:`TrajectoryFunction` can often be efficiently updated in-place to provide
a new sample without retracing. Note that if the underlying :class:`ProbabilisticModel`
has been updated, then we must call :meth:`update_trajectory` to get a new sample from
the new model.
Efficient implementations of a :class:`TrajectorySampler` will have a custom method here
to allow in-place resampling. However, the default behavior is just to make a new
trajectory from scratch.
:param trajectory: The trajectory function to be resampled.
:return: The new resampled trajectory function.
"""
return self.get_trajectory()
def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Update a :const:`TrajectoryFunction` to reflect an update in its
underlying :class:`ProbabilisticModel` and resample accordingly.
Efficient implementations will have a custom method here to allow in-place resampling
and updating. However, the default behavior is just to make a new trajectory from scratch.
:param trajectory: The trajectory function to be resampled.
:return: The new trajectory function updated for a new model
"""
return self.get_trajectory()
@runtime_checkable
class SupportsGetInducingVariables(ProbabilisticModel, Protocol):
"""A probabilistic model uses and has access to an inducing point approximation."""
@abstractmethod
def get_inducing_variables(self) -> tuple[TensorType, TensorType, TensorType, bool]:
"""
Return the model's inducing variables.
:return: Tensors containing: the inducing points (i.e. locations of the inducing
variables); the variational mean q_mu; the Cholesky decomposition of the
variational covariance q_sqrt; and a bool denoting if we are using whitened
or not whitened representations.
"""
raise NotImplementedError
@runtime_checkable
class SupportsCovarianceWithTopFidelity(ProbabilisticModel, Protocol):
"""A probabilistic model is multifidelity and has access to a method to calculate the
covariance between a point and the same point at the top fidelity"""
@property
@abstractmethod
def num_fidelities(self) -> int:
"""
The number of fidelities
"""
raise NotImplementedError
@abstractmethod
def covariance_with_top_fidelity(self, query_points: TensorType) -> TensorType:
"""
Calculate the covariance of the output at `query_point` and a given fidelity with the
highest fidelity output at the same `query_point`.
:param query_points: The query points to calculate the covariance for, of shape [N, D+1],
where the final column of the final dimension contains the fidelity of the query point
:return: The covariance with the top fidelity for the `query_points`, of shape [N, P]
"""
raise NotImplementedError
| 30,175 | 41.263305 | 100 | py |
trieste-develop | trieste-develop/trieste/models/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This package contains the primary interfaces for probabilistic models, :class:`ProbabilisticModel`
and its trainable subclass :class:`TrainableProbabilisticModel`. It also contains tooling for
creating :class:`TrainableProbabilisticModel`\ s from config.
"""
from . import gpflow, gpflux, keras, optimizer
from .interfaces import (
FastUpdateModel,
ModelStack,
ProbabilisticModel,
ProbabilisticModelType,
ReparametrizationSampler,
SupportsCovarianceWithTopFidelity,
TrainableModelStack,
TrainableProbabilisticModel,
TrajectoryFunction,
TrajectoryFunctionClass,
TrajectorySampler,
)
| 1,220 | 34.911765 | 98 | py |
trieste-develop | trieste-develop/trieste/models/optimizer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains common optimizers based on :class:`~tf.optimizers.Optimizer` that can be used
with models. Specific models can also sub-class these optimizers or implement their own, and should
register their loss functions using a :func:`create_loss_function`.
"""
from __future__ import annotations
import copy
from dataclasses import dataclass, field
from functools import singledispatch
from typing import Any, Callable, Iterable, Optional, Tuple, Union
import scipy
import tensorflow as tf
import tensorflow_probability as tfp
from ..data import Dataset
from ..types import TensorType
from ..utils import jit
TrainingData = Union[Tuple[TensorType, TensorType], Iterable[Tuple[TensorType, TensorType]]]
""" Type alias for a batch, or batches, of training data. """
DatasetTransformer = Callable[[Dataset, Optional[int]], TrainingData]
"""
Type alias for a function that converts a :class:`~trieste.data.Dataset` to batches of training
data.
"""
LossClosure = Callable[[], TensorType]
""" Type alias for a loss closure, typically used in optimization. """
OptimizeResult = Union[scipy.optimize.OptimizeResult, None]
"""
Optimization result. TensorFlow optimizers don't return any result, while the commonly used
scipy optimizer returns a :class:`~scipy.optimize.OptimizeResult`.
"""
@dataclass
class Optimizer:
"""Optimizer for training models with all the training data at once."""
optimizer: Any
"""
The underlying optimizer to use. For example, one of the subclasses of
:class:`~tensorflow.optimizers.Optimizer` could be used. Note that we use a flexible type `Any`
to allow for various optimizers that specific models might need to use.
"""
minimize_args: dict[str, Any] = field(default_factory=lambda: {})
""" The keyword arguments to pass to the :meth:`minimize` method of the :attr:`optimizer`. """
compile: bool = False
""" If `True`, the optimization process will be compiled with :func:`~tf.function`. """
def create_loss(self, model: tf.Module, dataset: Dataset) -> LossClosure:
"""
Build a loss function for the specified `model` with the `dataset` using a
:func:`create_loss_function`.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:return: The loss function.
"""
x = tf.convert_to_tensor(dataset.query_points)
y = tf.convert_to_tensor(dataset.observations)
data = (x, y)
return create_loss_function(model, data, self.compile)
def optimize(self, model: tf.Module, dataset: Dataset) -> OptimizeResult:
"""
Optimize the specified `model` with the `dataset`.
:param model: The model to optimize.
:param dataset: The data with which to optimize the `model`.
:return: The return value of the optimizer's :meth:`minimize` method.
"""
loss_fn = self.create_loss(model, dataset)
variables = model.trainable_variables
return self.optimizer.minimize(loss_fn, variables, **self.minimize_args)
@dataclass
class BatchOptimizer(Optimizer):
"""Optimizer for training models with mini-batches of training data."""
max_iter: int = 100
""" The number of iterations over which to optimize the model. """
batch_size: int = 100
""" The size of the mini-batches. """
dataset_builder: DatasetTransformer | None = None
""" A mapping from :class:`~trieste.observer.Observer` data to mini-batches. """
def create_loss(self, model: tf.Module, dataset: Dataset) -> LossClosure:
"""
Build a loss function for the specified `model` with the `dataset`.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:return: The loss function.
"""
def creator_fn(data: TrainingData) -> LossClosure:
return create_loss_function(model, data, self.compile)
if self.dataset_builder is None:
return creator_fn(
iter(
tf.data.Dataset.from_tensor_slices(dataset.astuple())
.shuffle(len(dataset))
.batch(self.batch_size)
.prefetch(tf.data.experimental.AUTOTUNE)
.repeat()
)
)
return creator_fn(self.dataset_builder(dataset, self.batch_size))
def optimize(self, model: tf.Module, dataset: Dataset) -> None:
"""
Optimize the specified `model` with the `dataset`.
:param model: The model to optimize.
:param dataset: The data with which to optimize the `model`.
"""
loss_fn = self.create_loss(model, dataset)
variables = model.trainable_variables
@jit(apply=self.compile)
def train_fn() -> None:
self.optimizer.minimize(loss_fn, variables, **self.minimize_args)
for _ in range(self.max_iter):
train_fn()
def __deepcopy__(self, memo: dict[int, object]) -> BatchOptimizer:
# workaround for https://github.com/tensorflow/tensorflow/issues/58973
# (keras optimizers not being deepcopyable in TF 2.11 and 2.12)
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if (
k == "optimizer"
and isinstance(v, tf.keras.optimizers.Optimizer)
and hasattr(v, "_distribution_strategy")
):
# avoid copying distribution strategy: reuse it instead
strategy = v._distribution_strategy
v._distribution_strategy = None
try:
setattr(result, k, copy.deepcopy(v, memo))
finally:
v._distribution_strategy = strategy
result.optimizer._distribution_strategy = strategy
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
@dataclass
class KerasOptimizer:
"""Optimizer wrapper for training models implemented with Keras."""
optimizer: tf.keras.optimizers.Optimizer
""" The underlying optimizer to use for training the model. """
fit_args: dict[str, Any] = field(default_factory=lambda: {})
"""
The keyword arguments to pass to the ``fit`` method of a :class:`~tf.keras.Model` instance.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments in the dictionary.
"""
loss: Optional[
Union[
tf.keras.losses.Loss, Callable[[TensorType, tfp.distributions.Distribution], TensorType]
]
] = None
""" Optional loss function for training the model. """
metrics: Optional[list[tf.keras.metrics.Metric]] = None
""" Optional metrics for monitoring the performance of the network. """
def __deepcopy__(self, memo: dict[int, object]) -> KerasOptimizer:
# workaround for https://github.com/tensorflow/tensorflow/issues/58973
# (keras optimizers not being deepcopyable in TF 2.11 and 2.12)
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k == "optimizer" and hasattr(v, "_distribution_strategy"):
# avoid copying distribution strategy: reuse it instead
strategy = v._distribution_strategy
v._distribution_strategy = None
try:
setattr(result, k, copy.deepcopy(v, memo))
finally:
v._distribution_strategy = strategy
result.optimizer._distribution_strategy = strategy
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
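# --- Hedged usage sketch (not part of the library source) ---
# Typical construction of a KerasOptimizer wrapper; the fit_args keys follow the standard
# tf.keras Model.fit signature and the specific values here are illustrative assumptions.
def _keras_optimizer_demo() -> KerasOptimizer:
    return KerasOptimizer(
        tf.keras.optimizers.Adam(learning_rate=0.01),
        fit_args={"batch_size": 32, "epochs": 100, "verbose": 0},
        loss=tf.keras.losses.MeanSquaredError(),
    )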
@singledispatch
def create_loss_function(model: Any, dataset: TrainingData, compile: bool = False) -> LossClosure:
"""
Generic function for building a loss function for a specified `model` and `dataset`.
    The implementation depends on the type of the model, which should use this function as a
decorator together with its register method to make a model-specific loss function available.
:param model: The model to build a loss function for.
:param dataset: The data with which to build the loss function.
:param compile: Whether to compile with :func:`tf.function`.
:return: The loss function.
"""
raise NotImplementedError(f"Unknown model {model} passed for loss function extraction")
| 9,230 | 37.949367 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/sampler.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC
from typing import Callable, cast
import gpflow.kernels
import tensorflow as tf
from gpflow.inducing_variables import InducingPoints
from gpflux.layers import GPLayer, LatentVariableLayer
from gpflux.layers.basis_functions.fourier_features import RandomFourierFeaturesCosine
from gpflux.math import compute_A_inv_b
from gpflux.models import DeepGP
from ...types import TensorType
from ...utils import DEFAULTS, flatten_leading_dims
from ..interfaces import (
ReparametrizationSampler,
TrajectoryFunction,
TrajectoryFunctionClass,
TrajectorySampler,
)
from .interface import GPfluxPredictor
class DeepGaussianProcessReparamSampler(ReparametrizationSampler[GPfluxPredictor]):
r"""
This sampler employs the *reparameterization trick* to approximate samples from a
:class:`GPfluxPredictor`\ 's predictive distribution, when the :class:`GPfluxPredictor` has
an underlying :class:`~gpflux.models.DeepGP`.
"""
def __init__(self, sample_size: int, model: GPfluxPredictor):
"""
:param sample_size: The number of samples for each batch of points. Must be positive.
:param model: The model to sample from.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive, if the
model is not a :class:`GPfluxPredictor`, of if its underlying ``model_gpflux`` is not a
:class:`~gpflux.models.DeepGP`.
"""
if not isinstance(model, GPfluxPredictor):
raise ValueError(
f"Model must be a gpflux.interface.GPfluxPredictor, received {type(model)}"
)
super().__init__(sample_size, model)
if not isinstance(self._model_gpflux, DeepGP):
raise ValueError(
f"GPflux model must be a gpflux.models.DeepGP, received {type(self._model_gpflux)}"
)
# Each element of _eps_list is essentially a lazy constant. It is declared and assigned an
# empty tensor here, and populated on the first call to sample
self._eps_list = [
tf.Variable(tf.ones([sample_size, 0], dtype=tf.float64), shape=[sample_size, None])
for _ in range(len(self._model_gpflux.f_layers))
]
@property
def _model_gpflux(self) -> tf.Module:
return self._model.model_gpflux
def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
"""
Return approximate samples from the `model` specified at :meth:`__init__`. Multiple calls to
:meth:`sample`, for any given :class:`DeepGaussianProcessReparamSampler` and ``at``, will
produce the exact same samples. Calls to :meth:`sample` on *different*
:class:`DeepGaussianProcessReparamSampler` instances will produce different samples.
:param at: Where to sample the predictive distribution, with shape `[..., 1, D]`, for points
of dimension `D`.
:param jitter: The size of the jitter to use when stabilizing the Cholesky
decomposition of the covariance matrix.
:return: The samples, of shape `[..., S, 1, L]`, where `S` is the `sample_size` and `L` is
the number of latent model dimensions.
:raise ValueError (or InvalidArgumentError): If ``at`` has an invalid shape or ``jitter``
is negative.
"""
tf.debugging.assert_shapes([(at, [..., 1, None])])
tf.debugging.assert_greater_equal(jitter, 0.0)
samples = tf.repeat(at[..., None, :, :], self._sample_size, axis=-3) # [..., S, 1, D]
for i, layer in enumerate(self._model_gpflux.f_layers):
if isinstance(layer, LatentVariableLayer):
if not self._initialized:
self._eps_list[i].assign(layer.prior.sample([tf.shape(samples)[:-1]]))
samples = layer.compositor([samples, self._eps_list[i]])
continue
mean, var = layer.predict(samples, full_cov=False, full_output_cov=False)
var = var + jitter
if not self._initialized:
self._eps_list[i].assign(
tf.random.normal([self._sample_size, tf.shape(mean)[-1]], dtype=tf.float64)
) # [S, L]
samples = mean + tf.sqrt(var) * tf.cast(self._eps_list[i][:, None, :], var.dtype)
if not self._initialized:
self._initialized.assign(True)
return samples # [..., S, 1, L]
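# --- Hedged usage sketch (not part of the library source) ---
# Assuming `model` is a trieste GPflux wrapper whose model_gpflux attribute is a
# gpflux.models.DeepGP, reparameterization sampling of a batch of query points might look
# like this; shapes follow the docstring of the sample method above.
def _reparam_sampling_demo(model: GPfluxPredictor, query_points: TensorType) -> TensorType:
    sampler = DeepGaussianProcessReparamSampler(100, model)
    # query_points: [N, D] -> at: [N, 1, D]; returned samples: [N, 100, 1, L]
    return sampler.sample(query_points[:, None, :])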
class DeepGaussianProcessDecoupledTrajectorySampler(TrajectorySampler[GPfluxPredictor]):
r"""
This sampler employs decoupled sampling (see :cite:`wilson2020efficiently`) to build functions
that approximate a trajectory sampled from an underlying deep Gaussian process model. In
particular, this sampler provides trajectory functions for :class:`GPfluxPredictor`\s with
underlying :class:`~gpflux.models.DeepGP` models by using a feature decomposition using both
random Fourier features and canonical features centered at inducing point locations. This allows
for cheap approximate trajectory samples, as opposed to exact trajectory sampling, which scales
cubically in the number of query points.
"""
def __init__(
self,
model: GPfluxPredictor,
num_features: int = 1000,
):
"""
:param model: The model to sample from.
:param num_features: The number of random Fourier features to use.
:raise ValueError (or InvalidArgumentError): If the model is not a :class:`GPfluxPredictor`,
or its underlying ``model_gpflux`` is not a :class:`~gpflux.models.DeepGP`, or
``num_features`` is not positive.
"""
if not isinstance(model, GPfluxPredictor):
raise ValueError(
f"Model must be a gpflux.interface.GPfluxPredictor, received {type(model)}"
)
if not isinstance(model.model_gpflux, DeepGP):
raise ValueError(
f"GPflux model must be a gpflux.models.DeepGP, received {type(model.model_gpflux)}"
)
super().__init__(model)
tf.debugging.assert_positive(num_features)
self._num_features = num_features
def __repr__(self) -> str:
""""""
return f"""{self.__class__.__name__}(
{self._model!r},
{self._num_features!r})
"""
def get_trajectory(self) -> TrajectoryFunction:
"""
Generate an approximate function draw (trajectory) from the deep GP model.
:return: A trajectory function representing an approximate trajectory from the deep GP,
taking an input of shape `[N, B, D]` and returning shape `[N, B, L]`.
"""
return dgp_feature_decomposition_trajectory(self._model, self._num_features)
def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Efficiently update a :const:`TrajectoryFunction` to reflect an update in its underlying
:class:`ProbabilisticModel` and resample accordingly.
:param trajectory: The trajectory function to be updated and resampled.
:return: The updated and resampled trajectory function.
:raise InvalidArgumentError: If ``trajectory`` is not a
:class:`dgp_feature_decomposition_trajectory`
"""
tf.debugging.Assert(
isinstance(trajectory, dgp_feature_decomposition_trajectory), [tf.constant([])]
)
cast(dgp_feature_decomposition_trajectory, trajectory).update()
return trajectory
def resample_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Efficiently resample a :const:`TrajectoryFunction` in-place to avoid function retracing
with every new sample.
:param trajectory: The trajectory function to be resampled.
:return: The new resampled trajectory function.
:raise InvalidArgumentError: If ``trajectory`` is not a
:class:`dgp_feature_decomposition_trajectory`
"""
tf.debugging.Assert(
isinstance(trajectory, dgp_feature_decomposition_trajectory), [tf.constant([])]
)
cast(dgp_feature_decomposition_trajectory, trajectory).resample()
return trajectory
class DeepGaussianProcessDecoupledLayer(ABC):
"""
Layer that samples an approximate decoupled trajectory for a GPflux
:class:`~gpflux.layers.GPLayer` using Matheron's rule (:cite:`wilson2020efficiently`). Note
that the only multi-output kernel that is supported is a
:class:`~gpflow.kernels.SharedIndependent` kernel.
"""
def __init__(
self,
model: GPfluxPredictor,
layer_number: int,
num_features: int = 1000,
):
"""
:param model: The model to sample from.
:param layer_number: The index of the layer that we wish to sample from.
:param num_features: The number of features to use in the random feature approximation.
:raise ValueError (or InvalidArgumentError): If the layer is not a
:class:`~gpflux.layers.GPLayer`, the layer's kernel is not supported, or if
``num_features`` is not positive.
"""
self._model = model
self._layer_number = layer_number
layer = self._layer
if not isinstance(layer, GPLayer):
raise ValueError(
f"Layers other than gpflux.layers.GPLayer are not currently supported, received"
f"{type(layer)}"
)
if isinstance(
layer.inducing_variable, gpflow.inducing_variables.SeparateIndependentInducingVariables
):
raise ValueError(
"SeparateIndependentInducingVariables are not currently supported for decoupled "
"sampling."
)
tf.debugging.assert_positive(num_features)
self._num_features = num_features
self._kernel = layer.kernel
self._feature_functions = ResampleableDecoupledDeepGaussianProcessFeatureFunctions(
layer, num_features
)
self._weight_sampler = self._prepare_weight_sampler()
self._initialized = tf.Variable(False)
self._weights_sample = tf.Variable(
tf.ones([0, 0, 0], dtype=tf.float64), shape=[None, None, None]
)
self._batch_size = tf.Variable(0, dtype=tf.int32)
@property
def _layer(self) -> GPLayer:
return self._model.model_gpflux.f_layers[self._layer_number]
def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, P]
"""
Evaluate trajectory function for layer at input.
:param x: Input location with shape `[N, B, D]`, where `N` is the number of points, `B` is
the batch dimension, and `D` is the input dimensionality.
:return: Trajectory for the layer evaluated at the input, with shape `[N, B, P]`, where `P`
is the number of latent GPs in the layer.
:raise InvalidArgumentError: If the provided batch size does not match with the layer's
batch size.
"""
if not self._initialized:
self._batch_size.assign(tf.shape(x)[-2])
self.resample()
self._initialized.assign(True)
tf.debugging.assert_equal(
tf.shape(x)[-2],
self._batch_size.value(),
message=f"""
This trajectory only supports batch sizes of {self._batch_size}.
If you wish to change the batch size you must get a new trajectory
by calling the get_trajectory method of the trajectory sampler.
""",
)
flat_x, unflatten = flatten_leading_dims(x)
flattened_feature_evaluations = self._feature_functions(
flat_x
) # [P, N, L + M] or [N, L + M]
if self._feature_functions.is_multioutput:
flattened_feature_evaluations = tf.transpose(
flattened_feature_evaluations, perm=[1, 2, 0]
)
feature_evaluations = unflatten(flattened_feature_evaluations) # [N, B, L + M, P]
else:
feature_evaluations = unflatten(flattened_feature_evaluations)[
..., None
] # [N, B, L + M, 1]
return tf.reduce_sum(
feature_evaluations * self._weights_sample, -2
) + self._layer.mean_function(
x
) # [N, B, P]
def resample(self) -> None:
"""
Efficiently resample in-place without retracing.
"""
self._weights_sample.assign(self._weight_sampler(self._batch_size))
def update(self) -> None:
"""
Efficiently update the trajectory with a new weight distribution and resample its weights.
"""
self._feature_functions.resample()
self._weight_sampler = self._prepare_weight_sampler()
self.resample()
def _prepare_weight_sampler(self) -> Callable[[int], TensorType]: # [B] -> [B, L+M, P]
"""
Prepare the sampler function that provides samples of the feature weights for both the
RFF and canonical feature functions, i.e. we return a function that takes in a batch size
`B` and returns `B` samples for the weights of each of the `L` RFF features and `M`
canonical features for `P` outputs.
"""
if isinstance(self._layer.inducing_variable, InducingPoints):
inducing_points = self._layer.inducing_variable.Z # [M, D]
else:
inducing_points = self._layer.inducing_variable.inducing_variable.Z # [M, D]
q_mu = self._layer.q_mu # [M, P]
q_sqrt = self._layer.q_sqrt # [P, M, M]
if self._feature_functions.is_multioutput:
Kmm = self._kernel.K(
inducing_points, inducing_points, full_output_cov=False
) # [P, M, M]
else:
Kmm = self._kernel.K(inducing_points, inducing_points) # [M, M]
Kmm += tf.eye(tf.shape(inducing_points)[0], dtype=Kmm.dtype) * DEFAULTS.JITTER
whiten = self._layer.whiten
M, P = tf.shape(q_mu)[0], tf.shape(q_mu)[1]
tf.debugging.assert_shapes(
[
(inducing_points, ["M", "D"]),
(q_mu, ["M", "P"]),
(q_sqrt, ["P", "M", "M"]),
]
)
def weight_sampler(batch_size: int) -> TensorType:
prior_weights = tf.random.normal(
[batch_size, P, self._num_features, 1], dtype=tf.float64
) # [B, P, L, 1]
u_noise_sample = tf.matmul(
q_sqrt, # [P, M, M]
tf.random.normal([batch_size, P, M, 1], dtype=tf.float64), # [B, P, M, 1]
) # [B, P, M, 1]
u_sample = tf.linalg.matrix_transpose(q_mu)[..., None] + u_noise_sample # [B, P, M, 1]
if whiten:
Luu = tf.linalg.cholesky(Kmm) # [M, M] or [P, M, M]
u_sample = tf.matmul(Luu, u_sample) # [B, P, M, 1]
phi_Z = self._feature_functions(inducing_points)[
..., : self._num_features
] # [M, L] or [P, M, L]
weight_space_prior_Z = phi_Z @ prior_weights # [B, P, M, 1]
diff = u_sample - weight_space_prior_Z # [B, P, M, 1]
v = compute_A_inv_b(Kmm, diff) # [B, P, M, 1]
return tf.transpose(
tf.concat([prior_weights, v], axis=2)[..., 0], perm=[0, 2, 1]
) # [B, L + M, P]
return weight_sampler
class ResampleableDecoupledDeepGaussianProcessFeatureFunctions(RandomFourierFeaturesCosine):
"""
A wrapper around GPflux's random Fourier feature function that allows for efficient in-place
updating when generating new decompositions. In addition to providing Fourier features,
this class concatenates a layer's Fourier feature expansion with evaluations of the canonical
basis functions.
"""
def __init__(self, layer: GPLayer, n_components: int):
"""
:param layer: The layer that will be approximated by the feature functions.
:param n_components: The number of features.
:raise ValueError: If the layer is not a :class:`~gpflux.layers.GPLayer`.
"""
if not isinstance(layer, GPLayer):
raise ValueError(
f"ResampleableDecoupledDeepGaussianProcessFeatureFunctions currently only work with"
f"gpflux.layers.GPLayer layers, received {type(layer)} instead"
)
self._kernel = layer.kernel
self._n_components = n_components
super().__init__(self._kernel, self._n_components, dtype=tf.float64)
if isinstance(layer.inducing_variable, InducingPoints):
inducing_points = layer.inducing_variable.Z
else:
inducing_points = layer.inducing_variable.inducing_variable.Z
if self.is_multioutput:
self._canonical_feature_functions = lambda x: tf.linalg.matrix_transpose(
self._kernel.K(inducing_points, x, full_output_cov=False)
)
else:
self._canonical_feature_functions = lambda x: tf.linalg.matrix_transpose(
self._kernel.K(inducing_points, x)
)
dummy_X = inducing_points[0:1, :]
self.__call__(dummy_X)
self.b: TensorType = tf.Variable(self.b)
self.W: TensorType = tf.Variable(self.W)
def resample(self) -> None:
"""
Resample weights and biases.
"""
if not hasattr(self, "_bias_init"):
self.b.assign(self._sample_bias(tf.shape(self.b), dtype=self._dtype))
self.W.assign(self._sample_weights(tf.shape(self.W), dtype=self._dtype))
else:
self.b.assign(self._bias_init(tf.shape(self.b), dtype=self._dtype))
self.W.assign(self._weights_init(tf.shape(self.W), dtype=self._dtype))
def __call__(self, x: TensorType) -> TensorType: # [N, D] -> [N, L + M] or [P, N, L + M]
"""
        Evaluate and combine prior basis functions and canonical basis functions at the input.
"""
fourier_feature_eval = super().__call__(x) # [N, L] or [P, N, L]
canonical_feature_eval = self._canonical_feature_functions(x) # [P, N, M] or [N, M]
return tf.concat([fourier_feature_eval, canonical_feature_eval], axis=-1) # [P, N, L + M]
class dgp_feature_decomposition_trajectory(TrajectoryFunctionClass):
r"""
An approximate sample from a deep Gaussian process's posterior, where the samples are
represented as a finite weighted sum of features. This class essentially takes a list of
:class:`DeepGaussianProcessDecoupledLayer`\s and iterates through them to sample, update and
resample.
"""
def __init__(self, model: GPfluxPredictor, num_features: int):
"""
:param model: The model to sample from.
:param num_features: The number of random Fourier features to use.
"""
self._sampling_layers = [
DeepGaussianProcessDecoupledLayer(model, i, num_features)
for i in range(len(model.model_gpflux.f_layers))
]
@tf.function
def __call__(self, x: TensorType) -> TensorType:
"""
Call trajectory function by looping through layers.
:param x: Input location with shape `[N, B, D]`, where `N` is the number of points, `B` is
the batch dimension, and `D` is the input dimensionality.
:return: Trajectory samples with shape `[N, B, L]`, where `L` is the number of outputs.
"""
for layer in self._sampling_layers:
x = layer(x)
return x
def update(self) -> None:
"""Update the layers with new features and weights."""
for layer in self._sampling_layers:
layer.update()
def resample(self) -> None:
"""Resample the layer weights."""
for layer in self._sampling_layers:
layer.resample()
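# --- Hedged usage sketch (not part of the library source) ---
# Assuming `model` wraps a gpflux.models.DeepGP and `input_dim` matches its input
# dimensionality, decoupled trajectory sampling might look like this; the feature count and
# query shapes are illustrative assumptions.
def _trajectory_sampling_demo(model: GPfluxPredictor, input_dim: int) -> TensorType:
    sampler = DeepGaussianProcessDecoupledTrajectorySampler(model, num_features=256)
    trajectory = sampler.get_trajectory()
    xs = tf.random.uniform([10, 2, input_dim], dtype=tf.float64)  # [N, B, D]
    ys = trajectory(xs)  # [N, B, L]; repeated evaluations reuse the same sample draw
    sampler.resample_trajectory(trajectory)  # in-place resample without retracing
    return ys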
| 20,609 | 40.302605 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains builders for GPflux models supported in Trieste. We found the default
configurations used here to work well in most situations, but they should not be taken as
universally good solutions.
"""
from __future__ import annotations
from typing import Optional
import gpflow
import numpy as np
import tensorflow as tf
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.models import DeepGP
from ...data import Dataset
from ...space import Box, SearchSpace
from ...types import TensorType
NUM_LAYERS: int = 2
"""
Default number of layers in the deep gaussian process model.
"""
MAX_NUM_INDUCING_POINTS: int = 500
"""
Default maximum number of inducing points.
"""
NUM_INDUCING_POINTS_PER_DIM: int = 50
"""
Default number of inducing points per dimension of the search space.
"""
INNER_LAYER_SQRT_FACTOR: float = 1e-5
"""
Default value for a multiplicative factor used to rescale hidden layers.
"""
LIKELIHOOD_VARIANCE: float = 1e-3
"""
Default value for an initial noise variance in the likelihood function.
"""
def build_vanilla_deep_gp(
data: Dataset,
search_space: SearchSpace,
num_layers: int = NUM_LAYERS,
num_inducing_points: Optional[int] = None,
inner_layer_sqrt_factor: float = INNER_LAYER_SQRT_FACTOR,
likelihood_variance: float = LIKELIHOOD_VARIANCE,
trainable_likelihood: bool = True,
) -> DeepGP:
"""
Build a :class:`~gpflux.models.DeepGP` model with sensible initial parameters. We found the
    default configuration used here to work well in most situations, but it should not be taken as a
universally good solution.
Note that although we set all the relevant parameters to sensible values, we rely on
``build_constant_input_dim_deep_gp`` from :mod:`~gpflux.architectures` to build the model.
:param data: Dataset from the initial design, used to estimate the variance of observations
and to provide query points which are used to determine inducing point locations with
k-means.
:param search_space: Search space for performing Bayesian optimization. Used for initialization
of inducing locations if ``num_inducing_points`` is larger than the amount of data.
:param num_layers: Number of layers in deep GP. By default set to ``NUM_LAYERS``.
:param num_inducing_points: Number of inducing points to use in each layer. If left unspecified
(default), this number is set to either ``NUM_INDUCING_POINTS_PER_DIM``*dimensionality of
the search space or value given by ``MAX_NUM_INDUCING_POINTS``, whichever is smaller.
:param inner_layer_sqrt_factor: A multiplicative factor used to rescale hidden layers, see
:class:`~gpflux.architectures.Config` for details. By default set to
``INNER_LAYER_SQRT_FACTOR``.
:param likelihood_variance: Initial noise variance in the likelihood function, see
:class:`~gpflux.architectures.Config` for details. By default set to
``LIKELIHOOD_VARIANCE``.
:param trainable_likelihood: Trainable likelihood variance.
:return: A :class:`~gpflux.models.DeepGP` model with sensible default settings.
:raise: If non-positive ``num_layers``, ``inner_layer_sqrt_factor``, ``likelihood_variance``
or ``num_inducing_points`` is provided.
"""
tf.debugging.assert_positive(num_layers)
tf.debugging.assert_positive(inner_layer_sqrt_factor)
tf.debugging.assert_positive(likelihood_variance)
# Input data to ``build_constant_input_dim_deep_gp`` must be np.ndarray for k-means algorithm
query_points = data.query_points.numpy()
empirical_mean, empirical_variance, num_data_points = _get_data_stats(data)
if num_inducing_points is None:
num_inducing_points = min(
MAX_NUM_INDUCING_POINTS, NUM_INDUCING_POINTS_PER_DIM * int(search_space.dimension)
)
else:
tf.debugging.assert_positive(num_inducing_points)
# Pad query_points with additional random values to provide enough inducing points
if num_inducing_points > len(query_points):
if isinstance(search_space, Box):
additional_points = search_space.sample_sobol(
num_inducing_points - len(query_points)
).numpy()
else:
additional_points = search_space.sample(num_inducing_points - len(query_points)).numpy()
query_points = np.concatenate([query_points, additional_points], 0)
config = Config(
num_inducing_points,
inner_layer_sqrt_factor,
likelihood_variance,
whiten=True, # whiten = False not supported yet in GPflux for this model
)
model = build_constant_input_dim_deep_gp(query_points, num_layers, config)
model.f_layers[-1].kernel.kernel.variance.assign(empirical_variance)
model.f_layers[-1].mean_function = gpflow.mean_functions.Constant(empirical_mean)
gpflow.set_trainable(model.likelihood_layer.likelihood.variance, trainable_likelihood)
# If num_inducing_points is larger than the number of provided query points, the initialization
# for num_data_points will be wrong. We therefore make sure it is set correctly.
model.num_data = num_data_points
for layer in model.f_layers:
layer.num_data = num_data_points
return model
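# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example of building a two-layer DGP from a toy initial design. The
# data values and search space bounds are made up; `Box` and `Dataset` are the
# trieste classes imported above.
def _example_build_vanilla_deep_gp() -> DeepGP:
    search_space = Box([0.0], [1.0])
    query_points = search_space.sample(20)
    observations = tf.sin(3.0 * query_points)  # toy 1D objective
    data = Dataset(query_points, observations)
    return build_vanilla_deep_gp(data, search_space, num_layers=2)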
def _get_data_stats(data: Dataset) -> tuple[TensorType, TensorType, int]:
empirical_variance = tf.math.reduce_variance(data.observations)
empirical_mean = tf.math.reduce_mean(data.observations)
num_data_points = len(data.observations)
return empirical_mean, empirical_variance, num_data_points
| 6,191 | 38.43949 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any, Callable, Optional
import dill
import gpflow
import tensorflow as tf
from gpflow.inducing_variables import InducingPoints
from gpflux.layers import GPLayer, LatentVariableLayer
from gpflux.models import DeepGP
from tensorflow.python.keras.callbacks import Callback
from ... import logging
from ...data import Dataset
from ...types import TensorType
from ..interfaces import (
HasReparamSampler,
HasTrajectorySampler,
ReparametrizationSampler,
TrainableProbabilisticModel,
TrajectorySampler,
)
from ..optimizer import KerasOptimizer
from ..utils import (
write_summary_data_based_metrics,
write_summary_kernel_parameters,
write_summary_likelihood_parameters,
)
from .interface import GPfluxPredictor
from .sampler import (
DeepGaussianProcessDecoupledTrajectorySampler,
DeepGaussianProcessReparamSampler,
)
class DeepGaussianProcess(
GPfluxPredictor, TrainableProbabilisticModel, HasReparamSampler, HasTrajectorySampler
):
"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflux :class:`~gpflux.models.DeepGP` with
:class:`GPLayer` or :class:`LatentVariableLayer`: this class does not support e.g. keras layers.
We provide simple architectures that can be used with this class in the `architectures.py` file.
"""
def __init__(
self,
model: DeepGP | Callable[[], DeepGP],
optimizer: KerasOptimizer | None = None,
num_rff_features: int = 1000,
continuous_optimisation: bool = True,
):
"""
:param model: The underlying GPflux deep Gaussian process model. Passing in a named closure
rather than a model can help when copying or serialising.
:param optimizer: The optimizer wrapper with necessary specifications for compiling and
training the model. Defaults to :class:`~trieste.models.optimizer.KerasOptimizer` with
:class:`~tf.optimizers.Adam` optimizer, mean squared error metric and a dictionary of
default arguments for the Keras `fit` method: 400 epochs, batch size of 1000, and
verbose 0. A custom callback that reduces the optimizer learning rate is used as well.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when calling :meth:`trajectory_sampler`. We use a default of 1000 as it typically
performs well for a wide range of kernels. Note that very smooth kernels (e.g. RBF) can
be well-approximated with fewer features.
:param continuous_optimisation: if True (default), the optimizer will keep track of the
number of epochs across BO iterations and use this number as initial_epoch. This is
essential to allow monitoring of model training across BO iterations.
        :raise ValueError: If ``model`` has unsupported layers, ``num_rff_features`` is not positive,
or if the ``optimizer`` is not of a supported type.
"""
if isinstance(model, DeepGP):
self._model_closure = None
else:
self._model_closure = model
model = model()
for layer in model.f_layers:
if not isinstance(layer, (GPLayer, LatentVariableLayer)):
raise ValueError(
f"`DeepGaussianProcess` can only be built out of `GPLayer` or"
f"`LatentVariableLayer`, received {type(layer)} instead."
)
super().__init__(optimizer)
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater or equal to zero, got {num_rff_features}."
)
self._num_rff_features = num_rff_features
if not isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"Optimizer for `DeepGaussianProcess` must be an instance of a "
f"`tf.optimizers.Optimizer` or `tf.keras.optimizers.Optimizer`, "
f"received {type(self.optimizer.optimizer)} instead."
)
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.original_lr = self.optimizer.optimizer.lr.numpy()
epochs = 400
def scheduler(epoch: int, lr: float) -> float:
if epoch == epochs // 2:
return lr * 0.1
else:
return lr
if not self.optimizer.fit_args:
self.optimizer.fit_args = {
"verbose": 0,
"epochs": epochs,
"batch_size": 1000,
"callbacks": [tf.keras.callbacks.LearningRateScheduler(scheduler)],
}
if self.optimizer.metrics is None:
self.optimizer.metrics = ["mse"]
self._model_gpflux = model
# inputs and targets need to be redone with a float64 dtype to avoid setting the keras
# backend to float64, this is likely to be fixed in GPflux, see issue:
# https://github.com/secondmind-labs/GPflux/issues/76
self._model_gpflux.inputs = tf.keras.Input(
tuple(self._model_gpflux.inputs.shape[:-1]),
name=self._model_gpflux.inputs.name,
dtype=tf.float64,
)
self._model_gpflux.targets = tf.keras.Input(
tuple(self._model_gpflux.targets.shape[:-1]),
name=self._model_gpflux.targets.name,
dtype=tf.float64,
)
self._model_keras = model.as_training_model()
self._model_keras.compile(self.optimizer.optimizer, metrics=self.optimizer.metrics)
self._absolute_epochs = 0
self._continuous_optimisation = continuous_optimisation
def __getstate__(self) -> dict[str, Any]:
state = self.__dict__.copy()
# when using a model closure, store the model parameters, not the model itself
if self._model_closure is not None:
state["_model_gpflux"] = gpflow.utilities.parameter_dict(self._model_gpflux)
state["_model_keras"] = gpflow.utilities.parameter_dict(self._model_keras)
# use to_json and get_weights to save any optimizer fit_arg callback models
callbacks: list[Callback] = self._optimizer.fit_args.get("callbacks", [])
callback: Callback
saved_models: list[KerasOptimizer] = []
tensorboard_writers: list[dict[str, Any]] = []
try:
for callback in callbacks:
# serialize the callback models before pickling the optimizer
saved_models.append(callback.model)
if callback.model is self._model_keras:
# no need to serialize the main model, just use a special value instead
callback.model = ...
elif callback.model:
callback.model = (callback.model.to_json(), callback.model.get_weights())
# don't pickle tensorboard writers either; they'll be recreated when needed
if isinstance(callback, tf.keras.callbacks.TensorBoard):
tensorboard_writers.append(callback._writers)
callback._writers = {}
state["_optimizer"] = dill.dumps(state["_optimizer"])
except Exception as e:
raise NotImplementedError(
"Failed to copy DeepGaussianProcess optimizer due to unsupported callbacks."
) from e
finally:
# revert original state, even if the pickling failed
for callback, model in zip(self._optimizer.fit_args.get("callbacks", []), saved_models):
callback.model = model
for callback, writers in zip(
(cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)),
tensorboard_writers,
):
callback._writers = writers
# do the same thing for the history callback
if self._model_keras.history:
history_model = self._model_keras.history.model
try:
if history_model is self._model_keras:
# no need to serialize the main model, just use a special value instead
self._model_keras.history.model = ...
elif history_model:
self._model_keras.history.model = (
history_model.to_json(),
history_model.get_weights(),
)
state["_history"] = dill.dumps(self._model_keras.history)
finally:
self._model_keras.history.model = history_model
return state
def __setstate__(self, state: dict[str, Any]) -> None:
self.__dict__.update(state)
# regenerate the models using the model closure
if self._model_closure is not None:
dgp: DeepGP = state["_model_closure"]()
self._model_gpflux = dgp
# inputs and targets need to be redone with a float64 dtype to avoid setting the keras
# backend to float64, this is likely to be fixed in GPflux, see issue:
# https://github.com/secondmind-labs/GPflux/issues/76
self._model_gpflux.inputs = tf.keras.Input(
tuple(self._model_gpflux.inputs.shape[:-1]),
name=self._model_gpflux.inputs.name,
dtype=tf.float64,
)
self._model_gpflux.targets = tf.keras.Input(
tuple(self._model_gpflux.targets.shape[:-1]),
name=self._model_gpflux.targets.name,
dtype=tf.float64,
)
self._model_keras = dgp.as_training_model()
# unpickle the optimizer, and restore all the callback models
self._optimizer = dill.loads(self._optimizer)
for callback in self._optimizer.fit_args.get("callbacks", []):
if callback.model is ...:
callback.set_model(self._model_keras)
elif callback.model:
model_json, weights = callback.model
model = tf.keras.models.model_from_json(model_json)
model.set_weights(weights)
callback.set_model(model)
# recompile the model
self._model_keras.compile(self._optimizer.optimizer)
# assign the model parameters
if self._model_closure is not None:
gpflow.utilities.multiple_assign(self._model_gpflux, state["_model_gpflux"])
gpflow.utilities.multiple_assign(self._model_keras, state["_model_keras"])
# restore the history (including any model it contains)
if "_history" in state:
self._model_keras.history = dill.loads(state["_history"])
if self._model_keras.history.model is ...:
self._model_keras.history.set_model(self._model_keras)
elif self._model_keras.history.model:
model_json, weights = self._model_keras.history.model
model = tf.keras.models.model_from_json(model_json)
model.set_weights(weights)
self._model_keras.history.set_model(model)
def __repr__(self) -> str:
""""""
return f"DeepGaussianProcess({self.model_gpflux!r}, {self.optimizer.optimizer!r})"
@property
def model_gpflux(self) -> DeepGP:
return self._model_gpflux
@property
def model_keras(self) -> tf.keras.Model:
return self._model_keras
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
trajectory = self.trajectory_sampler().get_trajectory()
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory(tiled_query_points), [1, 0, 2]) # [S, N, L]
def reparam_sampler(self, num_samples: int) -> ReparametrizationSampler[GPfluxPredictor]:
"""
Return a reparametrization sampler for a :class:`DeepGaussianProcess` model.
:param num_samples: The number of samples to obtain.
:return: The reparametrization sampler.
"""
return DeepGaussianProcessReparamSampler(num_samples, self)
def trajectory_sampler(self) -> TrajectorySampler[GPfluxPredictor]:
"""
Return a trajectory sampler. For :class:`DeepGaussianProcess`, we build
trajectories using the GPflux default sampler.
:return: The trajectory sampler.
"""
return DeepGaussianProcessDecoupledTrajectorySampler(self, self._num_rff_features)
def update(self, dataset: Dataset) -> None:
inputs = dataset.query_points
new_num_data = inputs.shape[0]
self.model_gpflux.num_data = new_num_data
# Update num_data for each layer, as well as make sure dataset shapes are ok
for i, layer in enumerate(self.model_gpflux.f_layers):
if hasattr(layer, "num_data"):
layer.num_data = new_num_data
if isinstance(layer, LatentVariableLayer):
inputs = layer(inputs)
continue
if isinstance(layer.inducing_variable, InducingPoints):
inducing_variable = layer.inducing_variable
else:
inducing_variable = layer.inducing_variable.inducing_variable
if inputs.shape[-1] != inducing_variable.Z.shape[-1]:
raise ValueError(
f"Shape {inputs.shape} of input to layer {layer} is incompatible with shape"
f" {inducing_variable.Z.shape} of that layer. Trailing dimensions must match."
)
if (
i == len(self.model_gpflux.f_layers) - 1
and dataset.observations.shape[-1] != layer.q_mu.shape[-1]
):
raise ValueError(
f"Shape {dataset.observations.shape} of new observations is incompatible"
f" with shape {layer.q_mu.shape} of existing observations. Trailing"
f" dimensions must match."
)
inputs = layer(inputs)
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
:param dataset: The data with which to optimize the `model`.
"""
fit_args = dict(self.optimizer.fit_args)
# Tell optimizer how many epochs have been used before: the optimizer will "continue"
# optimization across multiple BO iterations rather than start fresh at each iteration.
# This allows us to monitor training across iterations.
if "epochs" in fit_args:
fit_args["epochs"] = fit_args["epochs"] + self._absolute_epochs
hist = self.model_keras.fit(
{"inputs": dataset.query_points, "targets": dataset.observations},
**fit_args,
initial_epoch=self._absolute_epochs,
)
if self._continuous_optimisation:
self._absolute_epochs = self._absolute_epochs + len(hist.history["loss"])
# Reset lr in case there was an lr schedule: a schedule will have changed the learning
# rate, so that the next time we call `optimize` the starting learning rate would be
# different. Therefore, we make sure the learning rate is set back to its initial value.
# However, this is not needed for `LearningRateSchedule` instances.
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.optimizer.optimizer.lr.assign(self.original_lr)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model training information at a given optimization step to the Tensorboard.
We log a few summary statistics of losses, layer KL divergences and metrics (as provided in
``optimizer``): ``final`` value at the end of the training, ``diff`` value as a difference
        between initial and final epoch. We also log epoch statistics, but as histograms, rather
than time series. We also log several training data based metrics, such as root mean square
error between predictions and observations and several others.
For custom logs user will need to subclass the model and overwrite this method.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
logging.scalar("epochs/num_epochs", len(self.model_keras.history.epoch))
for idx, layer in enumerate(self.model_gpflux.f_layers):
write_summary_kernel_parameters(layer.kernel, prefix=f"layer[{idx}]/")
write_summary_likelihood_parameters(self.model_gpflux.likelihood_layer.likelihood)
for k, v in self.model_keras.history.history.items():
logging.histogram(f"{k}/epoch", lambda: v)
logging.scalar(f"{k}/final", lambda: v[-1])
logging.scalar(f"{k}/diff", lambda: v[0] - v[-1])
if dataset:
write_summary_data_based_metrics(
dataset=dataset, model=self, prefix="training_"
)
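# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged end-to-end sketch of the wrapper above: wrap a GPflux DeepGP, fit it
# to data and query the posterior. `dgp` and `data` are assumed to come from,
# e.g., build_vanilla_deep_gp in builders.py; the optimizer settings are arbitrary.
def _example_deep_gaussian_process(
    dgp: DeepGP, data: Dataset
) -> tuple[TensorType, TensorType]:
    model = DeepGaussianProcess(dgp, optimizer=KerasOptimizer(tf.optimizers.Adam(0.01)))
    model.update(data)                       # sync num_data with the dataset
    model.optimize(data)                     # Keras `fit` under the hood
    return model.predict(data.query_points)  # mean and variance of the last layer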
| 18,255 | 44.526185 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This package contains the primary interface for deep Gaussian process models. It also contains a
number of :class:`TrainableProbabilisticModel` wrappers for GPflux-based models.
"""
from .builders import build_vanilla_deep_gp
from .interface import GPfluxPredictor
from .models import DeepGaussianProcess
from .sampler import (
DeepGaussianProcessDecoupledLayer,
DeepGaussianProcessDecoupledTrajectorySampler,
DeepGaussianProcessReparamSampler,
ResampleableDecoupledDeepGaussianProcessFeatureFunctions,
dgp_feature_decomposition_trajectory,
)
| 1,158 | 37.633333 | 96 | py |
trieste-develop | trieste-develop/trieste/models/gpflux/interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
import tensorflow as tf
from gpflow.base import Module
from ...types import TensorType
from ..interfaces import SupportsGetObservationNoise
from ..optimizer import KerasOptimizer
class GPfluxPredictor(SupportsGetObservationNoise, ABC):
"""
A trainable wrapper for a GPflux deep Gaussian process model. The code assumes subclasses
will use the Keras `fit` method for training, and so they should provide access to both a
`model_keras` and `model_gpflux`.
"""
def __init__(self, optimizer: KerasOptimizer | None = None):
"""
:param optimizer: The optimizer wrapper containing the optimizer with which to train the
model and arguments for the wrapper and the optimizer. The optimizer must
be an instance of a :class:`~tf.optimizers.Optimizer`. Defaults to
:class:`~tf.optimizers.Adam` optimizer with 0.01 learning rate.
"""
if optimizer is None:
optimizer = KerasOptimizer(tf.optimizers.Adam(0.01))
self._optimizer = optimizer
@property
@abstractmethod
def model_gpflux(self) -> Module:
"""The underlying GPflux model."""
@property
@abstractmethod
def model_keras(self) -> tf.keras.Model:
"""Returns the compiled Keras model for training."""
@property
def optimizer(self) -> KerasOptimizer:
"""The optimizer wrapper for training the model."""
return self._optimizer
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""Note: unless otherwise noted, this returns the mean and variance of the last layer
conditioned on one sample from the previous layers."""
return self.model_gpflux.predict_f(query_points)
@abstractmethod
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
raise NotImplementedError
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""Note: unless otherwise noted, this will return the prediction conditioned on one sample
from the lower layers."""
f_mean, f_var = self.model_gpflux.predict_f(query_points)
return self.model_gpflux.likelihood_layer.likelihood.predict_mean_and_var(
query_points, f_mean, f_var
)
def get_observation_noise(self) -> TensorType:
"""
Return the variance of observation noise for homoscedastic likelihoods.
:return: The observation noise.
:raise NotImplementedError: If the model does not have a homoscedastic likelihood.
"""
try:
noise_variance = self.model_gpflux.likelihood_layer.likelihood.variance
except AttributeError:
raise NotImplementedError(f"Model {self!r} does not have scalar observation noise")
return noise_variance
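# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged reminder of how the two prediction methods of any concrete
# GPfluxPredictor (e.g. DeepGaussianProcess in models.py) differ: `predict`
# returns moments of the latent function, while `predict_y` also includes the
# likelihood's observation noise.
def _example_predictions(
    model: GPfluxPredictor, query_points: TensorType
) -> tuple[TensorType, TensorType]:
    f_mean, f_var = model.predict(query_points)    # latent-layer mean and variance
    y_mean, y_var = model.predict_y(query_points)  # additionally includes observation noise
    return y_mean, y_var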
| 3,501 | 37.483516 | 98 | py |
trieste-develop | trieste-develop/trieste/models/keras/sampler.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is the home of the sampling functionality required by some
of the Trieste's Keras model wrappers.
"""
from __future__ import annotations
from typing import Dict, Optional
import tensorflow as tf
from ...types import TensorType
from ...utils import DEFAULTS, flatten_leading_dims
from ..interfaces import TrajectoryFunction, TrajectoryFunctionClass, TrajectorySampler
from .interface import DeepEnsembleModel
from .utils import sample_model_index
class DeepEnsembleTrajectorySampler(TrajectorySampler[DeepEnsembleModel]):
"""
This class builds functions that approximate a trajectory by randomly choosing a network from
the ensemble and using its predicted means as a trajectory.
Option `diversify` can be used to increase the diversity in case of optimizing very large
batches of trajectories. We use quantiles from the approximate Gaussian distribution of
the ensemble as trajectories, with randomly chosen quantiles approximating a trajectory and
using a reparametrisation trick to speed up computation. Note that quantiles are not true
trajectories, so this will likely have some performance costs.
"""
def __init__(
self, model: DeepEnsembleModel, diversify: bool = False, seed: Optional[int] = None
):
"""
:param model: The ensemble model to sample from.
:param diversify: Whether to use quantiles from the approximate Gaussian distribution of
the ensemble as trajectories (`False` by default). See class docstring for details.
:param seed: Random number seed to use for trajectory sampling.
:raise NotImplementedError: If we try to use the model that is not instance of
:class:`DeepEnsembleModel`.
"""
if not isinstance(model, DeepEnsembleModel):
raise NotImplementedError(
f"EnsembleTrajectorySampler only works with DeepEnsembleModel models, that support "
f"ensemble_size and ensemble_distributions methods; "
f"received {model.__repr__()}"
)
super().__init__(model)
self._model = model
self._diversify = diversify
self._seed = seed or int(tf.random.uniform(shape=(), maxval=10000, dtype=tf.int32))
def __repr__(self) -> str:
""""""
return f"{self.__class__.__name__}({self._model!r}"
def get_trajectory(self) -> TrajectoryFunction:
"""
Generate an approximate function draw (trajectory) from the ensemble.
:return: A trajectory function representing an approximate trajectory
from the model, taking an input of shape `[N, B, D]` and returning shape `[N, B, L]`.
"""
return deep_ensemble_trajectory(self._model, self._diversify, self._seed)
def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Update a :const:`TrajectoryFunction` to reflect an update in its
underlying :class:`DeepEnsembleModel` and resample accordingly.
Here we rely on the underlying models being updated and we only resample the trajectory.
:param trajectory: The trajectory function to be resampled.
:return: The new trajectory function updated for a new model
"""
tf.debugging.Assert(isinstance(trajectory, deep_ensemble_trajectory), [tf.constant([])])
trajectory.resample() # type: ignore
return trajectory
def resample_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Efficiently resample a :const:`TrajectoryFunction` in-place to avoid function retracing
with every new sample.
:param trajectory: The trajectory function to be resampled.
:return: The new resampled trajectory function.
"""
tf.debugging.Assert(isinstance(trajectory, deep_ensemble_trajectory), [tf.constant([])])
trajectory.resample() # type: ignore
return trajectory
class deep_ensemble_trajectory(TrajectoryFunctionClass):
"""
Generate an approximate function draw (trajectory) by randomly choosing a batch B of
networks from the ensemble and using their predicted means as trajectories.
Option `diversify` can be used to increase the diversity in case of optimizing very large
batches of trajectories. We use quantiles from the approximate Gaussian distribution of
the ensemble as trajectories, with randomly chosen quantiles approximating a trajectory and
using a reparametrisation trick to speed up computation. Note that quantiles are not true
trajectories, so this will likely have some performance costs.
"""
def __init__(self, model: DeepEnsembleModel, diversify: bool, seed: Optional[int] = None):
"""
:param model: The model of the objective function.
:param diversify: Whether to use samples from final probabilistic layer as trajectories
or mean predictions.
:param seed: Optional RNG seed.
"""
self._model = model
self._diversify = diversify
self._ensemble_size = self._model.ensemble_size
self._seed = seed
self._initialized = tf.Variable(False, trainable=False)
self._batch_size = tf.Variable(0, dtype=tf.int32, trainable=False)
if self._diversify:
self._eps = tf.Variable(
tf.zeros([0, 0], dtype=tf.float64), shape=[None, None], trainable=False
)
else:
self._indices = tf.Variable(
tf.zeros([0], dtype=tf.int32), shape=[None], trainable=False
)
@tf.function
def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, L]
"""
Call trajectory function. Note that we are flattening the batch dimension and
doing a forward pass with each network in the ensemble with the whole batch. This is
somewhat wasteful, but is necessary given the underlying ``KerasEnsemble`` network
model.
"""
if not self._initialized: # work out desired batch size from input
self._batch_size.assign(tf.shape(x)[-2]) # B
self.resample() # sample network indices/quantiles
self._initialized.assign(True)
tf.debugging.assert_equal(
tf.shape(x)[-2],
self._batch_size,
message=f"""
This trajectory only supports batch sizes of {self._batch_size}.
If you wish to change the batch size you must get a new trajectory
by calling the get_trajectory method of the trajectory sampler.
""",
)
flat_x, unflatten = flatten_leading_dims(x) # [N*B, D]
if self._diversify:
predicted_means, predicted_vars = self._model.predict(flat_x) # ([N*B, L], [N*B, L])
predicted_vars = predicted_vars + tf.cast(DEFAULTS.JITTER, predicted_vars.dtype)
predictions = predicted_means + tf.sqrt(predicted_vars) * tf.tile(
tf.cast(self._eps, predicted_vars.dtype), [tf.shape(x)[0], 1]
) # [N*B, L]
return unflatten(predictions) # [N, B, L]
else:
ensemble_distributions = self._model.ensemble_distributions(flat_x)
predicted_means = tf.convert_to_tensor([dist.mean() for dist in ensemble_distributions])
predictions = tf.gather(predicted_means, self._indices) # [B, N*B, L]
tensor_predictions = tf.map_fn(unflatten, predictions) # [B, N, B, L]
# here we select simultaneously networks and batch dimension according to batch indices
# this is needed because we compute a whole batch with each network
batch_index = tf.range(self._batch_size)
indices = tf.stack([batch_index, batch_index], axis=1)
batch_predictions = tf.gather_nd(
tf.transpose(tensor_predictions, perm=[0, 2, 1, 3]), indices
            )  # [B, N, L]
return tf.transpose(batch_predictions, perm=[1, 0, 2]) # [N, B, L]
def resample(self) -> None:
"""
Efficiently resample network indices in-place, without retracing.
"""
if self._seed:
self._seed += 1 # increment operation seed
if self._diversify:
self._eps.assign(
tf.random.normal(
shape=(self._batch_size, self._model.num_outputs),
dtype=tf.float64,
seed=self._seed,
)
            )  # [B, L]
else:
self._indices.assign(
sample_model_index(self._ensemble_size, self._batch_size, seed=self._seed)
) # [B]
def get_state(self) -> Dict[str, TensorType]:
"""
Return internal state variables.
"""
state = {
"initialized": self._initialized,
"batch_size": self._batch_size,
}
if self._diversify:
state["eps"] = self._eps
else:
state["indices"] = self._indices
return state
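# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example of drawing a batch of approximate trajectories from a trained
# ensemble. `model` is assumed to be a trieste `DeepEnsemble` (see models.py),
# which implements the `DeepEnsembleModel` interface required above; the seed is
# arbitrary and `query_points` must have the [N, B, D] shape expected by __call__.
def _example_ensemble_trajectory(
    model: DeepEnsembleModel, query_points: TensorType
) -> TensorType:
    sampler = DeepEnsembleTrajectorySampler(model, diversify=False, seed=42)
    trajectory = sampler.get_trajectory()    # one network per batch element
    values = trajectory(query_points)        # [N, B, D] -> [N, B, L]
    sampler.resample_trajectory(trajectory)  # in-place resample of network indices
    return values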
| 9,679 | 41.643172 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional
import tensorflow as tf
import tensorflow_probability as tfp
from ...data import Dataset
from ...types import TensorType
def get_tensor_spec_from_data(dataset: Dataset) -> tuple[tf.TensorSpec, tf.TensorSpec]:
r"""
Extract tensor specifications for inputs and outputs of neural network models, based on the
    dataset. This utility facilitates constructing neural networks, providing the required
dimensions for the input and the output of the network. For example
>>> data = Dataset(
... tf.constant([[0.1, 0.2], [0.3, 0.4]]),
... tf.constant([[0.5], [0.7]])
... )
>>> input_spec, output_spec = get_tensor_spec_from_data(data)
>>> input_spec
TensorSpec(shape=(2,), dtype=tf.float32, name='query_points')
>>> output_spec
TensorSpec(shape=(1,), dtype=tf.float32, name='observations')
:param dataset: A dataset with ``query_points`` and ``observations`` tensors.
:return: Tensor specification objects for the ``query_points`` and ``observations`` tensors.
:raise ValueError: If the dataset is not an instance of :class:`~trieste.data.Dataset`.
"""
if not isinstance(dataset, Dataset):
raise ValueError(
f"This function works only on trieste.data.Dataset objects, however got"
f"{type(dataset)} which is incompatible."
)
input_tensor_spec = tf.TensorSpec(
shape=(dataset.query_points.shape[1:]),
dtype=dataset.query_points.dtype,
name="query_points",
)
output_tensor_spec = tf.TensorSpec(
shape=(dataset.observations.shape[1:]),
dtype=dataset.observations.dtype,
name="observations",
)
return input_tensor_spec, output_tensor_spec
def sample_with_replacement(dataset: Dataset) -> Dataset:
"""
Create a new ``dataset`` with data sampled with replacement. This
function is useful for creating bootstrap samples of data for training ensembles.
:param dataset: The data that should be sampled.
:return: A (new) ``dataset`` with sampled data.
:raise ValueError (or InvalidArgumentError): If the dataset is not an instance of
:class:`~trieste.data.Dataset` or it is empty.
"""
if not isinstance(dataset, Dataset):
raise ValueError(
f"This function works only on trieste.data.Dataset objects, however got"
f"{type(dataset)} which is incompatible."
)
tf.debugging.assert_positive(len(dataset), message="Dataset must not be empty.")
n_rows = dataset.observations.shape[0]
index_tensor = tf.random.uniform((n_rows,), maxval=n_rows, dtype=tf.dtypes.int32)
observations = tf.gather(dataset.observations, index_tensor, axis=0)
query_points = tf.gather(dataset.query_points, index_tensor, axis=0)
return Dataset(query_points=query_points, observations=observations)
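# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example of creating a bootstrap resample of a toy dataset; the data
# values are made up. The result has the same shapes as the input, with rows
# drawn with replacement from the original.
def _example_bootstrap_sample() -> Dataset:
    data = Dataset(
        tf.constant([[0.1], [0.2], [0.3]]),
        tf.constant([[1.0], [2.0], [3.0]]),
    )
    return sample_with_replacement(data)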
def sample_model_index(
size: TensorType,
num_samples: TensorType,
seed: Optional[int] = None,
) -> TensorType:
"""
Returns samples of indices of individual models in the ensemble.
    If ``num_samples`` is smaller than or equal to ``size`` (i.e. the ensemble size), indices are
    sampled without replacement. When ``num_samples`` is larger than ``size``, we sample without
    replacement until ``size`` is reached and with replacement after that. The rationale of this
    mixed scheme is that typically one wants to exhaust all networks and only then resample them.
:param size: The maximum index, effectively the number of models in the ensemble.
:param num_samples: The number of samples to take.
:param seed: Optional RNG seed.
:return: A tensor with indices.
"""
shuffle_indices = tf.random.shuffle(tf.range(size), seed=seed)
if num_samples > size:
random_indices = tf.random.uniform(
shape=(tf.cast(num_samples - size, tf.int32),),
maxval=size,
dtype=tf.int32,
seed=seed,
)
indices = tf.concat([shuffle_indices, random_indices], 0)
else:
indices = shuffle_indices[:num_samples]
return indices
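# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example of the mixed sampling scheme above: with an ensemble of size 3
# and 5 requested samples, the first 3 indices are a permutation of {0, 1, 2} and
# the remaining 2 are drawn uniformly with replacement. The seed is arbitrary.
def _example_sample_model_index() -> TensorType:
    return sample_model_index(3, 5, seed=0)  # e.g. [2, 0, 1, 1, 0]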
def negative_log_likelihood(
y_true: TensorType, y_pred: tfp.distributions.Distribution
) -> TensorType:
"""
Maximum likelihood objective function for training neural networks.
:param y_true: The output variable values.
:param y_pred: The output layer of the model. It has to be a probabilistic neural network
with a distribution as a final layer.
:return: Negative log likelihood values.
"""
return -y_pred.log_prob(y_true)
| 5,241 | 37.262774 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/architectures.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains implementations of neural network architectures with Keras.
"""
from __future__ import annotations
from abc import abstractmethod
from typing import Any, Callable, Sequence
import dill
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.layers.distribution_layer import DistributionLambda, _serialize
from trieste.types import TensorType
class KerasEnsemble:
"""
This class builds an ensemble of neural networks, using Keras. Individual networks must
    be instances of :class:`~trieste.models.keras_networks.KerasEnsembleNetwork`. This class
is meant to be used with :class:`~trieste.models.keras_networks.DeepEnsemble` model wrapper,
which compiles the model.
"""
def __init__(
self,
networks: Sequence[KerasEnsembleNetwork],
) -> None:
"""
:param networks: A list of neural network specifications, one for each member of the
ensemble. The ensemble will be built using these specifications.
:raise ValueError: If there are no objects in ``networks`` or we try to create
a model with networks whose input or output shapes are not the same.
"""
if not networks:
raise ValueError(
f"networks should consist of KerasEnsembleNetwork objects, however"
f"received {networks} instead."
)
input_shapes, output_shapes = [], []
for index, network in enumerate(networks):
network.network_name = f"model_{index}_"
input_shapes.append(network.input_tensor_spec.shape)
output_shapes.append(network.output_tensor_spec.shape)
if not all(x == input_shapes[0] for x in input_shapes):
raise ValueError(
f"Input shapes for all networks must be the same, however"
f"received {input_shapes} instead."
)
if not all(x == output_shapes[0] for x in output_shapes):
raise ValueError(
f"Output shapes for all networks must be the same, however"
f"received {output_shapes} instead."
)
self.num_outputs = networks[0].flattened_output_shape
self._networks = networks
self._model = self._build_ensemble()
def __repr__(self) -> str:
""""""
return f"KerasEnsemble({self._networks!r})"
@property
def model(self) -> tf.keras.Model:
"""Returns built but uncompiled Keras ensemble model."""
return self._model
@property
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual neural
network models in the ensemble.
"""
return len(self._networks)
def _build_ensemble(self) -> tf.keras.Model:
"""
Builds the ensemble model by combining all the individual networks in a single Keras model.
This method relies on ``connect_layers`` method of :class:`KerasEnsembleNetwork` objects
to construct individual networks.
:return: The Keras model.
"""
inputs, outputs = zip(*[network.connect_layers() for network in self._networks])
return tf.keras.Model(inputs=inputs, outputs=outputs)
def __getstate__(self) -> dict[str, Any]:
# When pickling use to_json to save the model.
state = self.__dict__.copy()
state["_model"] = self._model.to_json()
state["_weights"] = self._model.get_weights()
# Save the history callback (serializing any model)
if self._model.history:
history_model = self._model.history.model
try:
if history_model is self._model:
# no need to serialize the main model, just use a special value instead
self._model.history.model = ...
elif history_model:
self._model.history.model = (
history_model.to_json(),
history_model.get_weights(),
)
state["_history"] = dill.dumps(self._model.history)
finally:
self._model.history.model = history_model
return state
def __setstate__(self, state: dict[str, Any]) -> None:
# When unpickling restore the model using model_from_json.
self.__dict__.update(state)
self._model = tf.keras.models.model_from_json(
state["_model"], custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL}
)
self._model.set_weights(state["_weights"])
# Restore the history (including any model it contains)
if "_history" in state:
self._model.history = dill.loads(state["_history"])
if self._model.history.model is ...:
self._model.history.set_model(self._model)
elif self._model.history.model:
model_json, weights = self._model.history.model
model = tf.keras.models.model_from_json(
model_json,
custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL},
)
model.set_weights(weights)
self._model.history.set_model(model)
class KerasEnsembleNetwork:
"""
This class is an interface that defines necessary attributes and methods for neural networks
that are meant to be used for building ensembles by
:class:`~trieste.models.keras_networks.KerasEnsemble`. Subclasses are not meant to
    build and compile Keras models; instead they provide the specification that
:class:`~trieste.models.keras_networks.KerasEnsemble` will use to build the Keras model.
"""
def __init__(
self,
input_tensor_spec: tf.TensorSpec,
output_tensor_spec: tf.TensorSpec,
network_name: str = "",
):
"""
:param input_tensor_spec: Tensor specification for the input to the network.
:param output_tensor_spec: Tensor specification for the output of the network.
:param network_name: The name to be used when building the network.
"""
if not isinstance(input_tensor_spec, tf.TensorSpec):
raise ValueError(
f"input_tensor_spec must be an instance of tf.TensorSpec, "
f"received {type(input_tensor_spec)} instead."
)
if not isinstance(output_tensor_spec, tf.TensorSpec):
raise ValueError(
f"output_tensor_spec must be an instance of tf.TensorSpec, "
f"received {type(output_tensor_spec)} instead."
)
self.input_tensor_spec = input_tensor_spec
self.output_tensor_spec = output_tensor_spec
self.network_name = network_name
@property
def input_layer_name(self) -> str:
return self.network_name + "input"
@property
def output_layer_name(self) -> str:
return self.network_name + "output"
@property
def flattened_output_shape(self) -> int:
return int(np.prod(self.output_tensor_spec.shape))
@abstractmethod
def connect_layers(self) -> tuple[tf.Tensor, tf.Tensor]:
"""
Connects the layers of the neural network. Architecture, layers and layer specifications
need to be defined by the subclasses.
:return: Input and output tensor of the network, required by :class:`tf.keras.Model` to
build a model.
"""
raise NotImplementedError
class MultivariateNormalTriL(tfp.layers.MultivariateNormalTriL): # type: ignore[misc]
"""Fixed version of tfp.layers.MultivariateNormalTriL that handles saving."""
def __init__(
self,
event_size: int,
convert_to_tensor_fn: Callable[
[tfp.python.distributions.Distribution], TensorType
] = tfp.python.distributions.Distribution.sample,
validate_args: bool = False,
**kwargs: Any,
) -> None:
self._event_size = event_size
self._validate_args = validate_args
super().__init__(event_size, convert_to_tensor_fn, validate_args, **kwargs)
def get_config(self) -> dict[str, Any]:
config = {
"event_size": self._event_size,
"validate_args": self._validate_args,
"convert_to_tensor_fn": _serialize(self._convert_to_tensor_fn),
}
# skip DistributionLambda's get_config because we don't want to serialize the
# make_distribution_fn: both to avoid confusing the constructor, and because it doesn't
# seem to work in TF2.4.
base_config = super(DistributionLambda, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class GaussianNetwork(KerasEnsembleNetwork):
"""
This class defines layers of a probabilistic neural network using Keras. The network
architecture is a multilayer fully-connected feed-forward network, with Gaussian
distribution as an output. The layers are meant to be built as an ensemble model by
:class:`KerasEnsemble`. Note that this is not a Bayesian neural network.
"""
def __init__(
self,
input_tensor_spec: tf.TensorSpec,
output_tensor_spec: tf.TensorSpec,
hidden_layer_args: Sequence[dict[str, Any]] = (
{"units": 50, "activation": "relu"},
{"units": 50, "activation": "relu"},
),
independent: bool = False,
):
"""
:param input_tensor_spec: Tensor specification for the input to the network.
:param output_tensor_spec: Tensor specification for the output of the network.
:param hidden_layer_args: Specification for building dense hidden layers. Each element in
the sequence should be a dictionary containing arguments (keys) and their values for a
:class:`~tf.keras.layers.Dense` hidden layer. Please check Keras Dense layer API for
available arguments. Objects in the sequence will sequentially be used to add
:class:`~tf.keras.layers.Dense` layers. Length of this sequence determines the number of
hidden layers in the network. Default value is two hidden layers, 50 nodes each, with
ReLu activation functions. Empty sequence needs to be passed to have no hidden layers.
:param independent: In case multiple outputs are modeled, if set to `True` then
:class:`~tfp.layers.IndependentNormal` layer
is used as the output layer. This models outputs as independent, only the diagonal
elements of the covariance matrix are parametrized. If left as the default `False`,
then :class:`~tfp.layers.MultivariateNormalTriL` layer is used where correlations
between outputs are learned as well.
:raise ValueError: If objects in ``hidden_layer_args`` are not dictionaries.
"""
super().__init__(input_tensor_spec, output_tensor_spec)
self._hidden_layer_args = hidden_layer_args
self._independent = independent
def _gen_input_tensor(self) -> tf.keras.Input:
input_tensor = tf.keras.Input(
shape=self.input_tensor_spec.shape,
dtype=self.input_tensor_spec.dtype,
name=self.input_layer_name,
)
return input_tensor
def _gen_hidden_layers(self, input_tensor: tf.Tensor) -> tf.Tensor:
for index, hidden_layer_args in enumerate(self._hidden_layer_args):
layer_name = f"{self.network_name}dense_{index}"
layer = tf.keras.layers.Dense(**hidden_layer_args, name=layer_name)
input_tensor = layer(input_tensor)
return input_tensor
def _gen_multi_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor:
dist_layer = tfp.layers.IndependentNormal if self._independent else MultivariateNormalTriL
n_params = dist_layer.params_size(self.flattened_output_shape)
parameter_layer = tf.keras.layers.Dense(
n_params, name=self.network_name + "dense_parameters"
)(input_tensor)
distribution = dist_layer(
self.flattened_output_shape,
tfp.python.distributions.Distribution.mean,
name=self.output_layer_name,
)(parameter_layer)
return distribution
def _gen_single_output_layer(self, input_tensor: tf.Tensor) -> tf.Tensor:
parameter_layer = tf.keras.layers.Dense(2, name=self.network_name + "dense_parameters")(
input_tensor
)
def distribution_fn(inputs: TensorType) -> tfp.distributions.Distribution:
return tfp.distributions.Normal(inputs[..., :1], tf.math.softplus(inputs[..., 1:]))
distribution = tfp.layers.DistributionLambda(
make_distribution_fn=distribution_fn,
convert_to_tensor_fn=tfp.distributions.Distribution.mean,
name=self.output_layer_name,
)(parameter_layer)
return distribution
def connect_layers(self) -> tuple[tf.Tensor, tf.Tensor]:
"""
Connect all layers in the network. We start by generating an input tensor based on input
tensor specification. Next we generate a sequence of hidden dense layers based on
hidden layer arguments. Finally, we generate a dense layer whose nodes act as parameters of
a Gaussian distribution in the final probabilistic layer.
:return: Input and output tensor of the sequence of layers.
"""
input_tensor = self._gen_input_tensor()
hidden_tensor = self._gen_hidden_layers(input_tensor)
if self.flattened_output_shape == 1:
output_tensor = self._gen_single_output_layer(hidden_tensor)
else:
output_tensor = self._gen_multi_output_layer(hidden_tensor)
return input_tensor, output_tensor
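# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example of assembling a two-member ensemble by hand. The tensor specs
# are made up; in practice they are typically extracted from a trieste Dataset
# via get_tensor_spec_from_data in utils.py, and the ensemble is then compiled by
# the DeepEnsemble wrapper in models.py.
def _example_keras_ensemble() -> KerasEnsemble:
    input_spec = tf.TensorSpec(shape=(2,), dtype=tf.float64, name="query_points")
    output_spec = tf.TensorSpec(shape=(1,), dtype=tf.float64, name="observations")
    networks = [GaussianNetwork(input_spec, output_spec) for _ in range(2)]
    return KerasEnsemble(networks)  # built but not compiled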
| 14,523 | 40.497143 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains builders for Keras models supported in Trieste. We found the default
configurations used here to work well in most situation, but they should not be taken as
universally good solutions.
"""
from __future__ import annotations
from typing import Union
import tensorflow as tf
from ...data import Dataset
from .architectures import GaussianNetwork, KerasEnsemble
from .utils import get_tensor_spec_from_data
def build_keras_ensemble(
data: Dataset,
ensemble_size: int = 5,
num_hidden_layers: int = 2,
units: int = 25,
activation: Union[str, tf.keras.layers.Activation] = "relu",
independent_normal: bool = False,
) -> KerasEnsemble:
"""
Builds a simple ensemble of neural networks in Keras where each network has the same
architecture: number of hidden layers, nodes in hidden layers and activation function.
    The default ensemble size and activation function seem to work well in practice, at least for
    regression-type problems. The number of hidden layers and units per layer should be modified
    according to the dataset size and complexity of the function - the default values seem to work
    well for the small datasets common in Bayesian optimization. Using the independent normal is
    relevant
only if one is modelling multiple output variables, as it simplifies the distribution by
ignoring correlations between outputs.
:param data: Data for training, used for extracting input and output tensor specifications.
:param ensemble_size: The size of the ensemble, that is, the number of base learners or
individual neural networks in the ensemble.
:param num_hidden_layers: The number of hidden layers in each network.
:param units: The number of nodes in each hidden layer.
:param activation: The activation function in each hidden layer.
:param independent_normal: If set to `True` then :class:`~tfp.layers.IndependentNormal` layer
is used as the output layer. This models outputs as independent, only the diagonal
elements of the covariance matrix are parametrized. If left as the default `False`,
then :class:`~tfp.layers.MultivariateNormalTriL` layer is used where correlations
between outputs are learned as well. Note that this is only relevant for multi-output
models.
:return: Keras ensemble model.
"""
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(data)
hidden_layer_args = []
for i in range(num_hidden_layers):
hidden_layer_args.append({"units": units, "activation": activation})
networks = [
GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args,
independent_normal,
)
for _ in range(ensemble_size)
]
keras_ensemble = KerasEnsemble(networks)
return keras_ensemble
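# --- Illustrative sketch (not part of the library) ----------------------------
# A hedged example: build a five-member ensemble for a toy 1D regression dataset.
# The data values are made up; `Dataset` is the trieste container imported above.
def _example_build_keras_ensemble() -> KerasEnsemble:
    query_points = tf.constant([[0.0], [0.5], [1.0]], dtype=tf.float64)
    observations = tf.sin(query_points)
    data = Dataset(query_points, observations)
    return build_keras_ensemble(data, ensemble_size=5, num_hidden_layers=2, units=25)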
| 3,471 | 40.831325 | 99 | py |
trieste-develop | trieste-develop/trieste/models/keras/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import re
from typing import Any, Dict, Optional
import dill
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_probability.python.distributions as tfd
from tensorflow.python.keras.callbacks import Callback
from ... import logging
from ...data import Dataset
from ...types import TensorType
from ...utils import flatten_leading_dims
from ..interfaces import HasTrajectorySampler, TrainableProbabilisticModel, TrajectorySampler
from ..optimizer import KerasOptimizer
from ..utils import write_summary_data_based_metrics
from .architectures import KerasEnsemble, MultivariateNormalTriL
from .interface import DeepEnsembleModel, KerasPredictor
from .sampler import DeepEnsembleTrajectorySampler
from .utils import negative_log_likelihood, sample_model_index, sample_with_replacement
class DeepEnsemble(
KerasPredictor, TrainableProbabilisticModel, DeepEnsembleModel, HasTrajectorySampler
):
"""
A :class:`~trieste.model.TrainableProbabilisticModel` wrapper for deep ensembles built using
Keras.
Deep ensembles are ensembles of deep neural networks that have been found to have good
representation of uncertainty in practice (<cite data-cite="lakshminarayanan2017simple"/>).
This makes them a potentially attractive model for Bayesian optimization for use-cases with
large number of observations, non-stationary objective functions and need for fast predictions,
in which standard Gaussian process models are likely to struggle. The model consists of simple
fully connected multilayer probabilistic networks as base learners, with Gaussian distribution
as a final layer, using the negative log-likelihood loss for training the networks. The
model relies on differences in random initialization of weights for generating diversity among
base learners.
    The original formulation of the model does not include bootstrapping of the data. The authors
    found that it does not improve the performance of the model. We include bootstrapping as an
    option, as later work that measured uncertainty quantification more precisely found that
    bootstrapping does help with uncertainty representation
    (see <cite data-cite="osband2021epistemic"/>).
We provide classes for constructing ensembles using Keras
(:class:`~trieste.models.keras.KerasEnsemble`) in the `architectures` package that should be
used with the :class:`~trieste.models.keras.DeepEnsemble` wrapper. There we also provide a
:class:`~trieste.models.keras.GaussianNetwork` base learner following the original
formulation in <cite data-cite="lakshminarayanan2017simple"/>, but any user-specified network
can be supplied, as long as it has a Gaussian distribution as a final layer and follows the
:class:`~trieste.models.keras.KerasEnsembleNetwork` interface.
    A word of caution in case a learning rate scheduler is used in the ``fit_args`` of the
:class:`KerasOptimizer` optimizer instance. Typically one would not want to continue with the
reduced learning rate in the subsequent Bayesian optimization step. Hence, we reset the
learning rate to the original one after calling the ``fit`` method. In case this is not the
behaviour you would like, you will need to subclass the model and overwrite the
:meth:`optimize` method.
Currently we do not support setting up the model with dictionary config.
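    A typical setup looks roughly as follows. This is an illustrative sketch rather than a
    tested snippet: it assumes a populated :class:`~trieste.data.Dataset` named ``dataset``
    and uses :func:`~trieste.models.keras.build_keras_ensemble` with its default arguments.
    .. code-block:: python
        keras_ensemble = build_keras_ensemble(dataset)
        optimizer = KerasOptimizer(tf.optimizers.Adam())
        model = DeepEnsemble(keras_ensemble, optimizer)
        model.optimize(dataset)
        mean, variance = model.predict(dataset.query_points)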
"""
def __init__(
self,
model: KerasEnsemble,
optimizer: Optional[KerasOptimizer] = None,
bootstrap: bool = False,
diversify: bool = False,
continuous_optimisation: bool = True,
) -> None:
"""
:param model: A Keras ensemble model with probabilistic networks as ensemble members. The
model has to be built but not compiled.
:param optimizer: The optimizer wrapper with necessary specifications for compiling and
training the model. Defaults to :class:`~trieste.models.optimizer.KerasOptimizer` with
:class:`~tf.optimizers.Adam` optimizer, negative log likelihood loss, mean squared
error metric and a dictionary of default arguments for Keras `fit` method: 3000 epochs,
batch size 16, early stopping callback with patience of 50, and verbose 0.
See https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
:param bootstrap: Sample with replacement data for training each network in the ensemble.
By default set to `False`.
:param diversify: Whether to use quantiles from the approximate Gaussian distribution of
the ensemble as trajectories instead of mean predictions when calling
:meth:`trajectory_sampler`. This mode can be used to increase the diversity
in case of optimizing very large batches of trajectories. By
default set to `False`.
:param continuous_optimisation: If True (default), the optimizer will keep track of the
number of epochs across BO iterations and use this number as initial_epoch. This is
essential to allow monitoring of model training across BO iterations.
:raise ValueError: If ``model`` is not an instance of
:class:`~trieste.models.keras.KerasEnsemble` or ensemble has less than two base
learners (networks).
"""
if model.ensemble_size < 2:
raise ValueError(f"Ensemble size must be greater than 1 but got {model.ensemble_size}.")
super().__init__(optimizer)
if not self.optimizer.fit_args:
self.optimizer.fit_args = {
"verbose": 0,
"epochs": 3000,
"batch_size": 16,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=50, restore_best_weights=True
)
],
}
if self.optimizer.loss is None:
self.optimizer.loss = negative_log_likelihood
if self.optimizer.metrics is None:
self.optimizer.metrics = ["mse"]
model.model.compile(
self.optimizer.optimizer,
loss=[self.optimizer.loss] * model.ensemble_size,
metrics=[self.optimizer.metrics] * model.ensemble_size,
)
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.original_lr = self.optimizer.optimizer.lr.numpy()
self._absolute_epochs = 0
self._continuous_optimisation = continuous_optimisation
self._model = model
self._bootstrap = bootstrap
self._diversify = diversify
def __repr__(self) -> str:
""""""
return (
f"DeepEnsemble({self.model!r}, {self.optimizer!r}, {self._bootstrap!r}, "
f"{self._diversify!r})"
)
@property
def model(self) -> tf.keras.Model:
""" " Returns compiled Keras ensemble model."""
return self._model.model
@property
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual neural
network models in the ensemble.
"""
return self._model.ensemble_size
@property
def num_outputs(self) -> int:
"""
Returns the number of outputs trained on by each member network.
"""
return self._model.num_outputs
def prepare_dataset(
self, dataset: Dataset
) -> tuple[Dict[str, TensorType], Dict[str, TensorType]]:
"""
Transform ``dataset`` into inputs and outputs with correct names that can be used for
training the :class:`KerasEnsemble` model.
If ``bootstrap`` argument in the :class:`~trieste.models.keras.DeepEnsemble` is set to
`True`, data will be additionally sampled with replacement, independently for
each network in the ensemble.
:param dataset: A dataset with ``query_points`` and ``observations`` tensors.
:return: A dictionary with input data and a dictionary with output data.
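        For an ensemble of two networks the result has roughly the following structure (the
        exact key names come from the Keras model's input and output layer names, so the names
        below are purely illustrative):
        .. code-block:: python
            inputs = {"model_0_input": x_0, "model_1_input": x_1}
            outputs = {"model_0_output": y_0, "model_1_output": y_1}
        where each ``(x_i, y_i)`` pair is either the original data or, when ``bootstrap`` is
        `True`, an independent resample of it drawn with replacement.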
"""
inputs = {}
outputs = {}
for index in range(self.ensemble_size):
if self._bootstrap:
resampled_data = sample_with_replacement(dataset)
else:
resampled_data = dataset
input_name = self.model.input_names[index]
output_name = self.model.output_names[index]
inputs[input_name], outputs[output_name] = resampled_data.astuple()
return inputs, outputs
def prepare_query_points(self, query_points: TensorType) -> Dict[str, TensorType]:
"""
Transform ``query_points`` into inputs with correct names that can be used for
predicting with the model.
:param query_points: A tensor with ``query_points``.
:return: A dictionary with query_points prepared for predictions.
"""
inputs = {}
for index in range(self.ensemble_size):
inputs[self.model.input_names[index]] = query_points
return inputs
def ensemble_distributions(self, query_points: TensorType) -> tuple[tfd.Distribution, ...]:
"""
Return distributions for each member of the ensemble.
:param query_points: The points at which to return distributions.
:return: The distributions for the observations at the specified
``query_points`` for each member of the ensemble.
"""
x_transformed: dict[str, TensorType] = self.prepare_query_points(query_points)
return self._model.model(x_transformed)
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
r"""
Returns mean and variance at ``query_points`` for the whole ensemble.
Following <cite data-cite="lakshminarayanan2017simple"/> we treat the ensemble as a
uniformly-weighted Gaussian mixture model and combine the predictions as
        .. math:: p(y|\mathbf{x}) = M^{-1} \sum_{m=1}^M \mathcal{N}
            (\mu_{\theta_m}(\mathbf{x}),\,\sigma_{\theta_m}^{2}(\mathbf{x}))
        We further approximate the ensemble prediction as a Gaussian whose mean and variance
        are respectively the mean and variance of the mixture, given by
        .. math:: \mu_{*}(\mathbf{x}) = M^{-1} \sum_{m=1}^M \mu_{\theta_m}(\mathbf{x})
        .. math:: \sigma^2_{*}(\mathbf{x}) = M^{-1} \sum_{m=1}^M (\sigma_{\theta_m}^{2}(\mathbf{x})
            + \mu^2_{\theta_m}(\mathbf{x})) - \mu^2_{*}(\mathbf{x})
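        For example, an ensemble of two members predicting means :math:`0` and :math:`2` with
        unit variances at some point yields a mixture mean of :math:`1` and a mixture variance
        of :math:`((1 + 0) + (1 + 4))/2 - 1 = 2`, i.e. the average member variance plus the
        spread of the member means.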
This method assumes that the final layer in each member of the ensemble is
probabilistic, an instance of :class:`~tfp.distributions.Distribution`. In particular, given
the nature of the approximations stated above the final layer should be a Gaussian
distribution with `mean` and `variance` methods.
:param query_points: The points at which to make predictions.
:return: The predicted mean and variance of the observations at the specified
``query_points``.
"""
# handle leading batch dimensions, while still allowing `Functional` to
# "allow (None,) and (None, 1) Tensors to be passed interchangeably"
input_dims = min(len(query_points.shape), len(self.model.input_shape[0]))
flat_x, unflatten = flatten_leading_dims(query_points, output_dims=input_dims)
ensemble_distributions = self.ensemble_distributions(flat_x)
predicted_means = tf.math.reduce_mean(
[dist.mean() for dist in ensemble_distributions], axis=0
)
predicted_vars = (
tf.math.reduce_mean(
[dist.variance() + dist.mean() ** 2 for dist in ensemble_distributions], axis=0
)
- predicted_means**2
)
return unflatten(predicted_means), unflatten(predicted_vars)
def predict_ensemble(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Returns mean and variance at ``query_points`` for each member of the ensemble. First tensor
is the mean and second is the variance, where each has shape [..., M, N, 1], where M is
the ``ensemble_size``.
This method assumes that the final layer in each member of the ensemble is
        probabilistic, an instance of :class:`~tfp.distributions.Distribution`, in particular
`mean` and `variance` methods should be available.
:param query_points: The points at which to make predictions.
:return: The predicted mean and variance of the observations at the specified
``query_points`` for each member of the ensemble.
"""
ensemble_distributions = self.ensemble_distributions(query_points)
predicted_means = tf.convert_to_tensor([dist.mean() for dist in ensemble_distributions])
predicted_vars = tf.convert_to_tensor([dist.variance() for dist in ensemble_distributions])
return predicted_means, predicted_vars
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples at ``query_points``. We use the mixture approximation in
:meth:`predict` for ``query_points`` and sample ``num_samples`` times from a Gaussian
distribution given by the predicted mean and variance.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
predicted_means, predicted_vars = self.predict(query_points)
normal = tfp.distributions.Normal(predicted_means, tf.sqrt(predicted_vars))
samples = normal.sample(num_samples)
return samples # [num_samples, len(query_points), 1]
def sample_ensemble(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples at ``query_points``. Each sample is taken from a Gaussian
distribution given by the predicted mean and variance of a randomly chosen network in the
ensemble. This avoids using the Gaussian mixture approximation and samples directly from
individual Gaussian distributions given by each network in the ensemble.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples. For a predictive distribution with event shape E, this has shape
[..., S, N] + E, where S is the number of samples.
"""
ensemble_distributions = self.ensemble_distributions(query_points)
network_indices = sample_model_index(self.ensemble_size, num_samples)
stacked_samples = []
for i in range(num_samples):
stacked_samples.append(ensemble_distributions[network_indices[i]].sample())
samples = tf.stack(stacked_samples, axis=0)
return samples # [num_samples, len(query_points), 1]
def trajectory_sampler(self) -> TrajectorySampler[DeepEnsemble]:
"""
Return a trajectory sampler. For :class:`DeepEnsemble`, we use an ensemble
sampler that randomly picks a network from the ensemble and uses its predicted means
for generating a trajectory, or optionally randomly sampled quantiles rather than means.
:return: The trajectory sampler.
"""
return DeepEnsembleTrajectorySampler(self, self._diversify)
def update(self, dataset: Dataset) -> None:
"""
        Neural networks are parametric models and do not need to update data.
        The `TrainableProbabilisticModel` interface, however, requires an update method,
        so here we simply do nothing.
"""
return
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the underlying Keras ensemble model with the specified ``dataset``.
Optimization is performed by using the Keras `fit` method, rather than applying the
        optimizer and using the batches supplied with the optimizer wrapper. Users can pass
        arguments to the `fit` method through the ``fit_args`` argument of the optimizer wrapper.
        These default to 3000 epochs, batch size 16, an early stopping callback with a patience
        of 50, and verbose 0. See
https://keras.io/api/models/model_training_apis/#fit-method for a list of possible
arguments.
Note that optimization does not return the result, instead optimization results are
stored in a history attribute of the model object.
:param dataset: The data with which to optimize the model.
"""
fit_args = dict(self.optimizer.fit_args)
# Tell optimizer how many epochs have been used before: the optimizer will "continue"
# optimization across multiple BO iterations rather than start fresh at each iteration.
# This allows us to monitor training across iterations.
if "epochs" in fit_args:
fit_args["epochs"] = fit_args["epochs"] + self._absolute_epochs
x, y = self.prepare_dataset(dataset)
history = self.model.fit(
x=x,
y=y,
**fit_args,
initial_epoch=self._absolute_epochs,
)
if self._continuous_optimisation:
self._absolute_epochs = self._absolute_epochs + len(history.history["loss"])
# Reset lr in case there was an lr schedule: a schedule will have changed the learning
# rate, so that the next time we call `optimize` the starting learning rate would be
# different. Therefore, we make sure the learning rate is set back to its initial value.
# However, this is not needed for `LearningRateSchedule` instances.
if not isinstance(
self.optimizer.optimizer.lr, tf.keras.optimizers.schedules.LearningRateSchedule
):
self.optimizer.optimizer.lr.assign(self.original_lr)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model training information at a given optimization step to the Tensorboard.
        We log several summary statistics of the losses and metrics given in the ``fit_args`` of
        the ``optimizer`` (final value, difference between initial and final loss, minimum and
        maximum). We also log epoch statistics, but as histograms rather than time series. We
        also log several training
data based metrics, such as root mean square error between predictions and observations,
and several others.
We do not log statistics of individual models in the ensemble unless specifically switched
on with ``trieste.logging.set_summary_filter(lambda name: True)``.
For custom logs user will need to subclass the model and overwrite this method.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
logging.scalar("epochs/num_epochs", len(self.model.history.epoch))
for k, v in self.model.history.history.items():
KEY_SPLITTER = {
# map history keys to prefix and suffix
"loss": ("loss", ""),
r"(?P<model>model_\d+)_output_loss": ("loss", r"_\g<model>"),
r"(?P<model>model_\d+)_output_(?P<metric>.+)": (
r"\g<metric>",
r"_\g<model>",
),
}
for pattern, (pre_sub, post_sub) in KEY_SPLITTER.items():
if re.match(pattern, k):
pre = re.sub(pattern, pre_sub, k)
post = re.sub(pattern, post_sub, k)
break
else:
# unrecognised history key; ignore
continue
if "model" in post and not logging.include_summary("_ensemble"):
break
else:
if "model" in post:
pre = pre + "/_ensemble"
logging.histogram(f"{pre}/epoch{post}", lambda: v)
logging.scalar(f"{pre}/final{post}", lambda: v[-1])
logging.scalar(f"{pre}/diff{post}", lambda: v[0] - v[-1])
logging.scalar(f"{pre}/min{post}", lambda: tf.reduce_min(v))
logging.scalar(f"{pre}/max{post}", lambda: tf.reduce_max(v))
if dataset:
write_summary_data_based_metrics(
dataset=dataset, model=self, prefix="training_"
)
if logging.include_summary("_ensemble"):
predict_ensemble_variance = self.predict_ensemble(dataset.query_points)[1]
for i in range(predict_ensemble_variance.shape[0]):
logging.histogram(
f"variance/_ensemble/predict_variance_model_{i}",
predict_ensemble_variance[i, ...],
)
logging.scalar(
f"variance/_ensemble/predict_variance_mean_model_{i}",
tf.reduce_mean(predict_ensemble_variance[i, ...]),
)
def __getstate__(self) -> dict[str, Any]:
# use to_json and get_weights to save any optimizer fit_arg callback models
state = self.__dict__.copy()
if self._optimizer:
callbacks: list[Callback] = self._optimizer.fit_args.get("callbacks", [])
saved_models: list[KerasOptimizer] = []
tensorboard_writers: list[dict[str, Any]] = []
try:
for callback in callbacks:
# serialize the callback models before pickling the optimizer
saved_models.append(callback.model)
if callback.model is self.model:
# no need to serialize the main model, just use a special value instead
callback.model = ...
elif callback.model:
callback.model = (callback.model.to_json(), callback.model.get_weights())
# don't pickle tensorboard writers either; they'll be recreated when needed
if isinstance(callback, tf.keras.callbacks.TensorBoard):
tensorboard_writers.append(callback._writers)
callback._writers = {}
state["_optimizer"] = dill.dumps(state["_optimizer"])
except Exception as e:
raise NotImplementedError(
"Failed to copy DeepEnsemble optimizer due to unsupported callbacks."
) from e
finally:
# revert original state, even if the pickling failed
for callback, model in zip(callbacks, saved_models):
callback.model = model
for callback, writers in zip(
(cb for cb in callbacks if isinstance(cb, tf.keras.callbacks.TensorBoard)),
tensorboard_writers,
):
callback._writers = writers
return state
def __setstate__(self, state: dict[str, Any]) -> None:
# Restore optimizer and callback models after depickling, and recompile.
self.__dict__.update(state)
# Unpickle the optimizer, and restore all the callback models
self._optimizer = dill.loads(self._optimizer)
for callback in self._optimizer.fit_args.get("callbacks", []):
if callback.model is ...:
callback.set_model(self.model)
elif callback.model:
model_json, weights = callback.model
model = tf.keras.models.model_from_json(
model_json,
custom_objects={"MultivariateNormalTriL": MultivariateNormalTriL},
)
model.set_weights(weights)
callback.set_model(model)
# Recompile the model
self.model.compile(
self.optimizer.optimizer,
loss=[self.optimizer.loss] * self._model.ensemble_size,
metrics=[self.optimizer.metrics] * self._model.ensemble_size,
)
| 25,439 | 47.923077 | 100 | py |
trieste-develop | trieste-develop/trieste/models/keras/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This package contains the primary interface for deep neural network models. It also contains a
number of :class:`TrainableProbabilisticModel` wrappers for neural network models. Note that
currently copying/saving models is not supported, so when
:class:`~trieste.bayesian_optimizer.BayesianOptimizer` is used ``track_state`` should be set
to `False`.
"""
from .architectures import GaussianNetwork, KerasEnsemble, KerasEnsembleNetwork
from .builders import build_keras_ensemble
from .interface import DeepEnsembleModel, KerasPredictor
from .models import DeepEnsemble
from .sampler import DeepEnsembleTrajectorySampler, deep_ensemble_trajectory
from .utils import get_tensor_spec_from_data, negative_log_likelihood, sample_with_replacement
| 1,334 | 45.034483 | 94 | py |
trieste-develop | trieste-develop/trieste/models/keras/interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Optional
import tensorflow as tf
import tensorflow_probability as tfp
from typing_extensions import Protocol, runtime_checkable
from ...types import TensorType
from ..interfaces import ProbabilisticModel
from ..optimizer import KerasOptimizer
class KerasPredictor(ProbabilisticModel, ABC):
"""
This is an interface for trainable wrappers of TensorFlow and Keras neural network models.
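    A minimal concrete subclass might look roughly as follows (an illustrative sketch, not
    part of the library; it only fills in the abstract :attr:`model` property, and the class
    and attribute names are placeholders):
    .. code-block:: python
        class MyKerasPredictor(KerasPredictor):
            def __init__(self, keras_model: tf.keras.Model, optimizer=None):
                super().__init__(optimizer)
                self._keras_model = keras_model
            @property
            def model(self) -> tf.keras.Model:
                return self._keras_model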
"""
def __init__(self, optimizer: Optional[KerasOptimizer] = None):
"""
:param optimizer: The optimizer wrapper containing the optimizer with which to train the
model and arguments for the wrapper and the optimizer. The optimizer must
be an instance of a :class:`~tf.optimizers.Optimizer`. Defaults to
:class:`~tf.optimizers.Adam` optimizer with default parameters.
:raise ValueError: If the optimizer is not an instance of :class:`~tf.optimizers.Optimizer`.
"""
if optimizer is None:
optimizer = KerasOptimizer(tf.optimizers.Adam())
self._optimizer = optimizer
if not isinstance(optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"Optimizer for `KerasPredictor` models must be an instance of a "
f"`tf.optimizers.Optimizer`, received {type(optimizer.optimizer)} instead."
)
@property
@abstractmethod
def model(self) -> tf.keras.Model:
"""The compiled Keras model."""
raise NotImplementedError
@property
def optimizer(self) -> KerasOptimizer:
"""The optimizer wrapper for training the model."""
return self._optimizer
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
return self.model.predict(query_points)
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
raise NotImplementedError(
"""
KerasPredictor does not implement sampling. Acquisition
functions relying on it cannot be used with this class by default. Certain
types of neural networks might be able to generate samples and
such subclasses should overwrite this method.
"""
)
@runtime_checkable
class DeepEnsembleModel(ProbabilisticModel, Protocol):
"""
This is an interface for deep ensemble type of model, primarily for usage by trajectory
samplers, to avoid circular imports. These models can act as probabilistic models
by deriving estimates of epistemic uncertainty from the diversity of predictions made by
individual models in the ensemble.
"""
@property
@abstractmethod
def ensemble_size(self) -> int:
"""
Returns the size of the ensemble, that is, the number of base learners or individual
models in the ensemble.
"""
raise NotImplementedError
@property
@abstractmethod
def num_outputs(self) -> int:
"""
Returns the number of outputs trained on by each member network.
"""
raise NotImplementedError
@abstractmethod
def ensemble_distributions(
self, query_points: TensorType
) -> tuple[tfp.distributions.Distribution, ...]:
"""
Return distributions for each member of the ensemble. Type of the output will depend on the
subclass, it might be a predicted value or a distribution.
:param query_points: The points at which to return outputs.
:return: The outputs for the observations at the specified ``query_points`` for each member
of the ensemble.
"""
raise NotImplementedError
| 4,335 | 36.37931 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/inducing_point_selectors.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is the home of Trieste's functionality for choosing the inducing points
of sparse variational Gaussian processes (i.e. our :class:`SparseVariational` wrapper).
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Generic
import gpflow
import tensorflow as tf
import tensorflow_probability as tfp
from scipy.cluster.vq import kmeans
from ...data import Dataset
from ...space import Box, DiscreteSearchSpace, SearchSpace
from ...types import TensorType
from ..interfaces import ProbabilisticModelType
from .interface import GPflowPredictor
class InducingPointSelector(ABC, Generic[ProbabilisticModelType]):
"""
This class provides functionality to update the inducing points of an inducing point-based model
as the Bayesian optimization progresses.
The only constraint on subclasses of :class:`InducingPointSelector` is that they preserve
    the shape of the inducing points, so as not to trigger expensive retracing.
    It can often be beneficial to change the inducing points during optimization, for example
    to allow the model to focus its limited modelling resources on promising areas of the space.
See :cite:`vakili2021scalable` for demonstrations of some of
our :class:`InducingPointSelectors`.
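    A selector is typically queried via :meth:`calculate_inducing_points`, for example (an
    illustrative sketch, where ``model`` is a sparse GPflow-based model wrapper, ``dataset``
    is the current observer data and ``Z`` holds the model's current inducing points):
    .. code-block:: python
        selector = ConditionalImprovementReduction()
        new_Z = selector.calculate_inducing_points(Z, model, dataset)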
"""
def __init__(self, recalc_every_model_update: bool = True):
"""
:param recalc_every_model_update: If True then recalculate the inducing points for each
model update, otherwise just recalculate on the first call.
"""
self._recalc_every_model_update = recalc_every_model_update
self._initialized = False
def calculate_inducing_points(
self,
current_inducing_points: TensorType,
model: ProbabilisticModelType,
dataset: Dataset,
) -> TensorType:
"""
Calculate the new inducing points given the existing inducing points.
If `recalc_every_model_update` is set to False then we only generate new inducing points
        for the first :meth:`calculate_inducing_points` call; on subsequent calls we simply
        return the current inducing points.
:param current_inducing_points: The current inducing points used by the model.
:param model: The sparse model.
:param dataset: The data from the observer.
:return: The new updated inducing points.
:raise NotImplementedError: If model has more than one set of inducing variables.
"""
tf.debugging.Assert(current_inducing_points is not None, [tf.constant([])])
if isinstance(current_inducing_points, list):
raise NotImplementedError(
"""
InducingPointSelectors only currently support models with a single set
of inducing points.
"""
)
if (
not self._initialized
) or self._recalc_every_model_update: # calculate new inducing points when required
self._initialized = True
M = tf.shape(current_inducing_points)[0]
new_inducing_points = self._recalculate_inducing_points(M, model, dataset) # [M, D]
tf.assert_equal(tf.shape(current_inducing_points), tf.shape(new_inducing_points))
return new_inducing_points # [M, D]
        else:  # otherwise don't recalculate
return current_inducing_points # [M, D]
@abstractmethod
def _recalculate_inducing_points(
self, M: int, model: ProbabilisticModelType, dataset: Dataset
) -> TensorType:
"""
Method for calculating new inducing points given a `model` and `dataset`.
This method is to be implemented by all subclasses of :class:`InducingPointSelector`.
:param M: Desired number of inducing points.
:param model: The sparse model.
:param dataset: The data from the observer.
:return: The new updated inducing points.
"""
raise NotImplementedError
class UniformInducingPointSelector(InducingPointSelector[GPflowPredictor]):
"""
An :class:`InducingPointSelector` that chooses points sampled uniformly across the search space.
"""
def __init__(self, search_space: SearchSpace, recalc_every_model_update: bool = True):
"""
:param search_space: The global search space over which the optimization is defined.
:param recalc_every_model_update: If True then recalculate the inducing points for each
model update, otherwise just recalculate on the first call.
"""
super().__init__(recalc_every_model_update)
self._search_space = search_space
def _recalculate_inducing_points(
self, M: int, model: GPflowPredictor, dataset: Dataset
) -> TensorType:
"""
Sample `M` points. If `search_space` is a :class:`Box` then we use a space-filling Sobol
design to ensure high diversity.
:param M: Desired number of inducing points.
        :param model: The sparse model.
:param dataset: The data from the observer.
:return: The new updated inducing points.
"""
if isinstance(self._search_space, Box):
return self._search_space.sample_sobol(M)
else:
return self._search_space.sample(M)
class RandomSubSampleInducingPointSelector(InducingPointSelector[GPflowPredictor]):
"""
An :class:`InducingPointSelector` that chooses points at random from the training data.
"""
def _recalculate_inducing_points(
self, M: int, model: GPflowPredictor, dataset: Dataset
) -> TensorType:
"""
Sample `M` points from the training data without replacement. If we require more
inducing points than training data, then we fill the remaining points with random
samples across the search space.
:param M: Desired number of inducing points.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The new updated inducing points.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(len(dataset.query_points) is not None, [tf.constant([])])
N = tf.shape(dataset.query_points)[0] # training data size
shuffled_query_points = tf.random.shuffle(dataset.query_points) # [N, d]
sub_sample = shuffled_query_points[: tf.math.minimum(N, M), :]
if N < M: # if fewer data than inducing points then sample remaining uniformly
data_as_discrete_search_space = DiscreteSearchSpace(dataset.query_points)
convex_hull_of_data = Box(
lower=data_as_discrete_search_space.lower,
upper=data_as_discrete_search_space.upper,
)
uniform_sampler = UniformInducingPointSelector(convex_hull_of_data)
uniform_sample = uniform_sampler._recalculate_inducing_points(
M - N, model, dataset
) # [M-N, d]
sub_sample = tf.concat([sub_sample, uniform_sample], 0) # [M, d]
return sub_sample # [M, d]
class KMeansInducingPointSelector(InducingPointSelector[GPflowPredictor]):
"""
An :class:`InducingPointSelector` that chooses points as centroids of a K-means clustering
of the training data.
"""
def _recalculate_inducing_points(
self, M: int, model: GPflowPredictor, dataset: Dataset
) -> TensorType:
"""
Calculate `M` centroids from a K-means clustering of the training data.
If the clustering returns fewer than `M` centroids or if we have fewer than `M` training
data, then we fill the remaining points with random samples across the search space.
:param M: Desired number of inducing points.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The new updated inducing points.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
query_points = dataset.query_points # [N, d]
N = tf.shape(query_points)[0]
shuffled_query_points = tf.random.shuffle(query_points) # [N, d]
query_points_stds = tf.math.reduce_std(shuffled_query_points, 0) # [d]
        num_dims = tf.cast(tf.shape(shuffled_query_points)[-1], N.dtype)
        if (
            tf.math.count_nonzero(query_points_stds, dtype=N.dtype) == num_dims
        ):  # standardize only if all dimensions have non-zero std
normalize = True
shuffled_query_points = shuffled_query_points / query_points_stds # [N, d]
else:
normalize = False
centroids, _ = kmeans(shuffled_query_points, int(tf.math.minimum(M, N))) # [C, d]
if normalize:
centroids *= query_points_stds # [M, d]
if len(centroids) < M: # choose remaining points as random samples
data_as_discrete_search_space = DiscreteSearchSpace(dataset.query_points)
convex_hull_of_data = Box(
lower=data_as_discrete_search_space.lower,
upper=data_as_discrete_search_space.upper,
)
uniform_sampler = UniformInducingPointSelector(convex_hull_of_data)
extra_centroids = uniform_sampler._recalculate_inducing_points( # [M-C, d]
M - len(centroids), model, dataset
)
centroids = tf.concat([centroids, extra_centroids], axis=0) # [M, d]
return centroids # [M, d]
class QualityFunction(ABC):
"""
A :const:`QualityFunction` uses a `model` to measure the quality of each of
the `N` query points in the provided `dataset`, returning shape `[N]`.
"""
@abstractmethod
def __call__(self, model: GPflowPredictor, dataset: Dataset) -> TensorType:
"""
Evaluate the quality of the data-points according to the model.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The quality scores.
"""
class DPPInducingPointSelector(InducingPointSelector[GPflowPredictor]):
"""
    An :class:`InducingPointSelector` that follows :cite:`chen2018fast` to get a greedy
    approximation of the MAP estimate of the specified Determinantal Point Process (DPP).
    The DPP is defined through its diversity-quality decomposition, i.e. its similarity kernel
is just the kernel of the considered model and its quality scores come from the
provided :class:`QualityFunction`.
"""
def __init__(self, quality_function: QualityFunction, recalc_every_model_update: bool = True):
"""
:param quality_function: A function measuring the quality of each candidate inducing point.
:param recalc_every_model_update: If True then recalculate the inducing points for each
model update, otherwise just recalculate on the first call.
"""
super().__init__(recalc_every_model_update)
self._quality_function = quality_function
def _recalculate_inducing_points(
self,
M: int,
model: GPflowPredictor,
dataset: Dataset,
) -> TensorType:
"""
:param M: Desired number of inducing points.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The new updated inducing points.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [])
N = tf.shape(dataset.query_points)[0]
quality_scores = self._quality_function(model, dataset)
chosen_inducing_points = greedy_inference_dpp(
M=tf.minimum(M, N),
kernel=model.get_kernel(),
quality_scores=quality_scores,
dataset=dataset,
) # [min(M,N), d]
if N < M: # if fewer data than inducing points then sample remaining uniformly
data_as_discrete_search_space = DiscreteSearchSpace(dataset.query_points)
convex_hull_of_data = Box(
lower=data_as_discrete_search_space.lower,
upper=data_as_discrete_search_space.upper,
)
uniform_sampler = UniformInducingPointSelector(convex_hull_of_data)
uniform_sample = uniform_sampler._recalculate_inducing_points(
M - N,
model,
dataset,
) # [M-N, d]
chosen_inducing_points = tf.concat(
[chosen_inducing_points, uniform_sample], 0
) # [M, d]
return chosen_inducing_points # [M, d]
class UnitQualityFunction(QualityFunction):
"""
A :class:`QualityFunction` where all points are considered equal, i.e. using
this quality function for inducing point allocation corresponds to allocating
inducing points with the sole aim of minimizing predictive variance.
"""
def __call__(self, model: GPflowPredictor, dataset: Dataset) -> TensorType:
"""
Evaluate the quality of the data-points according to the model.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The quality scores.
"""
return tf.ones(tf.shape(dataset.query_points)[0], dtype=tf.float64) # [N]
class ModelBasedImprovementQualityFunction(QualityFunction):
"""
    A :class:`QualityFunction` where the quality of points is given by their expected
    improvement with respect to a conservative baseline. Expectations are taken according
    to the model (i.e. the model from the previous BO step). See :cite:`moss2023IPA` for
    details and justification.
"""
def __call__(self, model: GPflowPredictor, dataset: Dataset) -> TensorType:
"""
Evaluate the quality of the data-points according to the model.
:param model: The sparse model.
:param dataset: The data from the observer. Must be populated.
:return: The quality scores.
"""
mean, variance = model.predict(dataset.query_points) # [N, 1], [N, 1]
baseline = tf.reduce_max(mean)
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
improvement = (baseline - mean) * normal.cdf(baseline) + variance * normal.prob(
baseline
) # [N, 1]
return improvement[:, 0] # [N]
class ConditionalVarianceReduction(DPPInducingPointSelector):
"""
An :class:`InducingPointSelector` that greedily chooses the points with maximal (conditional)
predictive variance, see :cite:`burt2019rates`.
"""
def __init__(self, recalc_every_model_update: bool = True):
"""
:param recalc_every_model_update: If True then recalculate the inducing points for each
model update, otherwise just recalculate on the first call.
"""
super().__init__(UnitQualityFunction(), recalc_every_model_update)
class ConditionalImprovementReduction(DPPInducingPointSelector):
"""
An :class:`InducingPointSelector` that greedily chooses points with large predictive variance
and that are likely to be in promising regions of the search space, see :cite:`moss2023IPA`.
"""
def __init__(
self,
recalc_every_model_update: bool = True,
):
"""
:param recalc_every_model_update: If True then recalculate the inducing points for each
model update, otherwise just recalculate on the first call.
"""
super().__init__(ModelBasedImprovementQualityFunction(), recalc_every_model_update)
def greedy_inference_dpp(
M: int,
kernel: gpflow.kernels.Kernel,
quality_scores: TensorType,
dataset: Dataset,
) -> TensorType:
"""
Get a greedy approximation of the MAP estimate of the Determinantal Point Process (DPP)
over ``dataset`` following the algorithm of :cite:`chen2018fast`. Note that we are using the
quality-diversity decomposition of a DPP, specifying both a similarity ``kernel``
and ``quality_scores``.
:param M: Desired set size.
:param kernel: The underlying kernel of the DPP.
    :param quality_scores: The quality score of each item in ``dataset``.
    :param dataset: The data from the observer, whose query points form the candidate set.
    :return: The (greedily approximated) MAP estimate of the DPP.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty or if the shape of
``quality_scores`` does not match that of ``dataset.observations``.
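    For example (an illustrative sketch using unit quality scores, so that selection is driven
    purely by diversity under the kernel; it assumes ``dataset`` holds at least 50 points):
    .. code-block:: python
        kernel = gpflow.kernels.Matern52()
        quality_scores = tf.ones(tf.shape(dataset.query_points)[0], dtype=tf.float64)
        chosen_points = greedy_inference_dpp(
            M=50, kernel=kernel, quality_scores=quality_scores, dataset=dataset
        )  # [50, d]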
"""
tf.debugging.Assert(dataset is not None, [])
tf.debugging.assert_equal(tf.shape(dataset.observations)[0], tf.shape(quality_scores)[0])
tf.debugging.Assert(len(dataset.query_points) >= M, [])
chosen_indicies = [] # iteratively store chosen points
N = tf.shape(dataset.query_points)[0]
c = tf.zeros((M - 1, N)) # [M-1,N]
d_squared = kernel.K_diag(dataset.query_points) # [N]
scores = d_squared * quality_scores**2 # [N]
chosen_indicies.append(tf.argmax(scores)) # get first element
for m in range(M - 1): # get remaining elements
ix = tf.cast(chosen_indicies[-1], dtype=tf.int32) # increment Cholesky with newest point
newest_point = dataset.query_points[ix]
d_temp = tf.math.sqrt(d_squared[ix]) # [1]
L = kernel.K(dataset.query_points, newest_point[None, :])[:, 0] # [N]
if m == 0:
e = L / d_temp
c = tf.expand_dims(e, 0) # [1,N]
else:
c_temp = c[:, ix : ix + 1] # [m,1]
e = (L - tf.matmul(tf.transpose(c_temp), c[:m])) / d_temp # [N]
c = tf.concat([c, e], axis=0) # [m+1, N]
e = tf.squeeze(e, 0)
d_squared -= e**2
d_squared = tf.maximum(d_squared, 1e-50) # numerical stability
scores = d_squared * quality_scores**2 # [N]
chosen_indicies.append(tf.argmax(scores)) # get next element as point with largest score
return tf.gather(dataset.query_points, chosen_indicies)
| 18,538 | 39.655702 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is the home of the sampling functionality required by Trieste's
GPflow wrappers.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Optional, Tuple, TypeVar, Union, cast
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kernels import Kernel, MultioutputKernel
from gpflux.layers.basis_functions.fourier_features import RandomFourierFeaturesCosine
from gpflux.math import compute_A_inv_b
from typing_extensions import Protocol, TypeGuard, runtime_checkable
from ...types import TensorType
from ...utils import DEFAULTS, flatten_leading_dims
from ..interfaces import (
ProbabilisticModel,
ReparametrizationSampler,
SupportsGetInducingVariables,
SupportsGetInternalData,
SupportsGetKernel,
SupportsGetMeanFunction,
SupportsGetObservationNoise,
SupportsPredictJoint,
TrajectoryFunction,
TrajectoryFunctionClass,
TrajectorySampler,
)
_IntTensorType = Union[tf.Tensor, int]
def qmc_normal_samples(
num_samples: _IntTensorType, n_sample_dim: _IntTensorType, skip: _IntTensorType = 0
) -> tf.Tensor:
"""
    Generates `num_samples` quasi-Monte Carlo standard normal samples, each of dimension
    `n_sample_dim`, by pushing a Sobol sequence (skipping its first `skip` points) through
    the standard normal quantile function.
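    For example, ``qmc_normal_samples(4, 2)`` returns a ``[4, 2]`` float64 tensor of
    quasi-random standard normal samples.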
"""
if num_samples == 0 or n_sample_dim == 0:
return tf.zeros(shape=(num_samples, n_sample_dim), dtype=tf.float64)
sobol_samples = tf.math.sobol_sample(
dim=n_sample_dim,
num_results=num_samples,
dtype=tf.float64,
skip=skip,
)
dist = tfp.distributions.Normal(
loc=tf.constant(0.0, dtype=tf.float64),
scale=tf.constant(1.0, dtype=tf.float64),
)
normal_samples = dist.quantile(sobol_samples)
return normal_samples
class IndependentReparametrizationSampler(ReparametrizationSampler[ProbabilisticModel]):
r"""
This sampler employs the *reparameterization trick* to approximate samples from a
:class:`ProbabilisticModel`\ 's predictive distribution as
.. math:: x \mapsto \mu(x) + \epsilon \sigma(x)
where :math:`\epsilon \sim \mathcal N (0, 1)` is constant for a given sampler, thus ensuring
samples form a continuous curve.
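    A usage sketch (illustrative; ``model`` is any :class:`ProbabilisticModel` and ``at`` is
    a tensor of shape ``[N, 1, D]``):
    .. code-block:: python
        sampler = IndependentReparametrizationSampler(100, model)
        samples = sampler.sample(at)  # [N, 100, 1, L]; same samples on repeated calls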
"""
skip: TensorType = tf.Variable(0, trainable=False)
"""Number of sobol sequence points to skip. This is incremented for each sampler."""
def __init__(
self, sample_size: int, model: ProbabilisticModel, qmc: bool = False, qmc_skip: bool = True
):
"""
:param sample_size: The number of samples to take at each point. Must be positive.
:param model: The model to sample from.
:param qmc: Whether to use QMC sobol sampling instead of random normal sampling. QMC
sampling more accurately approximates a normal distribution than truly random samples.
:param qmc_skip: Whether to use the skip parameter to ensure the QMC sampler gives different
samples whenever it is reset. This is not supported with XLA.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive.
"""
super().__init__(sample_size, model)
self._eps: Optional[tf.Variable] = None
self._qmc = qmc
self._qmc_skip = qmc_skip
def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
"""
Return approximate samples from the `model` specified at :meth:`__init__`. Multiple calls to
:meth:`sample`, for any given :class:`IndependentReparametrizationSampler` and ``at``, will
produce the exact same samples. Calls to :meth:`sample` on *different*
:class:`IndependentReparametrizationSampler` instances will produce different samples.
:param at: Where to sample the predictive distribution, with shape `[..., 1, D]`, for points
of dimension `D`.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:return: The samples, of shape `[..., S, 1, L]`, where `S` is the `sample_size` and `L` is
the number of latent model dimensions.
:raise ValueError (or InvalidArgumentError): If ``at`` has an invalid shape or ``jitter``
is negative.
"""
tf.debugging.assert_shapes([(at, [..., 1, None])])
tf.debugging.assert_greater_equal(jitter, 0.0)
mean, var = self._model.predict(at[..., None, :, :]) # [..., 1, 1, L], [..., 1, 1, L]
var = var + jitter
def sample_eps() -> tf.Tensor:
self._initialized.assign(True)
if self._qmc:
if self._qmc_skip:
skip = IndependentReparametrizationSampler.skip
IndependentReparametrizationSampler.skip.assign(skip + self._sample_size)
else:
skip = tf.constant(0)
normal_samples = qmc_normal_samples(self._sample_size, mean.shape[-1], skip)
else:
normal_samples = tf.random.normal(
[self._sample_size, tf.shape(mean)[-1]], dtype=tf.float64
)
return normal_samples # [S, L]
if self._eps is None:
self._eps = tf.Variable(sample_eps())
tf.cond(
self._initialized,
lambda: self._eps,
lambda: self._eps.assign(sample_eps()),
)
return mean + tf.sqrt(var) * tf.cast(self._eps[:, None, :], var.dtype) # [..., S, 1, L]
class BatchReparametrizationSampler(ReparametrizationSampler[SupportsPredictJoint]):
r"""
This sampler employs the *reparameterization trick* to approximate batches of samples from a
:class:`ProbabilisticModel`\ 's predictive joint distribution as
.. math:: x \mapsto \mu(x) + \epsilon L(x)
where :math:`L` is the Cholesky factor s.t. :math:`LL^T` is the covariance, and
:math:`\epsilon \sim \mathcal N (0, 1)` is constant for a given sampler, thus ensuring samples
form a continuous curve.
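    A usage sketch (illustrative; ``model`` must support ``predict_joint`` and ``at`` is a
    tensor of shape ``[N, B, D]`` with a fixed batch size ``B``):
    .. code-block:: python
        sampler = BatchReparametrizationSampler(50, model)
        samples = sampler.sample(at)  # [N, 50, B, L]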
"""
skip: TensorType = tf.Variable(0, trainable=False)
"""Number of sobol sequence points to skip. This is incremented for each sampler."""
def __init__(
self,
sample_size: int,
model: SupportsPredictJoint,
qmc: bool = False,
qmc_skip: bool = True,
):
"""
:param sample_size: The number of samples for each batch of points. Must be positive.
:param model: The model to sample from.
:param qmc: Whether to use QMC sobol sampling instead of random normal sampling. QMC
sampling more accurately approximates a normal distribution than truly random samples.
:param qmc_skip: Whether to use the skip parameter to ensure the QMC sampler gives different
samples whenever it is reset. This is not supported with XLA.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive.
"""
super().__init__(sample_size, model)
if not isinstance(model, SupportsPredictJoint):
raise NotImplementedError(
f"BatchReparametrizationSampler only works with models that support "
f"predict_joint; received {model.__repr__()}"
)
self._eps: Optional[tf.Variable] = None
self._qmc = qmc
self._qmc_skip = qmc_skip
def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
"""
Return approximate samples from the `model` specified at :meth:`__init__`. Multiple calls to
:meth:`sample`, for any given :class:`BatchReparametrizationSampler` and ``at``, will
produce the exact same samples. Calls to :meth:`sample` on *different*
:class:`BatchReparametrizationSampler` instances will produce different samples.
:param at: Batches of query points at which to sample the predictive distribution, with
shape `[..., B, D]`, for batches of size `B` of points of dimension `D`. Must have a
consistent batch size across all calls to :meth:`sample` for any given
:class:`BatchReparametrizationSampler`.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:return: The samples, of shape `[..., S, B, L]`, where `S` is the `sample_size`, `B` the
number of points per batch, and `L` the dimension of the model's predictive
distribution.
:raise ValueError (or InvalidArgumentError): If any of the following are true:
- ``at`` is a scalar.
- The batch size `B` of ``at`` is not positive.
- The batch size `B` of ``at`` differs from that of previous calls.
- ``jitter`` is negative.
"""
tf.debugging.assert_rank_at_least(at, 2)
tf.debugging.assert_greater_equal(jitter, 0.0)
batch_size = at.shape[-2]
tf.debugging.assert_positive(batch_size)
mean, cov = self._model.predict_joint(at) # [..., B, L], [..., L, B, B]
def sample_eps() -> tf.Tensor:
self._initialized.assign(True)
if self._qmc:
if self._qmc_skip:
skip = IndependentReparametrizationSampler.skip
IndependentReparametrizationSampler.skip.assign(skip + self._sample_size)
else:
skip = tf.constant(0)
normal_samples = qmc_normal_samples(
self._sample_size * mean.shape[-1], batch_size, skip
) # [S*L, B]
normal_samples = tf.reshape(
normal_samples, (mean.shape[-1], self._sample_size, batch_size)
) # [L, S, B]
normal_samples = tf.transpose(normal_samples, perm=[0, 2, 1]) # [L, B, S]
else:
normal_samples = tf.random.normal(
[tf.shape(mean)[-1], batch_size, self._sample_size], dtype=tf.float64
) # [L, B, S]
return normal_samples
if self._eps is None:
# dynamically shaped as the same sampler may be called with different sized batches
self._eps = tf.Variable(sample_eps(), shape=[None, None, self._sample_size])
tf.cond(
self._initialized,
lambda: self._eps,
lambda: self._eps.assign(sample_eps()),
)
if self._initialized:
tf.debugging.assert_equal(
batch_size,
tf.shape(self._eps)[-2],
f"{type(self).__name__} requires a fixed batch size. Got batch size {batch_size}"
f" but previous batch size was {tf.shape(self._eps)[-2]}.",
)
identity = tf.eye(batch_size, dtype=cov.dtype) # [B, B]
cov_cholesky = tf.linalg.cholesky(cov + jitter * identity) # [..., L, B, B]
variance_contribution = cov_cholesky @ tf.cast(self._eps, cov.dtype) # [..., L, B, S]
leading_indices = tf.range(tf.rank(variance_contribution) - 3)
absolute_trailing_indices = [-1, -2, -3] + tf.rank(variance_contribution)
new_order = tf.concat([leading_indices, absolute_trailing_indices], axis=0)
return mean[..., None, :, :] + tf.transpose(variance_contribution, new_order)
@runtime_checkable
class FeatureDecompositionInternalDataModel(
SupportsGetKernel,
SupportsGetMeanFunction,
SupportsGetObservationNoise,
SupportsGetInternalData,
Protocol,
):
"""
A probabilistic model that supports get_kernel, get_mean_function, get_observation_noise
and get_internal_data methods.
"""
pass
@runtime_checkable
class FeatureDecompositionInducingPointModel(
SupportsGetKernel, SupportsGetMeanFunction, SupportsGetInducingVariables, Protocol
):
"""
A probabilistic model that supports get_kernel, get_mean_function
    and get_inducing_variables methods.
"""
pass
FeatureDecompositionTrajectorySamplerModel = Union[
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
]
FeatureDecompositionTrajectorySamplerModelType = TypeVar(
"FeatureDecompositionTrajectorySamplerModelType",
bound=FeatureDecompositionTrajectorySamplerModel,
contravariant=True,
)
def _is_multioutput_kernel(kernel: Kernel) -> TypeGuard[MultioutputKernel]:
return isinstance(kernel, MultioutputKernel)
def _get_kernel_function(kernel: Kernel) -> Callable[[TensorType, TensorType], tf.Tensor]:
# Select between a multioutput kernel and a single-output kernel.
def K(X: TensorType, X2: Optional[TensorType] = None) -> tf.Tensor:
if _is_multioutput_kernel(kernel):
return kernel.K(X, X2, full_output_cov=False) # [L, M, M]
else:
return tf.expand_dims(kernel.K(X, X2), axis=0) # [1, M, M]
return K
class FeatureDecompositionTrajectorySampler(
TrajectorySampler[FeatureDecompositionTrajectorySamplerModelType],
ABC,
):
r"""
This is a general class to build functions that approximate a trajectory sampled from an
underlying Gaussian process model.
In particular, we approximate the Gaussian processes' posterior samples as the finite feature
approximation
.. math:: \hat{f}(x) = \sum_{i=1}^m \phi_i(x)\theta_i
where :math:`\phi_i` are m features and :math:`\theta_i` are feature weights sampled from a
    given distribution.
    Achieving consistency (ensuring that the same sample is drawn for all evaluations of a
    particular trajectory function) for exact sample draws from a GP is prohibitively costly,
    because it scales cubically with the number of query points. However, finite feature
    representations can be evaluated with constant cost regardless of the required number of
    queries.
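    The resulting trajectory functions are used roughly as follows (an illustrative sketch,
    where ``sampler`` is an instance of a concrete subclass and ``query_points`` has shape
    ``[N, B, D]``):
    .. code-block:: python
        trajectory = sampler.get_trajectory()
        values = trajectory(query_points)                     # [N, B, L]
        trajectory = sampler.resample_trajectory(trajectory)  # new sample, no retracing
        trajectory = sampler.update_trajectory(trajectory)    # reflect a model update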
"""
def __init__(
self,
model: FeatureDecompositionTrajectorySamplerModelType,
feature_functions: ResampleableRandomFourierFeatureFunctions,
):
"""
:param model: The model to sample from.
:raise ValueError: If ``dataset`` is empty.
"""
super().__init__(model)
self._feature_functions = feature_functions
self._weight_sampler: Optional[Callable[[int], TensorType]] = None # lazy init
self._mean_function = model.get_mean_function()
def __repr__(self) -> str:
""""""
return f"""{self.__class__.__name__}(
{self._model!r},
{self._feature_functions!r})
"""
def get_trajectory(self) -> TrajectoryFunction:
"""
Generate an approximate function draw (trajectory) by sampling weights
and evaluating the feature functions.
:return: A trajectory function representing an approximate trajectory from the Gaussian
process, taking an input of shape `[N, B, D]` and returning shape `[N, B, L]`
where `L` is the number of outputs of the model.
"""
weight_sampler = self._prepare_weight_sampler() # prep feature weight distribution
return feature_decomposition_trajectory(
feature_functions=self._feature_functions,
weight_sampler=weight_sampler,
mean_function=self._mean_function,
)
def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Efficiently update a :const:`TrajectoryFunction` to reflect an update in its
underlying :class:`ProbabilisticModel` and resample accordingly.
For a :class:`FeatureDecompositionTrajectorySampler`, updating the sampler
corresponds to resampling the feature functions (taking into account any
changed kernel parameters) and recalculating the weight distribution.
:param trajectory: The trajectory function to be resampled.
:return: The new resampled trajectory function.
"""
tf.debugging.Assert(
isinstance(trajectory, feature_decomposition_trajectory), [tf.constant([])]
)
self._feature_functions.resample() # resample Fourier feature decomposition
weight_sampler = self._prepare_weight_sampler() # recalculate weight distribution
cast(feature_decomposition_trajectory, trajectory).update(weight_sampler=weight_sampler)
return trajectory # return trajectory with updated features and weight distribution
def resample_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
"""
Efficiently resample a :const:`TrajectoryFunction` in-place to avoid function retracing
with every new sample.
:param trajectory: The trajectory function to be resampled.
:return: The new resampled trajectory function.
"""
tf.debugging.Assert(
isinstance(trajectory, feature_decomposition_trajectory), [tf.constant([])]
)
cast(feature_decomposition_trajectory, trajectory).resample()
return trajectory # return trajectory with resampled weights
@abstractmethod
def _prepare_weight_sampler(self) -> Callable[[int], TensorType]: # [B] -> [B, F, L]
"""
Calculate the posterior of the feature weights for the specified feature functions,
returning a function that takes in a batch size `B` and returns `B` samples for
the weights of each of the `F` features for `L` outputs.
"""
raise NotImplementedError
class RandomFourierFeatureTrajectorySampler(
FeatureDecompositionTrajectorySampler[FeatureDecompositionInternalDataModel]
):
r"""
This class builds functions that approximate a trajectory sampled from an underlying Gaussian
    process model. For tractability, the Gaussian process is approximated with a Bayesian
    linear model across a set of features sampled from the Fourier feature decomposition of
the model's kernel. See :cite:`hernandez2014predictive` for details. Currently we do not
support models with multiple latent Gaussian processes.
In particular, we approximate the Gaussian processes' posterior samples as the finite feature
approximation
.. math:: \hat{f}(x) = \sum_{i=1}^m \phi_i(x)\theta_i
where :math:`\phi_i` are m Fourier features and :math:`\theta_i` are
feature weights sampled from a posterior distribution that depends on the feature values at the
model's datapoints.
Our implementation follows :cite:`hernandez2014predictive`, with our calculations
differing slightly depending on properties of the problem. In particular, we used different
calculation strategies depending on the number of considered features m and the number
of data points n.
If :math:`m<n` then we follow Appendix A of :cite:`hernandez2014predictive` and calculate the
posterior distribution for :math:`\theta` following their Bayesian linear regression motivation,
i.e. the computation revolves around an O(m^3) inversion of a design matrix.
If :math:`n<m` then we use the kernel trick to recast computation to revolve around an O(n^3)
inversion of a gram matrix. As well as being more efficient in early BO
steps (where :math:`n<m`), this second computation method allows much larger choices
of m (as required to approximate very flexible kernels).
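    For example (an illustrative sketch; ``model`` is a wrapper exposing ``get_kernel``,
    ``get_mean_function``, ``get_observation_noise`` and ``get_internal_data``):
    .. code-block:: python
        sampler = RandomFourierFeatureTrajectorySampler(model, num_features=1000)
        trajectory = sampler.get_trajectory()  # cheap-to-evaluate approximate GP sample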
"""
def __init__(
self,
model: FeatureDecompositionInternalDataModel,
num_features: int = 1000,
):
"""
:param model: The model to sample from.
:param num_features: The number of features used to approximate the kernel. We use a default
            of 1000 as it typically performs well for a wide range of kernels. Note that very smooth
kernels (e.g. RBF) can be well-approximated with fewer features.
:raise ValueError: If ``dataset`` is empty.
"""
if not isinstance(model, FeatureDecompositionInternalDataModel):
raise NotImplementedError(
f"RandomFourierFeatureTrajectorySampler only works with models with "
f"get_kernel, get_observation_noise and get_internal_data methods; "
f"but received {model.__repr__()}."
)
tf.debugging.assert_positive(num_features)
self._num_features = num_features
feature_functions = ResampleableRandomFourierFeatureFunctions(model, self._num_features)
super().__init__(model, feature_functions)
def _prepare_weight_sampler(self) -> Callable[[int], TensorType]: # [B] -> [B, F, 1]
"""
Calculate the posterior of theta (the feature weights) for the RFFs, returning
a function that takes in a batch size `B` and returns `B` samples for
the weights of each of the RFF `F` features for one output.
"""
dataset = self._model.get_internal_data()
num_data = tf.shape(dataset.query_points)[0] # n
if (
self._num_features < num_data
): # if m < n then calculate posterior in design space (an m*m matrix inversion)
theta_posterior = self._prepare_theta_posterior_in_design_space()
else: # if n <= m then calculate posterior in gram space (an n*n matrix inversion)
theta_posterior = self._prepare_theta_posterior_in_gram_space()
return lambda b: tf.expand_dims(theta_posterior.sample(b), axis=-1)
def _prepare_theta_posterior_in_design_space(self) -> tfp.distributions.MultivariateNormalTriL:
r"""
Calculate the posterior of theta (the feature weights) in the design space. This
distribution is a Gaussian
.. math:: \theta \sim N(D^{-1}\Phi^Ty,D^{-1}\sigma^2)
where the [m,m] design matrix :math:`D=(\Phi^T\Phi + \sigma^2I_m)` is defined for
the [n,m] matrix of feature evaluations across the training data :math:`\Phi`
and observation noise variance :math:`\sigma^2`.
"""
dataset = self._model.get_internal_data()
phi = self._feature_functions(tf.convert_to_tensor(dataset.query_points)) # [n, m]
D = tf.matmul(phi, phi, transpose_a=True) # [m, m]
s = self._model.get_observation_noise() * tf.eye(self._num_features, dtype=phi.dtype)
L = tf.linalg.cholesky(D + s)
D_inv = tf.linalg.cholesky_solve(L, tf.eye(self._num_features, dtype=phi.dtype))
residuals = dataset.observations - self._model.get_mean_function()(dataset.query_points)
theta_posterior_mean = tf.matmul(D_inv, tf.matmul(phi, residuals, transpose_a=True))[
:, 0
] # [m,]
theta_posterior_chol_covariance = tf.linalg.cholesky(
D_inv * self._model.get_observation_noise()
) # [m, m]
return tfp.distributions.MultivariateNormalTriL(
theta_posterior_mean, theta_posterior_chol_covariance
)
def _prepare_theta_posterior_in_gram_space(self) -> tfp.distributions.MultivariateNormalTriL:
r"""
Calculate the posterior of theta (the feature weights) in the gram space.
.. math:: \theta \sim N(\Phi^TG^{-1}y,I_m - \Phi^TG^{-1}\Phi)
where the [n,n] gram matrix :math:`G=(\Phi\Phi^T + \sigma^2I_n)` is defined for the [n,m]
matrix of feature evaluations across the training data :math:`\Phi` and
observation noise variance :math:`\sigma^2`.
"""
dataset = self._model.get_internal_data()
num_data = tf.shape(dataset.query_points)[0] # n
phi = self._feature_functions(tf.convert_to_tensor(dataset.query_points)) # [n, m]
G = tf.matmul(phi, phi, transpose_b=True) # [n, n]
s = self._model.get_observation_noise() * tf.eye(num_data, dtype=phi.dtype)
L = tf.linalg.cholesky(G + s)
L_inv_phi = tf.linalg.triangular_solve(L, phi) # [n, m]
residuals = dataset.observations - self._model.get_mean_function()(
dataset.query_points
) # [n, 1]
L_inv_y = tf.linalg.triangular_solve(L, residuals) # [n, 1]
theta_posterior_mean = tf.tensordot(tf.transpose(L_inv_phi), L_inv_y, [[-1], [-2]])[
:, 0
] # [m,]
theta_posterior_covariance = tf.eye(self._num_features, dtype=phi.dtype) - tf.tensordot(
tf.transpose(L_inv_phi), L_inv_phi, [[-1], [-2]]
) # [m, m]
theta_posterior_chol_covariance = tf.linalg.cholesky(theta_posterior_covariance) # [m, m]
return tfp.distributions.MultivariateNormalTriL(
theta_posterior_mean, theta_posterior_chol_covariance
)
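# Illustrative sketch, not part of the library API: one hedged example of how trajectories drawn
# from a RandomFourierFeatureTrajectorySampler might be used. The `model` argument is assumed to
# be a wrapper exposing get_kernel, get_observation_noise and get_internal_data (for example
# trieste's GaussianProcessRegression), and `query_points` is assumed to be an [N, B, D] tensor.
def _example_rff_trajectory_usage(
    model: FeatureDecompositionInternalDataModel, query_points: TensorType
) -> TensorType:
    sampler = RandomFourierFeatureTrajectorySampler(model, num_features=500)
    trajectory = sampler.get_trajectory()  # a deterministic approximate posterior sample
    values = trajectory(query_points)  # [N, B, L]; repeated calls reuse the same sample
    trajectory = sampler.resample_trajectory(trajectory)  # cheap in-place resample of weights
    return values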
class DecoupledTrajectorySampler(
FeatureDecompositionTrajectorySampler[
Union[
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
]
]
):
r"""
This class builds functions that approximate a trajectory sampled from an underlying Gaussian
process model using decoupled sampling. See :cite:`wilson2020efficiently` for an introduction
to decoupled sampling.
    Unlike our :class:`RandomFourierFeatureTrajectorySampler`, which uses an RFF decomposition to
    approximate the Gaussian process posterior, a :class:`DecoupledTrajectorySampler` only
    uses an RFF decomposition to approximate the Gaussian process prior, and instead uses
    a canonical decomposition to discretize the effect of updating the prior on the given data.
In particular, we approximate the Gaussian processes' posterior samples as the finite feature
approximation
.. math:: \hat{f}(.) = \sum_{i=1}^L w_i\phi_i(.) + \sum_{j=1}^m v_jk(.,z_j)
where :math:`\phi_i(.)` and :math:`w_i` are the Fourier features and their weights that
discretize the prior. In contrast, `k(.,z_j)` and :math:`v_i` are the canonical features and
their weights that discretize the data update.
The expression for :math:`v_i` depends on if we are using an exact Gaussian process or a sparse
approximations. See eq. (13) in :cite:`wilson2020efficiently` for details.
Note that if a model is both of :class:`FeatureDecompositionInducingPointModel` type and
:class:`FeatureDecompositionInternalDataModel` type,
:class:`FeatureDecompositionInducingPointModel` will take a priority and inducing points
will be used for computations rather than data.
"""
def __init__(
self,
model: Union[
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
],
num_features: int = 1000,
):
"""
:param model: The model to sample from.
:param num_features: The number of features used to approximate the kernel. We use a default
            of 1000 as it typically performs well for a wide range of kernels. Note that very smooth
kernels (e.g. RBF) can be well-approximated with fewer features.
:raise NotImplementedError: If the model is not of valid type.
"""
if not isinstance(
model, (FeatureDecompositionInducingPointModel, FeatureDecompositionInternalDataModel)
):
raise NotImplementedError(
f"DecoupledTrajectorySampler only works with models that either support "
f"get_kernel, get_observation_noise and get_internal_data or support get_kernel "
f"and get_inducing_variables; but received {model.__repr__()}."
)
tf.debugging.assert_positive(num_features)
self._num_features = num_features
feature_functions = ResampleableDecoupledFeatureFunctions(model, self._num_features)
super().__init__(model, feature_functions)
def _prepare_weight_sampler(self) -> Callable[[int], TensorType]: # [B] -> [B, F + M, L]
"""
Prepare the sampler function that provides samples of the feature weights
for both the RFF and canonical feature functions, i.e. we return a function
that takes in a batch size `B` and returns `B` samples for the weights of each of
the `F` RFF features and `M` canonical features for `L` outputs.
"""
kernel_K = _get_kernel_function(self._model.get_kernel())
if isinstance(self._model, FeatureDecompositionInducingPointModel):
( # extract variational parameters
inducing_points,
q_mu,
q_sqrt,
whiten,
) = self._model.get_inducing_variables() # [M, D], [M, L], [L, M, M], []
Kmm = kernel_K(inducing_points, inducing_points) # [L, M, M]
Kmm += tf.eye(tf.shape(inducing_points)[0], dtype=Kmm.dtype) * DEFAULTS.JITTER
else: # massage quantities from GP to look like variational parameters
internal_data = self._model.get_internal_data()
inducing_points = internal_data.query_points # [M, D]
q_mu = self._model.get_internal_data().observations # [M, L]
q_mu = q_mu - self._model.get_mean_function()(
inducing_points
) # account for mean function
q_sqrt = tf.eye(tf.shape(inducing_points)[0], dtype=tf.float64) # [M, M]
q_sqrt = tf.expand_dims(q_sqrt, axis=0) # [1, M, M]
q_sqrt = tf.math.sqrt(self._model.get_observation_noise()) * q_sqrt
whiten = False
Kmm = kernel_K(inducing_points, inducing_points) + q_sqrt**2 # [L, M, M]
M, L = tf.shape(q_mu)
tf.debugging.assert_shapes(
[
(inducing_points, ["M", "D"]),
(q_mu, ["M", "L"]),
(q_sqrt, ["L", "M", "M"]),
(Kmm, ["L", "M", "M"]),
]
)
        def weight_sampler(batch_size: int) -> TensorType:
prior_weights = tf.random.normal( # Non-RFF features will require scaling here
[L, self._num_features, batch_size], dtype=tf.float64
) # [L, F, B]
u_noise_sample = tf.matmul(
q_sqrt, # [L, M, M]
tf.random.normal((L, M, batch_size), dtype=tf.float64), # [L, M, B]
) # [L, M, B]
u_sample = tf.linalg.matrix_transpose(q_mu)[..., None] + u_noise_sample # [L, M, B]
if whiten:
Luu = tf.linalg.cholesky(Kmm) # [L, M, M]
u_sample = tf.matmul(Luu, u_sample) # [L, M, B]
# It is important that the feature-function is called with a tensor, instead of a
# parameter (which inducing points can be). This is to ensure pickling works correctly.
# First time a Keras layer (i.e. feature-functions) is built, the shape of the input is
# used to set the input-spec. If the input is a parameter, the input-spec will not be
# for an ordinary tensor and pickling will fail.
phi_Z = self._feature_functions(tf.convert_to_tensor(inducing_points))[
..., : self._num_features
] # [M, F] or [L, M, F]
weight_space_prior_Z = phi_Z @ prior_weights # [L, M, B]
diff = u_sample - weight_space_prior_Z # [L, M, B]
v = compute_A_inv_b(Kmm, diff) # [L, M, B]
tf.debugging.assert_shapes([(v, ["L", "M", "B"]), (prior_weights, ["L", "F", "B"])])
return tf.transpose(
tf.concat([prior_weights, v], axis=1), perm=[2, 1, 0]
) # [B, F + M, L]
return weight_sampler
class ResampleableRandomFourierFeatureFunctions(RandomFourierFeaturesCosine):
"""
A wrapper around GPFlux's random Fourier feature function that allows for
efficient in-place updating when generating new decompositions.
In particular, the bias and weights are stored as variables, which can then be
updated by calling :meth:`resample` without triggering expensive graph retracing.
Note that if a model is both of :class:`FeatureDecompositionInducingPointModel` type and
:class:`FeatureDecompositionInternalDataModel` type,
:class:`FeatureDecompositionInducingPointModel` will take a priority and inducing points
will be used for computations rather than data.
"""
def __init__(
self,
model: Union[
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
],
n_components: int,
):
"""
        :param model: The model that will be approximated by these feature functions.
:param n_components: The desired number of features.
:raise NotImplementedError: If the model is not of valid type.
"""
if not isinstance(
model,
(
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
),
):
raise NotImplementedError(
f"ResampleableRandomFourierFeatureFunctions only work with models that either"
f"support get_kernel, get_observation_noise and get_internal_data or support "
f"get_kernel and get_inducing_variables;"
f"but received {model.__repr__()}."
)
super().__init__(model.get_kernel(), n_components, dtype=tf.float64)
if isinstance(model, SupportsGetInducingVariables):
dummy_X = model.get_inducing_variables()[0][0:1, :]
else:
dummy_X = model.get_internal_data().query_points[0:1, :]
# Always build the weights and biases. This is important for saving the trajectory (using
# tf.saved_model.save) before it has been used.
self.build(dummy_X.shape)
def resample(self) -> None:
"""
Resample weights and biases
"""
self.b.assign(self._bias_init(tf.shape(self.b), dtype=self._dtype))
self.W.assign(self._weights_init(tf.shape(self.W), dtype=self._dtype))
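# Illustrative sketch, not part of the library API: resampling the feature functions in place
# redraws the random weights and biases without rebuilding the underlying Keras layer, so
# dependent trajectories can be refreshed cheaply. The `model` argument is assumed to expose
# get_kernel, get_observation_noise and get_internal_data; shapes assume a single-output kernel.
def _example_resample_feature_functions(
    model: FeatureDecompositionInternalDataModel, query_points: TensorType
) -> Tuple[TensorType, TensorType]:
    feature_functions = ResampleableRandomFourierFeatureFunctions(model, n_components=100)
    before = feature_functions(query_points)  # [N, 100]
    feature_functions.resample()  # in-place redraw of the weights and biases
    after = feature_functions(query_points)  # same shape, but a different basis sample
    return before, after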
class ResampleableDecoupledFeatureFunctions(ResampleableRandomFourierFeatureFunctions):
"""
    A wrapper around our :class:`ResampleableRandomFourierFeatureFunctions` which, rather
    than evaluating just `F` RFF functions, evaluates the concatenation of the
    `F` RFF functions with evaluations of the canonical basis functions.
Note that if a model is both of :class:`FeatureDecompositionInducingPointModel` type and
:class:`FeatureDecompositionInternalDataModel` type,
:class:`FeatureDecompositionInducingPointModel` will take a priority and inducing points
will be used for computations rather than data.
"""
def __init__(
self,
model: Union[
FeatureDecompositionInducingPointModel,
FeatureDecompositionInternalDataModel,
],
n_components: int,
):
"""
        :param model: The model that will be approximated by these feature functions.
:param n_components: The desired number of features.
"""
super().__init__(model, n_components)
if isinstance(model, SupportsGetInducingVariables):
self._inducing_points = model.get_inducing_variables()[0] # [M, D]
else:
self._inducing_points = model.get_internal_data().query_points # [M, D]
kernel_K = _get_kernel_function(self.kernel)
self._canonical_feature_functions = lambda x: tf.linalg.matrix_transpose(
kernel_K(self._inducing_points, x)
)
def call(self, x: TensorType) -> TensorType: # [N, D] -> [N, F + M] or [L, N, F + M]
"""
combine prior basis functions with canonical basis functions
"""
fourier_feature_eval = super().call(x) # [N, F] or [L, N, F]
canonical_feature_eval = self._canonical_feature_functions(x) # [1, N, M] or [L, N, M]
# ensure matching rank between features, i.e. drop the leading 1 dimension
matched_shape = tf.shape(canonical_feature_eval)[-tf.rank(fourier_feature_eval) :]
canonical_feature_eval = tf.reshape(canonical_feature_eval, matched_shape)
return tf.concat([fourier_feature_eval, canonical_feature_eval], axis=-1)
class feature_decomposition_trajectory(TrajectoryFunctionClass):
r"""
An approximate sample from a Gaussian processes' posterior samples represented as a
finite weighted sum of features.
A trajectory is given by
.. math:: \hat{f}(x) = \sum_{i=1}^m \phi_i(x)\theta_i
where :math:`\phi_i` are m feature functions and :math:`\theta_i` are
feature weights sampled from a posterior distribution.
The number of trajectories (i.e. batch size) is determined from the first call of the
trajectory. In order to change the batch size, a new :class:`TrajectoryFunction` must be built.
"""
def __init__(
self,
feature_functions: Callable[[TensorType], TensorType],
weight_sampler: Callable[[int], TensorType],
mean_function: Callable[[TensorType], TensorType],
):
"""
        :param feature_functions: Set of feature functions.
:param weight_sampler: New sampler that generates feature weight samples.
:param mean_function: The underlying model's mean function.
"""
self._feature_functions = feature_functions
self._mean_function = mean_function
self._weight_sampler = weight_sampler
self._initialized = tf.Variable(False)
self._weights_sample = tf.Variable( # dummy init to be updated before trajectory evaluation
tf.ones([0, 0, 0], dtype=tf.float64), shape=[None, None, None]
)
self._batch_size = tf.Variable(
0, dtype=tf.int32
) # dummy init to be updated before trajectory evaluation
@tf.function
def __call__(self, x: TensorType) -> TensorType: # [N, B, D] -> [N, B, L]
"""Call trajectory function."""
if not self._initialized: # work out desired batch size from input
self._batch_size.assign(tf.shape(x)[-2]) # B
self.resample() # sample B feature weights
self._initialized.assign(True)
tf.debugging.assert_equal(
tf.shape(x)[-2],
self._batch_size.value(),
message=f"""
This trajectory only supports batch sizes of {self._batch_size}.
If you wish to change the batch size you must get a new trajectory
by calling the get_trajectory method of the trajectory sampler.
""",
)
flat_x, unflatten = flatten_leading_dims(x) # [N*B, D]
flattened_feature_evaluations = self._feature_functions(
flat_x
) # [N*B, F + M] or [L, N*B, F + M]
# ensure tensor is always rank 3
rank3_shape = tf.concat([[1], tf.shape(flattened_feature_evaluations)], axis=0)[-3:]
flattened_feature_evaluations = tf.reshape(flattened_feature_evaluations, rank3_shape)
flattened_feature_evaluations = tf.transpose(
flattened_feature_evaluations, perm=[1, 2, 0]
) # [N*B, F + M, L]
feature_evaluations = unflatten(flattened_feature_evaluations) # [N, B, F + M, L]
mean = self._mean_function(x) # account for the model's mean function
return tf.reduce_sum(feature_evaluations * self._weights_sample, -2) + mean # [N, B, L]
def resample(self) -> None:
"""
Efficiently resample in-place without retracing.
"""
self._weights_sample.assign( # [B, F + M, L]
self._weight_sampler(self._batch_size)
) # resample weights
def update(self, weight_sampler: Callable[[int], TensorType]) -> None:
"""
Efficiently update the trajectory with a new weight distribution and resample its weights.
:param weight_sampler: New sampler that generates feature weight samples.
"""
self._weight_sampler = weight_sampler # update weight sampler
self.resample() # resample weights
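# Illustrative sketch, not part of the library API: drawing decoupled trajectories and refreshing
# them once the wrapped model has been updated elsewhere. The `model` argument is assumed to
# satisfy the protocols required by DecoupledTrajectorySampler, and `query_points` to be an
# [N, B, D] tensor whose batch dimension B matches that of later calls.
def _example_decoupled_trajectory_usage(
    model: FeatureDecompositionInternalDataModel, query_points: TensorType
) -> TensorType:
    sampler = DecoupledTrajectorySampler(model, num_features=500)
    trajectory = sampler.get_trajectory()
    values = trajectory(query_points)  # [N, B, L]
    # ... after the wrapped model has been updated and re-optimized with new data ...
    trajectory = sampler.update_trajectory(trajectory)  # reuse structure, refresh the weights
    return values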
| 40,450 | 42.402361 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Tuple, Union
import gpflow
import tensorflow as tf
import tensorflow_probability as tfp
from ...data import Dataset
from ...types import TensorType
from ...utils import DEFAULTS
from ..optimizer import BatchOptimizer, Optimizer
from .interface import GPflowPredictor
def assert_data_is_compatible(new_data: Dataset, existing_data: Dataset) -> None:
"""
Checks that new data is compatible with existing data.
:param new_data: New data.
:param existing_data: Existing data.
:raise ValueError: if trailing dimensions of the query point or observation differ.
"""
if new_data.query_points.shape[-1] != existing_data.query_points.shape[-1]:
raise ValueError(
f"Shape {new_data.query_points.shape} of new query points is incompatible with"
f" shape {existing_data.query_points.shape} of existing query points. Trailing"
f" dimensions must match."
)
if new_data.observations.shape[-1] != existing_data.observations.shape[-1]:
raise ValueError(
f"Shape {new_data.observations.shape} of new observations is incompatible with"
f" shape {existing_data.observations.shape} of existing observations. Trailing"
f" dimensions must match."
)
def randomize_hyperparameters(object: gpflow.Module) -> None:
"""
    Sets hyperparameters to random samples from their constrained domains or (if no constraints
    are available) their prior distributions.
:param object: Any gpflow Module.
"""
for param in object.trainable_parameters:
if isinstance(param.bijector, tfp.bijectors.Sigmoid):
sample = tf.random.uniform(
param.bijector.low.shape,
minval=param.bijector.low,
maxval=param.bijector.high,
dtype=param.bijector.low.dtype,
)
param.assign(sample)
elif param.prior is not None:
# handle constant priors for multi-dimensional parameters
if param.prior.batch_shape == param.prior.event_shape == [] and tf.rank(param) == 1:
sample = param.prior.sample(tf.shape(param))
else:
sample = param.prior.sample()
param.assign(sample)
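# Illustrative sketch, not part of the module API: randomizing the hyperparameters of a small
# GPflow kernel, for instance before an optimization restart. The log-normal prior below is an
# assumption made purely for the example, so that the lengthscales have a prior to sample from.
def _example_randomize_kernel() -> gpflow.kernels.Kernel:
    kernel = gpflow.kernels.Matern52(lengthscales=[1.0, 1.0])
    kernel.lengthscales.prior = tfp.distributions.LogNormal(
        tf.zeros([2], dtype=gpflow.default_float()), tf.ones([2], dtype=gpflow.default_float())
    )
    randomize_hyperparameters(kernel)  # lengthscales now hold a fresh sample from the prior
    return kernel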
def squeeze_hyperparameters(
object: gpflow.Module, alpha: float = 1e-2, epsilon: float = 1e-7
) -> None:
"""
Squeezes the parameters to be strictly inside their range defined by the Sigmoid,
or strictly greater than the limit defined by the Shift+Softplus.
This avoids having Inf unconstrained values when the parameters are exactly at the boundary.
:param object: Any gpflow Module.
:param alpha: the proportion of the range with which to squeeze for the Sigmoid case
:param epsilon: the value with which to offset the shift for the Softplus case.
:raise ValueError: If ``alpha`` is not in (0,1) or epsilon <= 0
"""
if not (0 < alpha < 1):
raise ValueError(f"squeeze factor alpha must be in (0, 1), found {alpha}")
if not (0 < epsilon):
raise ValueError(f"offset factor epsilon must be > 0, found {epsilon}")
for param in object.trainable_parameters:
if isinstance(param.bijector, tfp.bijectors.Sigmoid):
delta = (param.bijector.high - param.bijector.low) * alpha
squeezed_param = tf.math.minimum(param, param.bijector.high - delta)
squeezed_param = tf.math.maximum(squeezed_param, param.bijector.low + delta)
param.assign(squeezed_param)
elif (
isinstance(param.bijector, tfp.bijectors.Chain)
and len(param.bijector.bijectors) == 2
and isinstance(param.bijector.bijectors[0], tfp.bijectors.Shift)
and isinstance(param.bijector.bijectors[1], tfp.bijectors.Softplus)
):
if isinstance(param.bijector.bijectors[0], tfp.bijectors.Shift) and isinstance(
param.bijector.bijectors[1], tfp.bijectors.Softplus
):
low = param.bijector.bijectors[0].shift
squeezed_param = tf.math.maximum(param, low + epsilon * tf.ones_like(param))
param.assign(squeezed_param)
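# Illustrative sketch, not part of the module API: a Sigmoid-constrained parameter sitting very
# close to its upper bound is pulled back towards the interior of its range, which keeps its
# unconstrained value finite. The module and the values below are invented for the example.
def _example_squeeze_near_boundary() -> gpflow.Parameter:
    module = gpflow.kernels.Matern52()
    module.variance = gpflow.Parameter(
        0.9999,
        transform=tfp.bijectors.Sigmoid(
            low=tf.constant(0.0, dtype=tf.float64), high=tf.constant(1.0, dtype=tf.float64)
        ),
    )
    squeeze_hyperparameters(module, alpha=0.01)  # the variance is now at most 0.99
    return module.variance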
def check_optimizer(optimizer: Union[BatchOptimizer, Optimizer]) -> None:
"""
Check that the optimizer for the GPflow models is using a correct optimizer wrapper.
Stochastic gradient descent based methods implemented in TensorFlow would not
work properly without mini-batches and hence :class:`~trieste.models.optimizers.BatchOptimizer`
that prepares mini-batches and calls the optimizer iteratively needs to be used. GPflow's
:class:`~gpflow.optimizers.Scipy` optimizer on the other hand should use the non-batch wrapper
:class:`~trieste.models.optimizers.Optimizer`.
:param optimizer: An instance of the optimizer wrapper with the underlying optimizer.
:raise ValueError: If :class:`~tf.optimizers.Optimizer` is not using
:class:`~trieste.models.optimizers.BatchOptimizer` or :class:`~gpflow.optimizers.Scipy` is
using :class:`~trieste.models.optimizers.BatchOptimizer`.
"""
if isinstance(optimizer.optimizer, gpflow.optimizers.Scipy):
if isinstance(optimizer, BatchOptimizer):
raise ValueError(
f"""
The gpflow.optimizers.Scipy can only be used with an Optimizer wrapper,
however received {optimizer}.
"""
)
if isinstance(optimizer.optimizer, tf.optimizers.Optimizer):
if not isinstance(optimizer, BatchOptimizer):
raise ValueError(
f"""
The tf.optimizers.Optimizer can only be used with a BatchOptimizer wrapper,
however received {optimizer}.
"""
)
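# Illustrative sketch, not part of the module API: optimizer/wrapper pairings that pass the check
# above. GPflow's Scipy optimizer goes with the plain Optimizer wrapper, while gradient-based
# TensorFlow optimizers such as Adam require the BatchOptimizer wrapper.
def _example_valid_optimizer_wrappers() -> None:
    check_optimizer(Optimizer(gpflow.optimizers.Scipy()))  # accepted
    check_optimizer(BatchOptimizer(tf.optimizers.Adam()))  # accepted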
def _covariance_between_points_for_variational_models(
kernel: gpflow.kernels.Kernel,
inducing_points: TensorType,
q_sqrt: TensorType,
query_points_1: TensorType,
query_points_2: TensorType,
whiten: bool,
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
.. math:: \Sigma_{12} = K_{1x}BK_{x2} + K_{12} - K_{1x}K_{xx}^{-1}K_{x2}
where :math:`B = K_{xx}^{-1}(q_{sqrt}q_{sqrt}^T)K_{xx}^{-1}`
or :math:`B = L^{-1}(q_{sqrt}q_{sqrt}^T)(L^{-1})^T` if we are using
a whitened representation in our variational approximation. Here
:math:`L` is the Cholesky decomposition of :math:`K_{xx}`.
See :cite:`titsias2009variational` for a derivation.
Note that this function can also be applied to
our :class:`VariationalGaussianProcess` models by passing in the training
data rather than the locations of the inducing points.
Although query_points_2 must be a rank 2 tensor, query_points_1 can
have leading dimensions.
    :param inducing_points: The input locations chosen for our variational approximation.
    :param q_sqrt: The Cholesky decomposition of the covariance matrix of our
        variational distribution.
:param query_points_1: Set of query points with shape [..., A, D]
:param query_points_2: Sets of query points with shape [B, D]
:param whiten: If True then use whitened representations.
:return: Covariance matrix between the sets of query points with shape [..., L, A, B]
(L being the number of latent GPs = number of output dimensions)
"""
tf.debugging.assert_shapes([(query_points_1, [..., "A", "D"]), (query_points_2, ["B", "D"])])
num_latent = q_sqrt.shape[0]
K, Kx1, Kx2, K12 = _compute_kernel_blocks(
kernel, inducing_points, query_points_1, query_points_2, num_latent
)
L = tf.linalg.cholesky(K) # [L, M, M]
Linv_Kx1 = tf.linalg.triangular_solve(L, Kx1) # [..., L, M, A]
Linv_Kx2 = tf.linalg.triangular_solve(L, Kx2) # [..., L, M, B]
def _leading_mul(M_1: TensorType, M_2: TensorType, transpose_a: bool) -> TensorType:
if transpose_a: # The einsum below is just A^T*B over the last 2 dimensions.
return tf.einsum("...lji,ljk->...lik", M_1, M_2)
else: # The einsum below is just A*B^T over the last 2 dimensions.
return tf.einsum("...lij,lkj->...lik", M_1, M_2)
if whiten:
first_cov_term = _leading_mul(
_leading_mul(Linv_Kx1, q_sqrt, transpose_a=True), # [..., L, A, M]
_leading_mul(Linv_Kx2, q_sqrt, transpose_a=True), # [..., L, B, M]
transpose_a=False,
) # [..., L, A, B]
else:
Linv_qsqrt = tf.linalg.triangular_solve(L, q_sqrt) # [L, M, M]
first_cov_term = _leading_mul(
_leading_mul(Linv_Kx1, Linv_qsqrt, transpose_a=True), # [..., L, A, M]
_leading_mul(Linv_Kx2, Linv_qsqrt, transpose_a=True), # [..., L, B, M]
transpose_a=False,
) # [..., L, A, B]
second_cov_term = K12 # [..., L, A, B]
third_cov_term = _leading_mul(Linv_Kx1, Linv_Kx2, transpose_a=True) # [..., L, A, B]
cov = first_cov_term + second_cov_term - third_cov_term # [..., L, A, B]
tf.debugging.assert_shapes(
[
(query_points_1, [..., "N", "D"]),
(query_points_2, ["M", "D"]),
(cov, [..., "L", "N", "M"]),
]
)
return cov
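# Illustrative shape check, not part of the module API: the helper above maps query sets of shape
# [A, D] and [B, D] (plus M inducing points and an [L, M, M] q_sqrt) to an [L, A, B] posterior
# covariance block. All of the sizes and random inputs below are invented for the example.
def _example_variational_covariance_shapes() -> TensorType:
    kernel = gpflow.kernels.Matern52()
    inducing_points = 5.0 * tf.random.uniform([7, 2], dtype=tf.float64)  # [M=7, D=2]
    q_sqrt = tf.eye(7, batch_shape=[1], dtype=tf.float64)  # [L=1, M, M]
    query_points_1 = 5.0 * tf.random.uniform([4, 2], dtype=tf.float64)  # [A=4, D]
    query_points_2 = 5.0 * tf.random.uniform([5, 2], dtype=tf.float64)  # [B=5, D]
    cov = _covariance_between_points_for_variational_models(
        kernel, inducing_points, q_sqrt, query_points_1, query_points_2, whiten=True
    )
    return cov  # expected shape [1, 4, 5]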
def _compute_kernel_blocks(
kernel: gpflow.kernels.Kernel,
inducing_points: TensorType,
query_points_1: TensorType,
query_points_2: TensorType,
num_latent: int,
) -> tuple[TensorType, TensorType, TensorType, TensorType]:
"""
Return all the prior covariances required to calculate posterior covariances for each latent
Gaussian process, as specified by the `num_latent` input.
This function returns the covariance between: `inducing_points` and `query_points_1`;
`inducing_points` and `query_points_2`; `query_points_1` and `query_points_2`;
`inducing_points` and `inducing_points`.
The calculations are performed differently depending on the type of
kernel (single output, separate independent multi-output or shared independent
multi-output) and inducing variables (simple set, SharedIndependent or SeparateIndependent).
    Note that `num_latent` is only used when we use a single kernel for a multi-output model.
"""
if isinstance(kernel, (gpflow.kernels.SharedIndependent, gpflow.kernels.SeparateIndependent)):
if isinstance(inducing_points, list):
K = tf.concat(
[ker(Z)[None, ...] for ker, Z in zip(kernel.kernels, inducing_points)], axis=0
)
Kx1 = tf.concat(
[
ker(Z, query_points_1)[None, ...]
for ker, Z in zip(kernel.kernels, inducing_points)
],
axis=0,
) # [..., L, M, A]
Kx2 = tf.concat(
[
ker(Z, query_points_2)[None, ...]
for ker, Z in zip(kernel.kernels, inducing_points)
],
axis=0,
) # [L, M, B]
K12 = tf.concat(
[ker(query_points_1, query_points_2)[None, ...] for ker in kernel.kernels], axis=0
) # [L, M, B]
else:
K = kernel(inducing_points, full_cov=True, full_output_cov=False) # [L, M, M]
Kx1 = kernel(
inducing_points, query_points_1, full_cov=True, full_output_cov=False
) # [..., L, M, A]
Kx2 = kernel(
inducing_points, query_points_2, full_cov=True, full_output_cov=False
) # [L, M, B]
K12 = kernel(
query_points_1, query_points_2, full_cov=True, full_output_cov=False
) # [..., L, A, B]
else: # simple calculations for the single output case
K = kernel(inducing_points) # [M, M]
Kx1 = kernel(inducing_points, query_points_1) # [..., M, A]
Kx2 = kernel(inducing_points, query_points_2) # [M, B]
K12 = kernel(query_points_1, query_points_2) # [..., A, B]
if len(tf.shape(K)) == 2: # if single kernel then repeat for all latent dimensions
K = tf.repeat(tf.expand_dims(K, -3), num_latent, axis=-3)
Kx1 = tf.repeat(tf.expand_dims(Kx1, -3), num_latent, axis=-3)
Kx2 = tf.repeat(tf.expand_dims(Kx2, -3), num_latent, axis=-3)
K12 = tf.repeat(tf.expand_dims(K12, -3), num_latent, axis=-3)
elif len(tf.shape(K)) > 3:
raise NotImplementedError(
"Covariance between points is not supported " "for kernels of type " f"{type(kernel)}."
)
tf.debugging.assert_shapes(
[
(K, ["L", "M", "M"]),
(Kx1, ["L", "M", "A"]),
(Kx2, ["L", "M", "B"]),
(K12, ["L", "A", "B"]),
]
)
return K, Kx1, Kx2, K12
def _whiten_points(
model: GPflowPredictor, inducing_points: TensorType
) -> Tuple[TensorType, TensorType]:
"""
GPFlow's VGP and SVGP can use whitened representation, i.e.
q_mu and q_sqrt parametrize q(v), and u = f(X) = L v, where L = cholesky(K(X, X))
Hence we need to back-transform from f_mu and f_cov to obtain the updated
new_q_mu and new_q_sqrt.
:param model: The whitened model.
    :param inducing_points: The new inducing point locations.
:return: The updated q_mu and q_sqrt with shapes [N, L] and [L, N, N], respectively.
"""
f_mu, f_cov = model.model.predict_f(inducing_points, full_cov=True) # [N, L], [L, N, N]
f_mu -= model.model.mean_function(inducing_points)
Knn = model.get_kernel()(inducing_points, full_cov=True) # [N, N]
jitter_mat = DEFAULTS.JITTER * tf.eye(tf.shape(inducing_points)[0], dtype=Knn.dtype)
Lnn = tf.linalg.cholesky(Knn + jitter_mat) # [N, N]
new_q_mu = tf.linalg.triangular_solve(Lnn, f_mu) # [N, L]
tmp = tf.linalg.triangular_solve(Lnn[None], f_cov) # [L, N, N], L⁻¹ f_cov
S_v = tf.linalg.triangular_solve(Lnn[None], tf.linalg.matrix_transpose(tmp)) # [L, N, N]
new_q_sqrt = tf.linalg.cholesky(S_v + jitter_mat) # [L, N, N]
return new_q_mu, new_q_sqrt
| 14,693 | 41.964912 | 99 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains builders for GPflow models supported in Trieste. We found the default
configurations used here to work well in most situation, but they should not be taken as
universally good solutions.
"""
from __future__ import annotations
import math
from typing import Optional, Sequence, Type
import gpflow
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.kernels import Stationary
from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel
from ...data import Dataset, split_dataset_by_fidelity
from ...space import Box, SearchSpace
from ...types import TensorType
from ..gpflow.models import GaussianProcessRegression
KERNEL_LENGTHSCALE = tf.cast(0.2, dtype=gpflow.default_float())
"""
Default value of the kernel lengthscale parameter.
"""
KERNEL_PRIOR_SCALE = tf.cast(1.0, dtype=gpflow.default_float())
"""
Default value of the scaling factor for the kernel lengthscale and variance parameters.
"""
CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE = tf.cast(100.0, dtype=gpflow.default_float())
"""
Default value of the kernel variance parameter for classification models in the noise free case.
"""
CLASSIFICATION_KERNEL_VARIANCE = tf.cast(1.0, dtype=gpflow.default_float())
"""
Default value of the kernel variance parameter for classification models.
"""
MAX_NUM_INDUCING_POINTS = tf.cast(500, dtype=tf.int32)
"""
Default maximum number of inducing points.
"""
NUM_INDUCING_POINTS_PER_DIM = tf.cast(25, dtype=tf.int32)
"""
Default number of inducing points per dimension of the search space.
"""
SIGNAL_NOISE_RATIO_LIKELIHOOD = tf.cast(10, dtype=gpflow.default_float())
"""
Default value used for initializing (noise) variance parameter of the likelihood function.
If user does not specify it, the noise variance is set to maintain the signal to noise ratio
determined by this default value. Signal variance in the kernel is set to the empirical variance.
"""
def build_gpr(
data: Dataset,
search_space: Optional[SearchSpace] = None,
kernel_priors: bool = True,
likelihood_variance: Optional[float] = None,
trainable_likelihood: bool = False,
kernel: Optional[gpflow.kernels.Kernel] = None,
) -> GPR:
"""
Build a :class:`~gpflow.models.GPR` model with sensible initial parameters and
priors. By default, we use :class:`~gpflow.kernels.Matern52` kernel and
:class:`~gpflow.mean_functions.Constant` mean function in the model. We found the default
configuration used here to work well in most situations, but it should not be taken as a
universally good solution.
We set priors for kernel hyperparameters by default in order to stabilize model fitting. We
found the priors below to be highly effective for objective functions defined over the unit
hypercube. They do seem to work for other search space sizes, but we advise caution when using
them in such search spaces. Using priors allows for using maximum a posteriori estimate of
these kernel parameters during model fitting.
Note that although we scale parameters as a function of the size of the search space, ideally
inputs should be normalised to the unit hypercube before building a model.
:param data: Dataset from the initial design, used for estimating the variance of observations.
:param search_space: Search space for performing Bayesian optimization, used for scaling the
parameters. Required unless a kernel is passed.
:param kernel_priors: If set to `True` (default) priors are set for kernel parameters (variance
and lengthscale).
:param likelihood_variance: Likelihood (noise) variance parameter can be optionally set to a
certain value. If left unspecified (default), the noise variance is set to maintain the
signal to noise ratio of value given by ``SIGNAL_NOISE_RATIO_LIKELIHOOD``, where signal
variance in the kernel is set to the empirical variance.
    :param trainable_likelihood: If set to `True` the Gaussian likelihood parameter is set to
        be trainable. By default set to `False`.
:param kernel: The kernel to use in the model, defaults to letting the function set up a
:class:`~gpflow.kernels.Matern52` kernel.
:return: A :class:`~gpflow.models.GPR` model.
"""
empirical_mean, empirical_variance, _ = _get_data_stats(data)
if kernel is None and search_space is None:
raise ValueError(
"'build_gpr' function requires one of 'search_space' or 'kernel' arguments,"
" but got neither"
)
elif kernel is None and search_space is not None:
kernel = _get_kernel(empirical_variance, search_space, kernel_priors, kernel_priors)
mean = _get_mean_function(empirical_mean)
assert isinstance(kernel, gpflow.kernels.Kernel)
model = gpflow.models.GPR(data.astuple(), kernel, mean)
_set_gaussian_likelihood_variance(model, empirical_variance, likelihood_variance)
gpflow.set_trainable(model.likelihood, trainable_likelihood)
return model
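# Illustrative sketch, not part of the module API: building a GPR model for a toy 2D problem on
# the unit box. The objective used to generate the observations is invented for the example; in
# Trieste the returned model would typically be wrapped as GaussianProcessRegression(model).
def _example_build_gpr() -> GPR:
    search_space = Box([0.0, 0.0], [1.0, 1.0])
    query_points = search_space.sample(20)
    observations = tf.reduce_sum(tf.sin(5.0 * query_points), axis=-1, keepdims=True)
    dataset = Dataset(query_points, observations)
    return build_gpr(dataset, search_space, likelihood_variance=1e-5)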
def build_sgpr(
data: Dataset,
search_space: SearchSpace,
kernel_priors: bool = True,
likelihood_variance: Optional[float] = None,
trainable_likelihood: bool = False,
num_inducing_points: Optional[int] = None,
trainable_inducing_points: bool = False,
) -> SGPR:
"""
Build a :class:`~gpflow.models.SGPR` model with sensible initial parameters and
priors. We use :class:`~gpflow.kernels.Matern52` kernel and
:class:`~gpflow.mean_functions.Constant` mean function in the model. We found the default
    configuration used here to work well in most situations, but it should not be taken as a
universally good solution.
We set priors for kernel hyperparameters by default in order to stabilize model fitting. We
found the priors below to be highly effective for objective functions defined over the unit
hypercube. They do seem to work for other search space sizes, but we advise caution when using
them in such search spaces. Using priors allows for using maximum a posteriori estimate of
these kernel parameters during model fitting.
For performance reasons number of inducing points should not be changed during Bayesian
optimization. Hence, even if the initial dataset is smaller, we advise setting this to a higher
number. By default inducing points are set to Sobol samples for the continuous search space,
and simple random samples for discrete or mixed search spaces. This carries
the risk that optimization gets stuck if they are not trainable, which calls for adaptive
inducing point selection during the optimization. This functionality will be added to Trieste
in future.
Note that although we scale parameters as a function of the size of the search space, ideally
inputs should be normalised to the unit hypercube before building a model.
:param data: Dataset from the initial design, used for estimating the variance of observations.
:param search_space: Search space for performing Bayesian optimization, used for scaling the
parameters.
:param kernel_priors: If set to `True` (default) priors are set for kernel parameters (variance
and lengthscale).
:param likelihood_variance: Likelihood (noise) variance parameter can be optionally set to a
certain value. If left unspecified (default), the noise variance is set to maintain the
signal to noise ratio of value given by ``SIGNAL_NOISE_RATIO_LIKELIHOOD``, where signal
variance in the kernel is set to the empirical variance.
:param trainable_likelihood: If set to `True` Gaussian likelihood parameter is set to
be trainable. By default set to `False`.
:param num_inducing_points: The number of inducing points can be optionally set to a
certain value. If left unspecified (default), this number is set to either
``NUM_INDUCING_POINTS_PER_DIM``*dimensionality of the search space or value given by
``MAX_NUM_INDUCING_POINTS``, whichever is smaller.
:param trainable_inducing_points: If set to `True` inducing points will be set to
be trainable. This option should be used with caution. By default set to `False`.
:return: An :class:`~gpflow.models.SGPR` model.
"""
empirical_mean, empirical_variance, _ = _get_data_stats(data)
kernel = _get_kernel(empirical_variance, search_space, kernel_priors, kernel_priors)
mean = _get_mean_function(empirical_mean)
inducing_points = gpflow.inducing_variables.InducingPoints(
_get_inducing_points(search_space, num_inducing_points)
)
model = SGPR(data.astuple(), kernel, inducing_points, mean_function=mean)
_set_gaussian_likelihood_variance(model, empirical_variance, likelihood_variance)
gpflow.set_trainable(model.likelihood, trainable_likelihood)
gpflow.set_trainable(model.inducing_variable, trainable_inducing_points)
return model
def build_vgp_classifier(
data: Dataset,
search_space: SearchSpace,
kernel_priors: bool = True,
noise_free: bool = False,
kernel_variance: Optional[float] = None,
) -> VGP:
"""
Build a :class:`~gpflow.models.VGP` binary classification model with sensible initial
parameters and priors. We use :class:`~gpflow.kernels.Matern52` kernel and
:class:`~gpflow.mean_functions.Constant` mean function in the model. We found the default
    configuration used here to work well in most situations, but it should not be taken as a
universally good solution.
We set priors for kernel hyperparameters by default in order to stabilize model fitting. We
found the priors below to be highly effective for objective functions defined over the unit
hypercube. They do seem to work for other search space sizes, but we advise caution when using
them in such search spaces. Using priors allows for using maximum a posteriori estimate of
these kernel parameters during model fitting. In the ``noise_free`` case we do not use prior
for the kernel variance parameters.
Note that although we scale parameters as a function of the size of the search space, ideally
inputs should be normalised to the unit hypercube before building a model.
:param data: Dataset from the initial design, used for estimating the variance of observations.
:param search_space: Search space for performing Bayesian optimization, used for scaling the
parameters.
:param kernel_priors: If set to `True` (default) priors are set for kernel parameters (variance
and lengthscale). In the ``noise_free`` case kernel variance prior is not set.
:param noise_free: If there is a prior information that the classification problem is a
deterministic one, this should be set to `True` and kernel variance will be fixed to a
higher default value ``CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE`` leading to sharper
classification boundary. In this case prior for the kernel variance parameter is also not
set. By default set to `False`.
:param kernel_variance: Kernel variance parameter can be optionally set to a
certain value. If left unspecified (default), the kernel variance is set to
``CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE`` in the ``noise_free`` case and to
``CLASSIFICATION_KERNEL_VARIANCE`` otherwise.
:return: A :class:`~gpflow.models.VGP` model.
"""
if kernel_variance is not None:
tf.debugging.assert_positive(kernel_variance)
variance = tf.cast(kernel_variance, dtype=gpflow.default_float())
else:
if noise_free:
variance = CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE
else:
variance = CLASSIFICATION_KERNEL_VARIANCE
if noise_free:
add_prior_to_variance = False
else:
add_prior_to_variance = kernel_priors
model_likelihood = gpflow.likelihoods.Bernoulli()
kernel = _get_kernel(variance, search_space, kernel_priors, add_prior_to_variance)
mean = _get_mean_function(tf.cast(0.0, dtype=gpflow.default_float()))
model = VGP(data.astuple(), kernel, model_likelihood, mean_function=mean)
gpflow.set_trainable(model.kernel.variance, (not noise_free))
return model
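# Illustrative sketch, not part of the module API: a VGP classifier for noise-free binary labels
# on the unit interval. The labelling rule below is invented for the example.
def _example_build_vgp_classifier() -> VGP:
    search_space = Box([0.0], [1.0])
    query_points = search_space.sample(30)
    labels = tf.cast(query_points > 0.5, dtype=gpflow.default_float())  # [30, 1] of 0s and 1s
    return build_vgp_classifier(Dataset(query_points, labels), search_space, noise_free=True)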
def build_svgp(
data: Dataset,
search_space: SearchSpace,
classification: bool = False,
kernel_priors: bool = True,
likelihood_variance: Optional[float] = None,
trainable_likelihood: bool = False,
num_inducing_points: Optional[int] = None,
trainable_inducing_points: bool = False,
) -> SVGP:
"""
Build a :class:`~gpflow.models.SVGP` model with sensible initial parameters and
priors. Both regression and binary classification models are
available. We use :class:`~gpflow.kernels.Matern52` kernel and
:class:`~gpflow.mean_functions.Constant` mean function in the model. We found the default
    configuration used here to work well in most situations, but it should not be taken as a
universally good solution.
We set priors for kernel hyperparameters by default in order to stabilize model fitting. We
found the priors below to be highly effective for objective functions defined over the unit
hypercube. They do seem to work for other search space sizes, but we advise caution when using
them in such search spaces. Using priors allows for using maximum a posteriori estimate of
these kernel parameters during model fitting.
For performance reasons number of inducing points should not be changed during Bayesian
optimization. Hence, even if the initial dataset is smaller, we advise setting this to a higher
number. By default inducing points are set to Sobol samples for the continuous search space,
and simple random samples for discrete or mixed search spaces. This carries
the risk that optimization gets stuck if they are not trainable, which calls for adaptive
inducing point selection during the optimization. This functionality will be added to Trieste
in future.
Note that although we scale parameters as a function of the size of the search space, ideally
inputs should be normalised to the unit hypercube before building a model.
:param data: Dataset from the initial design, used for estimating the variance of observations.
:param search_space: Search space for performing Bayesian optimization, used for scaling the
parameters.
:param classification: If a classification model is needed, this should be set to `True`, in
which case a Bernoulli likelihood will be used. If a regression model is required, this
should be set to `False` (default), in which case a Gaussian likelihood is used.
:param kernel_priors: If set to `True` (default) priors are set for kernel parameters (variance
and lengthscale).
:param likelihood_variance: Likelihood (noise) variance parameter can be optionally set to a
certain value. If left unspecified (default), the noise variance is set to maintain the
signal to noise ratio of value given by ``SIGNAL_NOISE_RATIO_LIKELIHOOD``, where signal
variance in the kernel is set to the empirical variance. This argument is ignored in the
classification case.
:param trainable_likelihood: If set to `True` likelihood parameter is set to
be trainable. By default set to `False`. This argument is ignored in the classification
case.
:param num_inducing_points: The number of inducing points can be optionally set to a
certain value. If left unspecified (default), this number is set to either
``NUM_INDUCING_POINTS_PER_DIM``*dimensionality of the search space or value given by
``MAX_NUM_INDUCING_POINTS``, whichever is smaller.
:param trainable_inducing_points: If set to `True` inducing points will be set to
be trainable. This option should be used with caution. By default set to `False`.
:return: An :class:`~gpflow.models.SVGP` model.
"""
empirical_mean, empirical_variance, num_data_points = _get_data_stats(data)
if classification:
empirical_variance = CLASSIFICATION_KERNEL_VARIANCE
empirical_mean = tf.cast(0.0, dtype=gpflow.default_float())
model_likelihood = gpflow.likelihoods.Bernoulli()
else:
model_likelihood = gpflow.likelihoods.Gaussian()
kernel = _get_kernel(empirical_variance, search_space, kernel_priors, kernel_priors)
mean = _get_mean_function(empirical_mean)
inducing_points = _get_inducing_points(search_space, num_inducing_points)
model = SVGP(
kernel,
model_likelihood,
inducing_points,
mean_function=mean,
num_data=num_data_points,
)
if not classification:
_set_gaussian_likelihood_variance(model, empirical_variance, likelihood_variance)
gpflow.set_trainable(model.likelihood, trainable_likelihood)
gpflow.set_trainable(model.inducing_variable, trainable_inducing_points)
return model
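# Illustrative sketch, not part of the module API: an SVGP regression model with an explicitly
# reduced number of inducing points, as might be used for a larger initial design. The toy
# observations below are invented for the example.
def _example_build_svgp() -> SVGP:
    search_space = Box([0.0, 0.0, 0.0], [1.0, 1.0, 1.0])
    query_points = search_space.sample(200)
    observations = tf.reduce_sum(query_points**2, axis=-1, keepdims=True)
    return build_svgp(Dataset(query_points, observations), search_space, num_inducing_points=50)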
def _get_data_stats(data: Dataset) -> tuple[TensorType, TensorType, int]:
empirical_variance = tf.math.reduce_variance(data.observations)
empirical_mean = tf.math.reduce_mean(data.observations)
num_data_points = len(data.observations)
return empirical_mean, empirical_variance, num_data_points
def _get_kernel(
variance: TensorType,
search_space: SearchSpace,
add_prior_to_lengthscale: bool,
add_prior_to_variance: bool,
) -> gpflow.kernels.Kernel:
lengthscales = _get_lengthscales(search_space)
kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=lengthscales)
if add_prior_to_lengthscale:
kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(lengthscales), KERNEL_PRIOR_SCALE
)
if add_prior_to_variance:
kernel.variance.prior = tfp.distributions.LogNormal(
tf.math.log(variance), KERNEL_PRIOR_SCALE
)
return kernel
def _get_lengthscales(search_space: SearchSpace) -> TensorType:
lengthscales = (
KERNEL_LENGTHSCALE
* (search_space.upper - search_space.lower)
* math.sqrt(search_space.dimension)
)
search_space_collapsed = tf.equal(search_space.upper, search_space.lower)
lengthscales = tf.where(
search_space_collapsed, tf.cast(1.0, dtype=gpflow.default_float()), lengthscales
)
return lengthscales
def _get_mean_function(mean: TensorType) -> gpflow.mean_functions.MeanFunction:
mean_function = gpflow.mean_functions.Constant(mean)
return mean_function
def _set_gaussian_likelihood_variance(
model: GPModel, variance: TensorType, likelihood_variance: Optional[float]
) -> None:
if likelihood_variance is None:
noise_variance = variance / SIGNAL_NOISE_RATIO_LIKELIHOOD**2
else:
tf.debugging.assert_positive(likelihood_variance)
noise_variance = tf.cast(likelihood_variance, dtype=gpflow.default_float())
model.likelihood.variance = gpflow.base.Parameter(
noise_variance, transform=gpflow.utilities.positive(lower=1e-12)
)
def _get_inducing_points(
search_space: SearchSpace, num_inducing_points: Optional[int]
) -> TensorType:
if num_inducing_points is not None:
tf.debugging.assert_positive(num_inducing_points)
else:
num_inducing_points = min(
MAX_NUM_INDUCING_POINTS, NUM_INDUCING_POINTS_PER_DIM * search_space.dimension
)
if isinstance(search_space, Box):
inducing_points = search_space.sample_sobol(num_inducing_points)
else:
inducing_points = search_space.sample(num_inducing_points)
return inducing_points
def build_multifidelity_autoregressive_models(
dataset: Dataset,
num_fidelities: int,
input_search_space: SearchSpace,
likelihood_variance: float = 1e-6,
kernel_priors: bool = False,
trainable_likelihood: bool = False,
) -> Sequence[GaussianProcessRegression]:
"""
    Build the individual GPR models required for constructing a MultifidelityAutoregressive model
with `num_fidelities` fidelities.
:param dataset: Dataset of points with which to initialise the individual models,
where the final column of the final dimension of the query points contains the fidelity
:param num_fidelities: Number of fidelities desired for the MultifidelityAutoregressive model
:param input_search_space: The input search space of the models
:return: List of initialised GPR models
"""
# Split data into fidelities
data = split_dataset_by_fidelity(dataset=dataset, num_fidelities=num_fidelities)
_validate_multifidelity_data_modellable(data, num_fidelities)
gprs = [
GaussianProcessRegression(
build_gpr(
data[fidelity],
input_search_space,
likelihood_variance=likelihood_variance,
kernel_priors=kernel_priors,
trainable_likelihood=trainable_likelihood,
)
)
for fidelity in range(num_fidelities)
]
return gprs
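# Illustrative sketch, not part of the module API: building the per-fidelity GPR models for a toy
# two-fidelity problem. The final column of the query points holds the fidelity index, and the
# data-generating rule below is invented for the example.
def _example_build_multifidelity_models() -> Sequence[GaussianProcessRegression]:
    search_space = Box([0.0], [1.0])
    inputs = search_space.sample(20)  # [20, 1]
    fidelities = tf.cast(tf.reshape(tf.range(20) % 2, [-1, 1]), dtype=inputs.dtype)  # [20, 1]
    query_points = tf.concat([inputs, fidelities], axis=-1)  # [20, 2], fidelity column last
    observations = tf.sin(3.0 * inputs) + 0.1 * fidelities
    dataset = Dataset(query_points, observations)
    return build_multifidelity_autoregressive_models(dataset, 2, search_space)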
def build_multifidelity_nonlinear_autoregressive_models(
dataset: Dataset,
num_fidelities: int,
input_search_space: SearchSpace,
kernel_base_class: Type[Stationary] = gpflow.kernels.Matern32,
kernel_priors: bool = True,
trainable_likelihood: bool = False,
) -> Sequence[GaussianProcessRegression]:
"""
    Build models for training the :class:`~trieste.models.gpflow.MultifidelityNonlinearAutoregressive` model.
Builds a basic Matern32 kernel for the lowest fidelity, and the custom kernel described in
:cite:`perdikaris2017nonlinear` for the higher fidelities, which also have an extra input
dimension. Note that the initial data that the models with fidelity greater than 0 are
initialised with contain dummy data in this extra dimension, and so an `update` of the
`MultifidelityNonlinearAutoregressive` is required to propagate real data through to these
models.
:param dataset: The dataset to use to initialise the models
:param num_fidelities: The number of fidelities to model
:param input_search_space: the search space, used to initialise the kernel parameters
:param kernel_base_class: a stationary kernel type
:param kernel_priors: If set to `True` (default) priors are set for kernel parameters (variance
and lengthscale).
:return: gprs: A list containing gprs that can be used for the multifidelity model
"""
# Split data into fidelities
data = split_dataset_by_fidelity(dataset=dataset, num_fidelities=num_fidelities)
_validate_multifidelity_data_modellable(data, num_fidelities)
# Input dim requires excluding fidelity row
input_dim = dataset.query_points.shape[1] - 1
# Create kernels
kernels = _create_multifidelity_nonlinear_autoregressive_kernels(
kernel_base_class,
num_fidelities,
input_dim,
input_search_space,
kernel_priors,
kernel_priors,
)
# Initialise low fidelity GP
gprs = [
GaussianProcessRegression(
build_gpr(
data[0],
search_space=input_search_space, # This isn't actually used when we pass a kernel
kernel=kernels[0],
likelihood_variance=1e-6,
trainable_likelihood=trainable_likelihood,
)
)
]
for fidelity in range(1, num_fidelities):
# Get query points for this fidelity
qps = data[fidelity].query_points
samples_column = tf.random.normal([qps.shape[0], 1], dtype=tf.float64)
augmented_qps = tf.concat([qps, samples_column], axis=1)
augmented_dataset = Dataset(augmented_qps, data[fidelity].observations)
gprs.append(
GaussianProcessRegression(
build_gpr(
augmented_dataset,
input_search_space, # This isn't actually used when we pass a kernel
kernel=kernels[fidelity],
likelihood_variance=1e-6,
)
)
)
return gprs
def _validate_multifidelity_data_modellable(data: Sequence[Dataset], num_fidelities: int) -> None:
if num_fidelities < 2:
raise ValueError(
"Invalid number of fidelities to build Multifidelity model for,"
f" need at least 2 fidelities, got {num_fidelities}"
)
for i, fidelity_data in enumerate(data):
if len(fidelity_data) < 2:
raise ValueError(
f"Not enough data to create model for fidelity {i},"
f" need at least 2 datapoints, got {len(fidelity_data)}"
)
def _create_multifidelity_nonlinear_autoregressive_kernels(
kernel_base_class: Type[Stationary],
n_fidelities: int,
n_input_dims: int,
search_space: SearchSpace,
add_prior_to_lengthscale: bool,
add_prior_to_variance: bool,
) -> Sequence[Stationary]:
dims = list(range(n_input_dims + 1))
lengthscales = _get_lengthscales(search_space)
scale_lengthscale = 1.0
kernels = [kernel_base_class(lengthscales=lengthscales)]
for i in range(1, n_fidelities):
interaction_kernel = kernel_base_class(lengthscales=lengthscales, active_dims=dims[:-1])
scale_kernel = kernel_base_class(lengthscales=scale_lengthscale, active_dims=[dims[-1]])
bias_kernel = kernel_base_class(lengthscales=lengthscales, active_dims=dims[:-1])
gpflow.set_trainable(scale_kernel.variance, False)
if add_prior_to_lengthscale:
interaction_kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(lengthscales), KERNEL_PRIOR_SCALE
)
bias_kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(lengthscales), KERNEL_PRIOR_SCALE
)
scale_kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(tf.cast(scale_lengthscale, dtype=gpflow.default_float())),
KERNEL_PRIOR_SCALE,
)
if add_prior_to_variance:
interaction_kernel.variance.prior = tfp.distributions.LogNormal(
tf.cast(0.0, dtype=gpflow.default_float()), KERNEL_PRIOR_SCALE
)
bias_kernel.variance.prior = tfp.distributions.LogNormal(
tf.cast(0.0, dtype=gpflow.default_float()), KERNEL_PRIOR_SCALE
)
kernels.append(interaction_kernel * scale_kernel + bias_kernel)
return kernels
| 27,128 | 42.68599 | 99 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, Sequence, Tuple, Union, cast
import gpflow
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.conditionals.util import sample_mvn
from gpflow.inducing_variables import (
SeparateIndependentInducingVariables,
SharedIndependentInducingVariables,
)
from gpflow.logdensities import multivariate_normal
from gpflow.models import GPR, SGPR, SVGP, VGP
from gpflow.models.vgp import update_vgp_data
from gpflow.utilities import add_noise_cov, is_variable, multiple_assign, read_values
from gpflow.utilities.ops import leading_transpose
from ...data import (
Dataset,
add_fidelity_column,
check_and_extract_fidelity_query_points,
split_dataset_by_fidelity,
)
from ...types import TensorType
from ...utils import DEFAULTS, jit
from ...utils.misc import flatten_leading_dims
from ..interfaces import (
FastUpdateModel,
HasTrajectorySampler,
SupportsCovarianceWithTopFidelity,
SupportsGetInducingVariables,
SupportsGetInternalData,
TrainableProbabilisticModel,
TrajectorySampler,
)
from ..optimizer import BatchOptimizer, Optimizer
from .inducing_point_selectors import InducingPointSelector
from .interface import GPflowPredictor, SupportsCovarianceBetweenPoints
from .sampler import DecoupledTrajectorySampler, RandomFourierFeatureTrajectorySampler
from .utils import (
_covariance_between_points_for_variational_models,
_whiten_points,
assert_data_is_compatible,
check_optimizer,
randomize_hyperparameters,
squeeze_hyperparameters,
)
class GaussianProcessRegression(
GPflowPredictor,
TrainableProbabilisticModel,
FastUpdateModel,
SupportsCovarianceBetweenPoints,
SupportsGetInternalData,
HasTrajectorySampler,
):
"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflow :class:`~gpflow.models.GPR`.
As Bayesian optimization requires a large number of sequential predictions (i.e. when maximizing
acquisition functions), rather than calling the model directly at prediction time we instead
call the posterior objects built by these models. These posterior objects store the
pre-computed Gram matrices, which can be reused to allow faster subsequent predictions. However,
note that these posterior objects need to be updated whenever the underlying model is changed
by calling :meth:`update_posterior_cache` (this
    happens automatically after calls to :meth:`update` or :meth:`optimize`).
"""
def __init__(
self,
model: GPR,
optimizer: Optimizer | None = None,
num_kernel_samples: int = 10,
num_rff_features: int = 1000,
use_decoupled_sampler: bool = True,
):
"""
:param model: The GPflow model to wrap.
:param optimizer: The optimizer with which to train the model. Defaults to
:class:`~trieste.models.optimizer.Optimizer` with :class:`~gpflow.optimizers.Scipy`.
:param num_kernel_samples: Number of randomly sampled kernels (for each kernel parameter) to
evaluate before beginning model optimization. Therefore, for a kernel with `p`
(vector-valued) parameters, we evaluate `p * num_kernel_samples` kernels.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when calling :meth:`trajectory_sampler`. We use a default of 1000 as it
            typically performs well for a wide range of kernels. Note that very smooth
kernels (e.g. RBF) can be well-approximated with fewer features.
:param use_decoupled_sampler: If True use a decoupled random Fourier feature sampler, else
just use a random Fourier feature sampler. The decoupled sampler suffers less from
overestimating variance and can typically get away with a lower num_rff_features.
"""
super().__init__(optimizer)
self._model = model
check_optimizer(self.optimizer)
if num_kernel_samples < 0:
raise ValueError(
f"num_kernel_samples must be greater or equal to zero but got {num_kernel_samples}."
)
self._num_kernel_samples = num_kernel_samples
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater than zero but got {num_rff_features}."
)
self._num_rff_features = num_rff_features
self._use_decoupled_sampler = use_decoupled_sampler
self._ensure_variable_model_data()
self.create_posterior_cache()
def __repr__(self) -> str:
""""""
return (
f"GaussianProcessRegression({self.model!r}, {self.optimizer!r},"
f"{self._num_kernel_samples!r}, {self._num_rff_features!r},"
f"{self._use_decoupled_sampler!r})"
)
@property
def model(self) -> GPR:
return self._model
def _ensure_variable_model_data(self) -> None:
# GPflow stores the data in Tensors. However, since we want to be able to update the data
# without having to retrace the acquisition functions, put it in Variables instead.
# Data has to be stored in variables with dynamic shape to allow for changes
# Sometimes, for instance after serialization-deserialization, the shape can be overridden
# Thus here we ensure data is stored in dynamic shape Variables
if all(is_variable(x) and x.shape[0] is None for x in self._model.data):
# both query points and observations are in right shape
# nothing to do
return
self._model.data = (
tf.Variable(
self._model.data[0], trainable=False, shape=[None, *self._model.data[0].shape[1:]]
),
tf.Variable(
self._model.data[1], trainable=False, shape=[None, *self._model.data[1].shape[1:]]
),
)
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
f_mean, f_var = self.predict(query_points)
return self.model.likelihood.predict_mean_and_var(query_points, f_mean, f_var)
def update(self, dataset: Dataset) -> None:
self._ensure_variable_model_data()
x, y = self.model.data[0].value(), self.model.data[1].value()
assert_data_is_compatible(dataset, Dataset(x, y))
if dataset.query_points.shape[-1] != x.shape[-1]:
raise ValueError
if dataset.observations.shape[-1] != y.shape[-1]:
raise ValueError
self.model.data[0].assign(dataset.query_points)
self.model.data[1].assign(dataset.observations)
self.update_posterior_cache()
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
.. math:: \Sigma_{12} = K_{12} - K_{x1}(K_{xx} + \sigma^2 I)^{-1}K_{x2}
Note that query_points_2 must be a rank 2 tensor, but query_points_1 can
have leading dimensions.
:param query_points_1: Set of query points with shape [..., N, D]
:param query_points_2: Sets of query points with shape [M, D]
:return: Covariance matrix between the sets of query points with shape [..., L, N, M]
(L being the number of latent GPs = number of output dimensions)
"""
tf.debugging.assert_shapes(
[(query_points_1, [..., "N", "D"]), (query_points_2, ["M", "D"])]
)
x = self.model.data[0].value()
num_data = tf.shape(x)[0]
s = tf.linalg.diag(tf.fill([num_data], self.model.likelihood.variance))
K = self.model.kernel(x) # [num_data, num_data] or [L, num_data, num_data]
Kx1 = self.model.kernel(query_points_1, x) # [..., N, num_data] or [..., L, N, num_data]
Kx2 = self.model.kernel(x, query_points_2) # [num_data, M] or [L, num_data, M]
K12 = self.model.kernel(query_points_1, query_points_2) # [..., N, M] or [..., L, N, M]
if len(tf.shape(K)) == 2:
# if single output GPR, the kernel does not return the latent dimension so
# we add it back here
K = tf.expand_dims(K, -3)
Kx1 = tf.expand_dims(Kx1, -3)
Kx2 = tf.expand_dims(Kx2, -3)
K12 = tf.expand_dims(K12, -3)
elif len(tf.shape(K)) > 3:
raise NotImplementedError(
"Covariance between points is not supported "
"for kernels of type "
f"{type(self.model.kernel)}."
)
L = tf.linalg.cholesky(K + s) # [L, num_data, num_data]
Kx1 = leading_transpose(Kx1, [..., -1, -2]) # [..., L, num_data, N]
Linv_Kx1 = tf.linalg.triangular_solve(L, Kx1) # [..., L, num_data, N]
Linv_Kx2 = tf.linalg.triangular_solve(L, Kx2) # [L, num_data, M]
# The line below is just A^T*B over the last 2 dimensions.
cov = K12 - tf.einsum("...lji,ljk->...lik", Linv_Kx1, Linv_Kx2) # [..., L, N, M]
num_latent = self.model.num_latent_gps
if cov.shape[-3] == 1 and num_latent > 1:
# For multioutput GPR with shared kernel, we need to duplicate cov
# for each output
cov = tf.repeat(cov, num_latent, axis=-3)
tf.debugging.assert_shapes(
[
(query_points_1, [..., "N", "D"]),
(query_points_2, ["M", "D"]),
(cov, [..., "L", "N", "M"]),
]
)
return cov
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
For :class:`GaussianProcessRegression`, we (optionally) try multiple randomly sampled
kernel parameter configurations as well as the configuration specified when initializing
the kernel. The best configuration is used as the starting point for model optimization.
For trainable parameters constrained to lie in a finite interval (through a sigmoid
bijector), we begin model optimization from the best of a random sample from these
parameters' acceptable domains.
For trainable parameters without constraints but with priors, we begin model optimization
from the best of a random sample from these parameters' priors.
For trainable parameters with neither priors nor constraints, we begin optimization from
their initial values.
:param dataset: The data with which to optimize the `model`.
"""
num_trainable_params_with_priors_or_constraints = tf.reduce_sum(
[
tf.size(param)
for param in self.model.trainable_parameters
if param.prior is not None or isinstance(param.bijector, tfp.bijectors.Sigmoid)
]
)
if (
min(num_trainable_params_with_priors_or_constraints, self._num_kernel_samples) >= 1
): # Find a promising kernel initialization
self.find_best_model_initialization(
self._num_kernel_samples * num_trainable_params_with_priors_or_constraints
)
self.optimizer.optimize(self.model, dataset)
self.update_posterior_cache()
def find_best_model_initialization(self, num_kernel_samples: int) -> None:
"""
Test `num_kernel_samples` models with sampled kernel parameters. The model's kernel
parameters are then set to the sample achieving maximal likelihood.
:param num_kernel_samples: Number of randomly sampled kernels to evaluate.
"""
@tf.function
def evaluate_loss_of_model_parameters() -> tf.Tensor:
randomize_hyperparameters(self.model)
return self.model.training_loss()
squeeze_hyperparameters(self.model)
current_best_parameters = read_values(self.model)
min_loss = self.model.training_loss()
for _ in tf.range(num_kernel_samples):
try:
train_loss = evaluate_loss_of_model_parameters()
except tf.errors.InvalidArgumentError: # allow badly specified kernel params
train_loss = 1e100
if train_loss < min_loss: # only keep best kernel params
min_loss = train_loss
current_best_parameters = read_values(self.model)
multiple_assign(self.model, current_best_parameters)
def trajectory_sampler(self) -> TrajectorySampler[GaussianProcessRegression]:
"""
Return a trajectory sampler. For :class:`GaussianProcessRegression`, we build
trajectories using a random Fourier feature approximation.
At the moment only models with single latent GP are supported.
:return: The trajectory sampler.
:raise NotImplementedError: If we try to use the
sampler with a model that has more than one latent GP.
"""
if self.model.num_latent_gps > 1:
raise NotImplementedError(
f"""
Trajectory sampler does not currently support models with multiple latent
GPs, however received a model with {self.model.num_latent_gps} latent GPs.
"""
)
if self._use_decoupled_sampler:
return DecoupledTrajectorySampler(self, self._num_rff_features)
else:
return RandomFourierFeatureTrajectorySampler(self, self._num_rff_features)
def get_internal_data(self) -> Dataset:
"""
Return the model's training data.
:return: The model's training data.
"""
return Dataset(self.model.data[0], self.model.data[1])
def conditional_predict_f(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
Returns the marginal GP distribution at query_points conditioned on both the model
        and some additional data, using the exact formula. See :cite:`chevalier2014corrected`
(eqs. 8-10) for details.
:param query_points: Set of query points with shape [M, D]
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:return: mean_qp_new: predictive mean at query_points, with shape [..., M, L],
and var_qp_new: predictive variance at query_points, with shape [..., M, L]
"""
tf.debugging.assert_shapes(
[
(additional_data.query_points, [..., "N", "D"]),
(additional_data.observations, [..., "N", "L"]),
(query_points, ["M", "D"]),
],
message="additional_data must have query_points with shape [..., N, D]"
" and observations with shape [..., N, L], and query_points "
"should have shape [M, D]",
)
mean_add, cov_add = self.predict_joint(
additional_data.query_points
) # [..., N, L], [..., L, N, N]
mean_qp, var_qp = self.predict(query_points) # [M, L], [M, L]
cov_cross = self.covariance_between_points(
additional_data.query_points, query_points
) # [..., L, N, M]
cov_shape = tf.shape(cov_add)
noise = self.get_observation_noise() * tf.eye(
cov_shape[-2], batch_shape=cov_shape[:-2], dtype=cov_add.dtype
)
L_add = tf.linalg.cholesky(cov_add + noise) # [..., L, N, N]
A = tf.linalg.triangular_solve(L_add, cov_cross, lower=True) # [..., L, N, M]
var_qp_new = var_qp - leading_transpose(
tf.reduce_sum(A**2, axis=-2), [..., -1, -2]
) # [..., M, L]
mean_add_diff = additional_data.observations - mean_add # [..., N, L]
mean_add_diff = leading_transpose(mean_add_diff, [..., -1, -2])[..., None] # [..., L, N, 1]
AM = tf.linalg.triangular_solve(L_add, mean_add_diff) # [..., L, N, 1]
mean_qp_new = mean_qp + leading_transpose(
(tf.matmul(A, AM, transpose_a=True)[..., 0]), [..., -1, -2]
) # [..., M, L]
tf.debugging.assert_shapes(
[
(additional_data.observations, [..., "N", "L"]),
(query_points, ["M", "D"]),
(mean_qp_new, [..., "M", "L"]),
(var_qp_new, [..., "M", "L"]),
],
message="received unexpected shapes computing conditional_predict_f,"
"check model kernel structure?",
)
return mean_qp_new, var_qp_new
def conditional_predict_joint(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
Predicts the joint GP distribution at query_points conditioned on both the model
        and some additional data, using the exact formula. See :cite:`chevalier2014corrected`
(eqs. 8-10) for details.
:param query_points: Set of query points with shape [M, D]
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:return: mean_qp_new: predictive mean at query_points, with shape [..., M, L],
and cov_qp_new: predictive covariance between query_points, with shape
[..., L, M, M]
"""
tf.debugging.assert_shapes(
[
(additional_data.query_points, [..., "N", "D"]),
(additional_data.observations, [..., "N", "L"]),
(query_points, ["M", "D"]),
],
message="additional_data must have query_points with shape [..., N, D]"
" and observations with shape [..., N, L], and query_points "
"should have shape [M, D]",
)
leading_dims = tf.shape(additional_data.query_points)[:-2] # [...]
new_shape = tf.concat([leading_dims, tf.shape(query_points)], axis=0) # [..., M, D]
query_points_r = tf.broadcast_to(query_points, new_shape) # [..., M, D]
points = tf.concat([additional_data.query_points, query_points_r], axis=-2) # [..., N+M, D]
mean, cov = self.predict_joint(points) # [..., N+M, L], [..., L, N+M, N+M]
N = tf.shape(additional_data.query_points)[-2]
mean_add = mean[..., :N, :] # [..., N, L]
mean_qp = mean[..., N:, :] # [..., M, L]
cov_add = cov[..., :N, :N] # [..., L, N, N]
cov_qp = cov[..., N:, N:] # [..., L, M, M]
cov_cross = cov[..., :N, N:] # [..., L, N, M]
cov_shape = tf.shape(cov_add)
noise = self.get_observation_noise() * tf.eye(
cov_shape[-2], batch_shape=cov_shape[:-2], dtype=cov_add.dtype
)
L_add = tf.linalg.cholesky(cov_add + noise) # [..., L, N, N]
A = tf.linalg.triangular_solve(L_add, cov_cross, lower=True) # [..., L, N, M]
cov_qp_new = cov_qp - tf.matmul(A, A, transpose_a=True) # [..., L, M, M]
mean_add_diff = additional_data.observations - mean_add # [..., N, L]
mean_add_diff = leading_transpose(mean_add_diff, [..., -1, -2])[..., None] # [..., L, N, 1]
AM = tf.linalg.triangular_solve(L_add, mean_add_diff) # [..., L, N, 1]
mean_qp_new = mean_qp + leading_transpose(
(tf.matmul(A, AM, transpose_a=True)[..., 0]), [..., -1, -2]
) # [..., M, L]
tf.debugging.assert_shapes(
[
(additional_data.observations, [..., "N", "L"]),
(query_points, ["M", "D"]),
(mean_qp_new, [..., "M", "L"]),
(cov_qp_new, [..., "L", "M", "M"]),
],
message="received unexpected shapes computing conditional_predict_joint,"
"check model kernel structure?",
)
return mean_qp_new, cov_qp_new
def conditional_predict_f_sample(
self, query_points: TensorType, additional_data: Dataset, num_samples: int
) -> TensorType:
"""
Generates samples of the GP at query_points conditioned on both the model
and some additional data.
:param query_points: Set of query points with shape [M, D]
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
:param num_samples: number of samples
:return: samples of f at query points, with shape [..., num_samples, M, L]
"""
mean_new, cov_new = self.conditional_predict_joint(query_points, additional_data)
mean_for_sample = tf.linalg.adjoint(mean_new) # [..., L, N]
samples = sample_mvn(
mean_for_sample, cov_new, full_cov=True, num_samples=num_samples
) # [..., (S), P, N]
return tf.linalg.adjoint(samples) # [..., (S), N, L]
def conditional_predict_y(
self, query_points: TensorType, additional_data: Dataset
) -> tuple[TensorType, TensorType]:
"""
        Predicts the mean and variance of the observations y at query_points, conditioned on both
        the model and some additional data.
:param query_points: Set of query points with shape [M, D]
:param additional_data: Dataset with query_points with shape [..., N, D] and observations
with shape [..., N, L]
        :return: predictive mean at query_points, with shape [..., M, L],
            and predictive variance at query_points, with shape [..., M, L]
"""
f_mean, f_var = self.conditional_predict_f(query_points, additional_data)
return self.model.likelihood.predict_mean_and_var(query_points, f_mean, f_var)
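# Illustrative sketch (not part of the wrapped GPflow API): a minimal end-to-end use of
# `GaussianProcessRegression`. The toy 1D data, Matern52 kernel and the helper name
# `_example_gaussian_process_regression_usage` are assumptions made for demonstration only.
def _example_gaussian_process_regression_usage() -> None:
    # Fit a GPR wrapper on three toy points.
    x = tf.constant([[0.0], [0.5], [1.0]], dtype=tf.float64)
    y = tf.sin(x)
    model = GaussianProcessRegression(GPR((x, y), kernel=gpflow.kernels.Matern52()))
    model.optimize(Dataset(x, y))
    # Fast-update prediction: condition on an extra (hypothetical) observation without retraining.
    additional = Dataset(
        tf.constant([[0.25]], dtype=tf.float64), tf.constant([[0.2]], dtype=tf.float64)
    )
    query = tf.constant([[0.75]], dtype=tf.float64)
    mean, var = model.conditional_predict_f(query, additional)  # each of shape [1, 1]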
class SparseGaussianProcessRegression(
GPflowPredictor,
TrainableProbabilisticModel,
SupportsCovarianceBetweenPoints,
SupportsGetInducingVariables,
SupportsGetInternalData,
HasTrajectorySampler,
):
"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflow :class:`~gpflow.models.SGPR`.
At the moment we only support models with a single latent GP. This is due to ``compute_qu``
method in :class:`~gpflow.models.SGPR` that is used for computing covariance between
query points and trajectory sampling, which at the moment works only for single latent GP.
Similarly to our :class:`GaussianProcessRegression`, our :class:`~gpflow.models.SGPR` wrapper
directly calls the posterior objects built by these models at prediction
time. These posterior objects store the pre-computed Gram matrices, which can be reused to allow
faster subsequent predictions. However, note that these posterior objects need to be updated
whenever the underlying model is changed by calling :meth:`update_posterior_cache` (this
    happens automatically after calls to :meth:`update` or :meth:`optimize`).
"""
def __init__(
self,
model: SGPR,
optimizer: Optimizer | None = None,
num_rff_features: int = 1000,
inducing_point_selector: Optional[
InducingPointSelector[SparseGaussianProcessRegression]
] = None,
):
"""
:param model: The GPflow model to wrap.
:param optimizer: The optimizer with which to train the model. Defaults to
:class:`~trieste.models.optimizer.Optimizer` with :class:`~gpflow.optimizers.Scipy`.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when calling :meth:`trajectory_sampler`. We use a default of 1000 as it
            typically performs well for a wide range of kernels. Note that very smooth
kernels (e.g. RBF) can be well-approximated with fewer features.
:param inducing_point_selector: The (optional) desired inducing point selector that
will update the underlying GPflow SGPR model's inducing points as
the optimization progresses.
:raise NotImplementedError (or ValueError): If we try to use a model with invalid
``num_rff_features``, or an ``inducing_point_selector`` with a model
that has more than one set of inducing points.
"""
super().__init__(optimizer)
self._model = model
check_optimizer(self.optimizer)
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater or equal to zero but got {num_rff_features}."
)
self._num_rff_features = num_rff_features
if isinstance(self.model.inducing_variable, SeparateIndependentInducingVariables):
if inducing_point_selector is not None:
raise NotImplementedError(
f"""
InducingPointSelectors only currently support models with a single set
of inducing points however received inducing points of
type {type(self.model.inducing_variable)}.
"""
)
self._inducing_point_selector = inducing_point_selector
self._ensure_variable_model_data()
self.create_posterior_cache()
def __repr__(self) -> str:
""""""
return (
f"SparseGaussianProcessRegression({self.model!r}, {self.optimizer!r},"
f"{self._num_rff_features!r}, {self._inducing_point_selector!r})"
)
@property
def model(self) -> SGPR:
return self._model
@property
def inducing_point_selector(
self,
) -> Optional[InducingPointSelector[SparseGaussianProcessRegression]]:
return self._inducing_point_selector
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
f_mean, f_var = self.predict(query_points)
return self.model.likelihood.predict_mean_and_var(query_points, f_mean, f_var)
def _ensure_variable_model_data(self) -> None:
# GPflow stores the data in Tensors. However, since we want to be able to update the data
# without having to retrace the acquisition functions, put it in Variables instead.
# Data has to be stored in variables with dynamic shape to allow for changes
# Sometimes, for instance after serialization-deserialization, the shape can be overridden
# Thus here we ensure data is stored in dynamic shape Variables
if not all(is_variable(x) and x.shape[0] is None for x in self._model.data):
self._model.data = (
tf.Variable(
self._model.data[0],
trainable=False,
shape=[None, *self._model.data[0].shape[1:]],
),
tf.Variable(
self._model.data[1],
trainable=False,
shape=[None, *self._model.data[1].shape[1:]],
),
)
if not is_variable(self._model.num_data):
self._model.num_data = tf.Variable(self._model.num_data, trainable=False)
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
:param dataset: The data with which to optimize the `model`.
"""
self.optimizer.optimize(self.model, dataset)
self.update_posterior_cache()
def update(self, dataset: Dataset) -> None:
self._ensure_variable_model_data()
x, y = self.model.data[0].value(), self.model.data[1].value()
assert_data_is_compatible(dataset, Dataset(x, y))
if dataset.query_points.shape[-1] != x.shape[-1]:
raise ValueError
if dataset.observations.shape[-1] != y.shape[-1]:
raise ValueError
self.model.data[0].assign(dataset.query_points)
self.model.data[1].assign(dataset.observations)
current_inducing_points, q_mu, _, _ = self.get_inducing_variables()
if isinstance(current_inducing_points, list):
inducing_points_trailing_dim = current_inducing_points[0].shape[-1]
else:
inducing_points_trailing_dim = current_inducing_points.shape[-1]
if dataset.query_points.shape[-1] != inducing_points_trailing_dim:
raise ValueError(
f"Shape {dataset.query_points.shape} of new query points is incompatible with"
f" shape {self.model.inducing_variable.Z.shape} of existing query points."
f" Trailing dimensions must match."
)
if dataset.observations.shape[-1] != q_mu.shape[-1]:
raise ValueError(
f"Shape {dataset.observations.shape} of new observations is incompatible with"
f" shape {self.model.q_mu.shape} of existing observations. Trailing"
f" dimensions must match."
)
num_data = dataset.query_points.shape[0]
self.model.num_data.assign(num_data)
if self._inducing_point_selector is not None:
new_inducing_points = self._inducing_point_selector.calculate_inducing_points(
current_inducing_points, self, dataset
)
if not tf.reduce_all(
tf.math.equal(
new_inducing_points,
current_inducing_points,
)
): # only bother updating if points actually change
self._update_inducing_variables(new_inducing_points)
self.update_posterior_cache()
def _update_inducing_variables(self, new_inducing_points: TensorType) -> None:
"""
When updating the inducing points of a model, we must also update the other
inducing variables, i.e. `q_mu` and `q_sqrt` accordingly. The exact form of this update
depends if we are using whitened representations of the inducing variables.
See :meth:`_whiten_points` for details.
:param new_inducing_points: The desired values for the new inducing points.
:raise NotImplementedError: If we try to update the inducing variables of a model
that has more than one set of inducing points.
"""
if isinstance(new_inducing_points, list):
raise NotImplementedError(
f"""
We do not currently support updating models with multiple sets of
                    inducing points, however received: {new_inducing_points}
"""
)
old_inducing_points, _, _, _ = self.get_inducing_variables()
tf.assert_equal(
tf.shape(old_inducing_points), tf.shape(new_inducing_points)
) # number of inducing points must not change
if isinstance(self.model.inducing_variable, SharedIndependentInducingVariables):
# gpflow says inducing_variable might be a ndarray; it won't
cast(TensorType, self.model.inducing_variable.inducing_variable).Z.assign(
new_inducing_points
) # [M, D]
else:
self.model.inducing_variable.Z.assign(new_inducing_points) # [M, D]
def get_inducing_variables(
self,
) -> Tuple[Union[TensorType, list[TensorType]], TensorType, TensorType, bool]:
"""
Return the model's inducing variables. The SGPR model does not have ``q_mu``, ``q_sqrt`` and
``whiten`` objects. We can use ``compute_qu`` method to obtain ``q_mu`` and ``q_sqrt``,
while the SGPR model does not use the whitened representation. Note that at the moment
``compute_qu`` works only for single latent GP and returns ``q_sqrt`` in a shape that is
        inconsistent with the SVGP model (hence we need to modify its shape).
:return: The inducing points (i.e. locations of the inducing variables), as a Tensor or a
list of Tensors (when the model has multiple inducing points); a tensor containing the
variational mean ``q_mu``; a tensor containing the Cholesky decomposition of the
variational covariance ``q_sqrt``; and a bool denoting if we are using whitened or
non-whitened representations.
:raise NotImplementedError: If the model has more than one latent GP.
"""
if self.model.num_latent_gps > 1:
raise NotImplementedError(
f"""
We do not currently support models with more than one latent GP,
however received a model with {self.model.num_latent_gps} outputs.
"""
)
inducing_variable = self.model.inducing_variable
if isinstance(inducing_variable, SharedIndependentInducingVariables):
# gpflow says inducing_variable might be a ndarray; it won't
inducing_points = cast(TensorType, inducing_variable.inducing_variable).Z # [M, D]
elif isinstance(inducing_variable, SeparateIndependentInducingVariables):
inducing_points = [
cast(TensorType, inducing_variable).Z
for inducing_variable in inducing_variable.inducing_variables
] # list of L [M, D] tensors
else:
inducing_points = inducing_variable.Z # [M, D]
q_mu, q_var = self.model.compute_qu()
q_sqrt = tf.linalg.cholesky(q_var)
q_sqrt = tf.expand_dims(q_sqrt, 0)
whiten = False
return inducing_points, q_mu, q_sqrt, whiten
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
Note that query_points_2 must be a rank 2 tensor, but query_points_1 can
have leading dimensions.
:param query_points_1: Set of query points with shape [..., A, D]
:param query_points_2: Sets of query points with shape [B, D]
:return: Covariance matrix between the sets of query points with shape [..., L, A, B]
(L being the number of latent GPs = number of output dimensions)
"""
inducing_points, _, q_sqrt, whiten = self.get_inducing_variables()
return _covariance_between_points_for_variational_models(
kernel=self.get_kernel(),
inducing_points=inducing_points,
q_sqrt=q_sqrt,
query_points_1=query_points_1,
query_points_2=query_points_2,
whiten=whiten,
)
def trajectory_sampler(self) -> TrajectorySampler[SparseGaussianProcessRegression]:
"""
Return a trajectory sampler. For :class:`SparseGaussianProcessRegression`, we build
trajectories using a decoupled random Fourier feature approximation. Note that this
is available only for single output models.
At the moment only models with single latent GP are supported.
:return: The trajectory sampler.
:raise NotImplementedError: If we try to use the
sampler with a model that has more than one latent GP.
"""
if self.model.num_latent_gps > 1:
raise NotImplementedError(
f"""
Trajectory sampler does not currently support models with multiple latent
GPs, however received a model with {self.model.num_latent_gps} latent GPs.
"""
)
return DecoupledTrajectorySampler(self, self._num_rff_features)
def get_internal_data(self) -> Dataset:
"""
Return the model's training data.
:return: The model's training data.
"""
return Dataset(self.model.data[0], self.model.data[1])
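# Illustrative sketch (assumptions: the toy data, inducing locations and RBF kernel below are
# chosen purely for demonstration): wrapping a GPflow SGPR in `SparseGaussianProcessRegression`.
def _example_sparse_gaussian_process_regression_usage() -> None:
    x = tf.random.uniform([20, 1], dtype=tf.float64)
    y = tf.sin(10.0 * x)
    inducing_points = tf.constant([[0.1], [0.5], [0.9]], dtype=tf.float64)
    sgpr = SGPR((x, y), gpflow.kernels.RBF(), inducing_variable=inducing_points)
    model = SparseGaussianProcessRegression(sgpr)
    model.optimize(Dataset(x, y))
    # Marginal predictions (including observation noise) at a new input.
    mean, var = model.predict_y(tf.constant([[0.3]], dtype=tf.float64))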
class SparseVariational(
GPflowPredictor,
TrainableProbabilisticModel,
SupportsCovarianceBetweenPoints,
SupportsGetInducingVariables,
HasTrajectorySampler,
):
"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflow :class:`~gpflow.models.SVGP`.
Similarly to our :class:`GaussianProcessRegression`, our :class:`~gpflow.models.SVGP` wrapper
directly calls the posterior objects built by these models at prediction
time. These posterior objects store the pre-computed Gram matrices, which can be reused to allow
faster subsequent predictions. However, note that these posterior objects need to be updated
whenever the underlying model is changed by calling :meth:`update_posterior_cache` (this
    happens automatically after calls to :meth:`update` or :meth:`optimize`).
"""
def __init__(
self,
model: SVGP,
optimizer: Optimizer | None = None,
num_rff_features: int = 1000,
inducing_point_selector: Optional[InducingPointSelector[SparseVariational]] = None,
):
"""
:param model: The underlying GPflow sparse variational model.
:param optimizer: The optimizer with which to train the model. Defaults to
:class:`~trieste.models.optimizer.BatchOptimizer` with :class:`~tf.optimizers.Adam` with
batch size 100.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when performing decoupled Thompson sampling through
its :meth:`trajectory_sampler`. We use a default of 1000 as it typically
            performs well for a wide range of kernels. Note that very smooth kernels (e.g. RBF)
can be well-approximated with fewer features.
:param inducing_point_selector: The (optional) desired inducing_point_selector that
will update the underlying GPflow sparse variational model's inducing points as
the optimization progresses.
:raise NotImplementedError: If we try to use an inducing_point_selector with a model
that has more than one set of inducing points.
"""
tf.debugging.assert_rank(
model.q_sqrt, 3, "SparseVariational requires an SVGP model with q_diag=False."
)
if optimizer is None:
optimizer = BatchOptimizer(tf.optimizers.Adam(), batch_size=100, compile=True)
super().__init__(optimizer)
self._model = model
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater or equal to zero but got {num_rff_features}."
)
self._num_rff_features = num_rff_features
check_optimizer(optimizer)
if isinstance(self.model.inducing_variable, SeparateIndependentInducingVariables):
if inducing_point_selector is not None:
raise NotImplementedError(
f"""
InducingPointSelectors only currently support models with a single set
of inducing points however received inducing points of
type {type(self.model.inducing_variable)}.
"""
)
self._inducing_point_selector = inducing_point_selector
self._ensure_variable_model_data()
self.create_posterior_cache()
def _ensure_variable_model_data(self) -> None:
# GPflow stores the data in Tensors. However, since we want to be able to update the data
# without having to retrace the acquisition functions, put it in Variables instead.
# Data has to be stored in variables with dynamic shape to allow for changes
# Sometimes, for instance after serialization-deserialization, the shape can be overridden
# Thus here we ensure data is stored in dynamic shape Variables
if not is_variable(self._model.num_data):
self._model.num_data = tf.Variable(self._model.num_data, trainable=False)
def __repr__(self) -> str:
""""""
return (
f"SparseVariational({self.model!r}, {self.optimizer!r},"
f"{self._num_rff_features!r}, {self._inducing_point_selector!r})"
)
@property
def model(self) -> SVGP:
return self._model
@property
def inducing_point_selector(self) -> Optional[InducingPointSelector[SparseVariational]]:
return self._inducing_point_selector
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
f_mean, f_var = self.predict(query_points)
return self.model.likelihood.predict_mean_and_var(query_points, f_mean, f_var)
def update(self, dataset: Dataset) -> None:
self._ensure_variable_model_data()
# Hard-code asserts from _assert_data_is_compatible because model doesn't store dataset
current_inducing_points, q_mu, _, _ = self.get_inducing_variables()
if isinstance(current_inducing_points, list):
inducing_points_trailing_dim = current_inducing_points[0].shape[-1]
else:
inducing_points_trailing_dim = current_inducing_points.shape[-1]
if dataset.query_points.shape[-1] != inducing_points_trailing_dim:
raise ValueError(
f"Shape {dataset.query_points.shape} of new query points is incompatible with"
f" shape {self.model.inducing_variable.Z.shape} of existing query points."
f" Trailing dimensions must match."
)
if dataset.observations.shape[-1] != q_mu.shape[-1]:
raise ValueError(
f"Shape {dataset.observations.shape} of new observations is incompatible with"
f" shape {self.model.q_mu.shape} of existing observations. Trailing"
f" dimensions must match."
)
num_data = dataset.query_points.shape[0]
assert self.model.num_data is not None
self.model.num_data.assign(num_data)
if self._inducing_point_selector is not None:
new_inducing_points = self._inducing_point_selector.calculate_inducing_points(
current_inducing_points, self, dataset
)
if not tf.reduce_all(
tf.math.equal(
new_inducing_points,
current_inducing_points,
)
): # only bother updating if points actually change
self._update_inducing_variables(new_inducing_points)
self.update_posterior_cache()
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
:param dataset: The data with which to optimize the `model`.
"""
self.optimizer.optimize(self.model, dataset)
self.update_posterior_cache()
def _update_inducing_variables(self, new_inducing_points: TensorType) -> None:
"""
When updating the inducing points of a model, we must also update the other
inducing variables, i.e. `q_mu` and `q_sqrt` accordingly. The exact form of this update
depends if we are using whitened representations of the inducing variables.
See :meth:`_whiten_points` for details.
:param new_inducing_points: The desired values for the new inducing points.
:raise NotImplementedError: If we try to update the inducing variables of a model
that has more than one set of inducing points.
"""
if isinstance(new_inducing_points, list):
raise NotImplementedError(
f"""
We do not currently support updating models with multiple sets of
                    inducing points, however received: {new_inducing_points}
"""
)
old_inducing_points, _, _, whiten = self.get_inducing_variables()
tf.assert_equal(
tf.shape(old_inducing_points), tf.shape(new_inducing_points)
) # number of inducing points must not change
if whiten:
new_q_mu, new_q_sqrt = _whiten_points(self, new_inducing_points)
else:
new_q_mu, new_f_cov = self.predict_joint(new_inducing_points) # [N, L], [L, N, N]
new_q_mu -= self.model.mean_function(new_inducing_points)
jitter_mat = DEFAULTS.JITTER * tf.eye(
tf.shape(new_inducing_points)[0], dtype=new_f_cov.dtype
)
new_q_sqrt = tf.linalg.cholesky(new_f_cov + jitter_mat)
self.model.q_mu.assign(new_q_mu) # [N, L]
self.model.q_sqrt.assign(new_q_sqrt) # [L, N, N]
if isinstance(self.model.inducing_variable, SharedIndependentInducingVariables):
# gpflow says inducing_variable might be a ndarray; it won't
cast(TensorType, self.model.inducing_variable.inducing_variable).Z.assign(
new_inducing_points
) # [M, D]
else:
self.model.inducing_variable.Z.assign(new_inducing_points) # [M, D]
def get_inducing_variables(
self,
) -> Tuple[Union[TensorType, list[TensorType]], TensorType, TensorType, bool]:
"""
Return the model's inducing variables.
:return: The inducing points (i.e. locations of the inducing variables), as a Tensor or a
list of Tensors (when the model has multiple inducing points); A tensor containing the
variational mean q_mu; a tensor containing the Cholesky decomposition of the variational
covariance q_sqrt; and a bool denoting if we are using whitened or
non-whitened representations.
"""
inducing_variable = self.model.inducing_variable
if isinstance(inducing_variable, SharedIndependentInducingVariables):
# gpflow says inducing_variable might be a ndarray; it won't
inducing_points = cast(TensorType, inducing_variable.inducing_variable).Z # [M, D]
elif isinstance(inducing_variable, SeparateIndependentInducingVariables):
inducing_points = [
cast(TensorType, inducing_variable).Z
for inducing_variable in inducing_variable.inducing_variables
] # list of L [M, D] tensors
else:
inducing_points = inducing_variable.Z # [M, D]
return inducing_points, self.model.q_mu, self.model.q_sqrt, self.model.whiten
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
Note that query_points_2 must be a rank 2 tensor, but query_points_1 can
have leading dimensions.
:param query_points_1: Set of query points with shape [..., A, D]
:param query_points_2: Sets of query points with shape [B, D]
:return: Covariance matrix between the sets of query points with shape [..., L, A, B]
(L being the number of latent GPs = number of output dimensions)
"""
inducing_points, _, q_sqrt, whiten = self.get_inducing_variables()
return _covariance_between_points_for_variational_models(
kernel=self.get_kernel(),
inducing_points=inducing_points,
q_sqrt=q_sqrt,
query_points_1=query_points_1,
query_points_2=query_points_2,
whiten=whiten,
)
def trajectory_sampler(self) -> TrajectorySampler[SparseVariational]:
"""
Return a trajectory sampler. For :class:`SparseVariational`, we build
trajectories using a decoupled random Fourier feature approximation.
:return: The trajectory sampler.
"""
return DecoupledTrajectorySampler(self, self._num_rff_features)
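# Illustrative sketch (assumptions: the toy data, inducing locations, kernel and likelihood
# below are for demonstration only): wrapping a GPflow SVGP in `SparseVariational`.
def _example_sparse_variational_usage() -> None:
    x = tf.random.uniform([50, 1], dtype=tf.float64)
    y = tf.cos(5.0 * x)
    z = tf.constant([[0.2], [0.5], [0.8]], dtype=tf.float64)
    svgp = SVGP(
        gpflow.kernels.Matern32(),
        gpflow.likelihoods.Gaussian(),
        inducing_variable=z,
        num_data=50,
    )
    # Uses the default BatchOptimizer(tf.optimizers.Adam(), batch_size=100).
    model = SparseVariational(svgp)
    model.optimize(Dataset(x, y))
    mean, var = model.predict(tf.constant([[0.4]], dtype=tf.float64))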
class VariationalGaussianProcess(
GPflowPredictor,
TrainableProbabilisticModel,
SupportsCovarianceBetweenPoints,
SupportsGetInducingVariables,
HasTrajectorySampler,
):
r"""
A :class:`TrainableProbabilisticModel` wrapper for a GPflow :class:`~gpflow.models.VGP`.
A Variational Gaussian Process (VGP) approximates the posterior of a GP
using the multivariate Gaussian closest to the posterior of the GP by minimizing the
KL divergence between approximated and exact posteriors. See :cite:`opper2009variational`
for details.
The VGP provides (approximate) GP modelling under non-Gaussian likelihoods, for example
when fitting a classification model over binary data.
A whitened representation and (optional) natural gradient steps are used to aid
model optimization.
Similarly to our :class:`GaussianProcessRegression`, our :class:`~gpflow.models.VGP` wrapper
directly calls the posterior objects built by these models at prediction
time. These posterior objects store the pre-computed Gram matrices, which can be reused to allow
faster subsequent predictions. However, note that these posterior objects need to be updated
whenever the underlying model is changed by calling :meth:`update_posterior_cache` (this
    happens automatically after calls to :meth:`update` or :meth:`optimize`).
"""
def __init__(
self,
model: VGP,
optimizer: Optimizer | None = None,
use_natgrads: bool = False,
natgrad_gamma: Optional[float] = None,
num_rff_features: int = 1000,
):
"""
:param model: The GPflow :class:`~gpflow.models.VGP`.
:param optimizer: The optimizer with which to train the model. Defaults to
:class:`~trieste.models.optimizer.Optimizer` with :class:`~gpflow.optimizers.Scipy`.
:param use_natgrads: If True then alternate model optimization steps with natural
gradient updates. Note that natural gradients requires
a :class:`~trieste.models.optimizer.BatchOptimizer` wrapper with
:class:`~tf.optimizers.Optimizer` optimizer.
        :param natgrad_gamma: Gamma parameter for the natural gradient optimizer.
:param num_rff_features: The number of random Fourier features used to approximate the
kernel when performing decoupled Thompson sampling through
            its :meth:`trajectory_sampler`. We use a default of 1000 as it typically performs
well for a wide range of kernels. Note that very smooth kernels (e.g. RBF) can
be well-approximated with fewer features.
:raise ValueError (or InvalidArgumentError): If ``model``'s :attr:`q_sqrt` is not rank 3
or if attempting to combine natural gradients with a :class:`~gpflow.optimizers.Scipy`
optimizer.
"""
tf.debugging.assert_rank(model.q_sqrt, 3)
if optimizer is None and not use_natgrads:
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=True)
elif optimizer is None and use_natgrads:
optimizer = BatchOptimizer(tf.optimizers.Adam(), batch_size=100, compile=True)
super().__init__(optimizer)
check_optimizer(self.optimizer)
if use_natgrads:
if not isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"""
Natgrads can only be used with a BatchOptimizer wrapper using an instance of
tf.optimizers.Optimizer, however received {self.optimizer}.
"""
)
natgrad_gamma = 0.1 if natgrad_gamma is None else natgrad_gamma
else:
if isinstance(self.optimizer.optimizer, tf.optimizers.Optimizer):
raise ValueError(
f"""
If not using natgrads an Optimizer wrapper should be used with
gpflow.optimizers.Scipy, however received {self.optimizer}.
"""
)
if natgrad_gamma is not None:
raise ValueError(
"""
natgrad_gamma is only to be specified when use_natgrads is True.
"""
)
if num_rff_features <= 0:
raise ValueError(
f"num_rff_features must be greater or equal to zero but got {num_rff_features}."
)
self._num_rff_features = num_rff_features
self._model = model
self._use_natgrads = use_natgrads
self._natgrad_gamma = natgrad_gamma
self._ensure_variable_model_data()
self.create_posterior_cache()
def _ensure_variable_model_data(self) -> None:
# GPflow stores the data in Tensors. However, since we want to be able to update the data
# without having to retrace the acquisition functions, put it in Variables instead.
# Data has to be stored in variables with dynamic shape to allow for changes
# Sometimes, for instance after serialization-deserialization, the shape can be overridden
# Thus here we ensure data is stored in dynamic shape Variables
model = self.model
if not all(isinstance(x, tf.Variable) and x.shape[0] is None for x in model.data):
variable_data = (
tf.Variable(
model.data[0],
trainable=False,
shape=[None, *model.data[0].shape[1:]],
),
tf.Variable(
model.data[1],
trainable=False,
shape=[None, *model.data[1].shape[1:]],
),
)
# reinitialise the model so that the underlying Parameters have the right shape
# and then reassign the original values
old_q_mu = model.q_mu
old_q_sqrt = model.q_sqrt
model.__init__( # type: ignore[misc]
variable_data,
model.kernel,
model.likelihood,
model.mean_function,
model.num_latent_gps,
)
model.q_mu.assign(old_q_mu)
model.q_sqrt.assign(old_q_sqrt)
def __repr__(self) -> str:
""""""
return (
f"VariationalGaussianProcess({self.model!r}, {self.optimizer!r})"
f"{self._use_natgrads!r}, {self._natgrad_gamma!r}, {self._num_rff_features!r})"
)
@property
def model(self) -> VGP:
return self._model
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
f_mean, f_var = self.predict(query_points)
return self.model.likelihood.predict_mean_and_var(query_points, f_mean, f_var)
def update(self, dataset: Dataset, *, jitter: float = DEFAULTS.JITTER) -> None:
"""
Update the model given the specified ``dataset``. Does not train the model.
:param dataset: The data with which to update the model.
:param jitter: The size of the jitter to use when stabilizing the Cholesky decomposition of
the covariance matrix.
"""
self._ensure_variable_model_data()
update_vgp_data(self.model, (dataset.query_points, dataset.observations))
self.update_posterior_cache()
def optimize(self, dataset: Dataset) -> None:
"""
:class:`VariationalGaussianProcess` has a custom `optimize` method that (optionally) permits
alternating between standard optimization steps (for kernel parameters) and natural gradient
steps for the variational parameters (`q_mu` and `q_sqrt`). See :cite:`salimbeni2018natural`
for details. Using natural gradients can dramatically speed up model fitting, especially for
ill-conditioned posteriors.
If using natural gradients, our optimizer inherits the mini-batch behavior and number
of optimization steps as the base optimizer specified when initializing
the :class:`VariationalGaussianProcess`.
"""
model = self.model
if self._use_natgrads: # optimize variational params with natgrad optimizer
natgrad_optimizer = gpflow.optimizers.NaturalGradient(gamma=self._natgrad_gamma)
base_optimizer = self.optimizer
gpflow.set_trainable(model.q_mu, False) # variational params optimized by natgrad
gpflow.set_trainable(model.q_sqrt, False)
variational_params = [(model.q_mu, model.q_sqrt)]
model_params = model.trainable_variables
loss_fn = base_optimizer.create_loss(model, dataset)
@jit(apply=self.optimizer.compile)
def perform_optimization_step() -> None: # alternate with natgrad optimizations
natgrad_optimizer.minimize(loss_fn, variational_params)
base_optimizer.optimizer.minimize(
loss_fn, model_params, **base_optimizer.minimize_args
)
for _ in range(base_optimizer.max_iter): # type: ignore
perform_optimization_step()
            gpflow.set_trainable(model.q_mu, True)  # revert variational params to trainable
gpflow.set_trainable(model.q_sqrt, True)
else:
self.optimizer.optimize(model, dataset)
self.update_posterior_cache()
def get_inducing_variables(self) -> Tuple[TensorType, TensorType, TensorType, bool]:
"""
Return the model's inducing variables. Note that GPflow's VGP model is
hard-coded to use the whitened representation.
:return: Tensors containing: the inducing points (i.e. locations of the inducing
variables); the variational mean q_mu; the Cholesky decomposition of the
variational covariance q_sqrt; and a bool denoting if we are using whitened
or non-whitened representations.
"""
inducing_points = self.model.data[0]
q_mu = self.model.q_mu
q_sqrt = self.model.q_sqrt
whiten = True # GPflow's VGP model is hard-coded to use the whitened representation
return inducing_points, q_mu, q_sqrt, whiten
def trajectory_sampler(self) -> TrajectorySampler[VariationalGaussianProcess]:
"""
Return a trajectory sampler. For :class:`VariationalGaussianProcess`, we build
trajectories using a decoupled random Fourier feature approximation.
At the moment only models with single latent GP are supported.
:return: The trajectory sampler.
:raise NotImplementedError: If we try to use the
sampler with a model that has more than one latent GP.
"""
if self.model.num_latent_gps > 1:
raise NotImplementedError(
f"""
Trajectory sampler does not currently support models with multiple latent
GPs, however received a model with {self.model.num_latent_gps} latent GPs.
"""
)
return DecoupledTrajectorySampler(self, self._num_rff_features)
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
Note that query_points_2 must be a rank 2 tensor, but query_points_1 can
have leading dimensions.
:param query_points_1: Set of query points with shape [..., A, D]
:param query_points_2: Sets of query points with shape [B, D]
:return: Covariance matrix between the sets of query points with shape [..., L, A, B]
(L being the number of latent GPs = number of output dimensions)
"""
inducing_points, _, q_sqrt, whiten = self.get_inducing_variables()
return _covariance_between_points_for_variational_models(
kernel=self.get_kernel(),
inducing_points=self.model.data[0],
q_sqrt=q_sqrt,
query_points_1=query_points_1,
query_points_2=query_points_2,
whiten=whiten,
)
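# Illustrative sketch (assumptions: the binary toy data, Matern52 kernel and Bernoulli
# likelihood below are for demonstration only): a VGP classification model wrapped in
# `VariationalGaussianProcess`, optimized with alternating natural-gradient steps.
def _example_variational_gaussian_process_usage() -> None:
    x = tf.random.uniform([30, 1], dtype=tf.float64)
    y = tf.cast(x > 0.5, tf.float64)  # binary labels, so a non-Gaussian likelihood is needed
    vgp = VGP((x, y), gpflow.kernels.Matern52(), gpflow.likelihoods.Bernoulli())
    model = VariationalGaussianProcess(vgp, use_natgrads=True)
    model.optimize(Dataset(x, y))
    # Probability-scale predictions via the likelihood.
    mean, var = model.predict_y(tf.constant([[0.4]], dtype=tf.float64))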
class MultifidelityAutoregressive(TrainableProbabilisticModel, SupportsCovarianceWithTopFidelity):
r"""
A :class:`TrainableProbabilisticModel` implementation of the model
from :cite:`Kennedy2000`. This is a multi-fidelity model that works with an
arbitrary number of fidelities. It relies on there being a linear relationship
between fidelities, and may not perform well for more complex relationships.
Precisely, it models the relationship between sequential fidelities as
.. math:: f_{i}(x) = \rho f_{i-1}(x) + \delta(x)
where :math:`\rho` is a scalar and :math:`\delta` models the residual between the fidelities.
The only base models supported in this implementation are :class:`~gpflow.models.GPR` models.
Note: Currently only supports single output problems.
"""
def __init__(
self,
fidelity_models: Sequence[GaussianProcessRegression],
):
"""
:param fidelity_models: List of
:class:`~trieste.models.gpflow.models.GaussianProcessRegression`
models, one for each fidelity. The model at index 0 will be used as the signal model
for the lowest fidelity and models at higher indices will be used as the residual
model for each higher fidelity.
"""
self._num_fidelities = len(fidelity_models)
self.lowest_fidelity_signal_model = fidelity_models[0]
# Note: The 0th index in the below is not a residual model, and should not be used.
self.fidelity_residual_models: Sequence[GaussianProcessRegression] = fidelity_models
# set this as a Parameter so that we can optimize it
rho = [
gpflow.Parameter(1.0, trainable=True, name=f"rho_{i}")
for i in range(self.num_fidelities - 1)
]
self.rho: list[gpflow.Parameter] = [
gpflow.Parameter(1.0, trainable=False, name="dummy_variable"),
*rho,
]
@property
def num_fidelities(self) -> int:
return self._num_fidelities
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Predict the marginal mean and variance at query_points.
:param query_points: Query points with shape [N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:return: mean: The mean at query_points with shape [N, P],
and var: The variance at query_points with shape [N, P]
"""
(
query_points_wo_fidelity, # [..., N, D]
query_points_fidelity_col, # [..., N, 1]
) = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
)
signal_mean, signal_var = self.lowest_fidelity_signal_model.predict(
query_points_wo_fidelity
) # [..., N, P], [..., N, P]
for fidelity, (fidelity_residual_model, rho) in enumerate(
zip(self.fidelity_residual_models, self.rho)
):
if fidelity == 0:
continue
# Find indices of query points that need predicting for
fidelity_float = tf.cast(fidelity, query_points.dtype)
mask = query_points_fidelity_col >= fidelity_float # [..., N, 1]
fidelity_indices = tf.where(mask)[..., :-1]
# Gather necessary query points and predict
fidelity_filtered_query_points = tf.gather_nd(
query_points_wo_fidelity, fidelity_indices
)
(
filtered_fidelity_residual_mean,
filtered_fidelity_residual_var,
) = fidelity_residual_model.predict(fidelity_filtered_query_points)
# Scatter predictions back into correct location
fidelity_residual_mean = tf.tensor_scatter_nd_update(
signal_mean, fidelity_indices, filtered_fidelity_residual_mean
)
fidelity_residual_var = tf.tensor_scatter_nd_update(
signal_var, fidelity_indices, filtered_fidelity_residual_var
)
# Calculate mean and var for all columns (will be incorrect for qps with fid < fidelity)
new_fidelity_signal_mean = rho * signal_mean + fidelity_residual_mean
new_fidelity_signal_var = fidelity_residual_var + (rho**2) * signal_var
# Mask out incorrect values and update mean and var for correct ones
mask = query_points_fidelity_col >= fidelity_float
signal_mean = tf.where(mask, new_fidelity_signal_mean, signal_mean)
signal_var = tf.where(mask, new_fidelity_signal_var, signal_var)
return signal_mean, signal_var
def _calculate_residual(self, dataset: Dataset, fidelity: int) -> TensorType:
r"""
Calculate the true residuals for a set of datapoints at a given fidelity.
Dataset should be made up of points that you have observations for at fidelity `fidelity`.
The residuals calculated here are the difference between the data and the prediction at the
lower fidelity multiplied by the rho value at this fidelity. This produces the training
data for the residual models.
.. math:: r_{i} = y - \rho_{i} * f_{i-1}(x)
:param dataset: Dataset of points for which to calculate the residuals. Must have
            observations at fidelity `fidelity`. Query points have shape [N, D] and observations [N, P].
:param fidelity: The fidelity for which to calculate the residuals
:return: The true residuals at given datapoints for given fidelity, shape is [N,1].
"""
fidelity_query_points = add_fidelity_column(dataset.query_points, fidelity - 1)
residuals = (
dataset.observations - self.rho[fidelity] * self.predict(fidelity_query_points)[0]
)
return residuals
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Sample `num_samples` samples from the posterior distribution at `query_points`
:param query_points: The query points at which to sample of shape [N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:param num_samples: The number of samples (S) to generate for each query point.
:return: samples from the posterior of shape [..., S, N, P]
"""
(
query_points_wo_fidelity,
query_points_fidelity_col,
) = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
)
signal_sample = self.lowest_fidelity_signal_model.sample(
query_points_wo_fidelity, num_samples
) # [S, N, P]
for fidelity in range(1, int(tf.reduce_max(query_points_fidelity_col)) + 1):
fidelity_residual_sample = self.fidelity_residual_models[fidelity].sample(
query_points_wo_fidelity, num_samples
)
new_fidelity_signal_sample = (
self.rho[fidelity] * signal_sample + fidelity_residual_sample
) # [S, N, P]
mask = query_points_fidelity_col >= fidelity # [N, P]
mask = tf.broadcast_to(mask[..., None, :, :], new_fidelity_signal_sample.shape)
signal_sample = tf.where(mask, new_fidelity_signal_sample, signal_sample)
return signal_sample
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Predict the marginal mean and variance at `query_points` including observation noise
:param query_points: Query points with shape [..., N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:return: mean: The mean at query_points with shape [N, P],
and var: The variance at query_points with shape [N, P]
"""
f_mean, f_var = self.predict(query_points)
query_points_fidelity_col = query_points[..., -1:]
# Get fidelity 0 observation noise
observation_noise = (
tf.ones_like(query_points_fidelity_col)
* self.lowest_fidelity_signal_model.get_observation_noise()
)
for fidelity in range(1, self.num_fidelities):
fidelity_observation_noise = (
self.rho[fidelity] ** 2
) * observation_noise + self.fidelity_residual_models[fidelity].get_observation_noise()
mask = query_points_fidelity_col >= fidelity
observation_noise = tf.where(mask, fidelity_observation_noise, observation_noise)
return f_mean, f_var + observation_noise
def update(self, dataset: Dataset) -> None:
"""
Update the models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` by fidelity level.
:param dataset: The query points and observations for *all* the wrapped models.
"""
check_and_extract_fidelity_query_points(
dataset.query_points, max_fidelity=self.num_fidelities - 1
)
dataset_per_fidelity = split_dataset_by_fidelity(dataset, self.num_fidelities)
for fidelity, dataset_for_fidelity in enumerate(dataset_per_fidelity):
if fidelity == 0:
self.lowest_fidelity_signal_model.update(dataset_for_fidelity)
else:
# Make query points but with final column corresponding to
# fidelity we wish to predict at
self.fidelity_residual_models[fidelity].update(
Dataset(
dataset_for_fidelity.query_points,
self._calculate_residual(dataset_for_fidelity, fidelity),
)
)
def optimize(self, dataset: Dataset) -> None:
"""
Optimize all the models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` by fidelity level.
Note that we have to code up a custom loss function when optimizing our residual
model, so that we can include the correlation parameter as an optimisation variable.
:param dataset: The query points and observations for *all* the wrapped models.
"""
check_and_extract_fidelity_query_points(
dataset.query_points, max_fidelity=self.num_fidelities - 1
)
dataset_per_fidelity = split_dataset_by_fidelity(dataset, self.num_fidelities)
for fidelity, dataset_for_fidelity in enumerate(dataset_per_fidelity):
if fidelity == 0:
self.lowest_fidelity_signal_model.optimize(dataset_for_fidelity)
else:
gpf_residual_model = self.fidelity_residual_models[fidelity].model
fidelity_observations = dataset_for_fidelity.observations
fidelity_query_points = dataset_for_fidelity.query_points
prev_fidelity_query_points = add_fidelity_column(
dataset_for_fidelity.query_points, fidelity - 1
)
predictions_from_lower_fidelity = self.predict(prev_fidelity_query_points)[0]
                def loss() -> TensorType:  # hardcoded log likelihood calculation for residual model
residuals = (
fidelity_observations - self.rho[fidelity] * predictions_from_lower_fidelity
)
K = gpf_residual_model.kernel(fidelity_query_points)
ks = add_noise_cov(K, gpf_residual_model.likelihood.variance)
L = tf.linalg.cholesky(ks)
m = gpf_residual_model.mean_function(fidelity_query_points)
log_prob = multivariate_normal(residuals, m, L)
return -1.0 * tf.reduce_sum(log_prob)
trainable_variables = (
gpf_residual_model.trainable_variables + self.rho[fidelity].variables
)
self.fidelity_residual_models[fidelity].optimizer.optimizer.minimize(
loss, trainable_variables
)
residuals = self._calculate_residual(dataset_for_fidelity, fidelity)
self.fidelity_residual_models[fidelity].update(
Dataset(fidelity_query_points, residuals)
)
def covariance_with_top_fidelity(self, query_points: TensorType) -> TensorType:
"""
Calculate the covariance of the output at `query_point` and a given fidelity with the
highest fidelity output at the same `query_point`.
:param query_points: The query points to calculate the covariance for, of shape [N, D+1],
where the final column of the final dimension contains the fidelity of the query point
:return: The covariance with the top fidelity for the `query_points`, of shape [N, P]
"""
fidelities = query_points[..., -1:] # [..., 1]
_, f_var = self.predict(query_points)
for fidelity in range(self.num_fidelities - 1, -1, -1):
mask = fidelities < fidelity
f_var = tf.where(mask, f_var, f_var * self.rho[fidelity])
return f_var
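# Illustrative sketch (assumptions: the two-fidelity toy data and Matern52 kernels below are
# for demonstration only): two GPR wrappers combined into a `MultifidelityAutoregressive`
# model, with the fidelity index carried in the final input column.
def _example_multifidelity_autoregressive_usage() -> None:
    x_lo = tf.random.uniform([20, 1], dtype=tf.float64)  # low-fidelity inputs
    x_hi = x_lo[:10]  # high-fidelity observations at a subset of those inputs
    y_lo = tf.sin(6.0 * x_lo)
    y_hi = 1.2 * tf.sin(6.0 * x_hi) + 0.1
    # Index 0 is the lowest-fidelity signal model; index 1 models the residual.
    gprs = [
        GaussianProcessRegression(GPR((x, y), kernel=gpflow.kernels.Matern52()))
        for x, y in ((x_lo, y_lo), (x_hi, y_hi))
    ]
    model = MultifidelityAutoregressive(gprs)
    data = Dataset(
        tf.concat([add_fidelity_column(x_lo, 0), add_fidelity_column(x_hi, 1)], axis=0),
        tf.concat([y_lo, y_hi], axis=0),
    )
    model.update(data)
    model.optimize(data)
    # Predict at the top fidelity by appending fidelity 1 to the query point.
    mean, var = model.predict(add_fidelity_column(tf.constant([[0.3]], dtype=tf.float64), 1))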
class MultifidelityNonlinearAutoregressive(
TrainableProbabilisticModel, SupportsCovarianceWithTopFidelity
):
r"""
A :class:`TrainableProbabilisticModel` implementation of the model from
:cite:`perdikaris2017nonlinear`. This is a multifidelity model that works with
an arbitrary number of fidelities. It is capable of modelling both linear and non-linear
relationships between fidelities. It models the relationship between sequential fidelities as
.. math:: f_{i}(x) = g_{i}(x, f_{*i-1}(x))
    where :math:`f_{*i-1}` is the posterior of the previous fidelity.
The only base models supported in this implementation are :class:`~gpflow.models.GPR` models.
Note: Currently only supports single output problems.
"""
def __init__(
self,
fidelity_models: Sequence[GaussianProcessRegression],
num_monte_carlo_samples: int = 100,
):
"""
:param fidelity_models: List of
:class:`~trieste.models.gpflow.models.GaussianProcessRegression`
models, one for each fidelity. The model at index 0 should take
inputs with the same number of dimensions as `x` and can use any kernel,
            whilst the later models should take an extra input dimension, and use the kernel
described in :cite:`perdikaris2017nonlinear`.
:param num_monte_carlo_samples: The number of Monte Carlo samples to use for the
sections of prediction and sampling that require the use of Monte Carlo methods.
"""
self._num_fidelities = len(fidelity_models)
self.fidelity_models = fidelity_models
self.monte_carlo_random_numbers = tf.random.normal(
[num_monte_carlo_samples, 1], dtype=tf.float64
)
@property
def num_fidelities(self) -> int:
return self._num_fidelities
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
Return ``num_samples`` samples from the independent marginal distributions at
``query_points``.
:param query_points: The points at which to sample, with shape [..., N, D].
:param num_samples: The number of samples at each point.
:return: The samples, with shape [..., S, N], where S is the number of samples.
"""
(
query_points_wo_fidelity,
query_points_fidelity_col,
) = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
) # [..., N, D], [..., N, 1]
signal_sample = self.fidelity_models[0].sample(
query_points_wo_fidelity, num_samples
) # [..., S, N, 1]
# Repeat query_points to get same shape as signal sample
query_points_fidelity_col = tf.broadcast_to(
query_points_fidelity_col[..., None, :, :], signal_sample.shape
) # [..., S, N, 1]
for fidelity in range(1, self.num_fidelities):
qp_repeated = tf.broadcast_to(
query_points_wo_fidelity[..., None, :, :],
signal_sample.shape[:-1] + query_points_wo_fidelity.shape[-1],
) # [..., S, N, D]
qp_augmented = tf.concat([qp_repeated, signal_sample], axis=-1) # [..., S, N, D + 1]
new_signal_sample = self.fidelity_models[fidelity].sample(
qp_augmented, 1
) # [..., S, 1, N, 1]
# Remove second dimension caused by getting a single sample
new_signal_sample = new_signal_sample[..., :, 0, :, :]
mask = query_points_fidelity_col >= fidelity
signal_sample = tf.where(mask, new_signal_sample, signal_sample)
return signal_sample
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Predict the marginal mean and variance at query_points.
:param query_points: Query points with shape [..., N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:return: mean: The mean at query_points with shape [..., N, P],
and var: The variance at query_points with shape [..., N, P]
"""
check_and_extract_fidelity_query_points(query_points, max_fidelity=self.num_fidelities - 1)
sample_mean, sample_var = self._sample_mean_and_var_at_fidelities(
query_points
) # [..., N, 1, S], [..., N, 1, S]
variance = tf.reduce_mean(sample_var, axis=-1) + tf.math.reduce_variance(
sample_mean, axis=-1
)
mean = tf.reduce_mean(sample_mean, axis=-1)
return mean, variance
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
Predict the marginal mean and variance at `query_points` including observation noise
:param query_points: Query points with shape [..., N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:return: mean: The mean at query_points with shape [N, P],
and var: The variance at query_points with shape [N, P]
"""
_, query_points_fidelity_col = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
)
f_mean, f_var = self.predict(query_points)
# Get fidelity 0 observation noise
observation_noise = (
tf.ones_like(query_points_fidelity_col)
* self.fidelity_models[0].get_observation_noise()
)
for fidelity in range(1, self.num_fidelities):
fidelity_observation_noise = self.fidelity_models[fidelity].get_observation_noise()
mask = query_points_fidelity_col >= fidelity
observation_noise = tf.where(mask, fidelity_observation_noise, observation_noise)
return f_mean, f_var + observation_noise
def _sample_mean_and_var_at_fidelities(
self, query_points: TensorType
) -> tuple[TensorType, TensorType]:
"""
Draw `num_monte_carlo_samples` samples of mean and variance from the model at the fidelities
passed in the final column of the query points.
:param query_points: Query points with shape [..., N, D+1], where the
final column of the final dimension contains the fidelity of the query point
:return: sample_mean: Samples of the mean at the query points with shape [..., N, 1, S]
and sample_var: Samples of the variance at the query points with shape [..., N, 1, S]
"""
(
query_points_wo_fidelity,
query_points_fidelity_col,
) = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
) # [..., N, D], [..., N, 1]
sample_mean, sample_var = self.fidelity_models[0].predict(
query_points_wo_fidelity
) # [..., N, 1], [..., N, 1]
# Create new dimension to store samples for each query point
        # Repeat the initial sample mean and variance S times and add a dimension in the
# middle (so that sample mean and query points can be concatenated sensibly)
sample_mean = tf.broadcast_to(
sample_mean, sample_mean.shape[:-1] + self.monte_carlo_random_numbers.shape[0]
)[
..., :, None, :
] # [..., N, 1, S]
sample_var = tf.broadcast_to(
sample_var, sample_var.shape[:-1] + self.monte_carlo_random_numbers.shape[0]
)[
..., :, None, :
] # [..., N, 1, S]
# Repeat fidelity points for each sample to match shapes for masking
query_points_fidelity_col = tf.broadcast_to(
query_points_fidelity_col,
query_points_fidelity_col.shape[:-1] + self.monte_carlo_random_numbers.shape[0],
)[
..., :, None, :
] # [..., N, 1, S]
# Predict for all fidelities but stop updating once we have
# reached desired fidelity for each query point
for fidelity in range(1, self.num_fidelities):
# sample_mean [..., N, 1, S]
# sample_var [..., N, 1, S]
(
next_fidelity_sample_mean,
next_fidelity_sample_var,
) = self._propagate_samples_through_level(
query_points_wo_fidelity, fidelity, sample_mean, sample_var
)
mask = query_points_fidelity_col >= fidelity # [..., N, 1, S]
sample_mean = tf.where(mask, next_fidelity_sample_mean, sample_mean) # [..., N, 1, S]
sample_var = tf.where(mask, next_fidelity_sample_var, sample_var) # [..., N, 1, S]
return sample_mean, sample_var
def _propagate_samples_through_level(
self,
query_point: TensorType,
fidelity: int,
sample_mean: TensorType,
sample_var: TensorType,
) -> tuple[TensorType, TensorType]:
"""
Propagate samples through a given fidelity.
This takes a set of query points without a fidelity column and calculates samples
at the given fidelity, using the sample means and variances from the previous fidelity.
:param query_points: The query points to sample at, with no fidelity column,
            with shape [..., N, D]
:param fidelity: The fidelity to propagate the samples through
:param sample_mean: Samples of the posterior mean at the previous fidelity,
with shape [..., N, 1, S]
:param sample_var: Samples of the posterior variance at the previous fidelity,
with shape [..., N, 1, S]
:return: sample_mean: Samples of the posterior mean at the given fidelity,
of shape [..., N, 1, S]
and sample_var: Samples of the posterior variance at the given fidelity,
of shape [..., N, 1, S]
"""
# Repeat random numbers for each query point and add middle dimension
        # for concatenation with query points. This also means that it has the same
        # shape as sample_var and sample_mean, so there's no broadcasting required.
        # Note: at the moment we use the same Monte Carlo values for every value in the batch dim
reshaped_random_numbers = tf.broadcast_to(
tf.transpose(self.monte_carlo_random_numbers)[..., None, :],
sample_mean.shape,
) # [..., N, 1, S]
samples = reshaped_random_numbers * tf.sqrt(sample_var) + sample_mean # [..., N, 1, S]
# Add an extra unit dim to query_point and repeat for each of the samples
qp_repeated = tf.broadcast_to(
query_point[..., :, :, None], # [..., N, D, 1]
query_point.shape + samples.shape[-1],
) # [..., N, D, S]
qp_augmented = tf.concat([qp_repeated, samples], axis=-2) # [..., N, D+1, S]
# Flatten sample dimension to n_qp dimension to pass through predictor
# Switch dims to make reshape match up correct dimensions for query points
        # Use matrix_transpose to switch the last two dimensions
qp_augmented = tf.linalg.matrix_transpose(qp_augmented) # [..., N, S, D+1]
flat_qp_augmented, unflatten = flatten_leading_dims(qp_augmented) # [...*N*S, D+1]
# Dim of flat qp augmented is now [n_qps*n_samples, qp_dims], as the model expects
sample_mean, sample_var = self.fidelity_models[fidelity].predict(
flat_qp_augmented
) # [...*N*S, 1], [...*N*S, 1]
# Reshape back to have samples as final dimension
sample_mean = unflatten(sample_mean) # [..., N, S, 1]
sample_var = unflatten(sample_var) # [..., N, S, 1]
sample_mean = tf.linalg.matrix_transpose(sample_mean) # [..., N, 1, S]
sample_var = tf.linalg.matrix_transpose(sample_var) # [..., N, 1, S]
return sample_mean, sample_var
def update(self, dataset: Dataset) -> None:
"""
Update the models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` by fidelity level.
:param dataset: The query points and observations for *all* the wrapped models.
"""
check_and_extract_fidelity_query_points(
dataset.query_points, max_fidelity=self.num_fidelities - 1
)
dataset_per_fidelity = split_dataset_by_fidelity(
dataset, num_fidelities=self.num_fidelities
)
for fidelity, dataset_for_fidelity in enumerate(dataset_per_fidelity):
if fidelity == 0:
self.fidelity_models[0].update(dataset_for_fidelity)
else:
cur_fidelity_model = self.fidelity_models[fidelity]
new_final_query_point_col, _ = self.predict(
add_fidelity_column(dataset_for_fidelity.query_points, fidelity - 1)
)
new_query_points = tf.concat(
[dataset_for_fidelity.query_points, new_final_query_point_col], axis=1
)
cur_fidelity_model.update(
Dataset(new_query_points, dataset_for_fidelity.observations)
)
def optimize(self, dataset: Dataset) -> None:
"""
Optimize all the models on their corresponding data. The data for each model is
extracted by splitting the observations in ``dataset`` by fidelity level.
:param dataset: The query points and observations for *all* the wrapped models.
"""
check_and_extract_fidelity_query_points(
dataset.query_points, max_fidelity=self.num_fidelities - 1
)
dataset_per_fidelity = split_dataset_by_fidelity(dataset, self.num_fidelities)
for fidelity, dataset_for_fidelity in enumerate(dataset_per_fidelity):
if fidelity == 0:
self.fidelity_models[0].optimize(dataset_for_fidelity)
else:
fidelity_observations = dataset_for_fidelity.observations
fidelity_query_points = dataset_for_fidelity.query_points
prev_fidelity_query_points = add_fidelity_column(
fidelity_query_points, fidelity - 1
)
means_from_lower_fidelity = self.predict(prev_fidelity_query_points)[0]
augmented_qps = tf.concat(
[fidelity_query_points, means_from_lower_fidelity], axis=1
)
self.fidelity_models[fidelity].optimize(
Dataset(augmented_qps, fidelity_observations)
)
self.fidelity_models[fidelity].update(Dataset(augmented_qps, fidelity_observations))
def covariance_with_top_fidelity(self, query_points: TensorType) -> TensorType:
"""
Calculate the covariance of the output at `query_point` and a given fidelity with the
highest fidelity output at the same `query_point`.
:param query_points: The query points to calculate the covariance for, of shape [N, D+1],
where the final column of the final dimension contains the fidelity of the query point
:return: The covariance with the top fidelity for the `query_points`, of shape [N, P]
"""
num_samples = 100
(
query_points_wo_fidelity,
query_points_fidelity_col,
) = check_and_extract_fidelity_query_points(
query_points, max_fidelity=self.num_fidelities - 1
) # [N, D], [N, 1]
# Signal sample stops updating once fidelity is reached for that query point
signal_sample = self.fidelity_models[0].model.predict_f_samples(
query_points_wo_fidelity, num_samples, full_cov=False
)
# Repeat query_points to get same shape as signal sample
query_points_fidelity_col = tf.broadcast_to(
query_points_fidelity_col[None, :, :], signal_sample.shape
)
# Max fidelity sample keeps updating to the max fidelity
max_fidelity_sample = tf.identity(signal_sample)
for fidelity in range(1, self.num_fidelities):
qp_repeated = tf.broadcast_to(
query_points_wo_fidelity[None, :, :],
tf.TensorShape(num_samples) + query_points_wo_fidelity.shape,
) # [S, N, D]
# We use max fidelity sample here, which is okay because anything
# with a lower fidelity will not be updated
qp_augmented = tf.concat([qp_repeated, max_fidelity_sample], axis=-1) # [S, N, D + 1]
new_signal_sample = self.fidelity_models[fidelity].model.predict_f_samples(
qp_augmented, 1, full_cov=False
)
# Remove second dimension caused by getting a single sample
new_signal_sample = new_signal_sample[:, 0, :, :]
mask = query_points_fidelity_col >= fidelity
signal_sample = tf.where(mask, new_signal_sample, signal_sample) # [S, N, 1]
max_fidelity_sample = new_signal_sample # [S, N , 1]
cov = tfp.stats.covariance(signal_sample, max_fidelity_sample)[:, :, 0]
return cov
| 90,637 | 43.825915 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This package contains the primary interface for Gaussian process models. It also contains a
number of :class:`TrainableProbabilisticModel` wrappers for GPflow-based models.
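For example, a typical workflow (an illustrative sketch; ``dataset`` and ``search_space`` are
assumed to be an observed :class:`~trieste.data.Dataset` and a search space defined elsewhere)
builds a GPflow model with :func:`build_gpr` and wraps it for use with Trieste:
>>> gpflow_model = build_gpr(dataset, search_space) # doctest: +SKIP
>>> model = GaussianProcessRegression(gpflow_model) # doctest: +SKIP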
"""
from . import optimizer
from .builders import (
build_gpr,
build_multifidelity_autoregressive_models,
build_multifidelity_nonlinear_autoregressive_models,
build_sgpr,
build_svgp,
build_vgp_classifier,
)
from .inducing_point_selectors import (
ConditionalImprovementReduction,
ConditionalVarianceReduction,
InducingPointSelector,
KMeansInducingPointSelector,
RandomSubSampleInducingPointSelector,
UniformInducingPointSelector,
)
from .interface import GPflowPredictor
from .models import (
GaussianProcessRegression,
MultifidelityAutoregressive,
MultifidelityNonlinearAutoregressive,
SparseGaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
)
from .sampler import (
BatchReparametrizationSampler,
DecoupledTrajectorySampler,
IndependentReparametrizationSampler,
RandomFourierFeatureTrajectorySampler,
feature_decomposition_trajectory,
)
from .utils import (
assert_data_is_compatible,
check_optimizer,
randomize_hyperparameters,
squeeze_hyperparameters,
)
| 1,858 | 30.508475 | 91 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Optional
import gpflow
import tensorflow as tf
from gpflow.models import GPModel
from gpflow.posteriors import BasePosterior, PrecomputeCacheType
from typing_extensions import Protocol
from ... import logging
from ...data import Dataset
from ...types import TensorType
from ..interfaces import (
HasReparamSampler,
ReparametrizationSampler,
SupportsGetKernel,
SupportsGetObservationNoise,
SupportsPredictJoint,
)
from ..optimizer import Optimizer
from ..utils import (
write_summary_data_based_metrics,
write_summary_kernel_parameters,
write_summary_likelihood_parameters,
)
from .sampler import BatchReparametrizationSampler
class GPflowPredictor(
SupportsPredictJoint, SupportsGetKernel, SupportsGetObservationNoise, HasReparamSampler, ABC
):
"""A trainable wrapper for a GPflow Gaussian process model."""
def __init__(self, optimizer: Optimizer | None = None):
"""
:param optimizer: The optimizer with which to train the model. Defaults to
:class:`~trieste.models.optimizer.Optimizer` with :class:`~gpflow.optimizers.Scipy`.
"""
if optimizer is None:
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=True)
self._optimizer = optimizer
self._posterior: Optional[BasePosterior] = None
@property
def optimizer(self) -> Optimizer:
"""The optimizer with which to train the model."""
return self._optimizer
def create_posterior_cache(self) -> None:
"""
Create a posterior cache for fast sequential predictions. Note that this must happen
at initialisation and *after* we ensure the model data is variable. Furthermore,
the cache must be updated whenever the underlying model is changed.
"""
self._posterior = self.model.posterior(PrecomputeCacheType.VARIABLE)
def _ensure_variable_model_data(self) -> None:
"""Ensure GPflow data, which is normally stored in Tensors, is instead stored in
dynamically shaped Variables. Override this as required."""
def __setstate__(self, state: dict[str, Any]) -> None:
# when unpickling we may need to regenerate the posterior cache
self.__dict__.update(state)
self._ensure_variable_model_data()
if self._posterior is not None:
self.create_posterior_cache()
def update_posterior_cache(self) -> None:
"""Update the posterior cache. This needs to be called whenever the underlying model
is changed."""
if self._posterior is not None:
self._posterior.update_cache()
@property
@abstractmethod
def model(self) -> GPModel:
"""The underlying GPflow model."""
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
mean, cov = (self._posterior or self.model).predict_f(query_points)
# posterior predict can return negative variance values [cf GPFlow issue #1813]
if self._posterior is not None:
cov = tf.clip_by_value(cov, 1e-12, cov.dtype.max)
return mean, cov
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
mean, cov = (self._posterior or self.model).predict_f(query_points, full_cov=True)
# posterior predict can return negative variance values [cf GPFlow issue #1813]
if self._posterior is not None:
cov = tf.linalg.set_diag(
cov, tf.clip_by_value(tf.linalg.diag_part(cov), 1e-12, cov.dtype.max)
)
return mean, cov
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
return self.model.predict_f_samples(query_points, num_samples)
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
return self.model.predict_y(query_points)
def get_kernel(self) -> gpflow.kernels.Kernel:
"""
Return the kernel of the model.
:return: The kernel.
"""
return self.model.kernel
def get_mean_function(self) -> gpflow.mean_functions.MeanFunction:
"""
Return the mean function of the model.
:return: The mean function.
"""
return self.model.mean_function
def get_observation_noise(self) -> TensorType:
"""
Return the variance of observation noise for homoscedastic likelihoods.
:return: The observation noise.
:raise NotImplementedError: If the model does not have a homoscedastic likelihood.
"""
try:
noise_variance = self.model.likelihood.variance
except AttributeError:
raise NotImplementedError(f"Model {self!r} does not have scalar observation noise")
return noise_variance
def optimize(self, dataset: Dataset) -> None:
"""
Optimize the model with the specified `dataset`.
:param dataset: The data with which to optimize the `model`.
"""
self.optimizer.optimize(self.model, dataset)
def log(self, dataset: Optional[Dataset] = None) -> None:
"""
Log model training information at a given optimization step to the Tensorboard.
We log kernel and likelihood parameters. We also log several training data based metrics,
        such as the root mean square error between predictions and observations, among others.
:param dataset: Optional data that can be used to log additional data-based model summaries.
"""
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
write_summary_kernel_parameters(self.get_kernel())
write_summary_likelihood_parameters(self.model.likelihood)
if dataset:
write_summary_data_based_metrics(
dataset=dataset, model=self, prefix="training_"
)
def reparam_sampler(self, num_samples: int) -> ReparametrizationSampler[GPflowPredictor]:
"""
Return a reparametrization sampler providing `num_samples` samples.
:return: The reparametrization sampler.
"""
return BatchReparametrizationSampler(num_samples, self)
class SupportsCovarianceBetweenPoints(SupportsPredictJoint, Protocol):
"""A probabilistic model that supports covariance_between_points."""
@abstractmethod
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
r"""
Compute the posterior covariance between sets of query points.
.. math:: \Sigma_{12} = K_{12} - K_{x1}(K_{xx} + \sigma^2 I)^{-1}K_{x2}
Note that query_points_2 must be a rank 2 tensor, but query_points_1 can
have leading dimensions.
:param query_points_1: Set of query points with shape [..., N, D]
:param query_points_2: Sets of query points with shape [M, D]
:return: Covariance matrix between the sets of query points with shape [..., L, N, M]
(L being the number of latent GPs = number of output dimensions)
"""
raise NotImplementedError
| 7,905 | 37.754902 | 100 | py |
trieste-develop | trieste-develop/trieste/models/gpflow/optimizer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module registers the GPflow specific loss functions.
"""
from __future__ import annotations
from typing import Any, Callable, Optional
import tensorflow as tf
from gpflow.models import ExternalDataTrainingLossMixin, InternalDataTrainingLossMixin
from tensorflow.python.data.ops.iterator_ops import OwnedIterator as DatasetOwnedIterator
from ..optimizer import LossClosure, TrainingData, create_loss_function
@create_loss_function.register
def _create_loss_function_internal(
model: InternalDataTrainingLossMixin,
data: TrainingData,
compile: bool = False,
) -> LossClosure:
return model.training_loss_closure(compile=compile)
class _TrainingLossClosureBuilder:
# A cached, compiled training loss closure builder to avoid having to generate a new
# closure each time. Stored in a separate class, so we can avoid pickling it.
def __init__(self) -> None:
self.closure_builder: Optional[Callable[[TrainingData], LossClosure]] = None
def __getstate__(self) -> dict[str, Any]:
return {}
def __setstate__(self, state: dict[str, Any]) -> None:
self.closure_builder = None
@create_loss_function.register
def _create_loss_function_external(
model: ExternalDataTrainingLossMixin,
data: TrainingData,
compile: bool = False,
) -> LossClosure:
if not compile:
return model.training_loss_closure(data, compile=False)
# when compiling, we want to avoid generating a new closure every optimization step
# instead we compile and save a single function that can handle the dynamic data shape
X, Y = next(data) if isinstance(data, DatasetOwnedIterator) else data
if not hasattr(model, "_training_loss_closure_builder"):
setattr(model, "_training_loss_closure_builder", _TrainingLossClosureBuilder())
builder: _TrainingLossClosureBuilder = getattr(model, "_training_loss_closure_builder")
if builder.closure_builder is None:
shape_spec = (
data.element_spec
if isinstance(data, DatasetOwnedIterator)
else (
tf.TensorSpec([None, *X.shape[1:]], dtype=X.dtype),
tf.TensorSpec([None, *Y.shape[1:]], dtype=Y.dtype),
)
)
@tf.function(input_signature=shape_spec)
def training_loss_builder(x: tf.Tensor, y: tf.Tensor) -> tf.Tensor:
return model.training_loss((x, y))
def closure_builder(data: TrainingData) -> LossClosure:
x, y = next(data) if isinstance(data, DatasetOwnedIterator) else data
def compiled_closure() -> tf.Tensor:
return training_loss_builder(x, y)
return compiled_closure
builder.closure_builder = closure_builder
return builder.closure_builder((X, Y))
| 3,374 | 34.526316 | 91 | py |
trieste-develop | trieste-develop/trieste/utils/misc.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import ABC, abstractmethod
from time import perf_counter
from types import TracebackType
from typing import Any, Callable, Generic, Mapping, NoReturn, Optional, Tuple, Type, TypeVar
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
from typing_extensions import Final, final
from ..types import TensorType
C = TypeVar("C", bound=Callable[..., object])
""" A type variable bound to `typing.Callable`. """
def jit(apply: bool = True, **optimize_kwargs: Any) -> Callable[[C], C]:
"""
A decorator that conditionally wraps a function with `tf.function`.
:param apply: If `True`, the decorator is equivalent to `tf.function`. If `False`, the decorator
does nothing.
:param optimize_kwargs: Additional arguments to `tf.function`.
:return: The decorator.
"""
def decorator(func: C) -> C:
return tf.function(func, **optimize_kwargs) if apply else func
return decorator
def shapes_equal(this: TensorType, that: TensorType) -> TensorType:
"""
Return a scalar tensor containing: `True` if ``this`` and ``that`` have equal runtime shapes,
else `False`.
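    For example (an illustrative check with matching and mismatching shapes):
    >>> bool(shapes_equal(tf.ones([2, 3]), tf.zeros([2, 3])))
    True
    >>> bool(shapes_equal(tf.ones([2, 3]), tf.ones([3, 2])))
    False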
"""
return tf.rank(this) == tf.rank(that) and tf.reduce_all(tf.shape(this) == tf.shape(that))
def to_numpy(t: TensorType) -> "np.ndarray[Any, Any]":
"""
:param t: An array-like object.
:return: ``t`` as a NumPy array.
"""
if isinstance(t, tf.Tensor):
return t.numpy()
return t
ResultType = TypeVar("ResultType", covariant=True)
""" An unbounded covariant type variable. """
class Result(Generic[ResultType], ABC):
"""
Represents the result of an operation that can fail with an exception. It contains either the
operation return value (in an :class:`Ok`), or the exception raised (in an :class:`Err`).
To check whether instances such as
>>> res = Ok(1)
>>> other_res = Err(ValueError("whoops"))
contain a value, use :attr:`is_ok` (or :attr:`is_err`)
>>> res.is_ok
True
>>> other_res.is_ok
False
We can access the value if it :attr:`is_ok` using :meth:`unwrap`.
>>> res.unwrap()
1
Trying to access the value of a failed :class:`Result`, or :class:`Err`, will raise the wrapped
exception
>>> other_res.unwrap()
Traceback (most recent call last):
...
ValueError: whoops
**Note:** This class is not intended to be subclassed other than by :class:`Ok` and
:class:`Err`.
"""
@property
@abstractmethod
def is_ok(self) -> bool:
"""`True` if this :class:`Result` contains a value, else `False`."""
@property
def is_err(self) -> bool:
"""
`True` if this :class:`Result` contains an error, else `False`. The opposite of
:attr:`is_ok`.
"""
return not self.is_ok
@abstractmethod
def unwrap(self) -> ResultType:
"""
:return: The contained value, if it exists.
:raise Exception: If there is no contained value.
"""
@final
class Ok(Result[ResultType]):
"""Wraps the result of a successful evaluation."""
def __init__(self, value: ResultType):
"""
:param value: The result of a successful evaluation.
"""
self._value = value
def __repr__(self) -> str:
""""""
return f"Ok({self._value!r})"
@property
def is_ok(self) -> bool:
"""`True` always."""
return True
def unwrap(self) -> ResultType:
"""
:return: The wrapped value.
"""
return self._value
@final
class Err(Result[NoReturn]):
"""Wraps the exception that occurred during a failed evaluation."""
def __init__(self, exc: Exception):
"""
:param exc: The exception that occurred.
"""
self._exc = exc
def __repr__(self) -> str:
""""""
return f"Err({self._exc!r})"
@property
def is_ok(self) -> bool:
"""`False` always."""
return False
def unwrap(self) -> NoReturn:
"""
:raise Exception: Always. Raises the wrapped exception.
"""
raise self._exc
class DEFAULTS:
"""Default constants used in Trieste."""
JITTER: Final[float] = 1e-6
"""
The default jitter, typically used to stabilise computations near singular points, such as in
Cholesky decomposition.
"""
K = TypeVar("K")
""" An unbound type variable. """
U = TypeVar("U")
""" An unbound type variable. """
V = TypeVar("V")
""" An unbound type variable. """
def map_values(f: Callable[[U], V], mapping: Mapping[K, U]) -> Mapping[K, V]:
"""
Apply ``f`` to each value in ``mapping`` and return the result. If ``f`` does not modify its
argument, :func:`map_values` does not modify ``mapping``. For example:
>>> import math
>>> squares = {'a': 1, 'b': 4, 'c': 9}
>>> map_values(math.sqrt, squares)['b']
2.0
>>> squares
{'a': 1, 'b': 4, 'c': 9}
:param f: The function to apply to the values in ``mapping``.
:param mapping: A mapping.
:return: A new mapping, whose keys are the same as ``mapping``, and values are the result of
applying ``f`` to each value in ``mapping``.
"""
return {k: f(u) for k, u in mapping.items()}
class Timer:
"""
Functionality for timing chunks of code. For example:
>>> from time import sleep
>>> with Timer() as timer: sleep(2.0)
>>> timer.time # doctest: +SKIP
2.0
"""
def __enter__(self) -> Timer:
self.start = perf_counter()
return self
def __exit__(
self,
type: Optional[Type[BaseException]],
value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
self.end = perf_counter()
self.time = self.end - self.start
def flatten_leading_dims(
x: TensorType, output_dims: int = 2
) -> Tuple[TensorType, Callable[[TensorType], TensorType]]:
"""
Flattens the leading dimensions of `x` (all but the last `output_dims` dimensions), and returns
a function that can be used to restore them (typically after first manipulating the
flattened tensor).
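    For example (an illustrative sketch), a rank three tensor can be flattened to rank two and
    the returned callable used to restore the leading batch shape:
    >>> flat, unflatten = flatten_leading_dims(tf.ones([2, 3, 4]))
    >>> flat.shape
    TensorShape([6, 4])
    >>> unflatten(flat).shape
    TensorShape([2, 3, 4])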
"""
tf.debugging.assert_positive(output_dims, message="output_dims must be positive")
tf.debugging.assert_less_equal(
        output_dims, tf.rank(x), message="output_dims must be no greater than tensor rank"
)
x_batched_shape = tf.shape(x)
batch_shape = x_batched_shape[: -output_dims + 1] if output_dims > 1 else x_batched_shape
input_shape = x_batched_shape[-output_dims + 1 :] if output_dims > 1 else []
x_flat_shape = tf.concat([[-1], input_shape], axis=0)
def unflatten(y: TensorType) -> TensorType:
y_flat_shape = tf.shape(y)
output_shape = y_flat_shape[1:]
y_batched_shape = tf.concat([batch_shape, output_shape], axis=0)
y_batched = tf.reshape(y, y_batched_shape)
return y_batched
return tf.reshape(x, x_flat_shape), unflatten
def get_variables(object: Any) -> tuple[tf.Variable, ...]:
"""
Return the sequence of variables in an object.
This is essentially a reimplementation of the `variables` property of tf.Module
but doesn't require that we, or any of our substructures, inherit from that.
:return: A sequence of variables of the object (sorted by attribute
name) followed by variables from all submodules recursively (breadth
first).
"""
def _is_variable(obj: Any) -> bool:
return isinstance(obj, tf.Variable)
return tuple(_flatten(object, predicate=_is_variable, expand_composites=True))
_TF_MODULE_IGNORED_PROPERTIES = frozenset(
("_self_unconditional_checkpoint_dependencies", "_self_unconditional_dependency_names")
)
def _flatten( # type: ignore[no-untyped-def]
model,
recursive=True,
predicate=None,
attribute_traversal_key=None,
with_path=False,
expand_composites=False,
):
"""
Flattened attribute values in sorted order by attribute name.
This is taken verbatim from tensorflow core but uses a modified _flatten_module.
"""
if predicate is None:
predicate = lambda _: True # noqa: E731
return _flatten_module(
model,
recursive=recursive,
predicate=predicate,
attributes_to_ignore=_TF_MODULE_IGNORED_PROPERTIES,
attribute_traversal_key=attribute_traversal_key,
with_path=with_path,
expand_composites=expand_composites,
)
def _flatten_module( # type: ignore[no-untyped-def]
module,
recursive,
predicate,
attribute_traversal_key,
attributes_to_ignore,
with_path,
expand_composites,
module_path=(),
seen=None,
):
"""
Implementation of `flatten`.
This is a reimplementation of the equivalent function in tf.Module so
that we can extract the list of variables from a Trieste model wrapper
without the need to inherit from it.
"""
if seen is None:
seen = {id(module)}
# [CHANGED] Differently from the original version, here we catch an exception
# as some of the components of the wrapper do not implement __dict__
try:
module_dict = vars(module)
except TypeError:
module_dict = {}
submodules = []
for key in sorted(module_dict, key=attribute_traversal_key):
if key in attributes_to_ignore:
continue
prop = module_dict[key]
try:
leaves = nest.flatten_with_tuple_paths(prop, expand_composites=expand_composites)
except Exception: # pylint: disable=broad-except
leaves = []
for leaf_path, leaf in leaves:
leaf_path = (key,) + leaf_path
if not with_path:
leaf_id = id(leaf)
if leaf_id in seen:
continue
seen.add(leaf_id)
if predicate(leaf):
if with_path:
yield module_path + leaf_path, leaf
else:
yield leaf
# [CHANGED] Differently from the original, here we skip checking whether the leaf
# is a module, since the trieste models do NOT inherit from tf.Module
if recursive: # and _is_module(leaf):
# Walk direct properties first then recurse.
submodules.append((module_path + leaf_path, leaf))
for submodule_path, submodule in submodules:
subvalues = _flatten_module(
submodule,
recursive=recursive,
predicate=predicate,
attribute_traversal_key=attribute_traversal_key,
attributes_to_ignore=_TF_MODULE_IGNORED_PROPERTIES,
with_path=with_path,
expand_composites=expand_composites,
module_path=submodule_path,
seen=seen,
)
for subvalue in subvalues:
# Predicate is already tested for these values.
yield subvalue
| 11,630 | 28.371212 | 100 | py |
trieste-develop | trieste-develop/trieste/utils/__init__.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This package contains library utilities. """
from .misc import (
DEFAULTS,
Err,
K,
Ok,
Result,
ResultType,
Timer,
U,
V,
flatten_leading_dims,
jit,
map_values,
shapes_equal,
to_numpy,
)
| 833 | 25.903226 | 74 | py |
trieste-develop | trieste-develop/trieste/acquisition/sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module is the home of the sampling functionality required by Trieste's
acquisition functions.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Generic
import tensorflow as tf
import tensorflow_probability as tfp
from scipy.optimize import bisect
from ..models import ProbabilisticModel
from ..models.interfaces import HasTrajectorySampler, ProbabilisticModelType
from ..types import TensorType
from .utils import select_nth_output
class ThompsonSampler(ABC, Generic[ProbabilisticModelType]):
r"""
A :class:`ThompsonSampler` samples either the minimum values or minimisers of a function
modeled by an underlying :class:`ProbabilisticModel` across a discrete set of points.
"""
def __init__(self, sample_min_value: bool = False):
"""
        :param sample_min_value: If True then sample from the minimum value of the function,
else sample the function's minimiser.
"""
self._sample_min_value = sample_min_value
@property
def sample_min_value(self) -> bool:
return self._sample_min_value
def __repr__(self) -> str:
""""""
return f"""{self.__class__.__name__}(
{self._sample_min_value})
"""
@abstractmethod
def sample(
self,
model: ProbabilisticModelType,
sample_size: int,
at: TensorType,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
) -> TensorType:
"""
:param model: The model to sample from.
:param sample_size: The desired number of samples.
:param at: Input points that define the sampler.
:param select_output: A method that returns the desired output from the model sampler, with
shape `[S, N]` where `S` is the number of samples and `N` is the number of locations.
Defaults to the :func:~`trieste.acquisition.utils.select_nth_output` function with
output dimension 0.
:return: Samples.
"""
class ExactThompsonSampler(ThompsonSampler[ProbabilisticModel]):
r"""
This sampler provides exact Thompson samples of the objective function's
minimiser :math:`x^*` over a discrete set of input locations.
    Although exact Thompson sampling is costly (incurring an :math:`O(N^3)` complexity to
sample over a set of `N` locations), this method can be used for any probabilistic model
with a sampling method.
"""
def sample(
self,
model: ProbabilisticModel,
sample_size: int,
at: TensorType,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
) -> TensorType:
"""
Return exact samples from either the objective function's minimiser or its minimal value
over the candidate set `at`. Note that minimiser ties aren't broken randomly.
:param model: The model to sample from.
:param sample_size: The desired number of samples.
:param at: Where to sample the predictive distribution, with shape `[N, D]`, for points
of dimension `D`.
:param select_output: A method that returns the desired output from the model sampler, with
shape `[S, N]` where `S` is the number of samples and `N` is the number of locations.
Defaults to the :func:~`trieste.acquisition.utils.select_nth_output` function with
output dimension 0.
:return: The samples, of shape `[S, D]` (where `S` is the `sample_size`) if sampling
            the function's minimiser or shape `[S, 1]` if sampling the function's minimal value.
:raise ValueError: If ``at`` has an invalid shape or if ``sample_size`` is not positive.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_shapes([(at, ["N", None])])
samples = select_output(model.sample(at, sample_size))[..., None] # [S, N, 1]
if self._sample_min_value:
thompson_samples = tf.reduce_min(samples, axis=1) # [S, 1]
else:
samples_2d = tf.squeeze(samples, -1) # [S, N]
indices = tf.math.argmin(samples_2d, axis=1)
thompson_samples = tf.gather(at, indices) # [S, D]
return thompson_samples
class GumbelSampler(ThompsonSampler[ProbabilisticModel]):
r"""
This sampler follows :cite:`wang2017max` and yields approximate samples of the objective
minimum value :math:`y^*` via the empirical cdf :math:`\operatorname{Pr}(y^*<y)`. The cdf
is approximated by a Gumbel distribution
.. math:: \mathcal G(y; a, b) = 1 - e^{-e^\frac{y - a}{b}}
where :math:`a, b \in \mathbb R` are chosen such that the quartiles of the Gumbel and cdf match.
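    Concretely, writing :math:`q_1, q_2` for the empirical lower and upper quartiles and
    :math:`\ell_1 = \log\log\frac{4}{3}`, :math:`\ell_2 = \log\log 4`, matching the Gumbel
    quartiles to the empirical ones yields
    .. math:: b = \frac{q_1 - q_2}{\ell_1 - \ell_2}
    .. math:: a = \frac{q_2 \ell_1 - q_1 \ell_2}{\ell_1 - \ell_2}
    which is the closed form used to fit :math:`a` and :math:`b` in :meth:`sample`.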
Samples are obtained via the Gumbel distribution by sampling :math:`r` uniformly from
:math:`[0, 1]` and applying the inverse probability integral transform
:math:`y = \mathcal G^{-1}(r; a, b)`.
Note that the :class:`GumbelSampler` can only sample a function's minimal value and not
its minimiser.
"""
def __init__(self, sample_min_value: bool = False):
"""
        :param sample_min_value: If True then sample from the minimum value of the function,
else sample the function's minimiser.
"""
if not sample_min_value:
raise ValueError(
f"""
Gumbel samplers can only sample a function's minimal value,
however received sample_min_value={sample_min_value}
"""
)
super().__init__(sample_min_value)
def sample(
self,
model: ProbabilisticModel,
sample_size: int,
at: TensorType,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
) -> TensorType:
"""
        Return approximate samples of the objective function's minimum value.
:param model: The model to sample from.
:param sample_size: The desired number of samples.
:param at: Points at where to fit the Gumbel distribution, with shape `[N, D]`, for points
of dimension `D`. We recommend scaling `N` with search space dimension.
:param select_output: A method that returns the desired output from the model sampler, with
shape `[S, N]` where `S` is the number of samples and `N` is the number of locations.
Currently unused.
:return: The samples, of shape `[S, 1]`, where `S` is the `sample_size`.
:raise ValueError: If ``at`` has an invalid shape or if ``sample_size`` is not positive.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_shapes([(at, ["N", None])])
try:
fmean, fvar = model.predict_y(at)
except NotImplementedError:
fmean, fvar = model.predict(at)
fsd = tf.math.sqrt(fvar)
def probf(y: tf.Tensor) -> tf.Tensor: # Build empirical CDF for Pr(y*^hat<y)
unit_normal = tfp.distributions.Normal(tf.cast(0, fmean.dtype), tf.cast(1, fmean.dtype))
log_cdf = unit_normal.log_cdf(-(y - fmean) / fsd)
return 1 - tf.exp(tf.reduce_sum(log_cdf, axis=0))
left = tf.reduce_min(fmean - 5 * fsd)
right = tf.reduce_max(fmean + 5 * fsd)
def binary_search(val: float) -> float: # Find empirical interquartile range
return bisect(lambda y: probf(y) - val, left, right, maxiter=10000)
q1, q2 = map(binary_search, [0.25, 0.75])
log = tf.math.log
l1 = log(log(4.0 / 3.0))
l2 = log(log(4.0))
b = (q1 - q2) / (l1 - l2)
a = (q2 * l1 - q1 * l2) / (l1 - l2)
uniform_samples = tf.random.uniform([sample_size], dtype=fmean.dtype)
gumbel_samples = log(-log(1 - uniform_samples)) * tf.cast(b, fmean.dtype) + tf.cast(
a, fmean.dtype
)
gumbel_samples = tf.expand_dims(gumbel_samples, axis=-1) # [S, 1]
return gumbel_samples
class ThompsonSamplerFromTrajectory(ThompsonSampler[HasTrajectorySampler]):
r"""
This sampler provides approximate Thompson samples of the objective function's
minimiser :math:`x^*` by minimizing approximate trajectories sampled from the
underlying probabilistic model. This sampling method can be used for any
probabilistic model with a :meth:`trajectory_sampler` method.
"""
def sample(
self,
model: ProbabilisticModel,
sample_size: int,
at: TensorType,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
) -> TensorType:
"""
        Return approximate samples from either the objective function's minimiser or its minimal
value over the candidate set `at`. Note that minimiser ties aren't broken randomly.
:param model: The model to sample from.
:param sample_size: The desired number of samples.
:param at: Where to sample the predictive distribution, with shape `[N, D]`, for points
of dimension `D`.
:param select_output: A method that returns the desired output from the model sampler, with
shape `[S, N]` where `S` is the number of samples and `N` is the number of locations.
Defaults to the :func:~`trieste.acquisition.utils.select_nth_output` function with
output dimension 0.
:return: The samples, of shape `[S, D]` (where `S` is the `sample_size`) if sampling
            the function's minimiser or shape `[S, 1]` if sampling the function's minimal value.
:raise ValueError: If ``at`` has an invalid shape or if ``sample_size`` is not positive.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_shapes([(at, ["N", None])])
if not isinstance(model, HasTrajectorySampler):
raise ValueError(
f"Thompson sampling from trajectory only supports models with a trajectory_sampler "
f"method; received {model.__repr__()}"
)
trajectory_sampler = model.trajectory_sampler()
if self._sample_min_value:
thompson_samples = tf.zeros([0, 1], dtype=at.dtype) # [0,1]
else:
thompson_samples = tf.zeros([0, tf.shape(at)[1]], dtype=at.dtype) # [0,D]
for _ in tf.range(sample_size):
sampled_trajectory = trajectory_sampler.get_trajectory()
expanded_at = tf.expand_dims(at, -2) # [N, 1, D]
evaluated_trajectory = select_output(sampled_trajectory(expanded_at)) # [N, 1]
if self._sample_min_value:
sample = tf.reduce_min(evaluated_trajectory, keepdims=True) # [1, 1]
else:
sample = tf.gather(at, tf.math.argmin(evaluated_trajectory)) # [1, D]
thompson_samples = tf.concat([thompson_samples, sample], axis=0)
return thompson_samples # [S, D] or [S, 1]
| 11,521 | 41.674074 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/rule.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains acquisition rules, which choose the optimal point(s) to query on each step of
the Bayesian optimization process.
"""
from __future__ import annotations
import copy
import math
from abc import ABC, abstractmethod
from collections.abc import Mapping
from dataclasses import dataclass
from typing import Any, Callable, Generic, Optional, TypeVar, Union, cast, overload
import numpy as np
try:
import pymoo
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.core.problem import Problem as PymooProblem
from pymoo.optimize import minimize
except ImportError: # pragma: no cover (tested but not by coverage)
pymoo = None
PymooProblem = object
import tensorflow as tf
from .. import logging, types
from ..data import Dataset
from ..models import ProbabilisticModel
from ..models.interfaces import (
HasReparamSampler,
ModelStack,
ProbabilisticModelType,
TrainableSupportsGetKernel,
)
from ..observer import OBJECTIVE
from ..space import Box, SearchSpace
from ..types import State, Tag, TensorType
from .function import (
BatchMonteCarloExpectedImprovement,
ExpectedImprovement,
ProbabilityOfImprovement,
)
from .interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
GreedyAcquisitionFunctionBuilder,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
SingleModelVectorizedAcquisitionBuilder,
VectorizedAcquisitionFunctionBuilder,
)
from .multi_objective import Pareto
from .optimizer import (
AcquisitionOptimizer,
automatic_optimizer_selector,
batchify_joint,
batchify_vectorize,
)
from .sampler import ExactThompsonSampler, ThompsonSampler
from .utils import get_local_dataset, select_nth_output
ResultType = TypeVar("ResultType", covariant=True)
""" Unbound covariant type variable. """
SearchSpaceType = TypeVar("SearchSpaceType", bound=SearchSpace, contravariant=True)
""" Contravariant type variable bound to :class:`~trieste.space.SearchSpace`. """
class AcquisitionRule(ABC, Generic[ResultType, SearchSpaceType, ProbabilisticModelType]):
"""
The central component of the acquisition API.
An :class:`AcquisitionRule` can produce any value from the search space for this step, and the
historic data and models. This value is typically a set of query points, either on its own as
a `TensorType` (see e.g. :class:`EfficientGlobalOptimization`), or within some context
(see e.g. :class:`TrustRegion`). Indeed, to use an :class:`AcquisitionRule` in the main
:class:`~trieste.bayesian_optimizer.BayesianOptimizer` Bayesian optimization loop, the rule
must return either a `TensorType` or `State`-ful `TensorType`.
Note that an :class:`AcquisitionRule` might only support models with specific features (for
example, if it uses an acquisition function that relies on those features). The type of
models supported by a rule is indicated by the generic type variable
    :class:`ProbabilisticModelType`.
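    A minimal illustrative subclass (a sketch, not part of the library) might ignore the models
    and data entirely and simply propose a single random query point:
    >>> class UniformRandom(AcquisitionRule[TensorType, SearchSpace, ProbabilisticModel]):
    ...     def acquire(self, search_space, models, datasets=None):
    ...         return search_space.sample(1)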
"""
@abstractmethod
def acquire(
self,
search_space: SearchSpaceType,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> ResultType:
"""
        Return a value of type `ResultType`. Typically this will be a set of query points,
        either on its own as a `TensorType` (see e.g. :class:`EfficientGlobalOptimization`), or
        within some context (see e.g. :class:`TrustRegion`). We assume that this requires at
        least models, but
it may sometimes also need data.
**Type hints:**
- The search space must be a :class:`~trieste.space.SearchSpace`. The exact type of
:class:`~trieste.space.SearchSpace` depends on the specific :class:`AcquisitionRule`.
:param search_space: The local acquisition search space for *this step*.
:param models: The model for each tag.
:param datasets: The known observer query points and observations for each tag (optional).
        :return: A value of type `ResultType`.
"""
def acquire_single(
self,
search_space: SearchSpaceType,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> ResultType:
"""
A convenience wrapper for :meth:`acquire` that uses only one model, dataset pair.
:param search_space: The global search space over which the optimization problem
is defined.
:param model: The model to use.
:param dataset: The known observer query points and observations (optional).
        :return: A value of type `ResultType`.
"""
if isinstance(dataset, dict) or isinstance(model, dict):
raise ValueError(
"AcquisitionRule.acquire_single method does not support multiple datasets "
"or models: use acquire instead"
)
return self.acquire(
search_space,
{OBJECTIVE: model},
datasets=None if dataset is None else {OBJECTIVE: dataset},
)
class EfficientGlobalOptimization(
AcquisitionRule[TensorType, SearchSpaceType, ProbabilisticModelType]
):
"""Implements the Efficient Global Optimization, or EGO, algorithm."""
@overload
def __init__(
self: "EfficientGlobalOptimization[SearchSpaceType, ProbabilisticModel]",
builder: None = None,
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
initial_acquisition_function: Optional[AcquisitionFunction] = None,
):
...
@overload
def __init__(
self: "EfficientGlobalOptimization[SearchSpaceType, ProbabilisticModelType]",
builder: (
AcquisitionFunctionBuilder[ProbabilisticModelType]
| GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelAcquisitionBuilder[ProbabilisticModelType]
| SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType]
),
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
initial_acquisition_function: Optional[AcquisitionFunction] = None,
):
...
def __init__(
self,
builder: Optional[
AcquisitionFunctionBuilder[ProbabilisticModelType]
| GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]
| VectorizedAcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelAcquisitionBuilder[ProbabilisticModelType]
| SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType]
| SingleModelVectorizedAcquisitionBuilder[ProbabilisticModelType]
] = None,
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
initial_acquisition_function: Optional[AcquisitionFunction] = None,
):
"""
:param builder: The acquisition function builder to use. Defaults to
:class:`~trieste.acquisition.ExpectedImprovement`.
:param optimizer: The optimizer with which to optimize the acquisition function built by
``builder``. This should *maximize* the acquisition function, and must be compatible
with the global search space. Defaults to
:func:`~trieste.acquisition.optimizer.automatic_optimizer_selector`.
:param num_query_points: The number of points to acquire.
:param initial_acquisition_function: The initial acquisition function to use. Defaults
to using the builder to construct one, but passing in a previously constructed
function can occasionally be useful (e.g. to preserve random seeds).
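        For example (an illustrative sketch; the sample size of 100 is arbitrary), a rule that
        acquires a batch of five points by jointly optimizing a Monte Carlo batch acquisition
        function could be constructed as
        >>> rule = EfficientGlobalOptimization(
        ...     builder=BatchMonteCarloExpectedImprovement(sample_size=100),
        ...     num_query_points=5,
        ... ) # doctest: +SKIP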
"""
if num_query_points <= 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if builder is None:
if num_query_points == 1:
builder = ExpectedImprovement()
else:
raise ValueError(
"""Need to specify a batch acquisition function when number of query points
is greater than 1"""
)
if optimizer is None:
optimizer = automatic_optimizer_selector
if isinstance(
builder,
(
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
SingleModelVectorizedAcquisitionBuilder,
),
):
builder = builder.using(OBJECTIVE)
if num_query_points > 1: # need to build batches of points
if isinstance(builder, VectorizedAcquisitionFunctionBuilder):
# optimize batch elements independently
optimizer = batchify_vectorize(optimizer, num_query_points)
elif isinstance(builder, AcquisitionFunctionBuilder):
# optimize batch elements jointly
optimizer = batchify_joint(optimizer, num_query_points)
elif isinstance(builder, GreedyAcquisitionFunctionBuilder):
# optimize batch elements sequentially using the logic in acquire.
pass
self._builder: Union[
AcquisitionFunctionBuilder[ProbabilisticModelType],
GreedyAcquisitionFunctionBuilder[ProbabilisticModelType],
VectorizedAcquisitionFunctionBuilder[ProbabilisticModelType],
] = builder
self._optimizer = optimizer
self._num_query_points = num_query_points
self._acquisition_function: Optional[AcquisitionFunction] = initial_acquisition_function
def __repr__(self) -> str:
""""""
return f"""EfficientGlobalOptimization(
{self._builder!r},
{self._optimizer!r},
{self._num_query_points!r})"""
@property
def acquisition_function(self) -> Optional[AcquisitionFunction]:
"""The current acquisition function, updated last time :meth:`acquire` was called."""
return self._acquisition_function
def acquire(
self,
search_space: SearchSpaceType,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
"""
Return the query point(s) that optimizes the acquisition function produced by ``builder``
(see :meth:`__init__`).
:param search_space: The local acquisition search space for *this step*.
:param models: The model for each tag.
:param datasets: The known observer query points and observations. Whether this is required
depends on the acquisition function used.
:return: The single (or batch of) points to query.
"""
if self._acquisition_function is None:
self._acquisition_function = self._builder.prepare_acquisition_function(
models,
datasets=datasets,
)
else:
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
)
summary_writer = logging.get_tensorboard_writer()
step_number = logging.get_step_number()
greedy = isinstance(self._builder, GreedyAcquisitionFunctionBuilder)
with tf.name_scope("EGO.optimizer" + "[0]" * greedy):
points = self._optimizer(search_space, self._acquisition_function)
if summary_writer:
with summary_writer.as_default(step=step_number):
batched_points = tf.expand_dims(points, axis=0)
values = self._acquisition_function(batched_points)[0]
if len(values) == 1:
logging.scalar(
"EGO.acquisition_function/maximum_found" + "[0]" * greedy, values[0]
)
else: # vectorized acquisition function
logging.histogram(
"EGO.acquisition_function/maximums_found" + "[0]" * greedy, values
)
if isinstance(self._builder, GreedyAcquisitionFunctionBuilder):
for i in range(
self._num_query_points - 1
): # greedily allocate remaining batch elements
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
pending_points=points,
new_optimization_step=False,
)
with tf.name_scope(f"EGO.optimizer[{i+1}]"):
chosen_point = self._optimizer(search_space, self._acquisition_function)
points = tf.concat([points, chosen_point], axis=0)
if summary_writer:
with summary_writer.as_default(step=step_number):
batched_points = tf.expand_dims(chosen_point, axis=0)
values = self._acquisition_function(batched_points)[0]
if len(values) == 1:
logging.scalar(
f"EGO.acquisition_function/maximum_found[{i + 1}]", values[0]
)
else: # vectorized acquisition function
logging.histogram(
f"EGO.acquisition_function/maximums_found[{i+1}]", values
)
return points
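# The sketch below is illustrative only and not part of the public API: it shows how the
# builder/num_query_points combinations above translate into rules, assuming the default
# ExpectedImprovement for a single point and BatchMonteCarloExpectedImprovement (already
# imported in this module) for a jointly optimized batch. No models or data are involved.
def _example_efficient_global_optimization_construction() -> None:
    single_point_rule = EfficientGlobalOptimization()  # defaults to ExpectedImprovement
    batch_rule = EfficientGlobalOptimization(  # batch builder, optimized via batchify_joint
        BatchMonteCarloExpectedImprovement(1_000), num_query_points=2
    )
    # either rule would then be driven as rule.acquire(search_space, {OBJECTIVE: model}, datasets)
    print(single_point_rule, batch_rule)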
@dataclass(frozen=True)
class AsynchronousRuleState:
"""Stores pending points for asynchronous rules.
These are points which were requested but are not observed yet.
"""
pending_points: Optional[TensorType] = None
def __post_init__(self) -> None:
if self.pending_points is None:
# that's fine, no validation needed
return
tf.debugging.assert_shapes(
[(self.pending_points, ["N", "D"])],
message=f"""Pending points are expected to be a 2D tensor,
instead received tensor of shape {tf.shape(self.pending_points)}""",
)
@property
def has_pending_points(self) -> bool:
"""Returns `True` if there is at least one pending point, and `False` otherwise."""
return (self.pending_points is not None) and tf.size(self.pending_points) > 0
def remove_points(self, points_to_remove: TensorType) -> AsynchronousRuleState:
"""Removes all rows from current `pending_points` that are present in `points_to_remove`.
        If a point to remove occurs multiple times in the list of pending points,
        only the first occurrence of it will be removed.
:param points_to_remove: Points to remove.
:return: New instance of `AsynchronousRuleState` with updated pending points.
"""
@tf.function
def _remove_point(pending_points: TensorType, point_to_remove: TensorType) -> TensorType:
# find all points equal to the one we need to remove
are_points_equal = tf.reduce_all(tf.equal(pending_points, point_to_remove), axis=1)
if not tf.reduce_any(are_points_equal):
# point to remove isn't there, nothing to do
return pending_points
# since we're compiling, we still need to handle pending_points = [] here
top = tf.cond(tf.math.greater(1, tf.shape(are_points_equal)[0]), lambda: 0, lambda: 1)
# this line converts all bool values to 0 and 1
# then finds first 1 and returns its index as 1x1 tensor
_, first_index_tensor = tf.math.top_k(tf.cast(are_points_equal, tf.int8), k=top)
# to use it as index for slicing, we need to convert 1x1 tensor to a TF scalar
first_index = tf.reshape(first_index_tensor, [])
return tf.concat(
[pending_points[:first_index, :], pending_points[first_index + 1 :, :]], axis=0
)
if not self.has_pending_points:
# nothing to do if there are no pending points
return self
tf.debugging.assert_shapes(
[(self.pending_points, [None, "D"]), (points_to_remove, [None, "D"])],
            message=f"""Points to remove are expected to be a 2D tensor with the same last
            dimension D as the pending points.
Got {tf.shape(self.pending_points)} for pending points
and {tf.shape(points_to_remove)} for other points.""",
)
new_pending_points = tf.foldl(
_remove_point, points_to_remove, initializer=self.pending_points
)
return AsynchronousRuleState(new_pending_points)
def add_pending_points(self, new_points: TensorType) -> AsynchronousRuleState:
"""Adds `new_points` to the already known pending points.
:param new_points: Points to add.
:return: New instance of `AsynchronousRuleState` with updated pending points.
"""
if not self.has_pending_points:
return AsynchronousRuleState(new_points)
tf.debugging.assert_shapes(
[(self.pending_points, [None, "D"]), (new_points, [None, "D"])],
message=f"""New points shall be 2D and have same last dimension as pending points.
Got {tf.shape(self.pending_points)} for pending points
and {tf.shape(new_points)} for new points.""",
)
new_pending_points = tf.concat([self.pending_points, new_points], axis=0)
return AsynchronousRuleState(new_pending_points)
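# A minimal, illustrative sketch (not part of the API) of the pending-point bookkeeping above,
# using plain tensors only: points are added when requested and removed once observed.
def _example_asynchronous_rule_state() -> None:
    state = AsynchronousRuleState(None)
    state = state.add_pending_points(tf.constant([[0.0, 1.0], [2.0, 3.0]]))
    assert state.has_pending_points
    # once the observation for [0.0, 1.0] arrives, that point is dropped from the pending set
    state = state.remove_points(tf.constant([[0.0, 1.0]]))
    tf.debugging.assert_shapes([(state.pending_points, [1, 2])])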
class AsynchronousOptimization(
AcquisitionRule[
State[Optional["AsynchronousRuleState"], TensorType],
SearchSpaceType,
ProbabilisticModelType,
]
):
"""AsynchronousOptimization rule is designed for asynchronous BO scenarios.
    By asynchronous BO we understand a use case where multiple evaluations of the objective
    function can be launched in parallel and are expected to finish at different times.
    Instead of waiting for the rest of the observations to return, we want to immediately
    use the acquisition function to launch a new observation and avoid wasting computational
    resources.
See :cite:`Alvi:2019` or :cite:`kandasamy18a` for more details.
To make the best decision about next point to observe, acquisition function
needs to be aware of currently running observations.
We call such points "pending", and consider them a part of acquisition state.
We use :class:`AsynchronousRuleState` to store these points.
`AsynchronousOptimization` works with non-greedy batch acquisition functions.
For example, it would work with
:class:`~trieste.acquisition.BatchMonteCarloExpectedImprovement`,
but cannot be used with :class:`~trieste.acquisition.ExpectedImprovement`.
If there are P pending points and the batch of size B is requested,
the acquisition function is used with batch size P+B.
    During optimization the first P points are fixed to the pending points,
    and thus we optimize and return the last B points only.
"""
@overload
def __init__(
self: "AsynchronousOptimization[SearchSpaceType, HasReparamSampler]",
builder: None = None,
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
):
...
@overload
def __init__(
self: "AsynchronousOptimization[SearchSpaceType, ProbabilisticModelType]",
builder: (
AcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelAcquisitionBuilder[ProbabilisticModelType]
),
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
):
...
def __init__(
self,
builder: Optional[
AcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelAcquisitionBuilder[ProbabilisticModelType]
] = None,
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
):
"""
:param builder: Batch acquisition function builder. Defaults to
:class:`~trieste.acquisition.BatchMonteCarloExpectedImprovement` with 10 000 samples.
:param optimizer: The optimizer with which to optimize the acquisition function built by
``builder``. This should *maximize* the acquisition function, and must be compatible
with the global search space. Defaults to
:func:`~trieste.acquisition.optimizer.automatic_optimizer_selector`.
:param num_query_points: The number of points to acquire.
"""
if num_query_points <= 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if builder is None:
builder = cast(
SingleModelAcquisitionBuilder[ProbabilisticModelType],
BatchMonteCarloExpectedImprovement(10_000),
)
if optimizer is None:
optimizer = automatic_optimizer_selector
if isinstance(builder, SingleModelAcquisitionBuilder):
builder = builder.using(OBJECTIVE)
# even though we are only using batch acquisition functions
# there is no need to batchify_joint the optimizer if our batch size is 1
if num_query_points > 1:
optimizer = batchify_joint(optimizer, num_query_points)
self._builder: AcquisitionFunctionBuilder[ProbabilisticModelType] = builder
self._optimizer = optimizer
self._acquisition_function: Optional[AcquisitionFunction] = None
def __repr__(self) -> str:
""""""
return f"""AsynchronousOptimization(
{self._builder!r},
{self._optimizer!r})"""
def acquire(
self,
search_space: SearchSpaceType,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> types.State[AsynchronousRuleState | None, TensorType]:
"""
Constructs a function that, given ``AsynchronousRuleState``,
returns a new state object and points to evaluate.
        The state object contains the currently known pending points,
        that is points that were requested for evaluation
        but for which observations have not yet been received.
To keep them up to date, pending points are compared against the given dataset,
and whatever points are in the dataset are deleted.
        Let's suppose we have P pending points and request a batch of B new points. To optimize
        the acquisition function we call it with batches of size P+B, where the first P points
        are fixed to the pending points. Optimization therefore happens over the last B points
        only, which are returned.
:param search_space: The local acquisition search space for *this step*.
:param models: The model of the known data. Uses the single key `OBJECTIVE`.
:param datasets: The known observer query points and observations.
:return: A function that constructs the next acquisition state and the recommended query
points from the previous acquisition state.
"""
if models.keys() != {OBJECTIVE}:
raise ValueError(
f"dict of models must contain the single key {OBJECTIVE}, got keys {models.keys()}"
)
if datasets is None or datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""datasets must be provided and contain the single key {OBJECTIVE}"""
)
if self._acquisition_function is None:
self._acquisition_function = self._builder.prepare_acquisition_function(
models,
datasets=datasets,
)
else:
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
)
def state_func(
state: AsynchronousRuleState | None,
) -> tuple[AsynchronousRuleState | None, TensorType]:
tf.debugging.Assert(self._acquisition_function is not None, [tf.constant([])])
if state is None:
state = AsynchronousRuleState(None)
assert datasets is not None
state = state.remove_points(datasets[OBJECTIVE].query_points)
if state.has_pending_points:
pending_points: TensorType = state.pending_points
def function_with_pending_points(x: TensorType) -> TensorType:
# stuff below is quite tricky, and thus deserves an elaborate comment
# we receive unknown number N of batches to evaluate
# and need to collect batch of B new points
# so the shape of `x` is [N, B, D]
# we want to add P pending points to each batch
# so that acquisition actually receives N batches of shape [P+B, D] each
# therefore here we prepend each batch with all pending points
# resulting a shape [N, P+B, D]
# we do that by repeating pending points N times and concatenating with x
# pending points are 2D, we need 3D and repeat along first axis
expanded = tf.expand_dims(pending_points, axis=0)
pending_points_repeated = tf.repeat(expanded, [tf.shape(x)[0]], axis=0)
all_points = tf.concat([pending_points_repeated, x], axis=1)
return cast(AcquisitionFunction, self._acquisition_function)(all_points)
acquisition_function = cast(AcquisitionFunction, function_with_pending_points)
else:
acquisition_function = cast(AcquisitionFunction, self._acquisition_function)
with tf.name_scope("AsynchronousOptimization.optimizer"):
new_points = self._optimizer(search_space, acquisition_function)
state = state.add_pending_points(new_points)
return state, new_points
return state_func
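# An illustrative shape sketch (not part of the API) of how the state function above prepends
# the P pending points to every candidate batch before scoring it, here with D=2, P=2 and B=3.
def _example_pending_point_batching() -> None:
    pending_points = tf.constant([[0.0, 0.0], [1.0, 1.0]])  # [P, D]
    x = tf.random.uniform([5, 3, 2])  # N=5 candidate batches of B=3 points each
    expanded = tf.expand_dims(pending_points, axis=0)
    pending_repeated = tf.repeat(expanded, [tf.shape(x)[0]], axis=0)
    all_points = tf.concat([pending_repeated, x], axis=1)  # [N, P + B, D]
    tf.debugging.assert_shapes([(all_points, [5, 5, 2])])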
class AsynchronousGreedy(
AcquisitionRule[
State[Optional["AsynchronousRuleState"], TensorType],
SearchSpaceType,
ProbabilisticModelType,
]
):
"""AsynchronousGreedy rule, as name suggests,
is designed for asynchronous BO scenarios. To see what we understand by
asynchronous BO, see documentation for :class:`~trieste.acquisition.AsynchronousOptimization`.
AsynchronousGreedy rule works with greedy batch acquisition functions
and performs B steps of a greedy batch collection process,
where B is the requested batch size.
"""
def __init__(
self,
builder: GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType],
optimizer: AcquisitionOptimizer[SearchSpaceType] | None = None,
num_query_points: int = 1,
):
"""
:param builder: Acquisition function builder. Only greedy batch approaches are supported,
because they can be told what points are pending.
:param optimizer: The optimizer with which to optimize the acquisition function built by
``builder``. This should *maximize* the acquisition function, and must be compatible
with the global search space. Defaults to
:func:`~trieste.acquisition.optimizer.automatic_optimizer_selector`.
:param num_query_points: The number of points to acquire.
"""
if num_query_points <= 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if builder is None:
raise ValueError("Please specify an acquisition builder")
if not isinstance(
builder, (GreedyAcquisitionFunctionBuilder, SingleModelGreedyAcquisitionBuilder)
):
raise NotImplementedError(
f"""Only greedy acquisition strategies are supported,
got {type(builder)}"""
)
if optimizer is None:
optimizer = automatic_optimizer_selector
if isinstance(builder, SingleModelGreedyAcquisitionBuilder):
builder = builder.using(OBJECTIVE)
self._builder: GreedyAcquisitionFunctionBuilder[ProbabilisticModelType] = builder
self._optimizer = optimizer
self._acquisition_function: Optional[AcquisitionFunction] = None
self._num_query_points = num_query_points
def __repr__(self) -> str:
""""""
return f"""AsynchronousGreedy(
{self._builder!r},
{self._optimizer!r},
{self._num_query_points!r})"""
def acquire(
self,
search_space: SearchSpaceType,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> types.State[AsynchronousRuleState | None, TensorType]:
"""
Constructs a function that, given ``AsynchronousRuleState``,
returns a new state object and points to evaluate.
        The state object contains the currently known pending points,
        that is points that were requested for evaluation
        but for which observations have not yet been received.
To keep them up to date, pending points are compared against the given dataset,
and whatever points are in the dataset are deleted.
Then the current batch is generated by calling the acquisition function,
and all points in the batch are added to the known pending points.
:param search_space: The local acquisition search space for *this step*.
:param models: The model of the known data. Uses the single key `OBJECTIVE`.
:param datasets: The known observer query points and observations.
:return: A function that constructs the next acquisition state and the recommended query
points from the previous acquisition state.
"""
if models.keys() != {OBJECTIVE}:
raise ValueError(
f"dict of models must contain the single key {OBJECTIVE}, got keys {models.keys()}"
)
if datasets is None or datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""datasets must be provided and contain the single key {OBJECTIVE}"""
)
def state_func(
state: AsynchronousRuleState | None,
) -> tuple[AsynchronousRuleState | None, TensorType]:
if state is None:
state = AsynchronousRuleState(None)
assert datasets is not None
state = state.remove_points(datasets[OBJECTIVE].query_points)
if self._acquisition_function is None:
self._acquisition_function = self._builder.prepare_acquisition_function(
models,
datasets=datasets,
pending_points=state.pending_points,
)
else:
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
pending_points=state.pending_points,
)
with tf.name_scope("AsynchronousOptimization.optimizer[0]"):
new_points_batch = self._optimizer(search_space, self._acquisition_function)
state = state.add_pending_points(new_points_batch)
summary_writer = logging.get_tensorboard_writer()
step_number = logging.get_step_number()
for i in range(self._num_query_points - 1):
# greedily allocate additional batch elements
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
pending_points=state.pending_points,
new_optimization_step=False,
)
with tf.name_scope(f"AsynchronousOptimization.optimizer[{i+1}]"):
new_point = self._optimizer(search_space, self._acquisition_function)
if summary_writer:
with summary_writer.as_default(step=step_number):
batched_point = tf.expand_dims(new_point, axis=0)
value = self._acquisition_function(batched_point)[0][0]
logging.scalar(
f"AsyncGreedy.acquisition_function/maximum_found[{i}]", value
)
state = state.add_pending_points(new_point)
new_points_batch = tf.concat([new_points_batch, new_point], axis=0)
return state, new_points_batch
return state_func
class RandomSampling(AcquisitionRule[TensorType, SearchSpace, ProbabilisticModel]):
"""
    This class performs random search for choosing optimal points. It uses the ``sample`` method
    from :class:`~trieste.space.SearchSpace` to draw random samples from the search space, which
    are returned as the chosen points. Hence, it does not use any acquisition function. This
    acquisition rule can be useful as a baseline for other acquisition functions of interest.
"""
def __init__(self, num_query_points: int = 1):
"""
:param num_query_points: The number of points to acquire. By default set to 1 point.
        :raise ValueError: If ``num_query_points`` is less than or equal to 0.
"""
if num_query_points <= 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
self._num_query_points = num_query_points
def __repr__(self) -> str:
""""""
return f"""RandomSampling({self._num_query_points!r})"""
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
"""
Sample ``num_query_points`` (see :meth:`__init__`) points from the
``search_space``.
:param search_space: The acquisition search space.
:param models: Unused.
:param datasets: Unused.
:return: The ``num_query_points`` points to query.
"""
samples = search_space.sample(self._num_query_points)
return samples
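# An illustrative usage sketch (not part of the API): RandomSampling ignores models and data,
# so it can be driven with an empty model mapping over a Box search space (Box is already
# imported in this module).
def _example_random_sampling() -> None:
    rule = RandomSampling(num_query_points=4)
    points = rule.acquire(Box([0.0, 0.0], [1.0, 1.0]), models={})
    tf.debugging.assert_shapes([(points, [4, 2])])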
class DiscreteThompsonSampling(AcquisitionRule[TensorType, SearchSpace, ProbabilisticModelType]):
r"""
Implements Thompson sampling for choosing optimal points.
This rule returns the minimizers of functions sampled from our model and evaluated across
a discretization of the search space (containing `N` candidate points).
The model is sampled either exactly (with an :math:`O(N^3)` complexity), or sampled
    approximately through a decomposition using `M` random Fourier features
(with an :math:`O(\min(n^3,M^3))` complexity for a model trained on `n` points). The number
`M` of Fourier features is specified when building the model.
"""
@overload
def __init__(
self: "DiscreteThompsonSampling[ProbabilisticModel]",
num_search_space_samples: int,
num_query_points: int,
thompson_sampler: None = None,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
):
...
@overload
def __init__(
self: "DiscreteThompsonSampling[ProbabilisticModelType]",
num_search_space_samples: int,
num_query_points: int,
thompson_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
):
...
def __init__(
self,
num_search_space_samples: int,
num_query_points: int,
thompson_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None,
select_output: Callable[[TensorType], TensorType] = select_nth_output,
):
"""
:param num_search_space_samples: The number of points at which to sample the posterior.
:param num_query_points: The number of points to acquire.
:param thompson_sampler: Sampler to sample maximisers from the underlying model.
:param select_output: A method that returns the desired trajectory from a trajectory
sampler with shape [..., B], where B is a batch dimension. Defaults to the
            :func:`~trieste.acquisition.utils.select_nth_output` function with output dimension 0.
"""
if not num_search_space_samples > 0:
            raise ValueError(
                "Number of search space samples must be greater than 0,"
                f" got {num_search_space_samples}"
            )
if not num_query_points > 0:
raise ValueError(
f"Number of query points must be greater than 0, got {num_query_points}"
)
if thompson_sampler is not None:
if thompson_sampler.sample_min_value:
raise ValueError(
"""
Thompson sampling requires a thompson_sampler that samples minimizers,
not just minimum values. However the passed sampler has sample_min_value=True.
"""
)
else:
thompson_sampler = ExactThompsonSampler(sample_min_value=False)
self._thompson_sampler = thompson_sampler
self._num_search_space_samples = num_search_space_samples
self._num_query_points = num_query_points
self._select_output = select_output
def __repr__(self) -> str:
""""""
return f"""DiscreteThompsonSampling(
{self._num_search_space_samples!r},
{self._num_query_points!r},
{self._thompson_sampler!r},
{self._select_output!r})"""
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
"""
Sample `num_search_space_samples` (see :meth:`__init__`) points from the
``search_space``. Of those points, return the `num_query_points` points at which
random samples yield the **minima** of the model posterior.
:param search_space: The local acquisition search space for *this step*.
:param models: The model of the known data. Uses the single key `OBJECTIVE`.
:param datasets: The known observer query points and observations.
:return: The ``num_query_points`` points to query.
:raise ValueError: If ``models`` do not contain the key `OBJECTIVE`, or it contains any
other key.
"""
if models.keys() != {OBJECTIVE}:
raise ValueError(
f"dict of models must contain the single key {OBJECTIVE}, got keys {models.keys()}"
)
if datasets is None or datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""datasets must be provided and contain the single key {OBJECTIVE}"""
)
query_points = search_space.sample(self._num_search_space_samples)
thompson_samples = self._thompson_sampler.sample(
models[OBJECTIVE],
self._num_query_points,
query_points,
select_output=self._select_output,
)
return thompson_samples
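# A construction-only sketch (not part of the API): exact Thompson sampling scales cubically
# in the number of candidate points, so the discretisation size is kept moderate here.
def _example_discrete_thompson_sampling_construction() -> None:
    rule = DiscreteThompsonSampling(num_search_space_samples=1_000, num_query_points=4)
    # rule.acquire(search_space, {OBJECTIVE: model}) would draw 1 000 candidates from the
    # search space and return the 4 minimisers of posterior samples; no model is built here.
    print(rule)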
class TrustRegion(
AcquisitionRule[
types.State[Optional["TrustRegion.State"], TensorType], Box, ProbabilisticModelType
]
):
"""Implements the *trust region* acquisition algorithm."""
@dataclass(frozen=True)
class State:
"""The acquisition state for the :class:`TrustRegion` acquisition rule."""
acquisition_space: Box
""" The search space. """
eps: TensorType
"""
The (maximum) vector from the current best point to each bound of the acquisition space.
"""
y_min: TensorType
""" The minimum observed value. """
is_global: bool | TensorType
"""
`True` if the search space was global, else `False` if it was local. May be a scalar boolean
`TensorType` instead of a `bool`.
"""
def __deepcopy__(self, memo: dict[int, object]) -> TrustRegion.State:
box_copy = copy.deepcopy(self.acquisition_space, memo)
return TrustRegion.State(box_copy, self.eps, self.y_min, self.is_global)
@overload
def __init__(
self: "TrustRegion[ProbabilisticModel]",
rule: None = None,
beta: float = 0.7,
kappa: float = 1e-4,
):
...
@overload
def __init__(
self: "TrustRegion[ProbabilisticModelType]",
rule: AcquisitionRule[TensorType, Box, ProbabilisticModelType],
beta: float = 0.7,
kappa: float = 1e-4,
):
...
def __init__(
self,
rule: AcquisitionRule[TensorType, Box, ProbabilisticModelType] | None = None,
beta: float = 0.7,
kappa: float = 1e-4,
):
"""
:param rule: The acquisition rule that defines how to search for a new query point in a
given search space. Defaults to :class:`EfficientGlobalOptimization` with default
arguments.
:param beta: The inverse of the trust region contraction factor.
:param kappa: Scales the threshold for the minimal improvement required for a step to be
considered a success.
"""
if rule is None:
rule = EfficientGlobalOptimization()
self._rule = rule
self._beta = beta
self._kappa = kappa
def __repr__(self) -> str:
""""""
return f"TrustRegion({self._rule!r}, {self._beta!r}, {self._kappa!r})"
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> types.State[State | None, TensorType]:
"""
Construct a local search space from ``search_space`` according the trust region algorithm,
and use that with the ``rule`` specified at :meth:`~TrustRegion.__init__` to find new
query points. Return a function that constructs these points given a previous trust region
state.
If no ``state`` is specified (it is `None`), ``search_space`` is used as the search space
for this step.
If a ``state`` is specified, and the new optimum improves over the previous optimum
by some threshold (that scales linearly with ``kappa``), the previous acquisition is
considered successful.
If the previous acquisition was successful, ``search_space`` is used as the new
search space. If the previous step was unsuccessful, the search space is changed to the
trust region if it was global, and vice versa.
If the previous acquisition was over the trust region, the size of the trust region is
modified. If the previous acquisition was successful, the size is increased by a factor
``1 / beta``. Conversely, if it was unsuccessful, the size is reduced by the factor
``beta``.
**Note:** The acquisition search space will never extend beyond the boundary of the
``search_space``. For a local search, the actual search space will be the
intersection of the trust region and ``search_space``.
:param search_space: The local acquisition search space for *this step*.
:param models: The model for each tag.
:param datasets: The known observer query points and observations. Uses the data for key
`OBJECTIVE` to calculate the new trust region.
:return: A function that constructs the next acquisition state and the recommended query
points from the previous acquisition state.
:raise KeyError: If ``datasets`` does not contain the key `OBJECTIVE`.
"""
if datasets is None or OBJECTIVE not in datasets.keys():
raise ValueError(f"""datasets must be provided and contain the key {OBJECTIVE}""")
dataset = datasets[OBJECTIVE]
global_lower = search_space.lower
global_upper = search_space.upper
y_min = tf.reduce_min(dataset.observations, axis=0)
def state_func(
state: TrustRegion.State | None,
) -> tuple[TrustRegion.State | None, TensorType]:
if state is None:
eps = 0.5 * (global_upper - global_lower) / (5.0 ** (1.0 / global_lower.shape[-1]))
is_global = True
else:
tr_volume = tf.reduce_prod(
state.acquisition_space.upper - state.acquisition_space.lower
)
step_is_success = y_min < state.y_min - self._kappa * tr_volume
eps = (
state.eps
if state.is_global
else state.eps / self._beta
if step_is_success
else state.eps * self._beta
)
is_global = step_is_success or not state.is_global
if is_global:
acquisition_space = search_space
else:
xmin = dataset.query_points[tf.argmin(dataset.observations)[0], :]
acquisition_space = Box(
tf.reduce_max([global_lower, xmin - eps], axis=0),
tf.reduce_min([global_upper, xmin + eps], axis=0),
)
points = self._rule.acquire(acquisition_space, models, datasets=datasets)
state_ = TrustRegion.State(acquisition_space, eps, y_min, is_global)
return state_, points
return state_func
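# A small numeric sketch (not part of the API) of the initial trust-region half-width computed
# in state_func above, assuming a unit box in two dimensions.
def _example_trust_region_initial_eps() -> None:
    global_lower = tf.constant([0.0, 0.0])
    global_upper = tf.constant([1.0, 1.0])
    eps = 0.5 * (global_upper - global_lower) / (5.0 ** (1.0 / global_lower.shape[-1]))
    expected = 0.5 / 5.0**0.5  # ~0.224 per axis
    tf.debugging.assert_near(eps, tf.constant([expected, expected]))
    # the local region is then Box(xmin - eps, xmin + eps) intersected with the global box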
class TURBO(
AcquisitionRule[
types.State[Optional["TURBO.State"], TensorType], Box, TrainableSupportsGetKernel
]
):
"""Implements the TURBO algorithm as detailed in :cite:`eriksson2019scalable`."""
@dataclass(frozen=True)
class State:
"""The acquisition state for the :class:`TURBO` acquisition rule."""
acquisition_space: Box
""" The search space. """
L: float
""" Length of the trust region (before standardizing by model lengthscales) """
failure_counter: int
""" Number of consecutive failures (reset if we see a success). """
success_counter: int
""" Number of consecutive successes (reset if we see a failure). """
y_min: TensorType
""" The minimum observed value. """
def __deepcopy__(self, memo: dict[int, object]) -> TURBO.State:
box_copy = copy.deepcopy(self.acquisition_space, memo)
return TURBO.State(
box_copy, self.L, self.failure_counter, self.success_counter, self.y_min
)
def __init__(
self,
search_space: SearchSpace,
num_trust_regions: int = 1,
rule: Optional[AcquisitionRule[ResultType, Box, TrainableSupportsGetKernel]] = None,
L_min: Optional[float] = None,
L_init: Optional[float] = None,
L_max: Optional[float] = None,
success_tolerance: int = 3,
failure_tolerance: Optional[int] = None,
local_models: Optional[Mapping[Tag, TrainableSupportsGetKernel]] = None,
):
"""
Note that the optional parameters are set by a heuristic if not given by the user.
:param search_space: The search space.
:param num_trust_regions: Number of trust regions controlled by TURBO
:param rule: rule used to select points from within the trust region, using the local model.
:param L_min: Minimum allowed length of the trust region.
:param L_init: Initial length of the trust region.
:param L_max: Maximum allowed length of the trust region.
:param success_tolerance: Number of consecutive successes before changing region size.
        :param failure_tolerance: Number of consecutive failures before changing region size.
:param local_models: Optional model to act as the local model. This will be refit using
the data from each trust region. If no local_models are provided then we just
copy the global model.
"""
if not num_trust_regions > 0:
raise ValueError(f"Num trust regions must be greater than 0, got {num_trust_regions}")
if num_trust_regions > 1:
raise NotImplementedError(
f"TURBO does not yet support multiple trust regions, but got {num_trust_regions}"
)
# implement heuristic defaults for TURBO if not specified by user
if rule is None: # default to Thompson sampling with batches of size 1
rule = DiscreteThompsonSampling(tf.minimum(100 * search_space.dimension, 5_000), 1)
if failure_tolerance is None:
if isinstance(
rule,
(
EfficientGlobalOptimization,
DiscreteThompsonSampling,
RandomSampling,
AsynchronousOptimization,
),
):
failure_tolerance = math.ceil(search_space.dimension / rule._num_query_points)
else:
                failure_tolerance = search_space.dimension
assert isinstance(failure_tolerance, int)
search_space_max_width = tf.reduce_max(search_space.upper - search_space.lower)
if L_min is None:
L_min = (0.5**7) * search_space_max_width
if L_init is None:
L_init = 0.8 * search_space_max_width
if L_max is None:
L_max = 1.6 * search_space_max_width
if not success_tolerance > 0:
raise ValueError(
f"success tolerance must be an integer greater than 0, got {success_tolerance}"
)
        if not failure_tolerance > 0:
            raise ValueError(
                f"failure tolerance must be an integer greater than 0, got {failure_tolerance}"
            )
        if L_min <= 0:
            raise ValueError(f"L_min must be positive, got {L_min}")
        if L_init <= 0:
            raise ValueError(f"L_init must be positive, got {L_init}")
        if L_max <= 0:
            raise ValueError(f"L_max must be positive, got {L_max}")
self._num_trust_regions = num_trust_regions
self._L_min = L_min
self._L_init = L_init
self._L_max = L_max
self._success_tolerance = success_tolerance
self._failure_tolerance = failure_tolerance
self._rule = rule
self._local_models = local_models
def __repr__(self) -> str:
""""""
return f"TURBO({self._num_trust_regions!r}, {self._rule})"
def acquire(
self,
search_space: Box,
models: Mapping[Tag, TrainableSupportsGetKernel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> types.State[State | None, TensorType]:
"""
Construct a local search space from ``search_space`` according the TURBO algorithm,
and use that with the ``rule`` specified at :meth:`~TURBO.__init__` to find new
query points. Return a function that constructs these points given a previous trust region
state.
If no ``state`` is specified (it is `None`), then we build the initial trust region.
If a ``state`` is specified, and the new optimum improves over the previous optimum,
the previous acquisition is considered successful.
If ``success_tolerance`` previous consecutive acquisitions were successful then the search
space is made larger. If ``failure_tolerance`` consecutive acquisitions were unsuccessful
then the search space is shrunk. If neither condition is triggered then the search space
remains the same.
**Note:** The acquisition search space will never extend beyond the boundary of the
``search_space``. For a local search, the actual search space will be the
intersection of the trust region and ``search_space``.
:param search_space: The local acquisition search space for *this step*.
:param models: The model for each tag.
:param datasets: The known observer query points and observations. Uses the data for key
`OBJECTIVE` to calculate the new trust region.
:return: A function that constructs the next acquisition state and the recommended query
points from the previous acquisition state.
:raise KeyError: If ``datasets`` does not contain the key `OBJECTIVE`.
"""
        if self._local_models is None:  # if the user doesn't specify a local model
self._local_models = copy.copy(models) # copy global model (will be fit locally later)
        if self._local_models.keys() != {OBJECTIVE}:
            raise ValueError(
                f"dict of local models must contain the single key {OBJECTIVE},"
                f" got keys {self._local_models.keys()}"
            )
if datasets is None or datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""datasets must be provided and contain the single key {OBJECTIVE}"""
)
dataset = datasets[OBJECTIVE]
local_model = self._local_models[OBJECTIVE]
global_lower = search_space.lower
global_upper = search_space.upper
y_min = tf.reduce_min(dataset.observations, axis=0)
def state_func(
state: TURBO.State | None,
) -> tuple[TURBO.State | None, TensorType]:
if state is None: # initialise first TR
L, failure_counter, success_counter = self._L_init, 0, 0
else: # update TR
step_is_success = y_min < state.y_min - 1e-10 # maybe make this stronger?
failure_counter = (
0 if step_is_success else state.failure_counter + 1
) # update or reset counter
success_counter = (
state.success_counter + 1 if step_is_success else 0
) # update or reset counter
L = state.L
if success_counter == self._success_tolerance:
L *= 2.0 # make region bigger
success_counter = 0
elif failure_counter == self._failure_tolerance:
L *= 0.5 # make region smaller
failure_counter = 0
L = tf.minimum(L, self._L_max)
if L < self._L_min: # if gets too small then start again
L, failure_counter, success_counter = self._L_init, 0, 0
# build region with volume according to length L but stretched according to lengthscales
xmin = dataset.query_points[tf.argmin(dataset.observations)[0], :] # centre of region
lengthscales = (
local_model.get_kernel().lengthscales
) # stretch region according to model lengthscales
tr_width = (
lengthscales * L / tf.reduce_prod(lengthscales) ** (1.0 / global_lower.shape[-1])
) # keep volume fixed
acquisition_space = Box(
tf.reduce_max([global_lower, xmin - tr_width / 2.0], axis=0),
tf.reduce_min([global_upper, xmin + tr_width / 2.0], axis=0),
)
# fit the local model using just data from the trust region
local_dataset = get_local_dataset(acquisition_space, dataset)
local_model.update(local_dataset)
local_model.optimize(local_dataset)
# use local model and local dataset to choose next query point(s)
points = self._rule.acquire_single(acquisition_space, local_model, local_dataset)
state_ = TURBO.State(acquisition_space, L, failure_counter, success_counter, y_min)
return state_, points
return state_func
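# A small numeric sketch (not part of the API) of the lengthscale stretching used in state_func
# above: the region is wider along long-lengthscale dimensions, but its volume stays L**d.
def _example_turbo_region_width() -> None:
    lengthscales = tf.constant([0.1, 1.0, 10.0])
    length = 0.8  # the trust region length L
    tr_width = lengthscales * length / tf.reduce_prod(lengthscales) ** (1.0 / 3)
    tf.debugging.assert_near(tf.reduce_prod(tr_width), tf.constant(length**3))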
class BatchHypervolumeSharpeRatioIndicator(
AcquisitionRule[TensorType, SearchSpace, ProbabilisticModel]
):
"""Implements the Batch Hypervolume Sharpe-ratio indicator acquisition
rule, designed for large batches, introduced by Binois et al, 2021.
See :cite:`binois2021portfolio` for details.
"""
def __init__(
self,
num_query_points: int = 1,
ga_population_size: int = 500,
ga_n_generations: int = 200,
filter_threshold: float = 0.1,
noisy_observations: bool = True,
):
"""
        :param num_query_points: The number of points in a batch. Defaults to 1.
:param ga_population_size: The population size used in the genetic algorithm
that finds points on the Pareto front. Defaults to 500.
        :param ga_n_generations: The number of generations to run in the genetic
            algorithm. Defaults to 200.
        :param filter_threshold: The probability of improvement below which to exclude
            points from the Sharpe ratio optimisation. Defaults to 0.1.
:param noisy_observations: Whether the observations have noise. Defaults to True.
"""
if not num_query_points > 0:
raise ValueError(f"Num query points must be greater than 0, got {num_query_points}")
        if not ga_population_size >= num_query_points:
            raise ValueError(
                "Population size must be greater than or equal to the number of query points,"
                f" got {num_query_points} query points and population size {ga_population_size}"
            )
if not ga_n_generations > 0:
            raise ValueError(
                f"Number of generations must be greater than 0, got {ga_n_generations}"
            )
if not 0.0 <= filter_threshold < 1.0:
raise ValueError(f"Filter threshold must be in range [0.0,1.0), got {filter_threshold}")
if pymoo is None:
raise ImportError(
"BatchHypervolumeSharpeRatioIndicator requires pymoo, "
"which can be installed via `pip install trieste[qhsri]`"
)
builder = ProbabilityOfImprovement().using(OBJECTIVE)
self._builder: AcquisitionFunctionBuilder[ProbabilisticModel] = builder
self._num_query_points: int = num_query_points
self._population_size: int = ga_population_size
self._n_generations: int = ga_n_generations
self._filter_threshold: float = filter_threshold
self._noisy_observations: bool = noisy_observations
self._acquisition_function: Optional[AcquisitionFunction] = None
def __repr__(self) -> str:
""""""
return f"""BatchHypervolumeSharpeRatioIndicator(
num_query_points={self._num_query_points}, ga_population_size={self._population_size},
ga_n_generations={self._n_generations}, filter_threshold={self._filter_threshold},
noisy_observations={self._noisy_observations}
)
"""
def _find_non_dominated_points(
self, model: ProbabilisticModel, search_space: SearchSpaceType
) -> tuple[TensorType, TensorType]:
"""Uses NSGA-II to find high-quality non-dominated points"""
problem = _MeanStdTradeoff(model, search_space)
algorithm = NSGA2(pop_size=self._population_size)
res = minimize(problem, algorithm, ("n_gen", self._n_generations), seed=1, verbose=False)
return res.X, res.F
def _filter_points(
self, nd_points: TensorType, nd_mean_std: TensorType
) -> tuple[TensorType, TensorType]:
if self._acquisition_function is None:
raise ValueError("Acquisition function has not been defined yet")
probs_of_improvement = np.array(
self._acquisition_function(np.expand_dims(nd_points, axis=-2))
)
above_threshold = probs_of_improvement > self._filter_threshold
if np.sum(above_threshold) >= self._num_query_points and nd_mean_std.shape[1] == 2:
# There are enough points above the threshold to get a batch
out_points, out_mean_std = (
nd_points[above_threshold.squeeze(), :],
nd_mean_std[above_threshold.squeeze(), :],
)
else:
# We don't filter
out_points, out_mean_std = nd_points, nd_mean_std
return out_points, out_mean_std
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
"""Acquire a batch of points to observe based on the batch hypervolume
Sharpe ratio indicator method.
This method uses NSGA-II to create a Pareto set of the mean and standard
deviation of the posterior of the probabilistic model, and then selects
points to observe based on maximising the Sharpe ratio.
:param search_space: The local acquisition search space for *this step*.
:param models: The model for each tag.
:param datasets: The known observer query points and observations.
:return: The batch of points to query.
"""
if models.keys() != {OBJECTIVE}:
raise ValueError(
f"dict of models must contain the single key {OBJECTIVE}, got keys {models.keys()}"
)
if datasets is None or datasets.keys() != {OBJECTIVE}:
raise ValueError(
f"""datasets must be provided and contain the single key {OBJECTIVE}"""
)
if self._acquisition_function is None:
self._acquisition_function = self._builder.prepare_acquisition_function(
models, datasets=datasets
)
else:
self._acquisition_function = self._builder.update_acquisition_function(
self._acquisition_function,
models,
datasets=datasets,
)
# Find non-dominated points
nd_points, nd_mean_std = self._find_non_dominated_points(models[OBJECTIVE], search_space)
# Filter out points below a threshold probability of improvement
filtered_points, filtered_mean_std = self._filter_points(nd_points, nd_mean_std)
# Set up a Pareto set of the filtered points
pareto_set = Pareto(filtered_mean_std, already_non_dominated=True)
# Sample points from set using qHSRI
_, batch_ids = pareto_set.sample_diverse_subset(
self._num_query_points, allow_repeats=self._noisy_observations
)
batch = filtered_points[batch_ids]
return batch
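# A numeric sketch (not part of the API) of the probability-of-improvement filter used in
# _filter_points above, with made-up probabilities and the default threshold of 0.1.
def _example_probability_filter() -> None:
    probs_of_improvement = np.array([[0.05], [0.4], [0.9], [0.2]])
    above_threshold = probs_of_improvement > 0.1
    kept_rows = above_threshold.squeeze()
    assert kept_rows.tolist() == [False, True, True, True]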
class _MeanStdTradeoff(PymooProblem): # type: ignore[misc]
"""Inner class that formulates the mean/std optimisation problem as a
pymoo problem"""
def __init__(self, probabilistic_model: ProbabilisticModel, search_space: SearchSpaceType):
"""
:param probabilistic_model: The probabilistic model to find optimal mean/stds from
:param search_space: The search space for the optimisation
"""
# If we have a stack of models we have mean and std for each
if isinstance(probabilistic_model, ModelStack):
n_obj = 2 * len(probabilistic_model._models)
else:
n_obj = 2
super().__init__(
n_var=int(search_space.dimension),
n_obj=n_obj,
n_constr=0,
xl=np.array(search_space.lower),
xu=np.array(search_space.upper),
)
self.probabilistic_model = probabilistic_model
def _evaluate(
self, x: TensorType, out: dict[str, TensorType], *args: Any, **kwargs: Any
) -> None:
mean, var = self.probabilistic_model.predict(x)
        # Flip the sign on the std so that minimising this objective maximises the std
std = -1 * np.sqrt(np.array(var))
out["F"] = np.concatenate([np.array(mean), std], axis=1)
| 64,805 | 41.6917 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/utils.py | # Copyright 2022 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Tuple, Union
import tensorflow as tf
from ..data import Dataset
from ..space import SearchSpaceType
from ..types import TensorType
from .interface import AcquisitionFunction
from .optimizer import AcquisitionOptimizer
def split_acquisition_function(
fn: AcquisitionFunction,
split_size: int,
) -> AcquisitionFunction:
"""
A wrapper around an :const:`AcquisitionFunction` to split its input into batches.
Splits `x` into batches along the first dimension, calls `fn` on each batch, and then stitches
the results back together, so that it looks like `fn` was called with all of `x` in one batch.
:param fn: Acquisition function to split.
:param split_size: Call fn with tensors of at most this size.
:return: Split acquisition function.
"""
if split_size <= 0:
raise ValueError(f"split_size must be positive, got {split_size}")
@functools.wraps(fn)
def wrapper(x: TensorType) -> TensorType:
x = tf.convert_to_tensor(x)
# this currently assumes leading dimension of x is the split dimension.
length = x.shape[0]
if length == 0:
return fn(x)
elements_per_block = tf.size(x) / length
blocks_per_batch = tf.cast(tf.math.ceil(split_size / elements_per_block), tf.int32)
num_batches = tf.cast(tf.math.ceil(length / blocks_per_batch) - 1, tf.int32)
batch_sizes = tf.concat(
[
tf.ones(num_batches, tf.int32) * blocks_per_batch,
[length - num_batches * blocks_per_batch],
],
axis=0,
)
if batch_sizes.shape[0] <= 1:
return fn(x)
batch_inputs = tf.split(x, batch_sizes)
batch_outputs = []
for batch_input in batch_inputs:
output = fn(batch_input)
batch_outputs.append(output)
return tf.concat(batch_outputs, axis=0)
return wrapper
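# A minimal, illustrative check (not part of the API): splitting a toy acquisition function
# into small batches produces the same values as evaluating it in one go.
def _example_split_acquisition_function() -> None:
    def toy_acquisition(x: TensorType) -> TensorType:
        return tf.reduce_sum(x**2, axis=[-2, -1])[:, None]  # [N, B, D] -> [N, 1]

    split_fn = split_acquisition_function(toy_acquisition, split_size=3)
    x = tf.random.uniform([10, 1, 2])
    tf.debugging.assert_near(toy_acquisition(x), split_fn(x))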
def split_acquisition_function_calls(
optimizer: AcquisitionOptimizer[SearchSpaceType],
split_size: int,
) -> AcquisitionOptimizer[SearchSpaceType]:
"""
    A wrapper around our :const:`AcquisitionOptimizer`s. It wraps an
    :const:`AcquisitionOptimizer` so that evaluations of the acquisition functions
    are split into batches along the first dimension and then stitched back together.
This can be useful to reduce memory usage when evaluating functions over large spaces.
:param optimizer: An optimizer that returns batches of points with shape [V, ...].
:param split_size: The desired maximum number of points in acquisition function evaluations.
:return: An :const:`AcquisitionOptimizer` that still returns points with the shape [V, ...]
but evaluates at most split_size points at a time.
"""
if split_size <= 0:
raise ValueError(f"split_size must be positive, got {split_size}")
def split_optimizer(
search_space: SearchSpaceType,
f: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
af, n = f if isinstance(f, tuple) else (f, 1)
taf = split_acquisition_function(af, split_size)
return optimizer(search_space, (taf, n) if isinstance(f, tuple) else taf)
return split_optimizer
def select_nth_output(x: TensorType, output_dim: int = 0) -> TensorType:
"""
A utility function for trajectory sampler-related acquisition functions which selects the `n`th
output as the trajectory to be used, with `n` specified by ``output_dim``. Defaults to the first
output.
:param x: Input with shape [..., B, L], where L is the number of outputs of the model.
:param output_dim: Dimension of the output to be selected. Defaults to the first output.
:return: TensorType with shape [..., B], where the output_dim dimension has been selected to
reduce the input.
"""
return x[..., output_dim]
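# A minimal usage sketch (not part of the API): selecting the first of L=2 model outputs from
# a [..., B, L] trajectory tensor reduces it to shape [..., B].
def _example_select_nth_output() -> None:
    trajectories = tf.random.uniform([5, 3, 2])
    first_output = select_nth_output(trajectories)
    tf.debugging.assert_shapes([(first_output, [5, 3])])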
def get_local_dataset(local_space: SearchSpaceType, dataset: Dataset) -> Dataset:
"""
A utility function that takes in a dataset and returns the entries lying
within a given search space.
:param local_space: A search space.
:param dataset: A Dataset.
:return: A Dataset containing entries only in the local_space.
"""
if tf.shape(dataset.query_points)[1] != local_space.dimension:
raise ValueError("Dataset and search space must have equal dimensions")
is_in_region_mask = local_space.contains(dataset.query_points)
local_dataset = Dataset(
query_points=tf.boolean_mask(dataset.query_points, is_in_region_mask),
observations=tf.boolean_mask(dataset.observations, is_in_region_mask),
)
return local_dataset
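# A minimal usage sketch (not part of the API), assuming trieste.space.Box is available (it is
# used elsewhere in this package): only the points lying inside the box are kept.
def _example_get_local_dataset() -> None:
    from ..space import Box

    data = Dataset(
        query_points=tf.constant([[0.2], [0.8], [1.5]]),
        observations=tf.constant([[1.0], [2.0], [3.0]]),
    )
    local_data = get_local_dataset(Box([0.0], [1.0]), data)
    tf.debugging.assert_shapes([(local_data.query_points, [2, 1])])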
| 5,297 | 37.391304 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/__init__.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The acquisition process aims to find the optimal point(s) at which to next evaluate the objective
function, with the aim of minimising it. The functionality in this package implements that process.
It typically uses the current observations of the objective function, or a posterior over those
observations.
In this library, the acquisition rule is the central object of the API, while acquisition functions
are supplementary. This is because some acquisition rules, such as Thompson sampling,
do not require an acquisition function. This contrasts with other libraries which may consider
the acquisition function as the central component of this process and assume Efficient Global
Optimization (EGO) for the acquisition rule.
This package contains acquisition rules, as implementations of
:class:`~trieste.acquisition.rule.AcquisitionRule`, and acquisition functions. It also contains
:class:`AcquisitionFunctionBuilder`\ s which provide a common interface for the rules to build
acquisition functions.
Acquisition rules in this library that use acquisition functions, such as
:class:`EfficientGlobalOptimization`, *maximize* these functions. This defines the sign the
acquisition function should take. Additionally, acquisition functions and builders in this library
are designed to minimize the objective function. For example, we do not provide an implementation of
UCB.
"""
from . import optimizer, rule
from .combination import Product, Reducer, Sum
from .function import (
GIBBON,
HIPPO,
AugmentedExpectedImprovement,
BatchExpectedImprovement,
BatchMonteCarloExpectedHypervolumeImprovement,
BatchMonteCarloExpectedImprovement,
BayesianActiveLearningByDisagreement,
ExpectedConstrainedHypervolumeImprovement,
ExpectedConstrainedImprovement,
ExpectedFeasibility,
ExpectedHypervolumeImprovement,
ExpectedImprovement,
Fantasizer,
FastConstraintsFeasibility,
GreedyContinuousThompsonSampling,
IntegratedVarianceReduction,
LocalPenalization,
MakePositive,
MinValueEntropySearch,
MonteCarloAugmentedExpectedImprovement,
MonteCarloExpectedImprovement,
MultipleOptimismNegativeLowerConfidenceBound,
NegativeLowerConfidenceBound,
NegativePredictiveMean,
ParallelContinuousThompsonSampling,
PredictiveVariance,
ProbabilityOfFeasibility,
augmented_expected_improvement,
batch_ehvi,
batch_expected_improvement,
bayesian_active_learning_by_disagreement,
bichon_ranjan_criterion,
expected_hv_improvement,
expected_improvement,
fast_constraints_feasibility,
gibbon_quality_term,
gibbon_repulsion_term,
hard_local_penalizer,
integrated_variance_reduction,
lower_confidence_bound,
min_value_entropy_search,
multiple_optimism_lower_confidence_bound,
predictive_variance,
probability_below_threshold,
soft_local_penalizer,
)
from .interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
AcquisitionFunctionClass,
GreedyAcquisitionFunctionBuilder,
PenalizationFunction,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
SingleModelVectorizedAcquisitionBuilder,
UpdatablePenalizationFunction,
VectorizedAcquisitionFunctionBuilder,
)
from .rule import (
TURBO,
AcquisitionRule,
AsynchronousGreedy,
AsynchronousOptimization,
BatchHypervolumeSharpeRatioIndicator,
DiscreteThompsonSampling,
EfficientGlobalOptimization,
RandomSampling,
TrustRegion,
)
from .sampler import (
ExactThompsonSampler,
GumbelSampler,
ThompsonSampler,
ThompsonSamplerFromTrajectory,
)
| 4,241 | 35.886957 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/interface.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the interfaces relating to acquisition function --- functions that estimate
the utility of evaluating sets of candidate points.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Callable, Generic, Mapping, Optional
from ..data import Dataset
from ..models.interfaces import ProbabilisticModelType
from ..types import Tag, TensorType
AcquisitionFunction = Callable[[TensorType], TensorType]
"""
Type alias for acquisition functions.
An :const:`AcquisitionFunction` maps a set of `B` query points (each of dimension `D`) to a single
value that describes how useful it would be evaluate all these points together (to our goal of
optimizing the objective function). Thus, with leading dimensions, an :const:`AcquisitionFunction`
takes input shape `[..., B, D]` and returns shape `[..., 1]`.
Note that :const:`AcquisitionFunction`s which do not support batch optimization still expect inputs
with a batch dimension, i.e. an input of shape `[..., 1, D]`.
"""
class AcquisitionFunctionClass(ABC):
"""An :class:`AcquisitionFunctionClass` is an acquisition function represented using a class
rather than as a standalone function. Using a class to represent an acquisition function
makes it easier to update it, to avoid having to retrace the function on every call.
"""
@abstractmethod
def __call__(self, x: TensorType) -> TensorType:
"""Call acquisition function."""
class AcquisitionFunctionBuilder(Generic[ProbabilisticModelType], ABC):
"""An :class:`AcquisitionFunctionBuilder` builds and updates an acquisition function."""
@abstractmethod
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
"""
Prepare an acquisition function. We assume that this requires at least models, but
it may sometimes also need data.
:param models: The models for each tag.
:param datasets: The data from the observer (optional).
:return: An acquisition function.
"""
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
"""
Update an acquisition function. By default this generates a new acquisition function each
time. However, if the function is decorated with `@tf.function`, then you can override
this method to update its variables instead and avoid retracing the acquisition function on
every optimization loop.
:param function: The acquisition function to update.
:param models: The models for each tag.
:param datasets: The data from the observer (optional).
:return: The updated acquisition function.
"""
return self.prepare_acquisition_function(models, datasets=datasets)
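# A purely illustrative example of the :const:`AcquisitionFunction` contract produced by the
# builders above (not part of the public API): it maps query points of shape [..., B, D] to
# values of shape [..., 1] and would be *maximized* by an acquisition rule. tensorflow is
# assumed to be available, as it is throughout trieste.
def _example_negative_sum_of_squares(at: TensorType) -> TensorType:
    import tensorflow as tf

    return -tf.reduce_sum(at**2, axis=[-2, -1])[..., None]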
class SingleModelAcquisitionBuilder(Generic[ProbabilisticModelType], ABC):
"""
Convenience acquisition function builder for an acquisition function (or component of a
composite acquisition function) that requires only one model, dataset pair.
"""
def using(self, tag: Tag) -> AcquisitionFunctionBuilder[ProbabilisticModelType]:
"""
:param tag: The tag for the model, dataset pair to use to build this acquisition function.
:return: An acquisition function builder that selects the model and dataset specified by
``tag``, as defined in :meth:`prepare_acquisition_function`.
"""
class _Anon(AcquisitionFunctionBuilder[ProbabilisticModelType]):
def __init__(
self, single_builder: SingleModelAcquisitionBuilder[ProbabilisticModelType]
):
self.single_builder = single_builder
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return self.single_builder.prepare_acquisition_function(
models[tag], dataset=None if datasets is None else datasets[tag]
)
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return self.single_builder.update_acquisition_function(
function, models[tag], dataset=None if datasets is None else datasets[tag]
)
def __repr__(self) -> str:
return f"{self.single_builder!r} using tag {tag!r}"
return _Anon(self)
@abstractmethod
def prepare_acquisition_function(
self,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data to use to build the acquisition function (optional).
:return: An acquisition function.
"""
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer (optional).
:return: The updated acquisition function.
"""
return self.prepare_acquisition_function(model, dataset=dataset)
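# A hedged, illustrative subclass (not part of the public API): it builds a negative
# predictive-mean acquisition from a single model, assuming a query batch size of B=1 and
# that the model's ``predict`` accepts inputs of shape [N, D].
class _ExampleNegativePredictiveMean(SingleModelAcquisitionBuilder[ProbabilisticModelType]):
    def prepare_acquisition_function(
        self,
        model: ProbabilisticModelType,
        dataset: Optional[Dataset] = None,
    ) -> AcquisitionFunction:
        import tensorflow as tf

        def acquisition(at: TensorType) -> TensorType:
            mean, _ = model.predict(tf.squeeze(at, -2))  # [..., 1, D] -> [..., D]
            return -mean

        return acquisition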
class GreedyAcquisitionFunctionBuilder(Generic[ProbabilisticModelType], ABC):
"""
A :class:`GreedyAcquisitionFunctionBuilder` builds an acquisition function
suitable for greedily building batches for batch Bayesian
Optimization. A :class:`GreedyAcquisitionFunctionBuilder` differs
from an :class:`AcquisitionFunctionBuilder` by requiring that a set
of pending points is passed to the builder. Note that this acquisition function
is typically called `B` times each Bayesian optimization step, when building batches
of size `B`.
"""
@abstractmethod
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
Generate a new acquisition function. The first time this is called, ``pending_points``
will be `None`. Subsequent calls will be via ``update_acquisition_function`` below,
unless that has been overridden.
:param models: The models over each tag.
:param datasets: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:return: An acquisition function.
"""
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
Update an acquisition function. By default this generates a new acquisition function each
        time. However, if the function is decorated with `@tf.function`, then you can override
this method to update its variables instead and avoid retracing the acquisition function on
every optimization loop.
:param function: The acquisition function to update.
:param models: The models over each tag.
:param datasets: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of points
for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
return self.prepare_acquisition_function(
models, datasets=datasets, pending_points=pending_points
)
class SingleModelGreedyAcquisitionBuilder(Generic[ProbabilisticModelType], ABC):
"""
Convenience acquisition function builder for a greedy acquisition function (or component of a
composite greedy acquisition function) that requires only one model, dataset pair.
"""
def using(self, tag: Tag) -> GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]:
"""
:param tag: The tag for the model, dataset pair to use to build this acquisition function.
:return: An acquisition function builder that selects the model and dataset specified by
``tag``, as defined in :meth:`prepare_acquisition_function`.
"""
class _Anon(GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]):
def __init__(
self, single_builder: SingleModelGreedyAcquisitionBuilder[ProbabilisticModelType]
):
self.single_builder = single_builder
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
return self.single_builder.prepare_acquisition_function(
models[tag],
dataset=None if datasets is None else datasets[tag],
pending_points=pending_points,
)
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
return self.single_builder.update_acquisition_function(
function,
models[tag],
dataset=None if datasets is None else datasets[tag],
pending_points=pending_points,
new_optimization_step=new_optimization_step,
)
def __repr__(self) -> str:
return f"{self.single_builder!r} using tag {tag!r}"
return _Anon(self)
@abstractmethod
def prepare_acquisition_function(
self,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:return: An acquisition function.
"""
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of points
for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
return self.prepare_acquisition_function(
model,
dataset=dataset,
pending_points=pending_points,
)
class VectorizedAcquisitionFunctionBuilder(AcquisitionFunctionBuilder[ProbabilisticModelType]):
"""
    A :class:`VectorizedAcquisitionFunctionBuilder` builds and updates a vectorized
    acquisition function. These differ from normal acquisition functions only by their output
    shape: rather than returning a single value, they return one value per potential query point.
    Thus, with leading dimensions, they take input shape `[..., B, D]` and return shape `[..., B]`.
"""
class SingleModelVectorizedAcquisitionBuilder(
SingleModelAcquisitionBuilder[ProbabilisticModelType]
):
"""
Convenience acquisition function builder for vectorized acquisition functions (or component
of a composite vectorized acquisition function) that requires only one model, dataset pair.
"""
def using(self, tag: Tag) -> AcquisitionFunctionBuilder[ProbabilisticModelType]:
"""
:param tag: The tag for the model, dataset pair to use to build this acquisition function.
:return: An acquisition function builder that selects the model and dataset specified by
``tag``, as defined in :meth:`prepare_acquisition_function`.
"""
class _Anon(VectorizedAcquisitionFunctionBuilder[ProbabilisticModelType]):
def __init__(
self,
single_builder: SingleModelVectorizedAcquisitionBuilder[ProbabilisticModelType],
):
self.single_builder = single_builder
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return self.single_builder.prepare_acquisition_function(
models[tag], dataset=None if datasets is None else datasets[tag]
)
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return self.single_builder.update_acquisition_function(
function, models[tag], dataset=None if datasets is None else datasets[tag]
)
def __repr__(self) -> str:
return f"{self.single_builder!r} using tag {tag!r}"
return _Anon(self)
PenalizationFunction = Callable[[TensorType], TensorType]
"""
A :const:`PenalizationFunction` maps a query point (of dimension `D`) to a single
value that describes how heavily it should be penalized (a positive quantity).
As penalization is applied multiplicatively to acquisition functions, small
penalization outputs correspond to a stronger penalization effect. Thus, with
leading dimensions, a :const:`PenalizationFunction` takes input
shape `[..., 1, D]` and returns shape `[..., 1]`.
"""
class UpdatablePenalizationFunction(ABC):
"""An :class:`UpdatablePenalizationFunction` builds and updates a penalization function.
Defining a penalization function that can be updated avoids having to retrace on every call."""
@abstractmethod
def __call__(self, x: TensorType) -> TensorType:
"""Call penalization function.."""
@abstractmethod
def update(
self,
pending_points: TensorType,
lipschitz_constant: TensorType,
eta: TensorType,
) -> None:
"""Update penalization function."""
| 16,951 | 41.916456 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/combination.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import abstractmethod
from collections.abc import Mapping, Sequence
from typing import Callable, Optional
import tensorflow as tf
from ..data import Dataset
from ..models import ProbabilisticModelType
from ..types import Tag, TensorType
from .interface import AcquisitionFunction, AcquisitionFunctionBuilder
class Reducer(AcquisitionFunctionBuilder[ProbabilisticModelType]):
r"""
A :class:`Reducer` builds an :const:`~trieste.acquisition.AcquisitionFunction` whose output is
calculated from the outputs of a number of other
:const:`~trieste.acquisition.AcquisitionFunction`\ s. How these outputs are composed is defined
by the method :meth:`_reduce`.
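    For example, given two acquisition function builders ``ei`` and ``pof`` (placeholders here
    for any :class:`AcquisitionFunctionBuilder` instances), a sketch of a multiplicative
    combination is:
    .. code-block:: python
        combined = Product(ei, pof)
        acq = combined.prepare_acquisition_function(models, datasets=datasets)
        values = acq(query_points)   # elementwise product of the two constituent functions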
"""
def __init__(self, *builders: AcquisitionFunctionBuilder[ProbabilisticModelType]):
r"""
:param \*builders: Acquisition function builders. At least one must be provided.
:raise `~tf.errors.InvalidArgumentError`: If no builders are specified.
"""
tf.debugging.assert_positive(
len(builders), "At least one acquisition builder expected, got none."
)
self._acquisitions = builders
def __repr__(self) -> str:
""""""
return "{}({})".format(self.__class__.__name__, ", ".join(map(repr, self._acquisitions)))
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
r"""
Return an acquisition function. This acquisition function is defined by first building
acquisition functions from each of the
:class:`~trieste.acquisition.AcquisitionFunctionBuilder`\ s specified at
:meth:`__init__`, then reducing, with :meth:`_reduce`, the output of each of those
acquisition functions.
:param datasets: The data from the observer.
:param models: The models over each dataset in ``datasets``.
:return: The reduced acquisition function.
"""
self.functions = tuple(
acq.prepare_acquisition_function(models, datasets=datasets) for acq in self.acquisitions
)
def evaluate_acquisition_function_fn(at: TensorType) -> TensorType:
return self._reduce_acquisition_functions(at, self.functions)
return evaluate_acquisition_function_fn
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
        :param models: The models over each dataset in ``datasets``.
        :param datasets: The data from the observer (optional).
"""
self.functions = tuple(
acq.update_acquisition_function(function, models, datasets=datasets)
for function, acq in zip(self.functions, self.acquisitions)
)
def evaluate_acquisition_function_fn(at: TensorType) -> TensorType:
return self._reduce_acquisition_functions(at, self.functions)
return evaluate_acquisition_function_fn
@property
def acquisitions(self) -> Sequence[AcquisitionFunctionBuilder[ProbabilisticModelType]]:
"""The acquisition function builders specified at class initialisation."""
return self._acquisitions
def _reduce_acquisition_functions(
self, at: TensorType, acquisition_functions: Sequence[AcquisitionFunction]
) -> TensorType:
return self._reduce([fn(at) for fn in acquisition_functions])
@abstractmethod
def _reduce(self, inputs: Sequence[TensorType]) -> TensorType:
"""
:param inputs: The output of each constituent acquisition function.
:return: The output of the reduced acquisition function.
"""
raise NotImplementedError()
class Sum(Reducer[ProbabilisticModelType]):
"""
:class:`Reducer` whose resulting acquisition function returns the element-wise sum of the
outputs of constituent acquisition functions.
"""
def _reduce(self, inputs: Sequence[TensorType]) -> TensorType:
"""
:param inputs: The outputs of each acquisition function.
:return: The element-wise sum of the ``inputs``.
"""
return tf.add_n(inputs)
class Product(Reducer[ProbabilisticModelType]):
"""
:class:`Reducer` whose resulting acquisition function returns the element-wise product of the
outputs of constituent acquisition functions.
"""
def _reduce(self, inputs: Sequence[TensorType]) -> TensorType:
"""
:param inputs: The outputs of each acquisition function.
:return: The element-wise product of the ``inputs``.
"""
return tf.reduce_prod(inputs, axis=0)
class Map(Reducer[ProbabilisticModelType]):
"""
:class:`Reducer` that accepts just one acquisition function builder and applies a
given function to its output. For example ``Map(lambda x: -x, builder)`` would generate
an acquisition function that returns the negative of the output of ``builder``.
"""
def __init__(
self,
map_fn: Callable[[TensorType], TensorType],
builder: AcquisitionFunctionBuilder[ProbabilisticModelType],
):
"""
:param map_fn: Function to apply.
:param builder: Acquisition function builder.
"""
super().__init__(builder)
self._map_fn = map_fn
def _reduce(self, inputs: Sequence[TensorType]) -> TensorType:
"""
:param inputs: The outputs of the acquisition function.
:return: The result of applying the map function to ``inputs``.
"""
tf.debugging.assert_equal(len(inputs), 1)
return self._map_fn(inputs[0])
| 6,417 | 36.752941 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/optimizer.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
This module contains functionality for optimizing
:data:`~trieste.acquisition.AcquisitionFunction`\ s over :class:`~trieste.space.SearchSpace`\ s.
"""
from __future__ import annotations
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
import greenlet as gr
import numpy as np
import scipy.optimize as spo
import tensorflow as tf
import tensorflow_probability as tfp
from .. import logging
from ..space import (
Box,
Constraint,
DiscreteSearchSpace,
SearchSpace,
SearchSpaceType,
TaggedProductSearchSpace,
)
from ..types import TensorType
from .interface import AcquisitionFunction
NUM_SAMPLES_MIN: int = 5000
"""
The default minimum number of initial samples for :func:`generate_continuous_optimizer` and
:func:`generate_random_search_optimizer` functions, used for determining the number of initial
samples in the multi-start acquisition function optimization.
"""
NUM_SAMPLES_DIM: int = 1000
"""
The default minimum number of initial samples per dimension of the search space for
:func:`generate_continuous_optimizer` function in :func:`automatic_optimizer_selector`, used for
determining the number of initial samples in the multi-start acquisition function optimization.
"""
NUM_RUNS_DIM: int = 10
"""
The default minimum number of optimization runs per dimension of the search space for
:func:`generate_continuous_optimizer` function in :func:`automatic_optimizer_selector`, used for
determining the number of acquisition function optimizations to be performed in parallel.
"""
class FailedOptimizationError(Exception):
"""Raised when an acquisition optimizer fails to optimize"""
AcquisitionOptimizer = Callable[
[SearchSpaceType, Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]]], TensorType
]
"""
Type alias for a function that returns the single point that maximizes an acquisition function over
a search space or the V points that maximize a vectorized acquisition function (as represented by an
acquisition-int tuple).
If this function receives a search space with points of shape [D] and an acquisition function
with input shape [..., 1, D] and output shape [..., 1], the :const:`AcquisitionOptimizer` return shape
should be [1, D].
If instead it receives a search space and a tuple containing the acquisition function and its
vectorization V then the :const:`AcquisitionOptimizer` return shape should be [V, D].
"""
def automatic_optimizer_selector(
space: SearchSpace, target_func: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]]
) -> TensorType:
"""
    A wrapper around our :const:`AcquisitionOptimizer`s. This function selects and runs
    an :const:`AcquisitionOptimizer` appropriate for the
    problem's :class:`~trieste.space.SearchSpace`.
:param space: The space of points over which to search, for points with shape [D].
:param target_func: The function to maximise, with input shape [..., 1, D] and output shape
[..., 1].
:return: The batch of points in ``space`` that maximises ``target_func``, with shape [1, D].
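    A minimal usage sketch (the quadratic acquisition below is an arbitrary example):
    .. code-block:: python
        import tensorflow as tf
        from trieste.space import Box
        space = Box([0.0, 0.0], [1.0, 1.0])
        def acq(x):                                          # [..., 1, D] -> [..., 1]
            return -tf.reduce_sum((x - 0.4) ** 2, axis=-1)
        point = automatic_optimizer_selector(space, acq)     # shape [1, 2]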
"""
if isinstance(space, DiscreteSearchSpace):
return optimize_discrete(space, target_func)
elif isinstance(space, (Box, TaggedProductSearchSpace)):
num_samples = tf.maximum(NUM_SAMPLES_MIN, NUM_SAMPLES_DIM * tf.shape(space.lower)[-1])
num_runs = NUM_RUNS_DIM * tf.shape(space.lower)[-1]
return generate_continuous_optimizer(
num_initial_samples=num_samples,
num_optimization_runs=num_runs,
)(space, target_func)
else:
raise NotImplementedError(
f""" No optimizer currently supports acquisition function
maximisation over search spaces of type {space}.
Try specifying the optimize_random optimizer"""
)
def _get_max_discrete_points(
points: TensorType, target_func: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]]
) -> TensorType:
# check if we need a vectorized optimizer
if isinstance(target_func, tuple):
target_func, V = target_func
else:
V = 1
    if V <= 0:
        raise ValueError(f"vectorization must be positive, got {V}")
tiled_points = tf.tile(points, [1, V, 1])
target_func_values = target_func(tiled_points)
tf.debugging.assert_shapes(
[(target_func_values, ("_", V))],
message=(
f"""
The result of function target_func has shape
{tf.shape(target_func_values)}, however, expected a trailing
dimension of size {V}.
"""
),
)
best_indices = tf.math.argmax(target_func_values, axis=0) # [V]
return tf.gather(tf.transpose(tiled_points, [1, 0, 2]), best_indices, batch_dims=1) # [V, D]
def optimize_discrete(
space: DiscreteSearchSpace,
target_func: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
"""
An :const:`AcquisitionOptimizer` for :class:'DiscreteSearchSpace' spaces.
    When this function receives an acquisition-integer tuple as its `target_func`, it evaluates
all the points in the search space for each of the individual V functions making
up `target_func`.
:param space: The space of points over which to search, for points with shape [D].
:param target_func: The function to maximise, with input shape [..., V, D] and output shape
[..., V].
    :return: The V points in ``space`` that maximise ``target_func``, with shape [V, D].
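    A usage sketch (the candidate grid and the acquisition below are arbitrary examples):
    .. code-block:: python
        import tensorflow as tf
        from trieste.space import DiscreteSearchSpace
        space = DiscreteSearchSpace(tf.constant([[0.0], [0.5], [1.0]], dtype=tf.float64))
        acq = lambda x: -tf.reduce_sum((x - 0.6) ** 2, axis=-1)   # [..., 1, D] -> [..., 1]
        best = optimize_discrete(space, acq)                      # shape [1, 1], here [[0.5]]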
"""
points = space.points[:, None, :]
return _get_max_discrete_points(points, target_func)
def generate_continuous_optimizer(
num_initial_samples: int = NUM_SAMPLES_MIN,
num_optimization_runs: int = 10,
num_recovery_runs: int = 10,
optimizer_args: Optional[dict[str, Any]] = None,
) -> AcquisitionOptimizer[Box | TaggedProductSearchSpace]:
"""
Generate a gradient-based optimizer for :class:'Box' and :class:'TaggedProductSearchSpace'
spaces and batches of size 1. In the case of a :class:'TaggedProductSearchSpace', We perform
gradient-based optimization across all :class:'Box' subspaces, starting from the best location
found across a sample of `num_initial_samples` random points.
We advise the user to either use the default `NUM_SAMPLES_MIN` for `num_initial_samples`, or
`NUM_SAMPLES_DIM` times the dimensionality of the search space, whichever is greater.
Similarly, for `num_optimization_runs`, we recommend using `NUM_RUNS_DIM` times the
dimensionality of the search space.
This optimizer uses Scipy's L-BFGS-B optimizer. We run `num_optimization_runs` separate
optimizations in parallel, each starting from one of the best `num_optimization_runs` initial
query points.
If all `num_optimization_runs` optimizations fail to converge then we run
    `num_recovery_runs` additional runs starting from random locations (also run in parallel).
:param num_initial_samples: The size of the random sample used to find the starting point(s) of
the optimization.
:param num_optimization_runs: The number of separate optimizations to run.
:param num_recovery_runs: The maximum number of recovery optimization runs in case of failure.
:param optimizer_args: The keyword arguments to pass to the Scipy L-BFGS-B optimizer.
Check `minimize` method of :class:`~scipy.optimize` for details of which arguments
can be passed. Note that method, jac and bounds cannot/should not be changed.
:return: The acquisition optimizer.
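    A usage sketch (the sample sizes and the acquisition below are arbitrary examples):
    .. code-block:: python
        import tensorflow as tf
        from trieste.space import Box
        optimizer = generate_continuous_optimizer(num_initial_samples=1000, num_optimization_runs=5)
        space = Box([0.0], [1.0])
        acq = lambda x: tf.sin(3.0 * x)[..., 0]    # [..., 1, 1] -> [..., 1]
        best = optimizer(space, acq)               # shape [1, 1], close to pi / 6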
"""
if num_initial_samples <= 0:
raise ValueError(f"num_initial_samples must be positive, got {num_initial_samples}")
    if num_optimization_runs <= 0:
        raise ValueError(f"num_optimization_runs must be positive, got {num_optimization_runs}")
if num_initial_samples < num_optimization_runs:
raise ValueError(
f"""
num_initial_samples {num_initial_samples} must be at
least num_optimization_runs {num_optimization_runs}
"""
)
    if num_recovery_runs < 0:
        raise ValueError(f"num_recovery_runs must be zero or greater, got {num_recovery_runs}")
def optimize_continuous(
space: Box | TaggedProductSearchSpace,
target_func: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
"""
A gradient-based :const:`AcquisitionOptimizer` for :class:'Box'
        and :class:'TaggedProductSearchSpace' spaces.
For :class:'TaggedProductSearchSpace' we only apply gradient updates to
        its :class:'Box' subspaces.
        When this function receives an acquisition-integer tuple as its `target_func`, it
        optimizes each of the individual V functions making up `target_func`, i.e.
        evaluating `num_initial_samples` samples, running `num_optimization_runs` runs, and
        (if necessary) running `num_recovery_runs` recovery runs for each of the individual
V functions.
:param space: The space over which to search.
:param target_func: The function to maximise, with input shape [..., V, D] and output shape
[..., V].
        :return: The V points in ``space`` that maximise ``target_func``, with shape [V, D].
"""
if isinstance(target_func, tuple): # check if we need a vectorized optimizer
target_func, V = target_func
else:
V = 1
        if V <= 0:
            raise ValueError(f"vectorization must be positive, got {V}")
candidates = space.sample(num_initial_samples)[:, None, :] # [num_initial_samples, 1, D]
tiled_candidates = tf.tile(candidates, [1, V, 1]) # [num_initial_samples, V, D]
target_func_values = target_func(tiled_candidates) # [num_samples, V]
tf.debugging.assert_shapes(
[(target_func_values, ("_", V))],
message=(
f"""
The result of function target_func has shape
{tf.shape(target_func_values)}, however, expected a trailing
dimension of size {V}.
"""
),
)
_, top_k_indices = tf.math.top_k(
tf.transpose(target_func_values), k=num_optimization_runs
) # [1, num_optimization_runs] or [V, num_optimization_runs]
tiled_candidates = tf.transpose(tiled_candidates, [1, 0, 2]) # [V, num_initial_samples, D]
top_k_points = tf.gather(
tiled_candidates, top_k_indices, batch_dims=1
) # [V, num_optimization_runs, D]
initial_points = tf.transpose(top_k_points, [1, 0, 2]) # [num_optimization_runs,V,D]
(
successes,
fun_values,
chosen_x,
nfev,
) = _perform_parallel_continuous_optimization( # [num_optimization_runs, V]
target_func,
space,
initial_points,
optimizer_args or {},
)
successful_optimization = tf.reduce_all(
tf.reduce_any(successes, axis=0)
) # Check that at least one optimization was successful for each function
total_nfev = tf.reduce_max(nfev) # acquisition function is evaluated in parallel
recovery_run = False
if (
num_recovery_runs and not successful_optimization
): # if all optimizations failed for a function then try again from random starts
random_points = space.sample(num_recovery_runs)[:, None, :] # [num_recovery_runs, 1, D]
tiled_random_points = tf.tile(random_points, [1, V, 1]) # [num_recovery_runs, V, D]
(
recovery_successes,
recovery_fun_values,
recovery_chosen_x,
recovery_nfev,
) = _perform_parallel_continuous_optimization(
target_func, space, tiled_random_points, optimizer_args or {}
)
successes = tf.concat(
[successes, recovery_successes], axis=0
) # [num_optimization_runs + num_recovery_runs, V]
fun_values = tf.concat(
[fun_values, recovery_fun_values], axis=0
) # [num_optimization_runs + num_recovery_runs, V]
chosen_x = tf.concat(
[chosen_x, recovery_chosen_x], axis=0
) # [num_optimization_runs + num_recovery_runs, V, D]
successful_optimization = tf.reduce_all(
tf.reduce_any(successes, axis=0)
) # Check that at least one optimization was successful for each function
total_nfev += tf.reduce_max(recovery_nfev)
recovery_run = True
if not successful_optimization: # return error if still failed
raise FailedOptimizationError(
f"""
Acquisition function optimization failed,
even after {num_recovery_runs + num_optimization_runs} restarts.
"""
)
summary_writer = logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(step=logging.get_step_number()):
logging.scalar("spo_af_evaluations", total_nfev)
if recovery_run:
logging.text(
"spo_recovery_run",
f"Acquisition function optimization failed after {num_optimization_runs} "
f"optimization runs, requiring recovery runs",
)
_target_func: AcquisitionFunction = target_func # make mypy happy
def improvements() -> tf.Tensor:
best_initial_values = tf.math.reduce_max(_target_func(initial_points), axis=0)
best_values = tf.math.reduce_max(fun_values, axis=0)
improve = best_values - tf.cast(best_initial_values, best_values.dtype)
return improve[0] if V == 1 else improve
if V == 1:
logging.scalar("spo_improvement_on_initial_samples", improvements)
else:
logging.histogram("spo_improvements_on_initial_samples", improvements)
best_run_ids = tf.math.argmax(fun_values, axis=0) # [V]
chosen_points = tf.gather(
tf.transpose(chosen_x, [1, 0, 2]), best_run_ids, batch_dims=1
) # [V, D]
return chosen_points
return optimize_continuous
def _perform_parallel_continuous_optimization(
target_func: AcquisitionFunction,
space: SearchSpace,
starting_points: TensorType,
optimizer_args: dict[str, Any],
) -> Tuple[TensorType, TensorType, TensorType, TensorType]:
"""
A function to perform parallel optimization of our acquisition functions
using Scipy. We perform L-BFGS-B starting from each of the locations contained
in `starting_points`, i.e. the number of individual optimization runs is
given by the leading dimension of `starting_points`.
To provide a parallel implementation of Scipy's L-BFGS-B that can leverage
batch calculations with TensorFlow, this function uses the Greenlet package
to run each individual optimization on micro-threads.
L-BFGS-B updates for each individual optimization are performed by
independent greenlets working with Numpy arrays, however, the evaluation
of our acquisition function (and its gradients) is calculated in parallel
(for each optimization step) using Tensorflow.
For :class:'TaggedProductSearchSpace' we only apply gradient updates to
its :class:'Box' subspaces, fixing the discrete elements to the best values
found across the initial random search. To fix these discrete elements, we
optimize over a continuous :class:'Box' relaxation of the discrete subspaces
which has equal upper and lower bounds, i.e. we specify an equality constraint
for this dimension in the scipy optimizer.
    This function also supports the maximization of vectorized target functions (with
vectorization V).
:param target_func: The function(s) to maximise, with input shape [..., V, D] and
output shape [..., V].
:param space: The original search space.
:param starting_points: The points at which to begin our optimizations of shape
[num_optimization_runs, V, D]. The leading dimension of
`starting_points` controls the number of individual optimization runs
for each of the V target functions.
:param optimizer_args: Keyword arguments to pass to the Scipy optimizer.
:return: A tuple containing the failure statuses, maximum values, maximisers and
number of evaluations for each of our optimizations.
"""
tf_dtype = starting_points.dtype # type for communication with Trieste
num_optimization_runs_per_function = tf.shape(starting_points)[0].numpy()
V = tf.shape(starting_points)[-2].numpy() # vectorized batch size
D = tf.shape(starting_points)[-1].numpy() # search space dimension
num_optimization_runs = num_optimization_runs_per_function * V
vectorized_starting_points = tf.reshape(
starting_points, [-1, D]
) # [num_optimization_runs*V, D]
def _objective_value(vectorized_x: TensorType) -> TensorType: # [N, D] -> [N, 1]
vectorized_x = vectorized_x[:, None, :] # [N, 1, D]
x = tf.reshape(vectorized_x, [-1, V, D]) # [N/V, V, D]
evals = -target_func(x) # [N/V, V]
vectorized_evals = tf.reshape(evals, [-1, 1]) # [N, 1]
return vectorized_evals
def _objective_value_and_gradient(x: TensorType) -> Tuple[TensorType, TensorType]:
return tfp.math.value_and_gradient(_objective_value, x) # [len(x), 1], [len(x), D]
if isinstance(
space, TaggedProductSearchSpace
): # build continuous relaxation of discrete subspaces
bounds = [
get_bounds_of_box_relaxation_around_point(space, vectorized_starting_points[i : i + 1])
for i in tf.range(num_optimization_runs)
]
else:
bounds = [spo.Bounds(space.lower, space.upper)] * num_optimization_runs
# Initialize the numpy arrays to be passed to the greenlets
np_batch_x = np.zeros((num_optimization_runs, tf.shape(starting_points)[-1]), dtype=np.float64)
np_batch_y = np.zeros((num_optimization_runs,), dtype=np.float64)
np_batch_dy_dx = np.zeros(
(num_optimization_runs, tf.shape(starting_points)[-1]), dtype=np.float64
)
# Set up child greenlets
child_greenlets = [ScipyOptimizerGreenlet() for _ in range(num_optimization_runs)]
vectorized_child_results: List[Union[spo.OptimizeResult, "np.ndarray[Any, Any]"]] = [
gr.switch(
vectorized_starting_points[i].numpy(), bounds[i], space.constraints, optimizer_args
)
for i, gr in enumerate(child_greenlets)
]
while True:
all_done = True
for i, result in enumerate(vectorized_child_results): # Process results from children.
if isinstance(result, spo.OptimizeResult):
continue # children return a `spo.OptimizeResult` if they are finished
all_done = False
assert isinstance(result, np.ndarray) # or an `np.ndarray` with the query `x` otherwise
np_batch_x[i, :] = result
if all_done:
break
# Batch evaluate query `x`s from all children.
batch_x = tf.constant(np_batch_x, dtype=tf_dtype) # [num_optimization_runs, d]
batch_y, batch_dy_dx = _objective_value_and_gradient(batch_x)
np_batch_y = batch_y.numpy().astype("float64")
np_batch_dy_dx = batch_dy_dx.numpy().astype("float64")
for i, greenlet in enumerate(child_greenlets): # Feed `y` and `dy_dx` back to children.
if greenlet.dead: # Allow for crashed greenlets
continue
vectorized_child_results[i] = greenlet.switch(np_batch_y[i], np_batch_dy_dx[i, :])
final_vectorized_child_results: List[spo.OptimizeResult] = vectorized_child_results
vectorized_successes = tf.constant(
[result.success for result in final_vectorized_child_results]
) # [num_optimization_runs]
vectorized_fun_values = tf.constant(
[-result.fun for result in final_vectorized_child_results], dtype=tf_dtype
) # [num_optimization_runs]
vectorized_chosen_x = tf.constant(
[result.x for result in final_vectorized_child_results], dtype=tf_dtype
) # [num_optimization_runs, D]
vectorized_nfev = tf.constant(
[result.nfev for result in final_vectorized_child_results], dtype=tf_dtype
)
# Ensure chosen points satisfy any constraints in the search-space.
if space.has_constraints:
is_feasible = space.is_feasible(vectorized_chosen_x)
vectorized_successes = tf.logical_and(vectorized_successes, is_feasible)
successes = tf.reshape(vectorized_successes, [-1, V]) # [num_optimization_runs, V]
fun_values = tf.reshape(vectorized_fun_values, [-1, V]) # [num_optimization_runs, V]
chosen_x = tf.reshape(vectorized_chosen_x, [-1, V, D]) # [num_optimization_runs, V, D]
nfev = tf.reshape(vectorized_nfev, [-1, V]) # [num_optimization_runs, V]
return (successes, fun_values, chosen_x, nfev)
class ScipyOptimizerGreenlet(gr.greenlet): # type: ignore[misc]
"""
Worker greenlet that runs a single Scipy L-BFGS-B (by default). Each greenlet performs all the
optimizer update steps required for an individual optimization. However, the evaluation
of our acquisition function (and its gradients) is delegated back to the main Tensorflow
process (the parent greenlet) where evaluations can be made efficiently in parallel.
"""
def run(
self,
start: "np.ndarray[Any, Any]",
bounds: spo.Bounds,
constraints: Sequence[Constraint],
optimizer_args: Optional[dict[str, Any]] = None,
) -> spo.OptimizeResult:
cache_x = start + 1 # Any value different from `start`.
cache_y: Optional["np.ndarray[Any, Any]"] = None
cache_dy_dx: Optional["np.ndarray[Any, Any]"] = None
def value_and_gradient(
x: "np.ndarray[Any, Any]",
) -> Tuple["np.ndarray[Any, Any]", "np.ndarray[Any, Any]"]:
# Collect function evaluations from parent greenlet
nonlocal cache_x
nonlocal cache_y
nonlocal cache_dy_dx
if not (cache_x == x).all():
cache_x[:] = x # Copy the value of `x`. DO NOT copy the reference.
# Send `x` to parent greenlet, which will evaluate all `x`s in a batch.
cache_y, cache_dy_dx = self.parent.switch(cache_x)
return cast("np.ndarray[Any, Any]", cache_y), cast("np.ndarray[Any, Any]", cache_dy_dx)
method = "trust-constr" if len(constraints) else "l-bfgs-b"
optimizer_args = dict(
dict(method=method, constraints=constraints), **(optimizer_args or {})
)
return spo.minimize(
lambda x: value_and_gradient(x)[0],
start,
jac=lambda x: value_and_gradient(x)[1],
bounds=bounds,
**optimizer_args,
)
def get_bounds_of_box_relaxation_around_point(
space: TaggedProductSearchSpace, current_point: TensorType
) -> spo.Bounds:
"""
A function to return the bounds of a continuous relaxation of
a :class:'TaggedProductSearchSpace' space, i.e. replacing discrete
spaces with continuous spaces. In particular, all :class:'DiscreteSearchSpace'
subspaces are replaced with a new :class:'DiscreteSearchSpace' fixed at their
respective component of the specified 'current_point'. Note that
all :class:'Box' subspaces remain the same.
:param space: The original search space.
:param current_point: The point at which to make the continuous relaxation.
:return: Bounds for the Scipy optimizer.
"""
tf.debugging.Assert(isinstance(space, TaggedProductSearchSpace), [tf.constant([])])
space_with_fixed_discrete = space
for tag in space.subspace_tags:
if isinstance(
space.get_subspace(tag), DiscreteSearchSpace
): # convert discrete subspaces to box spaces.
subspace_value = space.get_subspace_component(tag, current_point)
space_with_fixed_discrete = space_with_fixed_discrete.fix_subspace(tag, subspace_value)
return spo.Bounds(space_with_fixed_discrete.lower, space_with_fixed_discrete.upper)
def batchify_joint(
batch_size_one_optimizer: AcquisitionOptimizer[SearchSpaceType],
batch_size: int,
) -> AcquisitionOptimizer[SearchSpaceType]:
"""
A wrapper around our :const:`AcquisitionOptimizer`s. This class wraps a
:const:`AcquisitionOptimizer` to allow it to jointly optimize the batch elements considered
by a batch acquisition function.
:param batch_size_one_optimizer: An optimizer that returns only batch size one, i.e. produces a
single point with shape [1, D].
:param batch_size: The number of points in the batch.
:return: An :const:`AcquisitionOptimizer` that will provide a batch of points with shape [B, D].
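    A usage sketch (the batch size and the joint batch acquisition below are arbitrary examples;
    the wrapped acquisition takes input of shape [..., B, D] and returns shape [..., 1]):
    .. code-block:: python
        import tensorflow as tf
        from trieste.space import Box
        joint_optimizer = batchify_joint(generate_continuous_optimizer(), batch_size=3)
        space = Box([0.0, 0.0], [1.0, 1.0])
        def batch_acq(x):                                                  # [..., B, D] -> [..., 1]
            return -tf.reduce_sum((x - 0.5) ** 2, axis=[-2, -1])[..., None]
        batch = joint_optimizer(space, batch_acq)                          # shape [3, 2]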
"""
if batch_size <= 0:
raise ValueError(f"batch_size must be positive, got {batch_size}")
def optimizer(
search_space: SearchSpaceType,
f: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
expanded_search_space = search_space**batch_size # points have shape [B * D]
if isinstance(f, tuple):
raise ValueError(
"batchify_joint cannot be applied to a vectorized acquisition function"
)
af: AcquisitionFunction = f # type checking can get confused by closure of f
def target_func_with_vectorized_inputs(
x: TensorType,
) -> TensorType: # [..., 1, B * D] -> [..., 1]
return af(tf.reshape(x, x.shape[:-2].as_list() + [batch_size, -1]))
vectorized_points = batch_size_one_optimizer( # [1, B * D]
expanded_search_space, target_func_with_vectorized_inputs
)
return tf.reshape(vectorized_points, [batch_size, -1]) # [B, D]
return optimizer
def batchify_vectorize(
batch_size_one_optimizer: AcquisitionOptimizer[SearchSpaceType],
batch_size: int,
) -> AcquisitionOptimizer[SearchSpaceType]:
"""
    A wrapper around our :const:`AcquisitionOptimizer`s. This function wraps a
:const:`AcquisitionOptimizer` to allow it to optimize batch acquisition functions.
Unlike :func:`batchify_joint`, :func:`batchify_vectorize` is suitable
    for an :class:`AcquisitionFunction` whose individual batch elements can be
optimized independently (i.e. they can be vectorized).
:param batch_size_one_optimizer: An optimizer that returns only batch size one, i.e. produces a
single point with shape [1, D].
:param batch_size: The number of points in the batch.
:return: An :const:`AcquisitionOptimizer` that will provide a batch of points with shape [V, D].
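    A usage sketch (the batch size and the vectorized acquisition below are arbitrary examples;
    each of the V batch elements is scored, and optimized, independently):
    .. code-block:: python
        import tensorflow as tf
        from trieste.space import Box
        vec_optimizer = batchify_vectorize(generate_continuous_optimizer(), batch_size=4)
        space = Box([0.0], [1.0])
        vec_acq = lambda x: -tf.reduce_sum((x - 0.3) ** 2, axis=-1)   # [..., V, D] -> [..., V]
        points = vec_optimizer(space, vec_acq)                        # shape [4, 1]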
"""
if batch_size <= 0:
raise ValueError(f"batch_size must be positive, got {batch_size}")
def optimizer(
search_space: SearchSpaceType,
f: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
if isinstance(f, tuple):
raise ValueError(
"batchify_vectorize cannot be applied to an already vectorized acquisition function"
)
return batch_size_one_optimizer(search_space, (f, batch_size))
return optimizer
def generate_random_search_optimizer(
num_samples: int = NUM_SAMPLES_MIN,
) -> AcquisitionOptimizer[SearchSpace]:
"""
Generate an acquisition optimizer that samples `num_samples` random points across the space.
The default is to sample at `NUM_SAMPLES_MIN` locations.
We advise the user to either use the default `NUM_SAMPLES_MIN` for `num_samples`, or
`NUM_SAMPLES_DIM` times the dimensionality of the search space, whichever is smaller.
:param num_samples: The number of random points to sample.
:return: The acquisition optimizer.
"""
if num_samples <= 0:
raise ValueError(f"num_samples must be positive, got {num_samples}")
def optimize_random(
space: SearchSpace,
target_func: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
"""
A random search :const:`AcquisitionOptimizer` defined for
any :class:'SearchSpace' with a :meth:`sample`. If we have a :class:'DiscreteSearchSpace'
with fewer than `num_samples` points, then we query all the points in the space.
        When this function receives an acquisition-integer tuple as its `target_func`, it
        optimizes each of the individual V functions making up `target_func`, i.e.
        evaluating `num_samples` samples for each of the individual V functions.
:param space: The space over which to search.
:param target_func: The function to maximise, with input shape [..., V, D] and output shape
[..., V].
        :return: The V points in ``space`` that maximise ``target_func``, with shape [V, D].
"""
points = space.sample(num_samples)[:, None, :]
return _get_max_discrete_points(points, target_func)
return optimize_random
| 30,061 | 42.254676 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/greedy_batch.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains local penalization-based acquisition function builders.
"""
from __future__ import annotations
from typing import Callable, Dict, Mapping, Optional, Union, cast
import gpflow
import tensorflow as tf
import tensorflow_probability as tfp
from typing_extensions import Protocol, runtime_checkable
from ...data import Dataset
from ...models import FastUpdateModel, ModelStack, ProbabilisticModel
from ...models.interfaces import (
PredictJointModelStack,
SupportsGetKernel,
SupportsGetObservationNoise,
SupportsPredictJoint,
)
from ...observer import OBJECTIVE
from ...space import SearchSpace
from ...types import Tag, TensorType
from ..interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
GreedyAcquisitionFunctionBuilder,
PenalizationFunction,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
UpdatablePenalizationFunction,
)
from .entropy import MinValueEntropySearch
from .function import ExpectedImprovement, MakePositive, expected_improvement
class LocalPenalization(SingleModelGreedyAcquisitionBuilder[ProbabilisticModel]):
r"""
Builder of the acquisition function maker for greedily collecting batches by local
penalization. The resulting :const:`AcquisitionFunctionMaker` takes in a set of pending
points and returns a base acquisition function penalized around those points.
An estimate of the objective function's Lipschitz constant is used to control the size
of penalization.
Local penalization allows us to perform batch Bayesian optimization with a standard (non-batch)
acquisition function. All that we require is that the acquisition function takes strictly
    positive values. By iteratively building a batch of points through sequentially maximizing
this acquisition function but down-weighted around locations close to the already
chosen (pending) points, local penalization provides diverse batches of candidate points.
Local penalization is applied to the acquisition function multiplicatively. However, to
improve numerical stability, we perform additive penalization in a log space.
The Lipschitz constant and additional penalization parameters are estimated once
when first preparing the acquisition function with no pending points. These estimates
are reused for all subsequent function calls.
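    A usage sketch (``search_space``, ``model`` and ``data`` are placeholders for a
    :class:`~trieste.space.SearchSpace`, a trained probabilistic model and its dataset):
    .. code-block:: python
        builder = LocalPenalization(search_space, num_samples=1000)
        acq = builder.prepare_acquisition_function(model, dataset=data)   # base function, no penalty
        pending = search_space.sample(1)                                  # stand-in for a chosen point
        acq = builder.update_acquisition_function(
            acq, model, dataset=data, pending_points=pending, new_optimization_step=False
        )                                                                 # penalized around ``pending``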
"""
def __init__(
self,
search_space: SearchSpace,
num_samples: int = 500,
penalizer: Optional[
Callable[
[ProbabilisticModel, TensorType, TensorType, TensorType],
Union[PenalizationFunction, UpdatablePenalizationFunction],
]
] = None,
base_acquisition_function_builder: ExpectedImprovement
| MinValueEntropySearch[ProbabilisticModel]
| MakePositive[ProbabilisticModel]
| None = None,
):
"""
:param search_space: The global search space over which the optimisation is defined.
:param num_samples: Size of the random sample over which the Lipschitz constant
is estimated. We recommend scaling this with search space dimension.
:param penalizer: The chosen penalization method (defaults to soft penalization). This
should be a function that accepts a model, pending points, lipschitz constant and eta
and returns a PenalizationFunction.
:param base_acquisition_function_builder: Base acquisition function to be
penalized (defaults to expected improvement). Local penalization only supports
strictly positive acquisition functions.
:raise tf.errors.InvalidArgumentError: If ``num_samples`` is not positive.
"""
tf.debugging.assert_positive(num_samples)
self._search_space = search_space
self._num_samples = num_samples
self._lipschitz_penalizer = soft_local_penalizer if penalizer is None else penalizer
if base_acquisition_function_builder is None:
self._base_builder: SingleModelAcquisitionBuilder[
ProbabilisticModel
] = ExpectedImprovement()
else:
self._base_builder = base_acquisition_function_builder
self._lipschitz_constant = None
self._eta = None
self._base_acquisition_function: Optional[AcquisitionFunction] = None
self._penalization: Optional[PenalizationFunction | UpdatablePenalizationFunction] = None
self._penalized_acquisition: Optional[AcquisitionFunction] = None
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:param pending_points: The points we penalize with respect to.
:return: The (log) expected improvement penalized with respect to the pending points.
:raise tf.errors.InvalidArgumentError: If the ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
acq = self._update_base_acquisition_function(dataset, model)
if pending_points is not None and len(pending_points) != 0:
acq = self._update_penalization(acq, dataset, model, pending_points)
return acq
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of points
for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(self._base_acquisition_function is not None, [tf.constant([])])
if new_optimization_step:
self._update_base_acquisition_function(dataset, model)
if pending_points is None or len(pending_points) == 0:
# no penalization required if no pending_points
return cast(AcquisitionFunction, self._base_acquisition_function)
return self._update_penalization(function, dataset, model, pending_points)
def _update_penalization(
self,
function: Optional[AcquisitionFunction],
dataset: Dataset,
model: ProbabilisticModel,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
tf.debugging.assert_rank(pending_points, 2)
if self._penalized_acquisition is not None and isinstance(
self._penalization, UpdatablePenalizationFunction
):
# if possible, just update the penalization function variables
self._penalization.update(pending_points, self._lipschitz_constant, self._eta)
return self._penalized_acquisition
else:
# otherwise construct a new penalized acquisition function
self._penalization = self._lipschitz_penalizer(
model, pending_points, self._lipschitz_constant, self._eta
)
self._penalized_acquisition = PenalizedAcquisition(
cast(PenalizedAcquisition, self._base_acquisition_function), self._penalization
)
return self._penalized_acquisition
@tf.function(experimental_relax_shapes=True)
def _get_lipschitz_estimate(
self, model: ProbabilisticModel, sampled_points: TensorType
) -> tuple[TensorType, TensorType]:
with tf.GradientTape() as g:
g.watch(sampled_points)
mean, _ = model.predict(sampled_points)
grads = g.gradient(mean, sampled_points)
grads_norm = tf.norm(grads, axis=1)
max_grads_norm = tf.reduce_max(grads_norm)
eta = tf.reduce_min(mean, axis=0)
return max_grads_norm, eta
def _update_base_acquisition_function(
self, dataset: Dataset, model: ProbabilisticModel
) -> AcquisitionFunction:
samples = self._search_space.sample(num_samples=self._num_samples)
samples = tf.concat([dataset.query_points, samples], 0)
lipschitz_constant, eta = self._get_lipschitz_estimate(model, samples)
if lipschitz_constant < 1e-5: # threshold to improve numerical stability for 'flat' models
lipschitz_constant = 10
self._lipschitz_constant = lipschitz_constant
self._eta = eta
if self._base_acquisition_function is not None:
self._base_acquisition_function = self._base_builder.update_acquisition_function(
self._base_acquisition_function,
model,
dataset=dataset,
)
elif isinstance(self._base_builder, ExpectedImprovement): # reuse eta estimate
self._base_acquisition_function = cast(
AcquisitionFunction, expected_improvement(model, self._eta)
)
else:
self._base_acquisition_function = self._base_builder.prepare_acquisition_function(
model,
dataset=dataset,
)
return self._base_acquisition_function
class PenalizedAcquisition:
"""Class representing a penalized acquisition function."""
# (note that this needs to be defined as a top level class make it pickleable)
def __init__(
self, base_acquisition_function: AcquisitionFunction, penalization: PenalizationFunction
):
"""
:param base_acquisition_function: Base (unpenalized) acquisition function.
:param penalization: Penalization function.
"""
self._base_acquisition_function = base_acquisition_function
self._penalization = penalization
@tf.function
def __call__(self, x: TensorType) -> TensorType:
log_acq = tf.math.log(self._base_acquisition_function(x)) + tf.math.log(
self._penalization(x)
)
return tf.math.exp(log_acq)
class local_penalizer(UpdatablePenalizationFunction):
def __init__(
self,
model: ProbabilisticModel,
pending_points: TensorType,
lipschitz_constant: TensorType,
eta: TensorType,
):
"""Initialize the local penalizer.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:param lipschitz_constant: The estimated Lipschitz constant of the objective function.
:param eta: The estimated global minima.
:return: The local penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one."""
self._model = model
mean_pending, variance_pending = model.predict(pending_points)
self._pending_points = tf.Variable(pending_points, shape=[None, *pending_points.shape[1:]])
self._radius = tf.Variable(
tf.transpose((mean_pending - eta) / lipschitz_constant),
shape=[1, None],
)
self._scale = tf.Variable(
tf.transpose(tf.sqrt(variance_pending) / lipschitz_constant),
shape=[1, None],
)
def update(
self,
pending_points: TensorType,
lipschitz_constant: TensorType,
eta: TensorType,
) -> None:
"""Update the local penalizer with new variable values."""
mean_pending, variance_pending = self._model.predict(pending_points)
self._pending_points.assign(pending_points)
self._radius.assign(tf.transpose((mean_pending - eta) / lipschitz_constant))
self._scale.assign(tf.transpose(tf.sqrt(variance_pending) / lipschitz_constant))
class soft_local_penalizer(local_penalizer):
r"""
Return the soft local penalization function used for single-objective greedy batch Bayesian
optimization in :cite:`Gonzalez:2016`.
Soft penalization returns the probability that a candidate point does not belong
in the exclusion zones of the pending points. For model posterior mean :math:`\mu`, model
posterior variance :math:`\sigma^2`, current "best" function value :math:`\eta`, and an
    estimated Lipschitz constant :math:`L`, the penalization from a pending point
:math:`x'` on a candidate point :math:`x` is given by
.. math:: \phi(x, x') = \frac{1}{2}\textrm{erfc}(-z)
where :math:`z = \frac{1}{\sqrt{2\sigma^2(x')}}(L||x'-x|| + \eta - \mu(x'))`.
    The penalization from a set of pending points is just the product of the individual
penalizations. See :cite:`Gonzalez:2016` for a full derivation.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:param lipschitz_constant: The estimated Lipschitz constant of the objective function.
:param eta: The estimated global minima.
:return: The local penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
pairwise_distances = tf.norm(
tf.expand_dims(x, 1) - tf.expand_dims(self._pending_points, 0), axis=-1
)
standardised_distances = (pairwise_distances - self._radius) / self._scale
normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
penalization = normal.cdf(standardised_distances)
return tf.reduce_prod(penalization, axis=-1)
class hard_local_penalizer(local_penalizer):
r"""
Return the hard local penalization function used for single-objective greedy batch Bayesian
optimization in :cite:`Alvi:2019`.
    Hard penalization is a stronger penalizer than soft penalization and is sometimes more effective.
See :cite:`Alvi:2019` for details. Our implementation follows theirs, with the penalization from
a set of pending points being the product of the individual penalizations.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:param lipschitz_constant: The estimated Lipschitz constant of the objective function.
:param eta: The estimated global minima.
:return: The local penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
pairwise_distances = tf.norm(
tf.expand_dims(x, 1) - tf.expand_dims(self._pending_points, 0), axis=-1
)
p = -5 # following experiments of :cite:`Alvi:2019`.
penalization = ((pairwise_distances / (self._radius + self._scale)) ** p + 1) ** (1 / p)
return tf.reduce_prod(penalization, axis=-1)
@runtime_checkable
class FantasizerModelType(
FastUpdateModel, SupportsPredictJoint, SupportsGetKernel, SupportsGetObservationNoise, Protocol
):
"""The model requirements for the Fantasizer acquisition function."""
pass
class FantasizerModelStack(PredictJointModelStack, ModelStack[FantasizerModelType]):
"""
    A stack of :class:`FantasizerModelType` models. Note that this delegates predict_joint
but none of the other methods.
"""
pass
FantasizerModelOrStack = Union[FantasizerModelType, FantasizerModelStack]
class Fantasizer(GreedyAcquisitionFunctionBuilder[FantasizerModelOrStack]):
r"""
Builder of the acquisition function maker for greedily collecting batches.
Fantasizer allows us to perform batch Bayesian optimization with any
standard (non-batch) acquisition function.
Here, every time a query point is chosen by maximising an acquisition function,
its corresponding observation is "fantasized", and the models are conditioned further
on this new artificial data.
This implies that the models need to predict what their updated predictions would be given
new data, see :class:`~FastUpdateModel`. These equations are for instance in closed form
for the GPR model, see :cite:`chevalier2014corrected` (eqs. 8-10) for details.
There are several ways to "fantasize" data: the "kriging believer" heuristic (KB, see
:cite:`ginsbourger2010kriging`) uses the mean of the model as observations.
"sample" uses samples from the model.
"""
def __init__(
self,
base_acquisition_function_builder: Optional[
AcquisitionFunctionBuilder[SupportsPredictJoint]
| SingleModelAcquisitionBuilder[SupportsPredictJoint]
] = None,
fantasize_method: str = "KB",
):
"""
:param base_acquisition_function_builder: The acquisition function builder to use.
Defaults to :class:`~trieste.acquisition.ExpectedImprovement`.
:param fantasize_method: The following options are available: "KB" and "sample".
See class docs for more details.
:raise tf.errors.InvalidArgumentError: If ``fantasize_method`` is not "KB" or "sample".
"""
tf.debugging.Assert(fantasize_method in ["KB", "sample"], [tf.constant([])])
if base_acquisition_function_builder is None:
base_acquisition_function_builder = ExpectedImprovement()
if isinstance(base_acquisition_function_builder, SingleModelAcquisitionBuilder):
base_acquisition_function_builder = base_acquisition_function_builder.using(OBJECTIVE)
self._builder = base_acquisition_function_builder
self._fantasize_method = fantasize_method
self._base_acquisition_function: Optional[AcquisitionFunction] = None
self._fantasized_acquisition: Optional[AcquisitionFunction] = None
self._fantasized_models: Mapping[
Tag, _fantasized_model | ModelStack[SupportsPredictJoint]
] = {}
def _update_base_acquisition_function(
self,
models: Mapping[Tag, FantasizerModelOrStack],
datasets: Optional[Mapping[Tag, Dataset]],
) -> AcquisitionFunction:
if self._base_acquisition_function is not None:
self._base_acquisition_function = self._builder.update_acquisition_function(
self._base_acquisition_function, models, datasets
)
else:
self._base_acquisition_function = self._builder.prepare_acquisition_function(
models, datasets
)
return self._base_acquisition_function
def _update_fantasized_acquisition_function(
self,
models: Mapping[Tag, FantasizerModelOrStack],
datasets: Optional[Mapping[Tag, Dataset]],
pending_points: TensorType,
) -> AcquisitionFunction:
tf.debugging.assert_rank(pending_points, 2)
fantasized_data = {
tag: _generate_fantasized_data(
fantasize_method=self._fantasize_method,
model=model,
pending_points=pending_points,
)
for tag, model in models.items()
}
if datasets is None:
datasets = fantasized_data
else:
datasets = {tag: data + fantasized_data[tag] for tag, data in datasets.items()}
if self._fantasized_acquisition is None:
self._fantasized_models = {
tag: _generate_fantasized_model(model, fantasized_data[tag])
for tag, model in models.items()
}
self._fantasized_acquisition = self._builder.prepare_acquisition_function(
cast(Dict[Tag, SupportsPredictJoint], self._fantasized_models), datasets
)
else:
for tag, model in self._fantasized_models.items():
if isinstance(model, ModelStack):
observations = tf.split(
fantasized_data[tag].observations, model._event_sizes, axis=-1
)
for submodel, obs in zip(model._models, observations):
submodel.update_fantasized_data(
Dataset(fantasized_data[tag].query_points, obs)
)
else:
model.update_fantasized_data(fantasized_data[tag])
self._builder.update_acquisition_function(
self._fantasized_acquisition,
cast(Dict[Tag, SupportsPredictJoint], self._fantasized_models),
datasets,
)
return self._fantasized_acquisition
def prepare_acquisition_function(
self,
models: Mapping[Tag, FantasizerModelOrStack],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param models: The models over each tag.
:param datasets: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:return: An acquisition function.
"""
for model in models.values():
if not (
isinstance(model, FantasizerModelType)
or isinstance(model, ModelStack)
and all(isinstance(m, FantasizerModelType) for m in model._models)
):
raise NotImplementedError(
f"Fantasizer only works with FastUpdateModel models that also support "
f"predict_joint, get_kernel and get_observation_noise, or with "
f"ModelStack stacks of such models; received {model.__repr__()}"
)
if pending_points is None:
return self._update_base_acquisition_function(models, datasets)
else:
return self._update_fantasized_acquisition_function(models, datasets, pending_points)
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, FantasizerModelOrStack],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param models: The models over each tag.
:param datasets: The data from the observer (optional).
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of
            points for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
if pending_points is None:
return self._update_base_acquisition_function(models, datasets)
else:
return self._update_fantasized_acquisition_function(models, datasets, pending_points)
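# --- Illustrative sketch, not part of the library: a minimal greedy batch loop built
# around ``Fantasizer``. The random search over a fixed ``candidates`` grid and the
# ``models``/``datasets``/``candidates`` arguments are assumptions made purely for
# illustration; in practice trieste's rules and optimizers drive this loop.
def _fantasizer_batch_sketch(
    models: Mapping[Tag, FantasizerModelOrStack],
    datasets: Mapping[Tag, Dataset],
    candidates: TensorType,  # [N, D] candidate query points to search over
    batch_size: int = 3,
) -> TensorType:
    builder = Fantasizer()  # defaults to ExpectedImprovement fantasized with "KB"
    pending: Optional[TensorType] = None
    for _ in range(batch_size):
        acq = builder.prepare_acquisition_function(models, datasets, pending)
        values = acq(candidates[:, None, :])  # acquisition functions expect [..., 1, D]
        best = tf.gather(candidates, tf.argmax(tf.squeeze(values, -1)))[None, :]
        pending = best if pending is None else tf.concat([pending, best], axis=0)
    return pending  # [batch_size, D] greedily fantasized batch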
def _generate_fantasized_data(
fantasize_method: str, model: FantasizerModelOrStack, pending_points: TensorType
) -> Dataset:
"""
Generates "fantasized" data at pending_points depending on the chosen heuristic:
- KB (kriging believer) uses the mean prediction of the models
- sample uses samples from the GP posterior.
:param fantasize_method: the following options are available: "KB" and "sample".
    :param model: a model with a predict method (and a sample method for "sample")
    :param pending_points: points at which to fantasize data
:return: a fantasized dataset
"""
if fantasize_method == "KB":
fantasized_obs, _ = model.predict(pending_points)
elif fantasize_method == "sample":
fantasized_obs = model.sample(pending_points, num_samples=1)[0]
else:
raise NotImplementedError(
f"fantasize_method must be KB or sample, " f"received {model.__repr__()}"
)
return Dataset(pending_points, fantasized_obs)
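# --- Illustrative sketch, not part of the library: a duck-typed toy model (an
# assumption made purely for illustration) contrasting the two fantasizing
# heuristics above -- "KB" records the posterior mean, "sample" records a draw.
class _ToyFantasizeModelSketch:
    def predict(self, x: TensorType) -> tuple[TensorType, TensorType]:
        mean = tf.reduce_sum(x, axis=-1, keepdims=True)  # [N, 1] pseudo-objective
        return mean, tf.ones_like(mean)

    def sample(self, x: TensorType, num_samples: int) -> TensorType:
        mean, var = self.predict(x)
        shape = tf.concat([[num_samples], tf.shape(mean)], axis=0)
        noise = tf.random.normal(shape, dtype=mean.dtype)
        return mean[None, ...] + tf.sqrt(var)[None, ...] * noise


def _fantasize_method_sketch() -> None:
    pending = tf.constant([[0.1, 0.2], [0.3, 0.4]], dtype=tf.float64)
    kb = _generate_fantasized_data("KB", _ToyFantasizeModelSketch(), pending)
    drawn = _generate_fantasized_data("sample", _ToyFantasizeModelSketch(), pending)
    tf.debugging.assert_shapes([(kb.observations, [2, 1]), (drawn.observations, [2, 1])])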
def _generate_fantasized_model(
model: FantasizerModelOrStack, fantasized_data: Dataset
) -> _fantasized_model | PredictJointModelStack:
if isinstance(model, ModelStack):
observations = tf.split(fantasized_data.observations, model._event_sizes, axis=-1)
fmods = []
for mod, obs, event_size in zip(model._models, observations, model._event_sizes):
fmods.append(
(
_fantasized_model(mod, Dataset(fantasized_data.query_points, obs)),
event_size,
)
)
return PredictJointModelStack(*fmods)
else:
return _fantasized_model(model, fantasized_data)
class _fantasized_model(SupportsPredictJoint, SupportsGetKernel, SupportsGetObservationNoise):
"""
Creates a new model from an existing one and additional data.
This new model posterior is conditioned on both current model data and the additional one.
"""
def __init__(self, model: FantasizerModelType, fantasized_data: Dataset):
"""
:param model: a model, must be of class `FastUpdateModel`
:param fantasized_data: additional dataset to condition on
:raise NotImplementedError: If model is not of class `FastUpdateModel`.
"""
self._model = model
self._fantasized_query_points = tf.Variable(
fantasized_data.query_points,
trainable=False,
shape=[None, *fantasized_data.query_points.shape[1:]],
)
self._fantasized_observations = tf.Variable(
fantasized_data.observations,
trainable=False,
shape=[None, *fantasized_data.observations.shape[1:]],
)
def update_fantasized_data(self, fantasized_data: Dataset) -> None:
"""
:param fantasized_data: new additional dataset to condition on
"""
self._fantasized_query_points.assign(fantasized_data.query_points)
self._fantasized_observations.assign(fantasized_data.observations)
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
This function wraps conditional_predict_f. It cannot directly call
conditional_predict_f, since it does not accept query_points with rank > 2.
We use map_fn to allow leading dimensions for query_points.
:param query_points: shape [...*, N, d]
:return: mean, shape [...*, ..., N, L] and cov, shape [...*, ..., L, N],
where ... are the leading dimensions of fantasized_data
"""
def fun(qp: TensorType) -> tuple[TensorType, TensorType]: # pragma: no cover (tf.map_fn)
fantasized_data = Dataset(
self._fantasized_query_points.value(), self._fantasized_observations.value()
)
return self._model.conditional_predict_f(qp, fantasized_data)
return _broadcast_predict(query_points, fun)
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
This function wraps conditional_predict_joint. It cannot directly call
conditional_predict_joint, since it does not accept query_points with rank > 2.
We use map_fn to allow leading dimensions for query_points.
:param query_points: shape [...*, N, D]
:return: mean, shape [...*, ..., N, L] and cov, shape [...*, ..., L, N, N],
where ... are the leading dimensions of fantasized_data
"""
def fun(qp: TensorType) -> tuple[TensorType, TensorType]: # pragma: no cover (tf.map_fn)
fantasized_data = Dataset(
self._fantasized_query_points.value(), self._fantasized_observations.value()
)
return self._model.conditional_predict_joint(qp, fantasized_data)
return _broadcast_predict(query_points, fun)
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
"""
This function wraps conditional_predict_f_sample. It cannot directly call
conditional_predict_joint, since it does not accept query_points with rank > 2.
We use map_fn to allow leading dimensions for query_points.
:param query_points: shape [...*, N, D]
:param num_samples: number of samples.
:return: samples of shape [...*, ..., S, N, L], where ... are the leading
dimensions of fantasized_data
"""
leading_dim, query_points_flatten = _get_leading_dim_and_flatten(query_points)
# query_points_flatten: [B, n, d], leading_dim =...*, product = B
samples = tf.map_fn(
fn=lambda qp: self._model.conditional_predict_f_sample(
qp,
Dataset(
self._fantasized_query_points.value(), self._fantasized_observations.value()
),
num_samples,
),
elems=query_points_flatten,
) # [B, ..., S, L]
return _restore_leading_dim(samples, leading_dim)
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
This function wraps conditional_predict_y. It cannot directly call
conditional_predict_joint, since it does not accept query_points with rank > 2.
We use tf.map_fn to allow leading dimensions for query_points.
:param query_points: shape [...*, N, D]
:return: mean, shape [...*, ..., N, L] and var, shape [...*, ..., L, N],
where ... are the leading dimensions of fantasized_data
"""
def fun(qp: TensorType) -> tuple[TensorType, TensorType]: # pragma: no cover (tf.map_fn)
fantasized_data = Dataset(
self._fantasized_query_points.value(), self._fantasized_observations.value()
)
return self._model.conditional_predict_y(qp, fantasized_data)
return _broadcast_predict(query_points, fun)
def get_observation_noise(self) -> TensorType:
return self._model.get_observation_noise()
def get_kernel(self) -> gpflow.kernels.Kernel:
return self._model.get_kernel()
def log(self, dataset: Optional[Dataset] = None) -> None:
return self._model.log(dataset)
def _broadcast_predict(
query_points: TensorType, fun: Callable[[TensorType], tuple[TensorType, TensorType]]
) -> tuple[TensorType, TensorType]:
"""
Utility function that allows leading dimensions for query_points when
fun only accepts rank 2 tensors. It works by flattening query_points into
a rank 3 tensor, evaluate fun(query_points) through tf.map_fn, then
restoring the leading dimensions.
:param query_points: shape [...*, N, D]
:param fun: callable that returns two tensors (e.g. a predict function)
:return: two tensors (e.g. mean and variance) with shape [...*, ...]
"""
leading_dim, query_points_flatten = _get_leading_dim_and_flatten(query_points)
# leading_dim =...*, product = B
# query_points_flatten: [B, N, D]
mean_signature = tf.TensorSpec(None, query_points.dtype)
var_signature = tf.TensorSpec(None, query_points.dtype)
mean, var = tf.map_fn(
fn=fun,
elems=query_points_flatten,
fn_output_signature=(mean_signature, var_signature),
) # [B, ..., L, N], [B, ..., L, N] (predict_f) or [B, ..., L, N, N] (predict_joint)
return _restore_leading_dim(mean, leading_dim), _restore_leading_dim(var, leading_dim)
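# --- Illustrative sketch, not part of the library: a toy shape check of the
# flatten / tf.map_fn / restore pattern implemented above, using a dummy
# "predict" function that is an assumption made purely for illustration.
def _broadcast_predict_shape_sketch() -> None:
    query_points = tf.random.uniform([5, 4, 3, 2])  # [...*, N, D] with ...* = [5, 4]

    def dummy_predict(x: TensorType) -> tuple[TensorType, TensorType]:
        s = tf.reduce_sum(x, axis=-1, keepdims=True)  # [N, 1]
        return s, s

    mean, var = _broadcast_predict(query_points, dummy_predict)
    tf.debugging.assert_shapes([(mean, [5, 4, 3, 1]), (var, [5, 4, 3, 1])])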
def _get_leading_dim_and_flatten(query_points: TensorType) -> tuple[TensorType, TensorType]:
"""
:param query_points: shape [...*, N, D]
    :return: leading_dim = ...*, query_points_flatten, shape [B, N, D]
"""
leading_dim = tf.shape(query_points)[:-2] # =...*, product = B
nd = tf.shape(query_points)[-2:]
query_points_flatten = tf.reshape(query_points, (-1, nd[0], nd[1])) # [B, N, D]
return leading_dim, query_points_flatten
def _restore_leading_dim(x: TensorType, leading_dim: TensorType) -> TensorType:
"""
"Un-flatten" the first dimension of x to leading_dim
:param x: shape [B, ...]
:param leading_dim: [...*]
:return: shape [...*, ...]
"""
single_x_shape = tf.shape(x[0]) # = [...]
output_x_shape = tf.concat([leading_dim, single_x_shape], axis=0) # = [...*, ...]
return tf.reshape(x, output_x_shape) # [...*, ...]
| 34,562 | 42.257822 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains utility functions for acquisition functions.
"""
from typing import Callable, Tuple
import tensorflow as tf
from tensorflow_probability import distributions as tfd
from ...types import TensorType
# =============================================================================
# Multivariate Normal CDF
# =============================================================================
class MultivariateNormalCDF:
def __init__(
self,
sample_size: int,
dim: int,
dtype: tf.DType,
num_sobol_skip: int = 0,
) -> None:
"""Builds the cumulative density function of the multivariate Gaussian
using the Genz approximation detailed in :cite:`genz2016numerical`.
This is a Monte Carlo approximation which is more accurate than a naive
Monte Carlo estimate of the expected improvent. In order to use
reparametrised samples, the helper accepts a tensor of samples, and the
callable uses these fixed samples whenever it is called.
        :param sample_size: int, number of samples to use.
:param dim: int, dimension of the multivariate Gaussian.
:param dtype: tf.DType, data type to use for calculations.
:param num_sobol_skip: int, number of sobol samples to skip.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_positive(dim)
self._S = sample_size
self._Q = dim
self._dtype = dtype
self._num_sobol_skip = num_sobol_skip
def _standard_normal_cdf_and_inverse_cdf(
self,
dtype: tf.DType,
) -> Tuple[Callable[[TensorType], TensorType], Callable[[TensorType], TensorType]]:
"""Returns two callables *Phi* and *iPhi*, which compute the cumulative
density function and inverse cumulative density function of a standard
univariate Gaussian.
:param dtype: The data type to use, either tf.float32 or tf.float64.
:returns Phi, iPhi: Cumulative and inverse cumulative density functions.
"""
normal = tfd.Normal(
loc=tf.zeros(shape=(), dtype=dtype),
scale=tf.ones(shape=(), dtype=dtype),
)
Phi: Callable[[TensorType], TensorType] = lambda x: normal.cdf(x)
iPhi: Callable[[TensorType], TensorType] = lambda x: normal.quantile(x)
return Phi, iPhi
def _get_update_indices(self, B: int, S: int, Q: int, q: int) -> TensorType:
"""Returns indices for updating a tensor using tf.tensor_scatter_nd_add,
for use within the _mvn_cdf function, for computing the cumulative density
function of a multivariate Gaussian. The indices *idx* returned are such
that the following operation
idx = get_update_indices(B, S, Q, q)
tensor = tf.tensor_scatter_nd_add(tensor, idx, update)
is equivalent to the numpy operation
            tensor[:, :, q] = tensor[:, :, q] + update
where *tensor* is a tensor of shape (B, S, Q).
:param B: First dim. of tensor for which the indices are generated.
:param S: Second dim. of tensor for which the indices are generated.
:param Q: Third dim. of tensor for which the indices are generated.
        :param q: Index along the third dim. of the tensor to which the update is applied.
"""
idxB = tf.tile(tf.range(B, dtype=tf.int32)[:, None, None], (1, S, 1))
idxS = tf.tile(tf.range(S, dtype=tf.int32)[None, :, None], (B, 1, 1))
idxQ = tf.tile(tf.convert_to_tensor(q)[None, None, None], (B, S, 1))
idx = tf.concat([idxB, idxS, idxQ], axis=-1)
return idx
def __call__(
self,
x: TensorType,
mean: TensorType,
cov: TensorType,
jitter: float = 1e-6,
) -> TensorType:
"""Computes the cumulative density function of the multivariate
Gaussian using the Genz approximation.
:param x: Tensor of shape (B, Q), batch of points to evaluate CDF at.
:param mean: Tensor of shape (B, Q), batch of means.
        :param cov: Tensor of shape (B, Q, Q), batch of covariances.
:param jitter: float, jitter to use in the Cholesky factorisation.
:returns mvn_cdf: Tensor of shape (B,), CDF values.
"""
# Unpack batch size
B = x.shape[0]
tf.debugging.assert_positive(B)
# Check shapes of input tensors
tf.debugging.assert_shapes(
[
(x, (B, self._Q)),
(mean, (B, self._Q)),
(cov, (B, self._Q, self._Q)),
]
)
# Identify data type to use for all calculations
dtype = mean.dtype
# Compute Cholesky factors
jitter = jitter * tf.eye(self._Q, dtype=dtype)[None, :, :]
C = tf.linalg.cholesky(cov + jitter) # (B, Q, Q)
# Rename samples and limits for brevity
w = tf.math.sobol_sample(
dim=self._Q,
num_results=self._S,
dtype=self._dtype,
skip=self._num_sobol_skip,
) # (S, Q)
b = x - mean # (B, Q)
# Initialise transformation variables
e = tf.zeros(shape=(B, self._S, self._Q), dtype=dtype)
f = tf.zeros(shape=(B, self._S, self._Q), dtype=dtype)
y = tf.zeros(shape=(B, self._S, self._Q), dtype=dtype)
# Initialise standard normal for computing CDFs
Phi, iPhi = self._standard_normal_cdf_and_inverse_cdf(dtype=dtype)
# Get update indices for convenience later
idx = self._get_update_indices(B=B, S=self._S, Q=self._Q, q=0)
# Slice out common tensors
b0 = b[:, None, 0]
C0 = C[:, None, 0, 0] + 1e-12
# Compute transformation variables at the first step
e_update = tf.tile(Phi(b0 / C0), (1, self._S)) # (B, S)
e = tf.tensor_scatter_nd_add(e, idx, e_update)
f = tf.tensor_scatter_nd_add(f, idx, e_update)
for i in tf.range(1, self._Q):
# Update y tensor
y_update = iPhi(1e-6 + (1 - 2e-6) * w[None, :, i - 1] * e[:, :, i - 1])
y = tf.tensor_scatter_nd_add(y, idx, y_update)
# Slice out common tensors
bi = b[:, None, i]
Ci_ = C[:, None, i, :i]
Cii = C[:, None, i, i] + 1e-12
yi = y[:, :, :i]
# Compute indices to update d, e and f tensors
idx = self._get_update_indices(B=B, S=self._S, Q=self._Q, q=i)
# Update e tensor
e_update = Phi((bi - tf.reduce_sum(Ci_ * yi, axis=-1)) / Cii)
e = tf.tensor_scatter_nd_add(e, idx, e_update)
# Update f tensor
f_update = e[:, :, i] * f[:, :, i - 1]
f = tf.tensor_scatter_nd_add(f, idx, f_update)
mvn_cdf = tf.reduce_mean(f[:, :, -1], axis=-1)
return mvn_cdf
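# --- Illustrative sketch, not part of the library: for a diagonal covariance the
# multivariate CDF factorises into a product of univariate normal CDFs, which gives
# a cheap sanity check of the Genz approximation above. The tolerance is an
# assumption chosen loosely for illustration.
def _mvn_cdf_diagonal_sketch() -> None:
    cdf = MultivariateNormalCDF(sample_size=1000, dim=2, dtype=tf.float64)
    x = tf.constant([[0.3, -0.2]], dtype=tf.float64)  # (B=1, Q=2)
    mean = tf.zeros([1, 2], dtype=tf.float64)  # (B=1, Q=2)
    cov = tf.eye(2, dtype=tf.float64)[None, :, :]  # (B=1, Q=2, Q=2)
    approx = cdf(x, mean, cov)  # (B,)
    normal = tfd.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    exact = tf.reduce_prod(normal.cdf(x - mean), axis=-1)  # product of marginal CDFs
    tf.debugging.assert_near(approx, exact, atol=1e-2)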
| 7,456 | 36.285 | 87 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/multi_objective.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains multi-objective acquisition function builders.
"""
from __future__ import annotations
import math
from itertools import combinations, product
from typing import Callable, Mapping, Optional, Sequence, cast
import tensorflow as tf
import tensorflow_probability as tfp
from ...data import Dataset
from ...models import ProbabilisticModel, ReparametrizationSampler
from ...models.interfaces import HasReparamSampler
from ...observer import OBJECTIVE
from ...types import Tag, TensorType
from ...utils import DEFAULTS
from ..interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
AcquisitionFunctionClass,
GreedyAcquisitionFunctionBuilder,
PenalizationFunction,
ProbabilisticModelType,
SingleModelAcquisitionBuilder,
)
from ..multi_objective.pareto import (
Pareto,
get_reference_point,
prepare_default_non_dominated_partition_bounds,
)
from .function import ExpectedConstrainedImprovement
class ExpectedHypervolumeImprovement(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the expected hypervolume improvement acquisition function.
The implementation of the acquisition function largely
follows :cite:`yang2019efficient`
"""
def __init__(
self,
reference_point_spec: Sequence[float]
| TensorType
| Callable[..., TensorType] = get_reference_point,
):
"""
:param reference_point_spec: this method is used to determine how the reference point is
calculated. If a Callable function specified, it is expected to take existing
posterior mean-based observations (to screen out the observation noise) and return
a reference point with shape [D] (D represents number of objectives). If the Pareto
front location is known, this arg can be used to specify a fixed reference point
in each bo iteration. A dynamic reference point updating strategy is used by
default to set a reference point according to the datasets.
"""
if callable(reference_point_spec):
self._ref_point_spec: tf.Tensor | Callable[..., TensorType] = reference_point_spec
else:
self._ref_point_spec = tf.convert_to_tensor(reference_point_spec)
self._ref_point = None
def __repr__(self) -> str:
""""""
if callable(self._ref_point_spec):
return f"ExpectedHypervolumeImprovement(" f"{self._ref_point_spec.__name__})"
else:
return f"ExpectedHypervolumeImprovement({self._ref_point_spec!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:return: The expected hypervolume improvement acquisition function.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
if callable(self._ref_point_spec):
self._ref_point = tf.cast(self._ref_point_spec(mean), dtype=mean.dtype)
else:
self._ref_point = tf.cast(self._ref_point_spec, dtype=mean.dtype)
_pf = Pareto(mean)
screened_front = _pf.front[tf.reduce_all(_pf.front <= self._ref_point, -1)]
# prepare the partitioned bounds of non-dominated region for calculating of the
# hypervolume improvement in this area
_partition_bounds = prepare_default_non_dominated_partition_bounds(
self._ref_point, screened_front
)
return expected_hv_improvement(model, _partition_bounds)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, expected_hv_improvement), [tf.constant([])])
mean, _ = model.predict(dataset.query_points)
if callable(self._ref_point_spec):
self._ref_point = self._ref_point_spec(mean)
else:
assert isinstance(self._ref_point_spec, tf.Tensor) # specified a fixed ref point
self._ref_point = tf.cast(self._ref_point_spec, dtype=mean.dtype)
_pf = Pareto(mean)
screened_front = _pf.front[tf.reduce_all(_pf.front <= self._ref_point, -1)]
_partition_bounds = prepare_default_non_dominated_partition_bounds(
self._ref_point, screened_front
)
function.update(_partition_bounds) # type: ignore
return function
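# --- Illustrative sketch, not part of the library: how the builder above turns
# posterior-mean observations into the partitioned cells that
# ``expected_hv_improvement`` integrates over. The toy two-objective means are an
# assumption made purely for illustration.
def _ehvi_partition_sketch() -> None:
    mean = tf.constant([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]], dtype=tf.float64)
    ref_point = get_reference_point(mean)  # default dynamic reference point, shape [2]
    front = Pareto(mean).front  # non-dominated subset of the means
    screened_front = front[tf.reduce_all(front <= ref_point, -1)]
    lb, ub = prepare_default_non_dominated_partition_bounds(ref_point, screened_front)
    # lb/ub have shape [N_cells, 2]; the EHVI integral is summed over these cells
    tf.debugging.assert_shapes([(lb, ["N", 2]), (ub, ["N", 2])])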
class expected_hv_improvement(AcquisitionFunctionClass):
def __init__(self, model: ProbabilisticModel, partition_bounds: tuple[TensorType, TensorType]):
r"""
expected Hyper-volume (HV) calculating using Eq. 44 of :cite:`yang2019efficient` paper.
The expected hypervolume improvement calculation in the non-dominated region
can be decomposed into sub-calculations based on each partitioned cell.
For easier calculation, this sub-calculation can be reformulated as a combination
of two generalized expected improvements, corresponding to Psi (Eq. 44) and Nu (Eq. 45)
function calculations, respectively.
Note:
1. Since in Trieste we do not assume the use of a certain non-dominated region partition
algorithm, we do not assume the last dimension of the partitioned cell has only one
(lower) bound (i.e., minus infinity, which is used in the :cite:`yang2019efficient` paper).
        This is not as efficient as in the original paper, but is applicable to any
        non-dominated partition algorithm.
2. As the Psi and nu function in the original paper are defined for maximization problems,
        we invert our minimisation problem (to also be a maximisation), allowing use of the
original notation and equations.
:param model: The model of the objective function.
:param partition_bounds: with shape ([N, D], [N, D]), partitioned non-dominated hypercell
bounds for hypervolume improvement calculation
:return: The expected_hv_improvement acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
"""
self._model = model
self._lb_points = tf.Variable(
partition_bounds[0], trainable=False, shape=[None, partition_bounds[0].shape[-1]]
)
self._ub_points = tf.Variable(
partition_bounds[1], trainable=False, shape=[None, partition_bounds[1].shape[-1]]
)
self._cross_index = tf.constant(
list(product(*[[0, 1]] * self._lb_points.shape[-1]))
) # [2^d, indices_at_dim]
def update(self, partition_bounds: tuple[TensorType, TensorType]) -> None:
"""Update the acquisition function with new partition bounds."""
self._lb_points.assign(partition_bounds[0])
self._ub_points.assign(partition_bounds[1])
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
normal = tfp.distributions.Normal(
loc=tf.zeros(shape=1, dtype=x.dtype), scale=tf.ones(shape=1, dtype=x.dtype)
)
def Psi(a: TensorType, b: TensorType, mean: TensorType, std: TensorType) -> TensorType:
return std * normal.prob((b - mean) / std) + (mean - a) * (
1 - normal.cdf((b - mean) / std)
)
def nu(lb: TensorType, ub: TensorType, mean: TensorType, std: TensorType) -> TensorType:
return (ub - lb) * (1 - normal.cdf((ub - mean) / std))
def ehvi_based_on_partitioned_cell(
neg_pred_mean: TensorType, pred_std: TensorType
) -> TensorType:
r"""
Calculate the ehvi based on cell i.
"""
neg_lb_points, neg_ub_points = -self._ub_points, -self._lb_points
neg_ub_points = tf.minimum(neg_ub_points, 1e10) # clip to improve numerical stability
psi_ub = Psi(
neg_lb_points, neg_ub_points, neg_pred_mean, pred_std
) # [..., num_cells, out_dim]
psi_lb = Psi(
neg_lb_points, neg_lb_points, neg_pred_mean, pred_std
) # [..., num_cells, out_dim]
psi_lb2ub = tf.maximum(psi_lb - psi_ub, 0.0) # [..., num_cells, out_dim]
nu_contrib = nu(neg_lb_points, neg_ub_points, neg_pred_mean, pred_std)
stacked_factors = tf.concat(
[tf.expand_dims(psi_lb2ub, -2), tf.expand_dims(nu_contrib, -2)], axis=-2
) # Take the cross product of psi_diff and nu across all outcomes
# [..., num_cells, 2(operation_num, refer Eq. 45), num_obj]
factor_combinations = tf.linalg.diag_part(
tf.gather(stacked_factors, self._cross_index, axis=-2)
) # [..., num_cells, 2^d, 2(operation_num), num_obj]
return tf.reduce_sum(tf.reduce_prod(factor_combinations, axis=-1), axis=-1)
candidate_mean, candidate_var = self._model.predict(tf.squeeze(x, -2))
candidate_std = tf.sqrt(candidate_var)
neg_candidate_mean = -tf.expand_dims(candidate_mean, 1) # [..., 1, out_dim]
candidate_std = tf.expand_dims(candidate_std, 1) # [..., 1, out_dim]
ehvi_cells_based = ehvi_based_on_partitioned_cell(neg_candidate_mean, candidate_std)
return tf.reduce_sum(
ehvi_cells_based,
axis=-1,
keepdims=True,
)
class BatchMonteCarloExpectedHypervolumeImprovement(
SingleModelAcquisitionBuilder[HasReparamSampler]
):
"""
Builder for the batch expected hypervolume improvement acquisition function.
The implementation of the acquisition function largely
follows :cite:`daulton2020differentiable`
"""
def __init__(
self,
sample_size: int,
reference_point_spec: Sequence[float]
| TensorType
| Callable[..., TensorType] = get_reference_point,
*,
jitter: float = DEFAULTS.JITTER,
):
"""
:param sample_size: The number of samples from model predicted distribution for
each batch of points.
:param reference_point_spec: this method is used to determine how the reference point is
calculated. If a Callable function specified, it is expected to take existing
posterior mean-based observations (to screen out the observation noise) and return
a reference point with shape [D] (D represents number of objectives). If the Pareto
front location is known, this arg can be used to specify a fixed reference point
in each bo iteration. A dynamic reference point updating strategy is used by
default to set a reference point according to the datasets.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:raise ValueError (or InvalidArgumentError): If ``sample_size`` is not positive, or
``jitter`` is negative.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
self._sample_size = sample_size
self._jitter = jitter
if callable(reference_point_spec):
self._ref_point_spec: tf.Tensor | Callable[..., TensorType] = reference_point_spec
else:
self._ref_point_spec = tf.convert_to_tensor(reference_point_spec)
self._ref_point = None
def __repr__(self) -> str:
""""""
if callable(self._ref_point_spec):
return (
f"BatchMonteCarloExpectedHypervolumeImprovement({self._sample_size!r},"
f" {self._ref_point_spec.__name__},"
f" jitter={self._jitter!r})"
)
else:
return (
f"BatchMonteCarloExpectedHypervolumeImprovement({self._sample_size!r},"
f" {self._ref_point_spec!r}"
f" jitter={self._jitter!r})"
)
def prepare_acquisition_function(
self,
model: HasReparamSampler,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model. Must have event shape [1].
:param dataset: The data from the observer. Must be populated.
:return: The batch expected hypervolume improvement acquisition function.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
if callable(self._ref_point_spec):
self._ref_point = tf.cast(self._ref_point_spec(mean), dtype=mean.dtype)
else:
self._ref_point = tf.cast(self._ref_point_spec, dtype=mean.dtype)
_pf = Pareto(mean)
screened_front = _pf.front[tf.reduce_all(_pf.front <= self._ref_point, -1)]
# prepare the partitioned bounds of non-dominated region for calculating of the
# hypervolume improvement in this area
_partition_bounds = prepare_default_non_dominated_partition_bounds(
self._ref_point, screened_front
)
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"The batch Monte-Carlo expected hyper-volume improvement function only supports "
f"models that implement a reparam_sampler method; received {model.__repr__()}"
)
sampler = model.reparam_sampler(self._sample_size)
return batch_ehvi(sampler, self._jitter, _partition_bounds)
def batch_ehvi(
sampler: ReparametrizationSampler[HasReparamSampler],
sampler_jitter: float,
partition_bounds: tuple[TensorType, TensorType],
) -> AcquisitionFunction:
"""
:param sampler: The posterior sampler, which given query points `at`, is able to sample
the possible observations at 'at'.
:param sampler_jitter: The size of the jitter to use in sampler when stabilising the Cholesky
decomposition of the covariance matrix.
:param partition_bounds: with shape ([N, D], [N, D]), partitioned non-dominated hypercell
bounds for hypervolume improvement calculation
:return: The batch expected hypervolume improvement acquisition
function for objective minimisation.
"""
def acquisition(at: TensorType) -> TensorType:
_batch_size = at.shape[-2] # B
def gen_q_subset_indices(q: int) -> tf.RaggedTensor:
# generate all subsets of [1, ..., q] as indices
indices = list(range(q))
return tf.ragged.constant([list(combinations(indices, i)) for i in range(1, q + 1)])
samples = sampler.sample(at, jitter=sampler_jitter) # [..., S, B, num_obj]
q_subset_indices = gen_q_subset_indices(_batch_size)
hv_contrib = tf.zeros(tf.shape(samples)[:-2], dtype=samples.dtype)
lb_points, ub_points = partition_bounds
def hv_contrib_on_samples(
obj_samples: TensorType,
) -> TensorType: # calculate samples overlapped area's hvi for obj_samples
# [..., S, Cq_j, j, num_obj] -> [..., S, Cq_j, num_obj]
overlap_vertices = tf.reduce_max(obj_samples, axis=-2)
overlap_vertices = tf.maximum( # compare overlap vertices and lower bound of each cell:
tf.expand_dims(overlap_vertices, -3), # expand a cell dimension
lb_points[tf.newaxis, tf.newaxis, :, tf.newaxis, :],
) # [..., S, K, Cq_j, num_obj]
lengths_j = tf.maximum( # get hvi length per obj within each cell
(ub_points[tf.newaxis, tf.newaxis, :, tf.newaxis, :] - overlap_vertices), 0.0
) # [..., S, K, Cq_j, num_obj]
areas_j = tf.reduce_sum( # sum over all subsets Cq_j -> [..., S, K]
tf.reduce_prod(lengths_j, axis=-1), axis=-1 # calc hvi within each K
)
return tf.reduce_sum(areas_j, axis=-1) # sum over cells -> [..., S]
for j in tf.range(1, _batch_size + 1): # Inclusion-Exclusion loop
q_choose_j = tf.gather(q_subset_indices, j - 1).to_tensor()
# gather all combinations having j points from q batch points (Cq_j)
j_sub_samples = tf.gather(samples, q_choose_j, axis=-2) # [..., S, Cq_j, j, num_obj]
hv_contrib += tf.cast((-1) ** (j + 1), dtype=samples.dtype) * hv_contrib_on_samples(
j_sub_samples
)
return tf.reduce_mean(hv_contrib, axis=-1, keepdims=True) # average through MC
return acquisition
class ExpectedConstrainedHypervolumeImprovement(
ExpectedConstrainedImprovement[ProbabilisticModelType]
):
"""
Builder for the constrained expected hypervolume improvement acquisition function.
This function essentially combines ExpectedConstrainedImprovement and
ExpectedHypervolumeImprovement.
"""
def __init__(
self,
objective_tag: Tag,
constraint_builder: AcquisitionFunctionBuilder[ProbabilisticModelType],
min_feasibility_probability: float | TensorType = 0.5,
reference_point_spec: Sequence[float]
| TensorType
| Callable[..., TensorType] = get_reference_point,
):
"""
:param objective_tag: The tag for the objective data and model.
:param constraint_builder: The builder for the constraint function.
:param min_feasibility_probability: The minimum probability of feasibility for a
"best point" to be considered feasible.
:param reference_point_spec: this method is used to determine how the reference point is
calculated. If a Callable function specified, it is expected to take existing posterior
mean-based feasible observations (to screen out the observation noise) and return a
reference point with shape [D] (D represents number of objectives). If the feasible
Pareto front location is known, this arg can be used to specify a fixed reference
point in each bo iteration. A dynamic reference point updating strategy is used by
default to set a reference point according to the datasets.
"""
super().__init__(objective_tag, constraint_builder, min_feasibility_probability)
if callable(reference_point_spec):
self._ref_point_spec: tf.Tensor | Callable[..., TensorType] = reference_point_spec
else:
self._ref_point_spec = tf.convert_to_tensor(reference_point_spec)
self._ref_point = None
def __repr__(self) -> str:
""""""
if callable(self._ref_point_spec):
return (
f"ExpectedConstrainedHypervolumeImprovement({self._objective_tag!r},"
f" {self._constraint_builder!r}, {self._min_feasibility_probability!r},"
f" {self._ref_point_spec.__name__})"
)
else:
return (
f"ExpectedConstrainedHypervolumeImprovement({self._objective_tag!r}, "
f" {self._constraint_builder!r}, {self._min_feasibility_probability!r},"
f" ref_point_specification={repr(self._ref_point_spec)!r}"
)
def _update_expected_improvement_fn(
self, objective_model: ProbabilisticModelType, feasible_mean: TensorType
) -> None:
"""
Set or update the unconstrained expected improvement function.
:param objective_model: The objective model.
:param feasible_mean: The mean of the feasible query points.
"""
if callable(self._ref_point_spec):
self._ref_point = tf.cast(
self._ref_point_spec(feasible_mean),
dtype=feasible_mean.dtype,
)
else:
self._ref_point = tf.cast(self._ref_point_spec, dtype=feasible_mean.dtype)
_pf = Pareto(feasible_mean)
screened_front = _pf.front[tf.reduce_all(_pf.front <= self._ref_point, -1)]
# prepare the partitioned bounds of non-dominated region for calculating of the
# hypervolume improvement in this area
_partition_bounds = prepare_default_non_dominated_partition_bounds(
self._ref_point,
screened_front,
)
self._expected_improvement_fn: Optional[AcquisitionFunction]
if self._expected_improvement_fn is None:
self._expected_improvement_fn = expected_hv_improvement(
objective_model, _partition_bounds
)
else:
tf.debugging.Assert(
isinstance(self._expected_improvement_fn, expected_hv_improvement), []
)
self._expected_improvement_fn.update(_partition_bounds) # type: ignore
class HIPPO(GreedyAcquisitionFunctionBuilder[ProbabilisticModelType]):
r"""
HIPPO: HIghly Parallelizable Pareto Optimization
    Builder of an acquisition function for greedily collecting batches via HIPPO
    penalization in multi-objective optimization, which penalizes batch points by
    their distance in the objective space. The resulting acquisition function
    takes in a set of pending points and returns a base multi-objective acquisition function
    penalized around those points.
Penalization is applied to the acquisition function multiplicatively. However, to
improve numerical stability, we perform additive penalization in a log space.
"""
def __init__(
self,
objective_tag: Tag = OBJECTIVE,
base_acquisition_function_builder: AcquisitionFunctionBuilder[ProbabilisticModelType]
| SingleModelAcquisitionBuilder[ProbabilisticModelType]
| None = None,
):
"""
Initializes the HIPPO acquisition function builder.
:param objective_tag: The tag for the objective data and model.
:param base_acquisition_function_builder: Base acquisition function to be
penalized. Defaults to Expected Hypervolume Improvement, also supports
its constrained version.
"""
self._objective_tag = objective_tag
if base_acquisition_function_builder is None:
self._base_builder: AcquisitionFunctionBuilder[
ProbabilisticModelType
] = ExpectedHypervolumeImprovement().using(self._objective_tag)
else:
if isinstance(base_acquisition_function_builder, SingleModelAcquisitionBuilder):
self._base_builder = base_acquisition_function_builder.using(self._objective_tag)
else:
self._base_builder = base_acquisition_function_builder
self._base_acquisition_function: Optional[AcquisitionFunction] = None
self._penalization: Optional[PenalizationFunction] = None
self._penalized_acquisition: Optional[AcquisitionFunction] = None
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
Creates a new instance of the acquisition function.
:param models: The models.
:param datasets: The data from the observer. Must be populated.
:param pending_points: The points we penalize with respect to.
:return: The HIPPO acquisition function.
:raise tf.errors.InvalidArgumentError: If the ``dataset`` is empty.
"""
tf.debugging.Assert(datasets is not None, [tf.constant([])])
datasets = cast(Mapping[Tag, Dataset], datasets)
tf.debugging.Assert(datasets[self._objective_tag] is not None, [tf.constant([])])
tf.debugging.assert_positive(
len(datasets[self._objective_tag]),
message=f"{self._objective_tag} dataset must be populated.",
)
acq = self._update_base_acquisition_function(models, datasets)
if pending_points is not None and len(pending_points) != 0:
acq = self._update_penalization(acq, models[self._objective_tag], pending_points)
return acq
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
Updates the acquisition function.
:param function: The acquisition function to update.
:param models: The models.
:param datasets: The data from the observer. Must be populated.
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of
            points for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
tf.debugging.Assert(datasets is not None, [tf.constant([])])
datasets = cast(Mapping[Tag, Dataset], datasets)
tf.debugging.Assert(datasets[self._objective_tag] is not None, [tf.constant([])])
tf.debugging.assert_positive(
len(datasets[self._objective_tag]),
message=f"{self._objective_tag} dataset must be populated.",
)
tf.debugging.Assert(self._base_acquisition_function is not None, [tf.constant([])])
if new_optimization_step:
self._update_base_acquisition_function(models, datasets)
if pending_points is None or len(pending_points) == 0:
# no penalization required if no pending_points
return cast(AcquisitionFunction, self._base_acquisition_function)
return self._update_penalization(function, models[self._objective_tag], pending_points)
def _update_penalization(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
tf.debugging.assert_rank(pending_points, 2)
if self._penalized_acquisition is not None and isinstance(
self._penalization, hippo_penalizer
):
# if possible, just update the penalization function variables
# (the type ignore is due to mypy getting confused by tf.function)
self._penalization.update(pending_points) # type: ignore[unreachable]
return self._penalized_acquisition
else:
# otherwise construct a new penalized acquisition function
self._penalization = hippo_penalizer(model, pending_points)
@tf.function
def penalized_acquisition(x: TensorType) -> TensorType:
log_acq = tf.math.log(
cast(AcquisitionFunction, self._base_acquisition_function)(x)
) + tf.math.log(cast(PenalizationFunction, self._penalization)(x))
return tf.math.exp(log_acq)
self._penalized_acquisition = penalized_acquisition
return penalized_acquisition
def _update_base_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
if self._base_acquisition_function is None:
self._base_acquisition_function = self._base_builder.prepare_acquisition_function(
models, datasets
)
else:
self._base_acquisition_function = self._base_builder.update_acquisition_function(
self._base_acquisition_function, models, datasets
)
return self._base_acquisition_function
class hippo_penalizer:
r"""
Returns the penalization function used for multi-objective greedy batch Bayesian
optimization.
A candidate point :math:`x` is penalized based on the Mahalanobis distance to a
given pending point :math:`p_i`. Since we assume objectives to be independent,
the Mahalanobis distance between these points becomes a Eucledian distance
normalized by standard deviation. Penalties for multiple pending points are multiplied,
and the resulting quantity is warped with the arctan function to :math:`[0, 1]` interval.
:param model: The model over the specified ``dataset``.
:param pending_points: The points we penalize with respect to.
:return: The penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
def __init__(self, model: ProbabilisticModel, pending_points: TensorType):
"""Initialize the MO penalizer.
:param model: The model.
:param pending_points: The points we penalize with respect to.
:raise ValueError: If pending points are empty or None.
:return: The penalization function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one."""
tf.debugging.Assert(
pending_points is not None and len(pending_points) != 0, [tf.constant([])]
)
self._model = model
self._pending_points = tf.Variable(pending_points, shape=[None, *pending_points.shape[1:]])
pending_means, pending_vars = self._model.predict(self._pending_points)
self._pending_means = tf.Variable(pending_means, shape=[None, *pending_means.shape[1:]])
self._pending_vars = tf.Variable(pending_vars, shape=[None, *pending_vars.shape[1:]])
def update(self, pending_points: TensorType) -> None:
"""Update the penalizer with new pending points."""
tf.debugging.Assert(
pending_points is not None and len(pending_points) != 0, [tf.constant([])]
)
self._pending_points.assign(pending_points)
pending_means, pending_vars = self._model.predict(self._pending_points)
self._pending_means.assign(pending_means)
self._pending_vars.assign(pending_vars)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
# x is [N, 1, D]
x = tf.squeeze(x, axis=1) # x is now [N, D]
x_means, x_vars = self._model.predict(x)
# x_means is [N, K], x_vars is [N, K]
# where K is the number of models/objectives
# self._pending_points is [B, D] where B is the size of the batch collected so far
tf.debugging.assert_shapes(
[
(x, ["N", "D"]),
(self._pending_points, ["B", "D"]),
(self._pending_means, ["B", "K"]),
(self._pending_vars, ["B", "K"]),
(x_means, ["N", "K"]),
(x_vars, ["N", "K"]),
],
message="""Encountered unexpected shapes while calculating mean and variance
of given point x and pending points""",
)
x_means_expanded = x_means[:, None, :]
pending_means_expanded = self._pending_means[None, :, :]
pending_vars_expanded = self._pending_vars[None, :, :]
pending_stddevs_expanded = tf.sqrt(pending_vars_expanded)
# this computes Mahalanobis distance between x and pending points
# since we assume objectives to be independent
        # it reduces to a regular Euclidean distance normalized by standard deviation
standardize_mean_diff = (
tf.abs(x_means_expanded - pending_means_expanded) / pending_stddevs_expanded
) # [N, B, K]
d = tf.norm(standardize_mean_diff, axis=-1) # [N, B]
# warp the distance so that resulting value is from 0 to (nearly) 1
warped_d = (2.0 / math.pi) * tf.math.atan(d)
penalty = tf.reduce_prod(warped_d, axis=-1) # [N,]
return tf.reshape(penalty, (-1, 1))
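# --- Illustrative sketch, not part of the library: a duck-typed two-objective toy
# model (an assumption made purely for illustration) showing that the HIPPO penalty
# is smaller for candidates whose predictions sit close to a pending point in
# objective space.
class _TwoObjectiveModelSketch:
    def predict(self, x: TensorType) -> tuple[TensorType, TensorType]:
        s = tf.reduce_sum(x, axis=-1, keepdims=True)
        mean = tf.concat([s, -s], axis=-1)  # two toy objectives
        return mean, tf.ones_like(mean)


def _hippo_penalty_sketch() -> None:
    pending = tf.constant([[0.0, 0.0]], dtype=tf.float64)  # one pending point, D=2
    penalty = hippo_penalizer(_TwoObjectiveModelSketch(), pending)
    near = penalty(tf.constant([[[0.01, 0.01]]], dtype=tf.float64))  # [N=1, 1, D]
    far = penalty(tf.constant([[[5.0, 5.0]]], dtype=tf.float64))
    tf.debugging.assert_less(near, far)  # closer in objective space => stronger penalty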
| 33,929 | 43.821664 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/continuous_thompson_sampling.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains acquisition function builders for continuous Thompson sampling.
"""
from __future__ import annotations
from typing import Any, Callable, Optional, Type
import tensorflow as tf
from ...data import Dataset
from ...models.interfaces import HasTrajectorySampler, TrajectoryFunction, TrajectoryFunctionClass
from ...types import TensorType
from ..interface import SingleModelGreedyAcquisitionBuilder, SingleModelVectorizedAcquisitionBuilder
from ..utils import select_nth_output
class GreedyContinuousThompsonSampling(SingleModelGreedyAcquisitionBuilder[HasTrajectorySampler]):
r"""
Acquisition function builder for performing greedy continuous Thompson sampling. This builder
return acquisition functions that are the negatives of approximate samples from the
given :class:`ProbabilisticModel`, as provided by the model's :meth:`get_trajectory`
method. A set of such samples are to be maximized in a sequential greedy manner to provide
the next recommended query points. Note that we actually return
the negative of the trajectory, so that our acquisition optimizers (which are
all maximizers) can be used to extract the minimisers of trajectories.
For more details about trajectory-based Thompson sampling see :cite:`hernandez2017parallel` and
:cite:`wilson2020efficiently`.
"""
def __init__(self, select_output: Callable[[TensorType], TensorType] = select_nth_output):
"""
:param select_output: A method that returns the desired trajectory from a trajectory
sampler with shape [..., B], where B is a batch dimension. Defaults to the
:func:~`trieste.acquisition.utils.select_nth_output` function with output dimension 0.
"""
self._select_output = select_output
def __repr__(self) -> str:
""""""
return f"GreedyContinuousThompsonSampling({self._select_output!r})"
def prepare_acquisition_function(
self,
model: HasTrajectorySampler,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
) -> TrajectoryFunction:
"""
:param model: The model.
:param dataset: The data from the observer (not used).
:param pending_points: The points already in the current batch (not used).
:return: A negated trajectory sampled from the model.
"""
if not isinstance(model, HasTrajectorySampler):
raise ValueError(
f"Thompson sampling from trajectory only supports models with a trajectory_sampler "
f"method; received {model.__repr__()}"
)
self._trajectory_sampler = model.trajectory_sampler()
function = self._trajectory_sampler.get_trajectory()
return negate_trajectory_function(function, self._select_output)
def update_acquisition_function(
self,
function: TrajectoryFunction,
model: HasTrajectorySampler,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> TrajectoryFunction:
"""
:param function: The trajectory function to update.
:param model: The model.
:param dataset: The data from the observer (not used).
:param pending_points: The points already in the current batch (not used).
:param new_optimization_step: Indicates whether this call to update_acquisition_function
            is the start of a new optimization step, or to continue collecting a batch of
            points for the current step. Defaults to ``True``.
:return: A new trajectory sampled from the model.
"""
if new_optimization_step: # update sampler and resample trajectory
new_function = self._trajectory_sampler.update_trajectory(function)
else: # just resample trajectory but without updating sampler
new_function = self._trajectory_sampler.resample_trajectory(function)
if new_function is not function:
function = negate_trajectory_function(new_function, self._select_output)
return function
class ParallelContinuousThompsonSampling(
SingleModelVectorizedAcquisitionBuilder[HasTrajectorySampler]
):
r"""
Acquisition function builder for performing parallel continuous Thompson sampling.
    This builder provides broadly the same behavior as our :class:`GreedyContinuousThompsonSampling`,
    however it optimizes trajectory samples in parallel rather than sequentially.
Consequently, :class:`ParallelContinuousThompsonSampling` can choose query points faster
    than :class:`GreedyContinuousThompsonSampling`, however it has much larger memory usage.
For a convenient way to control the total memory usage of this acquisition function, see
our :const:`split_acquisition_function_calls` wrapper.
"""
def __init__(self, select_output: Callable[[TensorType], TensorType] = select_nth_output):
"""
:param select_output: A method that returns the desired trajectory from a trajectory
sampler with shape [..., B], where B is a batch dimension. Defaults to the
:func:~`trieste.acquisition.utils.select_nth_output` function with output dimension 0.
"""
self._select_output = select_output
def __repr__(self) -> str:
""""""
return f"ParallelContinuousThompsonSampling({self._select_output!r})"
def prepare_acquisition_function(
self,
model: HasTrajectorySampler,
dataset: Optional[Dataset] = None,
) -> TrajectoryFunction:
"""
:param model: The model.
:param dataset: The data from the observer (not used).
:return: A negated trajectory sampled from the model.
"""
if not isinstance(model, HasTrajectorySampler):
raise ValueError(
f"Thompson sampling from trajectory only supports models with a trajectory_sampler "
f"method; received {model.__repr__()}"
)
self._trajectory_sampler = model.trajectory_sampler()
self._trajectory = self._trajectory_sampler.get_trajectory()
self._negated_trajectory = negate_trajectory_function(self._trajectory, self._select_output)
return self._negated_trajectory
def update_acquisition_function(
self,
function: TrajectoryFunction,
model: HasTrajectorySampler,
dataset: Optional[Dataset] = None,
) -> TrajectoryFunction:
"""
:param function: The trajectory function to update.
:param model: The model.
:param dataset: The data from the observer (not used).
:return: A new trajectory sampled from the model.
"""
if function is not self._negated_trajectory:
raise ValueError("Wrong trajectory function passed into update_acquisition_function")
new_function = self._trajectory_sampler.update_trajectory(self._trajectory)
if new_function is not self._trajectory: # need to negate again if not modified in place
self._trajectory = new_function
self._negated_trajectory = negate_trajectory_function(new_function, self._select_output)
return self._negated_trajectory
class _DummyTrajectoryFunctionClass(TrajectoryFunctionClass):
# dummy trajectory function class used while pickling NegatedTrajectory
def __call__(self, x: TensorType) -> TensorType:
return x
def negate_trajectory_function(
function: TrajectoryFunction,
select_output: Optional[Callable[[TensorType], TensorType]] = None,
function_type: Optional[Type[TrajectoryFunction]] = None,
) -> TrajectoryFunction:
"""
Return the negative of trajectories and select the output to form the acquisition function, so
that our acquisition optimizers (which are all maximizers) can be used to extract the minimizers
of trajectories.
We negate the trajectory function object's call method, as it may have e.g. update and resample
methods, and select the output we wish to use.
"""
if isinstance(function, TrajectoryFunctionClass):
class NegatedTrajectory(function_type or type(function)): # type: ignore[misc]
@tf.function
def __call__(self, x: TensorType) -> TensorType:
if select_output is not None:
return -1.0 * select_output(super().__call__(x))
else:
return -1.0 * super().__call__(x)
def __reduce__(
self,
) -> tuple[
Callable[..., TrajectoryFunction],
tuple[
TrajectoryFunction,
Optional[Callable[[TensorType], TensorType]],
Optional[Type[TrajectoryFunction]],
],
dict[str, Any],
]:
# make this pickleable
state = (
self.__getstate__() if hasattr(self, "__getstate__") else self.__dict__.copy()
)
return (
negate_trajectory_function,
(_DummyTrajectoryFunctionClass(), select_output, self.__class__.__base__),
state,
)
function.__class__ = NegatedTrajectory
return function
else:
@tf.function
def negated_trajectory(x: TensorType) -> TensorType:
if select_output is not None:
return -1.0 * select_output(function(x))
else:
return -1.0 * function(x)
return negated_trajectory
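# --- Illustrative sketch, not part of the library: ``negate_trajectory_function``
# applied to a plain callable trajectory. Maximising the returned function is then
# equivalent to minimising the original sample. The quadratic toy trajectory is an
# assumption made purely for illustration.
def _negated_trajectory_sketch() -> None:
    def toy_trajectory(x: TensorType) -> TensorType:
        return tf.reduce_sum(x**2, axis=-1, keepdims=True)  # minimum at the origin

    negated = negate_trajectory_function(toy_trajectory)
    x = tf.constant([[0.0, 0.0], [1.0, 2.0]])
    tf.debugging.assert_equal(negated(x), -toy_trajectory(x))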
| 10,279 | 40.788618 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/function.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains acquisition function builders, which build and define our acquisition
functions --- functions that estimate the utility of evaluating sets of candidate points.
"""
from __future__ import annotations
from typing import Callable, Mapping, Optional, cast
import tensorflow as tf
import tensorflow_probability as tfp
from ...data import Dataset
from ...models import ProbabilisticModel, ReparametrizationSampler
from ...models.interfaces import (
HasReparamSampler,
SupportsGetObservationNoise,
SupportsReparamSamplerObservationNoise,
)
from ...space import SearchSpace
from ...types import Tag, TensorType
from ...utils import DEFAULTS
from ..interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
AcquisitionFunctionClass,
ProbabilisticModelType,
SingleModelAcquisitionBuilder,
SingleModelVectorizedAcquisitionBuilder,
)
from .utils import MultivariateNormalCDF
class ProbabilityOfImprovement(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the probability of improvement function, where the "best" value
is taken to be the minimum of the posterior mean at observed points.
"""
def __repr__(self) -> str:
""""""
return "ProbabilityOfImprovement()"
def prepare_acquisition_function(
self, model: ProbabilisticModel, dataset: Optional[Dataset] = None
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:return: The probability of improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)[0]
return probability_below_threshold(model, eta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, probability_below_threshold), [tf.constant([])])
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)[0]
function.update(eta) # type: ignore
return function
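# Editorial sketch (not part of the original module): preparing probability of improvement for
# a model and a populated dataset. The resulting acquisition function expects query points with
# an explicit batch dimension of one, e.g. shape [N, 1, D].
def _example_probability_of_improvement(
    model: ProbabilisticModel, dataset: Dataset
) -> AcquisitionFunction:
    return ProbabilityOfImprovement().prepare_acquisition_function(model, dataset=dataset)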
class ExpectedImprovement(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the expected improvement function where the "best" value is taken to be the minimum
of the posterior mean at observed points.
In the presence of constraints in the search_space the "best" value is computed only at the
feasible query points. If there are no feasible points, the "best" value is instead taken to be
the maximum of the posterior mean at all observed points.
"""
def __init__(self, search_space: Optional[SearchSpace] = None):
"""
:param search_space: The global search space over which the optimisation is defined. This is
only used to determine explicit constraints.
"""
self._search_space = search_space
def __repr__(self) -> str:
""""""
return f"ExpectedImprovement({self._search_space!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
# Check feasibility against any explicit constraints in the search space.
if self._search_space is not None and self._search_space.has_constraints:
is_feasible = self._search_space.is_feasible(dataset.query_points)
if not tf.reduce_any(is_feasible):
query_points = dataset.query_points
else:
query_points = tf.boolean_mask(dataset.query_points, is_feasible)
else:
is_feasible = tf.constant([True], dtype=bool)
query_points = dataset.query_points
mean, _ = model.predict(query_points)
if not tf.reduce_any(is_feasible):
eta = tf.reduce_max(mean, axis=0)
else:
eta = tf.reduce_min(mean, axis=0)
return expected_improvement(model, eta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, expected_improvement), [tf.constant([])])
# Check feasibility against any explicit constraints in the search space.
if self._search_space is not None and self._search_space.has_constraints:
is_feasible = self._search_space.is_feasible(dataset.query_points)
if not tf.reduce_any(is_feasible):
query_points = dataset.query_points
else:
query_points = tf.boolean_mask(dataset.query_points, is_feasible)
else:
is_feasible = tf.constant([True], dtype=bool)
query_points = dataset.query_points
mean, _ = model.predict(query_points)
if not tf.reduce_any(is_feasible):
eta = tf.reduce_max(mean, axis=0)
else:
eta = tf.reduce_min(mean, axis=0)
function.update(eta) # type: ignore
return function
class expected_improvement(AcquisitionFunctionClass):
def __init__(self, model: ProbabilisticModel, eta: TensorType):
r"""
Return the Expected Improvement (EI) acquisition function for single-objective global
optimization. Improvement is with respect to the current "best" observation ``eta``, where
an improvement moves towards the objective function's minimum and the expectation is
calculated with respect to the ``model`` posterior. For model posterior :math:`f`, this is
.. math:: x \mapsto \mathbb E \left[ \max (\eta - f(x), 0) \right]
This function was introduced by Mockus et al, 1975. See :cite:`Jones:1998` for details.
:param model: The model of the objective function.
:param eta: The "best" observation.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
self._model = model
self._eta = tf.Variable(eta)
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta value."""
self._eta.assign(eta)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = self._model.predict(tf.squeeze(x, -2))
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
return (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob(self._eta)
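# Editorial sketch (not part of the original module): the closed-form EI expression for a single
# Gaussian prediction, written with plain scalars, mirroring `expected_improvement.__call__`.
# Included purely as an illustration of the formula.
def _example_analytic_ei(mean: float, variance: float, eta: float) -> float:
    normal = tfp.distributions.Normal(mean, variance**0.5)
    ei = (eta - mean) * normal.cdf(eta) + variance * normal.prob(eta)
    return float(ei)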
class AugmentedExpectedImprovement(SingleModelAcquisitionBuilder[SupportsGetObservationNoise]):
"""
    Builder for the augmented expected improvement function for single-objective optimization
    problems with high levels of observation noise.
"""
def __repr__(self) -> str:
""""""
return "AugmentedExpectedImprovement()"
def prepare_acquisition_function(
self,
model: SupportsGetObservationNoise,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
if not isinstance(model, SupportsGetObservationNoise):
raise NotImplementedError(
f"AugmentedExpectedImprovement only works with models that support "
f"get_observation_noise; received {model.__repr__()}"
)
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
return augmented_expected_improvement(model, eta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: SupportsGetObservationNoise,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, augmented_expected_improvement), [tf.constant([])])
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
function.update(eta) # type: ignore
return function
class augmented_expected_improvement(AcquisitionFunctionClass):
def __init__(self, model: SupportsGetObservationNoise, eta: TensorType):
r"""
Return the Augmented Expected Improvement (AEI) acquisition function for single-objective
global optimization under homoscedastic observation noise.
Improvement is with respect to the current "best" observation ``eta``, where an
improvement moves towards the objective function's minimum and the expectation is calculated
with respect to the ``model`` posterior. In contrast to standard EI, AEI has an additional
multiplicative factor that penalizes evaluations made in areas of the space with very small
posterior predictive variance. Thus, when applying standard EI to noisy optimisation
problems, AEI avoids getting trapped and repeatedly querying the same point.
For model posterior :math:`f`, this is
        .. math:: x \mapsto EI(x) * \left(1 - \frac{\tau}{\sqrt{s^2(x)+\tau^2}}\right),
where :math:`s^2(x)` is the predictive variance and :math:`\tau` is observation noise.
This function was introduced by Huang et al, 2006. See :cite:`Huang:2006` for details.
:param model: The model of the objective function.
:param eta: The "best" observation.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one or a model without homoscedastic observation noise.
"""
self._model = model
self._eta = tf.Variable(eta)
self._noise_variance = tf.Variable(model.get_observation_noise())
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta value and noise variance."""
self._eta.assign(eta)
self._noise_variance.assign(self._model.get_observation_noise())
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = self._model.predict(tf.squeeze(x, -2))
normal = tfp.distributions.Normal(mean, tf.sqrt(variance))
expected_improvement = (self._eta - mean) * normal.cdf(self._eta) + variance * normal.prob(
self._eta
)
augmentation = 1 - (tf.math.sqrt(self._noise_variance)) / (
tf.math.sqrt(self._noise_variance + variance)
)
return expected_improvement * augmentation
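# Editorial sketch (not part of the original module): the AEI augmentation factor in isolation,
# for scalar observation-noise variance tau^2 and predictive variance s^2. This is the multiplier
# applied to standard EI in `augmented_expected_improvement.__call__`.
def _example_aei_augmentation(noise_variance: float, predictive_variance: float) -> float:
    return 1.0 - noise_variance**0.5 / (noise_variance + predictive_variance) ** 0.5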
class NegativeLowerConfidenceBound(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the negative of the lower confidence bound. The lower confidence bound is typically
minimised, so the negative is suitable for maximisation.
"""
def __init__(self, beta: float = 1.96):
"""
:param beta: Weighting given to the variance contribution to the lower confidence bound.
Must not be negative.
"""
self._beta = beta
def __repr__(self) -> str:
""""""
return f"NegativeLowerConfidenceBound({self._beta!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
:return: The negative lower confidence bound function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError: If ``beta`` is negative.
"""
lcb = lower_confidence_bound(model, self._beta)
return tf.function(lambda at: -lcb(at))
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
return function # no need to update anything
class NegativePredictiveMean(NegativeLowerConfidenceBound):
"""
    Builder for the negative of the predictive mean. The predictive mean is minimised when
    minimising the objective function, so the negative predictive mean is maximised instead.
"""
def __init__(self) -> None:
super().__init__(beta=0.0)
def __repr__(self) -> str:
""""""
return "NegativePredictiveMean()"
def lower_confidence_bound(model: ProbabilisticModel, beta: float) -> AcquisitionFunction:
r"""
The lower confidence bound (LCB) acquisition function for single-objective global optimization.
.. math:: x^* \mapsto \mathbb{E} [f(x^*)|x, y] - \beta \sqrt{ \mathrm{Var}[f(x^*)|x, y] }
See :cite:`Srinivas:2010` for details.
:param model: The model of the objective function.
:param beta: The weight to give to the standard deviation contribution of the LCB. Must not be
negative.
:return: The lower confidence bound function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise tf.errors.InvalidArgumentError: If ``beta`` is negative.
"""
tf.debugging.assert_non_negative(
beta, message="Standard deviation scaling parameter beta must not be negative"
)
@tf.function
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = model.predict(tf.squeeze(x, -2))
return mean - beta * tf.sqrt(variance)
return acquisition
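# Editorial sketch (not part of the original module): a throwaway constant-prediction stand-in
# used only to show the shape conventions of `lower_confidence_bound`; it is not one of Trieste's
# model wrappers. With zero mean and unit variance the bound evaluates to -beta everywhere.
def _example_lower_confidence_bound(beta: float = 1.96) -> TensorType:
    class _ConstantModel:
        def predict(self, x: TensorType) -> tuple[TensorType, TensorType]:
            mean = tf.zeros_like(x[..., :1])
            return mean, tf.ones_like(mean)
    lcb = lower_confidence_bound(_ConstantModel(), beta)  # type: ignore[arg-type]
    return lcb(tf.zeros([5, 1, 2]))  # shape [5, 1], each entry equal to -beta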
class ProbabilityOfFeasibility(SingleModelAcquisitionBuilder[ProbabilisticModel]):
r"""
Uses the :func:`probability_below_threshold` function to build a
    probability of feasibility acquisition function, defined in :cite:`gardner14` as
.. math::
\int_{-\infty}^{\tau} p(c(\mathbf{x}) | \mathbf{x}, \mathcal{D}) \mathrm{d} c(\mathbf{x})
\qquad ,
where :math:`\tau` is a threshold. Values below the threshold are considered feasible by the
constraint function. See also :cite:`schonlau1998global` for details.
"""
def __init__(self, threshold: float | TensorType):
"""
:param threshold: The (scalar) probability of feasibility threshold.
:raise ValueError (or InvalidArgumentError): If ``threshold`` is not a scalar.
"""
tf.debugging.assert_scalar(threshold)
self._threshold = threshold
def __repr__(self) -> str:
""""""
return f"ProbabilityOfFeasibility({self._threshold!r})"
@property
def threshold(self) -> float | TensorType:
"""The probability of feasibility threshold."""
return self._threshold
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
:return: The probability of feasibility function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
return probability_below_threshold(model, self.threshold)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
return function # no need to update anything
class probability_below_threshold(AcquisitionFunctionClass):
def __init__(self, model: ProbabilisticModel, threshold: float | TensorType):
r"""
The probability of being below the threshold. This brings together commonality
        between probability of improvement and probability of feasibility.
        The probability is calculated with respect to the ``model`` posterior.
        For model posterior :math:`f`, this is
        .. math:: x \mapsto \mathbb P \left( f(x) < \eta \right)
where :math:`\eta` is the threshold.
:param model: The model of the objective function.
:param threshold: The (scalar) probability of feasibility threshold.
:return: The probability of feasibility function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise ValueError or tf.errors.InvalidArgumentError: If ``threshold`` is not a scalar.
"""
tf.debugging.assert_scalar(threshold)
self._model = model
self._threshold = tf.Variable(threshold)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, var = self._model.predict(tf.squeeze(x, -2))
distr = tfp.distributions.Normal(mean, tf.sqrt(var))
return distr.cdf(tf.cast(self._threshold, x.dtype))
def update(self, threshold: TensorType) -> None:
"""Update the acquisition function with a new threshold value."""
self._threshold.assign(threshold)
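# Editorial sketch (not part of the original module): probability of lying below a threshold for
# a single Gaussian prediction, matching the CDF computation in
# `probability_below_threshold.__call__`; inputs are assumed to be plain scalars.
def _example_prob_below_threshold(mean: float, variance: float, threshold: float) -> float:
    normal = tfp.distributions.Normal(mean, variance**0.5)
    return float(normal.cdf(threshold))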
class FastConstraintsFeasibility(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
    Builds a feasibility acquisition function from the residuals of explicit constraints
    defined in the search space.
"""
def __init__(
self,
search_space: SearchSpace,
smoothing_function: Optional[Callable[[TensorType], TensorType]] = None,
):
"""
:param search_space: The global search space over which the feasibility of the constraints
is defined.
:param smoothing_function: The smoothing function used for constraints residuals. The
default is CDF of the Normal distribution with a scale of `1e-3`.
:raise NotImplementedError: If the `search_space` does not have constraints.
"""
if not search_space.has_constraints:
raise NotImplementedError(
"FastConstraintsFeasibility requires constraints in the search space."
)
self._search_space = search_space
self._smoothing_function = smoothing_function
def __repr__(self) -> str:
""""""
return f"FastConstraintsFeasibility({self._search_space!r}, {self._smoothing_function!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: Unused.
:param dataset: Unused.
:return: The function for feasibility of constraints.
"""
return fast_constraints_feasibility(self._search_space, self._smoothing_function)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: Unused.
:param dataset: Unused.
:return: The function for feasibility of constraints.
"""
return function # No need to update anything.
def fast_constraints_feasibility(
search_space: SearchSpace,
smoothing_function: Optional[Callable[[TensorType], TensorType]] = None,
) -> AcquisitionFunction:
"""
    Returns a feasibility acquisition function from the residuals of explicit constraints
    defined in the search space.
:param search_space: The global search space over which the feasibility of the constraints
is defined.
:param smoothing_function: The smoothing function used for constraints residuals. The
default is CDF of the Normal distribution with a scale of `1e-3`.
:return: The function for feasibility of constraints.
:raise NotImplementedError: If the `search_space` does not have constraints.
"""
if not search_space.has_constraints:
raise NotImplementedError(
"fast_constraints_feasibility requires constraints in the search space."
)
@tf.function
def acquisition(x: TensorType) -> TensorType:
if smoothing_function is None:
_smoothing_function = tfp.distributions.Normal(
tf.cast(0.0, x.dtype), tf.cast(1e-3, x.dtype)
).cdf
else:
_smoothing_function = smoothing_function
residuals = search_space.constraints_residuals(x)
return tf.math.reduce_prod(_smoothing_function(residuals), axis=-1)
return acquisition
class ExpectedConstrainedImprovement(AcquisitionFunctionBuilder[ProbabilisticModelType]):
"""
Builder for the *expected constrained improvement* acquisition function defined in
:cite:`gardner14`. The acquisition function computes the expected improvement from the best
feasible point, where feasible points are those that (probably) satisfy some constraint. Where
there are no feasible points, this builder simply builds the constraint function.
"""
def __init__(
self,
objective_tag: Tag,
constraint_builder: AcquisitionFunctionBuilder[ProbabilisticModelType],
min_feasibility_probability: float | TensorType = 0.5,
search_space: Optional[SearchSpace] = None,
):
"""
:param objective_tag: The tag for the objective data and model.
:param constraint_builder: The builder for the constraint function.
:param min_feasibility_probability: The minimum probability of feasibility for a
"best point" to be considered feasible.
:param search_space: The global search space over which the optimisation is defined. This is
only used to determine explicit constraints.
:raise ValueError (or tf.errors.InvalidArgumentError): If ``min_feasibility_probability``
is not a scalar in the unit interval :math:`[0, 1]`.
"""
tf.debugging.assert_scalar(min_feasibility_probability)
if isinstance(min_feasibility_probability, (int, float)):
tf.debugging.assert_greater_equal(float(min_feasibility_probability), 0.0)
tf.debugging.assert_less_equal(float(min_feasibility_probability), 1.0)
else:
dtype = min_feasibility_probability.dtype
tf.debugging.assert_greater_equal(min_feasibility_probability, tf.cast(0, dtype))
tf.debugging.assert_less_equal(min_feasibility_probability, tf.cast(1, dtype))
self._objective_tag = objective_tag
self._constraint_builder = constraint_builder
self._search_space = search_space
self._min_feasibility_probability = min_feasibility_probability
self._constraint_fn: Optional[AcquisitionFunction] = None
self._expected_improvement_fn: Optional[AcquisitionFunction] = None
self._constrained_improvement_fn: Optional[AcquisitionFunction] = None
def __repr__(self) -> str:
""""""
return (
f"ExpectedConstrainedImprovement({self._objective_tag!r}, {self._constraint_builder!r},"
f" {self._min_feasibility_probability!r}, {self._search_space!r})"
)
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
"""
:param models: The models over each tag.
:param datasets: The data from the observer.
:return: The expected constrained improvement acquisition function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
:raise KeyError: If `objective_tag` is not found in ``datasets`` and ``models``.
:raise tf.errors.InvalidArgumentError: If the objective data is empty.
"""
tf.debugging.Assert(datasets is not None, [tf.constant([])])
datasets = cast(Mapping[Tag, Dataset], datasets)
objective_model = models[self._objective_tag]
objective_dataset = datasets[self._objective_tag]
tf.debugging.assert_positive(
len(objective_dataset),
message="Expected improvement is defined with respect to existing points in the"
" objective data, but the objective data is empty.",
)
self._constraint_fn = self._constraint_builder.prepare_acquisition_function(
models, datasets=datasets
)
pof = self._constraint_fn(objective_dataset.query_points[:, None, ...])
is_feasible = tf.squeeze(pof >= self._min_feasibility_probability, axis=-1)
# Check feasibility against any explicit constraints in the search space.
if self._search_space is not None and self._search_space.has_constraints:
ss_is_feasible = self._search_space.is_feasible(objective_dataset.query_points)
is_feasible = tf.logical_and(is_feasible, ss_is_feasible)
if not tf.reduce_any(is_feasible):
return self._constraint_fn
feasible_query_points = tf.boolean_mask(objective_dataset.query_points, is_feasible)
feasible_mean, _ = objective_model.predict(feasible_query_points)
self._update_expected_improvement_fn(objective_model, feasible_mean)
@tf.function
def constrained_function(x: TensorType) -> TensorType:
return cast(AcquisitionFunction, self._expected_improvement_fn)(x) * cast(
AcquisitionFunction, self._constraint_fn
)(x)
self._constrained_improvement_fn = constrained_function
return constrained_function
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModelType],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param models: The models for each tag.
:param datasets: The data from the observer.
"""
tf.debugging.Assert(datasets is not None, [tf.constant([])])
datasets = cast(Mapping[Tag, Dataset], datasets)
objective_model = models[self._objective_tag]
objective_dataset = datasets[self._objective_tag]
tf.debugging.assert_positive(
len(objective_dataset),
message="Expected improvement is defined with respect to existing points in the"
" objective data, but the objective data is empty.",
)
tf.debugging.Assert(self._constraint_fn is not None, [tf.constant([])])
constraint_fn = cast(AcquisitionFunction, self._constraint_fn)
self._constraint_builder.update_acquisition_function(
constraint_fn, models, datasets=datasets
)
pof = constraint_fn(objective_dataset.query_points[:, None, ...])
is_feasible = tf.squeeze(pof >= self._min_feasibility_probability, axis=-1)
# Check feasibility against any explicit constraints in the search space.
if self._search_space is not None and self._search_space.has_constraints:
ss_is_feasible = self._search_space.is_feasible(objective_dataset.query_points)
is_feasible = tf.logical_and(is_feasible, ss_is_feasible)
if not tf.reduce_any(is_feasible):
return constraint_fn
feasible_query_points = tf.boolean_mask(objective_dataset.query_points, is_feasible)
feasible_mean, _ = objective_model.predict(feasible_query_points)
self._update_expected_improvement_fn(objective_model, feasible_mean)
if self._constrained_improvement_fn is not None:
return self._constrained_improvement_fn
@tf.function
def constrained_function(x: TensorType) -> TensorType:
return cast(AcquisitionFunction, self._expected_improvement_fn)(x) * cast(
AcquisitionFunction, self._constraint_fn
)(x)
self._constrained_improvement_fn = constrained_function
return self._constrained_improvement_fn
def _update_expected_improvement_fn(
self, objective_model: ProbabilisticModelType, feasible_mean: TensorType
) -> None:
"""
Set or update the unconstrained expected improvement function.
:param objective_model: The objective model.
:param feasible_mean: The mean of the feasible query points.
"""
eta = tf.reduce_min(feasible_mean, axis=0)
if self._expected_improvement_fn is None:
self._expected_improvement_fn = expected_improvement(objective_model, eta)
else:
tf.debugging.Assert(
isinstance(self._expected_improvement_fn, expected_improvement), [tf.constant([])]
)
self._expected_improvement_fn.update(eta) # type: ignore
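# Editorial sketch (not part of the original module): wiring expected constrained improvement
# from an objective model and a constraint model. The tags "OBJECTIVE" and "CONSTRAINT" and the
# threshold of 0.5 are illustrative assumptions, not defaults mandated by this module.
def _example_expected_constrained_improvement(
    models: Mapping[Tag, ProbabilisticModel],
    datasets: Mapping[Tag, Dataset],
) -> AcquisitionFunction:
    pof = ProbabilityOfFeasibility(threshold=0.5).using("CONSTRAINT")
    eci = ExpectedConstrainedImprovement("OBJECTIVE", pof, min_feasibility_probability=0.5)
    return eci.prepare_acquisition_function(models, datasets=datasets)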
class MonteCarloExpectedImprovement(SingleModelAcquisitionBuilder[HasReparamSampler]):
"""
Builder for a Monte Carlo-based expected improvement function for use with a model without
analytical expected improvement (e.g. a deep GP). The "best" value is taken to be
the minimum of the posterior mean at observed points. See
:class:`monte_carlo_expected_improvement` for details.
"""
def __init__(self, sample_size: int, *, jitter: float = DEFAULTS.JITTER):
"""
:param sample_size: The number of samples for each batch of points.
:param jitter: The jitter for the reparametrization sampler.
:raise tf.errors.InvalidArgumentError: If ``sample_size`` is not positive, or ``jitter`` is
negative.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
super().__init__()
self._sample_size = sample_size
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"MonteCarloExpectedImprovement({self._sample_size!r}, jitter={self._jitter!r})"
def prepare_acquisition_function(
self,
model: HasReparamSampler,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model over the specified ``dataset``. Must have output dimension [1].
:param dataset: The data from the observer. Cannot be empty.
:return: The estimated *expected improvement* acquisition function.
:raise ValueError (or InvalidArgumentError): If ``dataset`` is not populated, ``model``
            does not have an output dimension of [1] or a ``reparam_sampler`` method.
"""
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"MonteCarloExpectedImprovement only supports models with a reparam_sampler method;"
f"received {model.__repr__()}"
)
sampler = model.reparam_sampler(self._sample_size)
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
samples_at_query_points = sampler.sample(
dataset.query_points[..., None, :], jitter=self._jitter
)
mean = tf.reduce_mean(samples_at_query_points, axis=-3, keepdims=True) # [N, 1, 1, L]
tf.debugging.assert_shapes(
[(mean, [..., 1])], message="Expected model with output dimension [1]."
)
eta = tf.squeeze(tf.reduce_min(mean, axis=0))
return monte_carlo_expected_improvement(sampler, eta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: HasReparamSampler,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model. Must have output dimension [1]. Unused here.
        :param dataset: The data from the observer. Cannot be empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(
isinstance(function, monte_carlo_expected_improvement), [tf.constant([])]
)
sampler = function._sampler # type: ignore
sampler.reset_sampler()
samples_at_query_points = sampler.sample(
dataset.query_points[..., None, :], jitter=self._jitter
)
mean = tf.reduce_mean(samples_at_query_points, axis=-3, keepdims=True)
tf.debugging.assert_shapes(
[(mean, [..., 1])], message="Expected model with output dimension [1]."
)
eta = tf.squeeze(tf.reduce_min(mean, axis=0))
function.update(eta) # type: ignore
return function
class monte_carlo_expected_improvement(AcquisitionFunctionClass):
r"""
Return a Monte Carlo based Expected Improvement (EI) acquisition function for
single-objective global optimization. Improvement is with respect to the current "best"
observation ``eta``, where an improvement moves towards the objective function's minimum
and the expectation is calculated with respect to the ``model`` posterior. For model
posterior :math:`f`, this is
.. math:: x \mapsto \mathbb E \left[ \max (\eta - f(x), 0) \right].
For the Monte Carlo version, the expectation is calculated by samples that we save. See
:cite:`wilson2018maximizing` for details.
"""
def __init__(self, sampler: ReparametrizationSampler[HasReparamSampler], eta: TensorType):
r"""
:param sampler: The model sampler of the objective function.
:param eta: The "best" observation.
:return: The Monte Carlo expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
self._sampler = sampler
self._eta = tf.Variable(eta)
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta value."""
self._eta.assign(eta)
@tf.function
def __call__(self, at: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(at, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
samples = tf.squeeze(self._sampler.sample(at), axis=-1) # [..., S, 1]
improvement = tf.maximum(self._eta - samples, 0.0) # [..., S, 1]
return tf.reduce_mean(improvement, axis=-2) # [..., 1]
class MonteCarloAugmentedExpectedImprovement(
SingleModelAcquisitionBuilder[SupportsReparamSamplerObservationNoise]
):
"""
Builder for a Monte Carlo-based augmented expected improvement function for use with a model
without analytical augmented expected improvement (e.g. a deep GP). The "best" value is taken to
be the minimum of the posterior mean at observed points. See
:class:`monte_carlo_augmented_expected_improvement` for details.
"""
def __init__(self, sample_size: int, *, jitter: float = DEFAULTS.JITTER):
"""
:param sample_size: The number of samples for each batch of points.
:param jitter: The jitter for the reparametrization sampler.
:raise tf.errors.InvalidArgumentError: If ``sample_size`` is not positive, or ``jitter`` is
negative.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
super().__init__()
self._sample_size = sample_size
self._jitter = jitter
def __repr__(self) -> str:
""""""
return (
f"MonteCarloAugmentedExpectedImprovement({self._sample_size!r}, "
f"jitter={self._jitter!r})"
)
def prepare_acquisition_function(
self,
model: SupportsReparamSamplerObservationNoise,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model over the specified ``dataset``. Must have output dimension [1].
:param dataset: The data from the observer. Cannot be empty.
:return: The estimated *expected improvement* acquisition function.
:raise ValueError (or InvalidArgumentError): If ``dataset`` is not populated, ``model``
            does not have an output dimension of [1], does not have a ``reparam_sampler``
            method, or does not support observation noise.
"""
if not isinstance(model, SupportsReparamSamplerObservationNoise):
raise ValueError(
f"MonteCarloAugmentedExpectedImprovement only supports models with a "
f"reparam_sampler method and that support observation noise; received "
f"{model.__repr__()}."
)
sampler = model.reparam_sampler(self._sample_size)
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
samples_at_query_points = sampler.sample(
dataset.query_points[..., None, :], jitter=self._jitter
)
mean = tf.reduce_mean(samples_at_query_points, axis=-3, keepdims=True) # [N, 1, 1, L]
tf.debugging.assert_shapes(
[(mean, [..., 1])], message="Expected model with output dimension [1]."
)
eta = tf.squeeze(tf.reduce_min(mean, axis=0))
return monte_carlo_augmented_expected_improvement(model, sampler, eta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: SupportsReparamSamplerObservationNoise,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
        :param model: The model. Must have output dimension [1]. Unused here.
:param dataset: The data from the observer. Cannot be empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(
isinstance(function, monte_carlo_augmented_expected_improvement), [tf.constant([])]
)
sampler = function._sampler # type: ignore
sampler.reset_sampler()
samples_at_query_points = sampler.sample(
dataset.query_points[..., None, :], jitter=self._jitter
)
mean = tf.reduce_mean(samples_at_query_points, axis=-3, keepdims=True) # [N, 1, 1, L]
tf.debugging.assert_shapes(
[(mean, [..., 1])], message="Expected model with output dimension [1]."
)
eta = tf.squeeze(tf.reduce_min(mean, axis=0))
function.update(eta) # type: ignore
return function
class monte_carlo_augmented_expected_improvement(AcquisitionFunctionClass):
r"""
Return a Monte Carlo based Augmented Expected Improvement (AEI) acquisition function for
single-objective global optimization with high levels of observation noise. See
:cite:`wilson2018maximizing` for details on using the reparametrization trick for optimizing
acquisition functions and :cite:`Huang:2006`: for details of AEI.
"""
def __init__(
self,
model: SupportsReparamSamplerObservationNoise,
sampler: ReparametrizationSampler[SupportsReparamSamplerObservationNoise],
eta: TensorType,
):
r"""
:param model: The model of the objective function.
:param sampler: The model sampler of the objective function.
:param eta: The "best" observation.
:return: The Monte Carlo expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
self._model = model
self._sampler = sampler
self._eta = tf.Variable(eta)
self._noise_variance = tf.Variable(model.get_observation_noise())
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta and noise variance"""
self._eta.assign(eta)
self._noise_variance.assign(self._model.get_observation_noise())
@tf.function
def __call__(self, at: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(at, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
samples = tf.squeeze(self._sampler.sample(at), axis=-1) # [..., S, 1]
improvement = tf.maximum(self._eta - samples, 0.0) # [..., S, 1]
variance = tf.math.reduce_variance(samples, -2) # [..., 1]
augmentation = 1 - (
tf.math.sqrt(self._noise_variance) / tf.math.sqrt(self._noise_variance + variance)
)
return augmentation * tf.reduce_mean(improvement, axis=-2) # [..., 1]
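# Editorial sketch (not part of the original module): building the two Monte Carlo variants for
# a model that exposes a reparametrization sampler (and, for the augmented variant, observation
# noise). The sample size of 100 is an illustrative choice, not a recommended default.
def _example_monte_carlo_ei_builders(
    model: SupportsReparamSamplerObservationNoise, dataset: Dataset
) -> tuple[AcquisitionFunction, AcquisitionFunction]:
    mc_ei = MonteCarloExpectedImprovement(sample_size=100)
    mc_aei = MonteCarloAugmentedExpectedImprovement(sample_size=100)
    return (
        mc_ei.prepare_acquisition_function(model, dataset=dataset),
        mc_aei.prepare_acquisition_function(model, dataset=dataset),
    )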
class BatchMonteCarloExpectedImprovement(SingleModelAcquisitionBuilder[HasReparamSampler]):
"""
Expected improvement for batches of points (or :math:`q`-EI), approximated using Monte Carlo
estimation with the reparametrization trick. See :cite:`Ginsbourger2010` for details.
Improvement is measured with respect to the minimum predictive mean at observed query points.
This is calculated in :class:`BatchMonteCarloExpectedImprovement` by assuming observations
at new points are independent from those at known query points. This is faster, but is an
approximation for noisy observers.
"""
def __init__(self, sample_size: int, *, jitter: float = DEFAULTS.JITTER):
"""
:param sample_size: The number of samples for each batch of points.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:raise tf.errors.InvalidArgumentError: If ``sample_size`` is not positive, or ``jitter``
is negative.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
self._sample_size = sample_size
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"BatchMonteCarloExpectedImprovement({self._sample_size!r}, jitter={self._jitter!r})"
def prepare_acquisition_function(
self,
model: HasReparamSampler,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model. Must have event shape [1].
:param dataset: The data from the observer. Must be populated.
:return: The batch *expected improvement* acquisition function.
:raise ValueError (or InvalidArgumentError): If ``dataset`` is not populated, or ``model``
does not have an event shape of [1].
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
mean, _ = model.predict(dataset.query_points)
tf.debugging.assert_shapes(
[(mean, ["_", 1])], message="Expected model with event shape [1]."
)
eta = tf.reduce_min(mean, axis=0)
return batch_monte_carlo_expected_improvement(self._sample_size, model, eta, self._jitter)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: HasReparamSampler,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model. Must have event shape [1].
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(
isinstance(function, batch_monte_carlo_expected_improvement), [tf.constant([])]
)
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
function.update(eta) # type: ignore
return function
class batch_monte_carlo_expected_improvement(AcquisitionFunctionClass):
def __init__(self, sample_size: int, model: HasReparamSampler, eta: TensorType, jitter: float):
"""
:param sample_size: The number of Monte-Carlo samples.
:param model: The model of the objective function.
:param eta: The "best" observation.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
:return: The expected improvement function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
self._sample_size = sample_size
if not isinstance(model, HasReparamSampler):
raise ValueError(
f"The batch Monte-Carlo expected improvement acquisition function only supports "
f"models that implement a reparam_sampler method; received {model.__repr__()}"
)
sampler = model.reparam_sampler(self._sample_size)
self._sampler = sampler
self._eta = tf.Variable(eta)
self._jitter = jitter
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta value and reset the reparam sampler."""
self._eta.assign(eta)
self._sampler.reset_sampler()
@tf.function
def __call__(self, x: TensorType) -> TensorType:
samples = tf.squeeze(self._sampler.sample(x, jitter=self._jitter), axis=-1) # [..., S, B]
min_sample_per_batch = tf.reduce_min(samples, axis=-1) # [..., S]
batch_improvement = tf.maximum(self._eta - min_sample_per_batch, 0.0) # [..., S]
return tf.reduce_mean(batch_improvement, axis=-1, keepdims=True) # [..., 1]
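# Editorial sketch (not part of the original module): the q-EI Monte Carlo reduction on its own,
# given pre-drawn joint posterior samples of shape [..., S, B]. This reproduces the reduction
# performed in `batch_monte_carlo_expected_improvement.__call__`.
def _example_qei_from_samples(samples: TensorType, eta: TensorType) -> TensorType:
    min_sample_per_batch = tf.reduce_min(samples, axis=-1)  # [..., S]
    batch_improvement = tf.maximum(eta - min_sample_per_batch, 0.0)  # [..., S]
    return tf.reduce_mean(batch_improvement, axis=-1, keepdims=True)  # [..., 1]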
class BatchExpectedImprovement(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""Accurate approximation of the batch expected improvement, using the
    method of Chevalier and Ginsbourger :cite:`chevalier2013fast`.
    Internally, this uses a highly accurate approximation of the cumulative
    distribution function of the multivariate Gaussian, developed by Alan Genz
:cite:`genz2016numerical`.
"""
def __init__(
self,
sample_size: int,
*,
jitter: float = DEFAULTS.JITTER,
):
"""Initialise the BatchExpectedImprovement instance.
:param sample_size: int, number of Sobol samples to use.
:param jitter: float, amount of jitter for Cholesky factorisations.
"""
tf.debugging.assert_positive(sample_size)
tf.debugging.assert_greater_equal(jitter, 0.0)
self._sample_size = sample_size
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"BatchExpectedImprovement({self._sample_size!r}, " f"jitter={self._jitter!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model. Must have event shape [1].
:param dataset: The data from the observer. Must be populated.
:return: The batch *expected improvement* acquisition function.
:raise ValueError (or InvalidArgumentError): If ``dataset`` is not populated, or ``model``
does not have an event shape of [1].
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
# Get mean and covariance
mean, _ = model.predict(dataset.query_points)
tf.debugging.assert_shapes(
[(mean, ["_", 1])],
message="Expected model with event shape [1].",
)
eta = tf.reduce_min(mean, axis=0)
acquisition_function = batch_expected_improvement(
self._sample_size,
model,
eta,
self._jitter,
)
return acquisition_function
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model. Must have event shape [1].
:param dataset: The data from the observer. Must be populated.
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, batch_expected_improvement), [])
# Get mean and covariance
mean, _ = model.predict(dataset.query_points)
eta = tf.reduce_min(mean, axis=0)
function.update(eta=eta) # type: ignore
return function
class batch_expected_improvement(AcquisitionFunctionClass):
def __init__(
self,
sample_size: int,
model: ProbabilisticModel,
eta: TensorType,
jitter: float,
):
"""Initialise the batch_expected_improvement instance.
:param sample_size: int, number of samples to use.
:param model: Gaussian process regression model.
:param eta: Tensor of shape (,), expected improvement threshold. This
            is the best value observed so far during the BO loop.
:param jitter: float, amount of jitter for Cholesky factorisations.
"""
self._sample_size = sample_size
self._jitter = jitter
self._eta = tf.Variable(eta)
self._model = model
self._mvn_cdf_1: Optional[MultivariateNormalCDF] = None
self._mvn_cdf_2: Optional[MultivariateNormalCDF] = None
self._num_sobol_skip = int(tf.math.floor(10**9 * tf.random.uniform((), dtype=tf.float32)))
def update(self, eta: TensorType) -> None:
"""Update the acquisition function with a new eta value and reset the
reparam sampler.
"""
self._eta.assign(eta)
self._num_sobol_skip = int(tf.math.floor(10**9 * tf.random.uniform((), dtype=tf.float32)))
def _compute_bm(
self,
mean: tf.Tensor,
threshold: tf.Tensor,
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensors b and m as detailed in Chevalier and Ginsbourger
:cite:`chevalier2013fast`.
:param mean: Tensor of shape (B, Q)
:param threshold: Tensor of shape (B,)
:returns b: Tensor of shape (B, Q, Q)
:returns m: Tensor of shape (B, Q, Q)
"""
# Check shapes of input tensors
tf.debugging.assert_shapes(
[
(mean, ("B", "Q")),
(threshold, ("B",)),
]
)
# Unpack tensor shape and data type
B, Q = mean.shape
dtype = mean.dtype
# Compute b tensor
threshold = tf.tile(threshold[:, None], (1, Q))
threshold = tf.linalg.diag(threshold) # (B, Q, Q)
b = tf.zeros(shape=(B, Q, Q), dtype=dtype)
b = b - threshold
# Compute m tensor
m = mean[:, None, :] - mean[:, :, None] # (B, Q, Q)
m = m - tf.linalg.diag(mean) # (B, Q, Q)
return b, m
def _delta(
self, idx: int, dim: int, B: int, transpose: bool, dtype: tf.DType
) -> TensorType: # pragma: no cover (tf.map_fn)
"""Helper function for the _compute_Sigma function, which computes a
        *delta* tensor of shape (B, dim, dim) such that
            delta[B, i, :] = 0 if i == idx
            delta[B, i, :] = 1 otherwise.
        If transpose == True, then the last two dimensions of the tensor are
        transposed, in which case
            delta[B, :, i] = 0 if i == idx
            delta[B, :, i] = 1 otherwise.
:param idx: Index for entries equal to 1.
:param dim: Dimension of the last and second to last axes.
:param B: Leading dimension of tensor.
:param transpose: Whether to transpose the last two dimensions or not.
:param dtype: The dtype of the tensor, either tf.float32 or tf.float64.
"""
# Check input parameters
tf.debugging.assert_non_negative(idx)
tf.debugging.assert_non_negative(dim)
tf.debugging.assert_positive(B)
o1 = tf.ones(shape=(B, idx, dim), dtype=dtype)
z1 = tf.zeros(shape=(B, 1, dim), dtype=dtype)
o2 = tf.ones(shape=(B, dim - idx - 1, dim), dtype=dtype)
delta = tf.concat([o1, z1, o2], axis=1)
delta = tf.transpose(delta, perm=[0, 2, 1]) if transpose else delta
return delta
def _compute_Sigma(
self,
covariance: tf.Tensor,
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensor Sigma, as detailed in Chevalier and Ginsbourger
:cite:`chevalier2013fast`.
:param covariance: Tensor of shape (B, Q, Q)
:returns Sigma: Tensor of shape (B, Q, Q, Q)
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes([(covariance, ("B", "Q", "Q"))])
# Unpack tensor shape and dtype
B, Q, _ = covariance.shape
dtype = covariance.dtype
Sigma = tf.zeros(shape=(B, Q, Q, Q))
def compute_single_slice(q: int) -> TensorType: # pragma: no cover (tf.map_fn)
diq = self._delta(q, Q, B, transpose=False, dtype=dtype)
dqj = self._delta(q, Q, B, transpose=True, dtype=dtype)
Sigma_ij = covariance[:, :, :]
Sigma_iq = covariance[:, :, q : q + 1]
Sigma_qj = covariance[:, q : q + 1, :]
Sigma_qq = covariance[:, q : q + 1, q : q + 1]
cov = Sigma_ij * diq * dqj - Sigma_iq * diq - Sigma_qj * dqj + Sigma_qq
return cov
Sigma = tf.map_fn(
compute_single_slice,
tf.range(Q),
fn_output_signature=dtype,
)
Sigma = tf.transpose(Sigma, perm=[1, 0, 2, 3])
return Sigma
def _compute_p(
self,
m_reshaped: tf.Tensor,
b_reshaped: tf.Tensor,
Sigma_reshaped: tf.Tensor,
mvn_cdf: Callable[[TensorType, TensorType, TensorType, float], TensorType],
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensor p, as detailed in Chevalier and Ginsbourger
:cite:`chevalier2013fast`.
:param m_reshaped: Tensor of shape (BQ, Q)
:param b_reshaped: Tensor of shape (BQ, Q)
:param Sigma_reshaped: Tensor of shape (BQ, Q, Q)
:returns p: Tensor of shape (B, Q)
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes(
[
(m_reshaped, ("BQ", "Q")),
(b_reshaped, ("BQ", "Q")),
(Sigma_reshaped, ("BQ", "Q", "Q")),
]
)
# Unpack dtype and mean shape
dtype = m_reshaped.dtype
BQ, Q = m_reshaped.shape # (B*Q, Q)
if BQ % Q == 0:
B = BQ // Q
else:
raise ValueError(
f"Expected size of dimension 0 of m_reshaped tensor to be "
f"divisible by size of dimension 1, instead found "
f"{m_reshaped.shape[0]} and {m_reshaped.shape[1]}."
)
# Compute mean, covariance and x for p mvn normal cdf
p_cdf_mean = tf.zeros(shape=(BQ, Q), dtype=dtype) # (B*Q, Q)
p_cdf_cov = Sigma_reshaped # (B*Q, Q, Q)
p_cdf_x = b_reshaped - m_reshaped # (B*Q, Q)
p = mvn_cdf( # type: ignore
x=p_cdf_x,
mean=p_cdf_mean,
cov=p_cdf_cov,
) # (B*Q,)
p = tf.reshape(p, shape=(B, Q)) # (B, Q)
return p
def _compute_c(
self,
m_reshaped: tf.Tensor,
b_reshaped: tf.Tensor,
Sigma_reshaped: tf.Tensor,
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensor c, which is the c^{(i)} tensor detailed in Chevalier and
Ginsbourger :cite:`chevalier2013fast`.
:param m_reshaped: Tensor of shape (BQ, Q)
:param b_reshaped: Tensor of shape (BQ, Q)
:param Sigma_reshaped: Tensor of shape (BQ, Q, Q)
:returns c: Tensor of shape (B, Q, Q-1)
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes(
[
(m_reshaped, ("BQ", "Q")),
(b_reshaped, ("BQ", "Q")),
(Sigma_reshaped, ("BQ", "Q", "Q")),
]
)
# Unpack tensor shape
BQ, Q = m_reshaped.shape
# Compute difference between b and m tensors
diff = b_reshaped - m_reshaped # (B*Q, Q)
# Compute c, including the ith entry, which we want to remove
cov_ratio = Sigma_reshaped / tf.linalg.diag_part(Sigma_reshaped)[:, :, None] # (B*Q, Q, Q)
c = diff[:, None, :] - diff[:, :, None] * cov_ratio # (B*Q, Q, Q)
# Remove the ith entry by masking c with a boolean mask with False across
# the diagonal and True in the off-diagonal terms
mask = tf.math.logical_not(tf.cast(tf.eye(Q, dtype=tf.int32), dtype=tf.bool))
mask = tf.tile(mask[None, :, :], (c.shape[0], 1, 1))
c = tf.ragged.boolean_mask(c, mask).to_tensor()
return c
def _compute_R(
self,
Sigma_reshaped: tf.Tensor,
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensor R, which is the Sigma^{(i)} tensor detailed in Chevalier
and Ginsbourger :cite:`chevalier2013fast`.
:param Sigma_reshaped: Tensor of shape (BQ, Q, Q)
:returns R: Tensor of shape (B, Q-1, Q-1)
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes([(Sigma_reshaped, ("BQ", "Q", "Q"))])
# Unpack tensor shape
BQ, Q, _ = Sigma_reshaped.shape
Sigma_uv = tf.tile(Sigma_reshaped[:, None, :, :], (1, Q, 1, 1))
Sigma_iu = tf.tile(Sigma_reshaped[:, :, :, None], (1, 1, 1, Q))
Sigma_iv = tf.tile(Sigma_reshaped[:, :, None, :], (1, 1, Q, 1))
Sigma_ii = tf.linalg.diag_part(Sigma_reshaped)[:, :, None, None]
R_whole = Sigma_uv - Sigma_iu * Sigma_iv / Sigma_ii
def create_blocks(q: int) -> TensorType: # pragma: no cover (tf.map_fn)
block1 = tf.concat(
[
R_whole[:, q, :q, :q],
R_whole[:, q, q + 1 :, :q],
],
axis=1,
)
block2 = tf.concat(
[
R_whole[:, q, :q, q + 1 :],
R_whole[:, q, q + 1 :, q + 1 :],
],
axis=1,
)
R_block = tf.concat([block1, block2], axis=2)
return R_block
R = tf.map_fn(
create_blocks,
tf.range(Q),
fn_output_signature=R_whole.dtype,
)
R = tf.transpose(R, perm=[1, 0, 2, 3])
return R
def _compute_Phi(
self,
c: tf.Tensor,
R: tf.Tensor,
mvn_cdf: Callable[[TensorType, TensorType, TensorType, float], TensorType],
) -> TensorType:
"""Helper function for the batch expected improvement, which computes
the tensor Phi, which is the tensor of multivariate Gaussian CDFs, in
the inner sum of the equation (3) in Chevalier and Ginsbourger
:cite:`chevalier2013fast`.
:param c: Tensor of shape (BQ, Q, Q-1).
:param R: Tensor of shape (BQ, Q, Q-1, Q-1).
:param mvn_cdf: Multivariate Gaussian CDF, made using MultivariateNormalCDF.
:returns Phi: Tensor of multivariate Gaussian CDFs.
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes(
[
(c, ("BQ", "Q", "Q_")),
(R, ("BQ", "Q", "Q_", "Q_")),
]
)
# Unpack tensor shape and data type
BQ, Q, _, Q_ = R.shape
dtype = R.dtype
        if BQ % Q != 0:
            raise ValueError(
                f"Expected size of dimension 0 of R tensor to be "
                f"divisible by size of dimension 1, instead found "
                f"{R.shape[0]} and {R.shape[1]}."
            )
# Compute parallelisation dimension from batch size
B = BQ // Q
c_reshaped = tf.reshape(c, (BQ * Q, Q - 1))
R_reshaped = tf.reshape(R, (BQ * Q, Q - 1, Q - 1))
# Compute mean, covariance and x for Phi mvn normal cdf
Phi_cdf_x = c_reshaped # (B*Q, Q-1)
Phi_cdf_mean = tf.zeros(shape=(BQ * Q, Q - 1), dtype=dtype) # (B*Q*Q, Q)
Phi_cdf_cov = R_reshaped # (B*Q*Q, Q-1, Q-1)
# Compute multivariate cdfs
mvn_cdfs = mvn_cdf( # type: ignore
x=Phi_cdf_x,
mean=Phi_cdf_mean,
cov=Phi_cdf_cov,
)
mvn_cdfs = tf.reshape(mvn_cdfs, (B, Q, Q)) # (B, Q, Q)
return mvn_cdfs
def _compute_batch_expected_improvement(
self,
mean: tf.Tensor,
covariance: tf.Tensor,
threshold: tf.Tensor,
mvn_cdf_1: Callable[[TensorType, TensorType, TensorType, float], TensorType],
mvn_cdf_2: Callable[[TensorType, TensorType, TensorType, float], TensorType],
) -> TensorType:
"""Accurate Monte Carlo approximation of the batch expected
improvement, using the method of Chevalier and Ginsbourger
:cite:`chevalier2013fast`.
:param mean: Tensor of shape (B, Q).
:param covariance: Tensor of shape (B, Q, Q).
:param threshold: Tensor of shape (B, Q).
:param mvn_cdf_1: Callable computing the multivariate CDF of a Q-dimensional Gaussian.
:param mvn_cdf_2: Callable computing the multivariate CDF of a (Q-1)-dimensional Gaussian.
:returns ei: Tensor of shape (B,), expected improvement.
"""
# Check shapes of covariance tensor
tf.debugging.assert_shapes(
[
(mean, ("B", "Q")),
(covariance, ("B", "Q", "Q")),
(threshold, ("B",)),
]
)
# Unpack and mean shape
B, Q = mean.shape
# Compute b and m tensors
b, m = self._compute_bm(
mean=mean,
threshold=threshold,
) # (B, Q, Q), (B, Q, Q)
# Compute Sigma
Sigma = self._compute_Sigma(covariance=covariance) # (B, Q, Q, Q)
# Reshape all tensors, for batching
b_reshaped = tf.reshape(b, (B * Q, Q))
m_reshaped = tf.reshape(m, (B * Q, Q))
Sigma_reshaped = tf.reshape(Sigma, (B * Q, Q, Q))
# Compute p tensor
p = self._compute_p(
m_reshaped=m_reshaped,
b_reshaped=b_reshaped,
Sigma_reshaped=Sigma_reshaped,
mvn_cdf=mvn_cdf_1,
)
# Compute c
c = self._compute_c(
m_reshaped=m_reshaped,
b_reshaped=b_reshaped,
Sigma_reshaped=Sigma_reshaped,
) # (B*Q, Q, Q-1)
# Compute Sigma_i
R = self._compute_R(
Sigma_reshaped=Sigma_reshaped,
) # (B*Q, Q, Q-1, Q-1)
# Compute Q-1 multivariate CDFs
Phi_mvn_cdfs = self._compute_Phi(
c=c,
R=R,
mvn_cdf=mvn_cdf_2,
)
# Compute univariate pdfs
S_diag = tf.linalg.diag_part(Sigma)
normal = tfp.distributions.Normal(loc=m, scale=S_diag**0.5)
uvn_pdfs = tf.math.exp(normal.log_prob(b)) # (B, Q, Q)
Sigma_diag = tf.linalg.diag_part(tf.transpose(Sigma, perm=[0, 2, 1, 3]))
Sigma_diag = tf.transpose(Sigma_diag, perm=[0, 2, 1])
T = tf.tile(threshold[:, None], (1, Q))
mean_T_term = (mean - T) * p
# Compute inner sum
sum_term = tf.reduce_sum(
Sigma_diag * uvn_pdfs * Phi_mvn_cdfs,
axis=2,
)
# Compute outer sum
expected_improvement = tf.reduce_sum(mean_T_term + sum_term, axis=1)
return expected_improvement
@tf.function
def __call__(self, x: TensorType) -> TensorType:
"""Computes the accurate approximation of the multi-point expected
improvement.
:param x: Tensor of shape (B, Q, D).
:returns ei: Tensor of shape (B,), expected improvement.
"""
if self._mvn_cdf_1 is None:
self._mvn_cdf_1 = MultivariateNormalCDF(
sample_size=self._sample_size,
dim=x.shape[1],
dtype=x.dtype,
num_sobol_skip=self._num_sobol_skip,
)
if self._mvn_cdf_2 is None:
self._mvn_cdf_2 = MultivariateNormalCDF(
sample_size=self._sample_size,
dim=x.shape[1] - 1,
dtype=x.dtype,
num_sobol_skip=self._num_sobol_skip,
)
mean, covariance = self._model.predict_joint(x) # type: ignore
mean = mean[:, :, 0]
covariance = covariance[:, 0, :, :]
covariance = (
covariance
+ 1e-6
* tf.eye(
covariance.shape[-1],
dtype=covariance.dtype,
)[None, :, :]
)
threshold = tf.tile(self._eta, (mean.shape[0],))
# Check shapes of x, mean, covariance and threshold tensors
tf.debugging.assert_shapes(
[
(x, ("B", "Q", "D")),
(mean, ("B", "Q")),
(covariance, ("B", "Q", "Q")),
(threshold, ("B",)),
]
)
ei = self._compute_batch_expected_improvement(
mean=-mean,
covariance=covariance,
threshold=-threshold,
mvn_cdf_1=self._mvn_cdf_1,
mvn_cdf_2=self._mvn_cdf_2,
)[:, None]
return ei
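# Illustrative sketch (not part of the library): for a batch of size Q = 1 the decomposition
# above should collapse to the classical analytic expected improvement for minimisation,
# EI = (T - mu) * Phi(z) + sigma * phi(z) with z = (T - mu) / sigma and incumbent threshold T.
# The helper below evaluates that closed form for made-up scalar inputs; it only assumes
# TensorFlow and TensorFlow Probability, which this module already imports.
def _example_analytic_ei_q1(mean=0.3, stdev=0.5, threshold=0.0):
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    mu = tf.constant(mean, tf.float64)
    sigma = tf.constant(stdev, tf.float64)
    t = tf.constant(threshold, tf.float64)
    z = (t - mu) / sigma
    # Expected improvement below the incumbent threshold
    return (t - mu) * normal.cdf(z) + sigma * normal.prob(z)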
class MultipleOptimismNegativeLowerConfidenceBound(
SingleModelVectorizedAcquisitionBuilder[ProbabilisticModel]
):
"""
    A simple parallelization of the lower confidence bound acquisition function that produces
    a vectorized acquisition function which can be efficiently optimized even for large batches.
See :cite:`torossian2020bayesian` for details.
"""
def __init__(self, search_space: SearchSpace):
"""
:param search_space: The global search space over which the optimisation is defined.
"""
self._search_space = search_space
def __repr__(self) -> str:
""""""
return f"MultipleOptimismNegativeLowerConfidenceBound({self._search_space!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
:return: The multiple optimism negative lower confidence bound function.
"""
return multiple_optimism_lower_confidence_bound(model, self._search_space.dimension)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
tf.debugging.Assert(
isinstance(function, multiple_optimism_lower_confidence_bound), [tf.constant([])]
)
return function # nothing to update
class multiple_optimism_lower_confidence_bound(AcquisitionFunctionClass):
r"""
The multiple optimism lower confidence bound (MOLCB) acquisition function for single-objective
global optimization.
    Each batch dimension of this acquisition function corresponds to a lower confidence bound
    acquisition function with different beta values, i.e. each point in a batch chosen by this
acquisition function lies on a gradient of exploration/exploitation trade-offs.
We choose the different beta values following the cdf method of :cite:`torossian2020bayesian`.
See their paper for more details.
"""
def __init__(self, model: ProbabilisticModel, search_space_dim: int):
"""
:param model: The model of the objective function.
:param search_space_dim: The dimensions of the optimisation problem's search space.
        :raise tf.errors.InvalidArgumentError: If ``search_space_dim`` is not positive.
"""
tf.debugging.assert_positive(search_space_dim)
self._search_space_dim = search_space_dim
self._model = model
self._initialized = tf.Variable(False) # Keep track of when we need to resample
self._betas = tf.Variable(tf.ones([0], dtype=tf.float64), shape=[None]) # [0] lazy init
@tf.function
def __call__(self, x: TensorType) -> TensorType:
batch_size = tf.shape(x)[-2]
tf.debugging.assert_positive(batch_size)
        if self._initialized:  # check batch size hasn't changed during BO
tf.debugging.assert_equal(
batch_size,
tf.shape(self._betas)[0],
f"{type(self).__name__} requires a fixed batch size. Got batch size {batch_size}"
f" but previous batch size was {tf.shape(self._betas)[0]}.",
)
if not self._initialized:
normal = tfp.distributions.Normal(
tf.cast(0.0, dtype=x.dtype), tf.cast(1.0, dtype=x.dtype)
)
spread = 0.5 + 0.5 * tf.range(1, batch_size + 1, dtype=x.dtype) / (
tf.cast(batch_size, dtype=x.dtype) + 1.0
) # [B]
betas = normal.quantile(spread) # [B]
scaled_betas = 5.0 * tf.cast(self._search_space_dim, dtype=x.dtype) * betas # [B]
self._betas.assign(scaled_betas) # [B]
self._initialized.assign(True)
mean, variance = self._model.predict(x) # [..., B, 1]
mean, variance = tf.squeeze(mean, -1), tf.squeeze(variance, -1)
return -mean + tf.sqrt(variance) * self._betas # [..., B]
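# Illustrative sketch (not part of the library): the betas above are obtained by pushing an
# even grid on (0.5, 1) through the standard normal quantile function and scaling by
# 5 * search_space_dim. The helper below, with a hypothetical batch size of 4 and a
# 2-dimensional search space, shows the resulting increasing spread of beta values.
def _example_molcb_betas(batch_size=4, search_space_dim=2):
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    spread = 0.5 + 0.5 * tf.range(1, batch_size + 1, dtype=tf.float64) / (batch_size + 1.0)
    return 5.0 * search_space_dim * normal.quantile(spread)  # one beta per batch element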
class MakePositive(SingleModelAcquisitionBuilder[ProbabilisticModelType]):
r"""
Converts an acquisition function builder into one that only returns positive values, via
:math:`x \mapsto \log(1 + \exp(x))`.
This is sometimes a useful transformation: for example, converting non-batch acquisition
functions into batch acquisition functions with local penalization requires functions
that only return positive values.
"""
def __init__(
self,
base_acquisition_function_builder: SingleModelAcquisitionBuilder[ProbabilisticModelType],
) -> None:
"""
:param base_acquisition_function_builder: Base acquisition function to be made positive.
"""
self._base_builder = base_acquisition_function_builder
def __repr__(self) -> str:
""""""
return f"MakePositive({self._base_builder})"
def prepare_acquisition_function(
self,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data to use to build the acquisition function (optional).
:return: An acquisition function.
"""
self._base_function = self._base_builder.prepare_acquisition_function(model, dataset)
@tf.function
def acquisition(x: TensorType) -> TensorType:
return tf.math.log(1 + tf.math.exp(self._base_function(x)))
return acquisition
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer (optional).
:return: The updated acquisition function.
"""
up_fn = self._base_builder.update_acquisition_function(self._base_function, model, dataset)
if up_fn is self._base_function:
return function
else:
self._base_function = up_fn
@tf.function
def acquisition(x: TensorType) -> TensorType:
return tf.math.log(1 + tf.math.exp(self._base_function(x)))
return acquisition
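# Illustrative sketch (not part of the library): the softplus transform used above,
# x -> log(1 + exp(x)), maps any real acquisition value to a strictly positive one while
# preserving ordering. The helper below checks this on made-up values only.
def _example_make_positive_transform():
    x = tf.constant([-5.0, 0.0, 5.0], dtype=tf.float64)
    transformed = tf.math.log(1 + tf.math.exp(x))  # same transform as MakePositive
    tf.debugging.assert_positive(transformed)
    return transformed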
| 77,739 | 38.262626 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This folder contains single-objective optimization functions. """
from .active_learning import (
BayesianActiveLearningByDisagreement,
ExpectedFeasibility,
IntegratedVarianceReduction,
PredictiveVariance,
bayesian_active_learning_by_disagreement,
bichon_ranjan_criterion,
integrated_variance_reduction,
predictive_variance,
)
from .continuous_thompson_sampling import (
GreedyContinuousThompsonSampling,
ParallelContinuousThompsonSampling,
)
from .entropy import (
GIBBON,
MinValueEntropySearch,
gibbon_quality_term,
gibbon_repulsion_term,
min_value_entropy_search,
)
from .function import (
AugmentedExpectedImprovement,
BatchExpectedImprovement,
BatchMonteCarloExpectedImprovement,
ExpectedConstrainedImprovement,
ExpectedImprovement,
FastConstraintsFeasibility,
MakePositive,
MonteCarloAugmentedExpectedImprovement,
MonteCarloExpectedImprovement,
MultipleOptimismNegativeLowerConfidenceBound,
NegativeLowerConfidenceBound,
NegativePredictiveMean,
ProbabilityOfFeasibility,
ProbabilityOfImprovement,
augmented_expected_improvement,
batch_expected_improvement,
expected_improvement,
fast_constraints_feasibility,
lower_confidence_bound,
multiple_optimism_lower_confidence_bound,
probability_below_threshold,
)
from .greedy_batch import Fantasizer, LocalPenalization, hard_local_penalizer, soft_local_penalizer
from .multi_objective import (
HIPPO,
BatchMonteCarloExpectedHypervolumeImprovement,
ExpectedConstrainedHypervolumeImprovement,
ExpectedHypervolumeImprovement,
batch_ehvi,
expected_hv_improvement,
)
from .utils import MultivariateNormalCDF
| 2,311 | 32.507246 | 99 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/active_learning.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains acquisition function builders and acquisition functions for Bayesian active
learning.
"""
from __future__ import annotations
import math
from typing import Optional, Sequence, Union
import tensorflow as tf
import tensorflow_probability as tfp
from ...data import Dataset
from ...models import ProbabilisticModel
from ...models.interfaces import FastUpdateModel, SupportsPredictJoint
from ...types import TensorType
from ...utils import DEFAULTS
from ..interface import AcquisitionFunction, AcquisitionFunctionClass, SingleModelAcquisitionBuilder
class PredictiveVariance(SingleModelAcquisitionBuilder[SupportsPredictJoint]):
"""
Builder for the determinant of the predictive covariance matrix over the batch points.
For a batch of size 1 it is the same as maximizing the predictive variance.
"""
def __init__(self, jitter: float = DEFAULTS.JITTER) -> None:
"""
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
"""
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"PredictiveVariance(jitter={self._jitter!r})"
def prepare_acquisition_function(
self,
model: SupportsPredictJoint,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
        :return: The predictive variance acquisition function.
"""
if not isinstance(model, SupportsPredictJoint):
raise NotImplementedError(
f"PredictiveVariance only works with models that support "
f"predict_joint; received {model.__repr__()}"
)
return predictive_variance(model, self._jitter)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: SupportsPredictJoint,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
return function # no need to update anything
def predictive_variance(model: SupportsPredictJoint, jitter: float) -> AcquisitionFunction:
"""
The predictive variance acquisition function for active learning, based on
the determinant of the covariance (see :cite:`MacKay1992` for details).
Note that the model needs to supply covariance of the joint marginal distribution,
which can be expensive to compute.
:param model: The model of the objective function.
:param jitter: The size of the jitter to use when stabilising the Cholesky decomposition of
the covariance matrix.
"""
@tf.function
def acquisition(x: TensorType) -> TensorType:
try:
_, covariance = model.predict_joint(x)
except NotImplementedError:
raise ValueError(
"""
PredictiveVariance only supports models with a predict_joint method.
"""
)
return tf.exp(tf.linalg.logdet(covariance + jitter))
return acquisition
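# Illustrative sketch (not part of the library): the acquisition above scores a batch by the
# determinant of its joint predictive covariance. The toy computation below uses a
# hand-written positive-definite 2x2 covariance for a single batch and mirrors the
# exp(logdet(. + jitter)) expression returned by the closure.
def _example_predictive_variance_score(jitter=1e-6):
    covariance = tf.constant([[[1.0, 0.3], [0.3, 0.5]]], dtype=tf.float64)  # [1, 2, 2]
    return tf.exp(tf.linalg.logdet(covariance + jitter))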
class ExpectedFeasibility(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the Expected feasibility acquisition function for identifying a failure or
feasibility region. It implements two related sampling strategies called *bichon* criterion
(:cite:`bichon2008efficient`) and *ranjan* criterion (:cite:`ranjan2008sequential`). The goal
of both criteria is to sample points with a mean close to the threshold and a high variance.
"""
def __init__(self, threshold: float, alpha: float = 1, delta: int = 1) -> None:
"""
:param threshold: The failure or feasibility threshold.
:param alpha: The parameter which determines the neighbourhood around the estimated contour
line as a percentage of the posterior variance in which to allocate new points. Defaults
to value of 1.
:param delta: The parameter identifying which criterion is used, *bichon* for value of 1
(default) and *ranjan* for value of 2.
:raise ValueError (or InvalidArgumentError): If arguments are not a scalar, or `alpha` is
not positive, or `delta` is not 1 or 2.
"""
tf.debugging.assert_scalar(threshold)
tf.debugging.assert_scalar(alpha)
tf.debugging.assert_positive(alpha, message="Parameter alpha must be positive.")
tf.debugging.assert_scalar(delta)
tf.debugging.Assert(delta in [1, 2], [delta])
self._threshold = threshold
self._alpha = alpha
self._delta = delta
def __repr__(self) -> str:
""""""
return (
f"ExpectedFeasibility(threshold={self._threshold!r}, alpha={self._alpha!r},"
f" delta={self._delta!r})"
)
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
:return: The expected feasibility function. This function will raise
:exc:`ValueError` or :exc:`~tf.errors.InvalidArgumentError` if used with a batch size
greater than one.
"""
return bichon_ranjan_criterion(model, self._threshold, self._alpha, self._delta)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
return function # no need to update anything
def bichon_ranjan_criterion(
model: ProbabilisticModel,
threshold: float,
alpha: float,
delta: int,
) -> AcquisitionFunction:
r"""
Return the *bichon* criterion (:cite:`bichon2008efficient`) and *ranjan* criterion
(:cite:`ranjan2008sequential`) used in Expected feasibility acquisition function for active
learning of failure or feasibility regions.
The problem of identifying a failure or feasibility region of a function :math:`f` can be
formalized as estimating the excursion set, :math:`\Gamma^* = \{ x \in X: f(x) \ge T\}`, or
estimating the contour line, :math:`C^* = \{ x \in X: f(x) = T\}`, for some threshold :math:`T`
(see :cite:`bect2012sequential` for more details).
It turns out that probabilistic models can be used as classifiers for identifying where
excursion probability is larger than 1/2 and this idea is used to build many sequential
sampling strategies. We follow :cite:`bect2012sequential` and use a formulation which provides
a common expression for these two criteria:
.. math:: \mathbb{E}[\max(0, (\alpha s(x))^\delta - |T - m(x)|^\delta)]
Here :math:`m(x)` and :math:`s(x)` are the mean and standard deviation of the predictive
posterior of a probabilistic model. *Bichon* criterion is obtained when :math:`\delta = 1` while
*ranjan* criterion is obtained when :math:`\delta = 2`. :math:`\alpha>0` is another parameter
that acts as a percentage of standard deviation of the posterior around the current boundary
estimate where we want to sample. The goal is to sample a point with a mean close to the
threshold :math:`T` and a high variance, so that the positive difference in the equation above
is as large as possible.
Note that only batches of size 1 are allowed.
:param model: The probabilistic model of the objective function.
:param threshold: The failure or feasibility threshold.
:param alpha: The parameter which determines the neighbourhood around the estimated contour
line as a percentage of the posterior variance in which to allocate new points.
:param delta: The parameter identifying which criterion is used, *bichon* for value of 1
and *ranjan* for value of 2.
"""
@tf.function
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = model.predict(tf.squeeze(x, -2))
stdev = tf.sqrt(variance)
t = (threshold - mean) / stdev
t_plus = t + alpha
t_minus = t - alpha
normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
if delta == 1:
G = (
alpha * (normal.cdf(t_plus) - normal.cdf(t_minus))
- t * (2 * normal.cdf(t) - normal.cdf(t_plus) - normal.cdf(t_minus))
- (2 * normal.prob(t) - normal.prob(t_plus) - normal.prob(t_minus))
)
tf.debugging.check_numerics(G, "NaN or Inf values encountered in criterion")
criterion = G * stdev
elif delta == 2:
G = (
(alpha**2 - 1 - t**2) * (normal.cdf(t_plus) - normal.cdf(t_minus))
- 2 * t * (normal.prob(t_plus) - normal.prob(t_minus))
+ t_plus * normal.prob(t_plus)
- t_minus * normal.prob(t_minus)
)
tf.debugging.check_numerics(G, "NaN or Inf values encountered in criterion")
criterion = G * variance
return criterion
return acquisition
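# Illustrative sketch (not part of the library): the *bichon* criterion (delta = 1) evaluated
# by hand for a single point with made-up posterior mean, variance and threshold, mirroring
# the expressions inside the closure above.
def _example_bichon_criterion(mean=0.2, variance=0.09, threshold=0.0, alpha=1.0):
    mean_t = tf.constant(mean, tf.float64)
    stdev = tf.sqrt(tf.constant(variance, tf.float64))
    t = (threshold - mean_t) / stdev
    t_plus, t_minus = t + alpha, t - alpha
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    G = (
        alpha * (normal.cdf(t_plus) - normal.cdf(t_minus))
        - t * (2 * normal.cdf(t) - normal.cdf(t_plus) - normal.cdf(t_minus))
        - (2 * normal.prob(t) - normal.prob(t_plus) - normal.prob(t_minus))
    )
    return G * stdev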
class IntegratedVarianceReduction(SingleModelAcquisitionBuilder[FastUpdateModel]):
"""
Builder for the reduction of the integral of the predicted variance over the search
space given a batch of query points.
"""
def __init__(
self,
integration_points: TensorType,
threshold: Optional[Union[float, Sequence[float], TensorType]] = None,
) -> None:
"""
:param integration_points: set of points to integrate the prediction variance over.
:param threshold: either None, a float or a sequence of 1 or 2 float values.
"""
self._integration_points = integration_points
self._threshold = threshold
def __repr__(self) -> str:
""""""
return f"IntegratedVarianceReduction(threshold={self._threshold!r})"
def prepare_acquisition_function(
self,
model: FastUpdateModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
:return: The integral of the predictive variance.
"""
if not isinstance(model, FastUpdateModel):
raise NotImplementedError(
f"PredictiveVariance only works with FastUpdateModel models; "
f"received {model.__repr__()}"
)
return integrated_variance_reduction(model, self._integration_points, self._threshold)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: FastUpdateModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
return function # no need to update anything
class integrated_variance_reduction(AcquisitionFunctionClass):
r"""
The reduction of the (weighted) average of the predicted variance over the integration points
    (a.k.a. Integrated Mean Squared Error or IMSE criterion).
See :cite:`Picheny2010` for details.
The criterion (to maximise) writes as:
.. math:: \int_x (v_{old}(x) - v_{new}(x)) * weights(x),
where :math:`v_{old}(x)` is the predictive variance of the model at :math:`x`, and
:math:`v_{new}(x)` is the updated predictive variance, given that the GP is further
conditioned on the query points.
Note that since :math:`v_{old}(x)` is constant w.r.t. the query points, this function
only returns :math:`-\int_x v_{new}(x) * weights(x)`.
If no threshold is provided, the goal is to learn a globally accurate model, and
the predictive variance (:math:`v_{new}`) is used. Otherwise, learning is 'targeted'
towards regions where the GP is close to particular values, and the variance is weighted
by the posterior GP pdf evaluated at the threshold T (if a single value is given) or by the
probability that the GP posterior belongs to the interval between the 2 thresholds T1 and T2
(note the slightly different parametrisation compared to :cite:`Picheny2010` in that case).
This criterion allows batch size > 1. Note that the computational cost grows cubically with
the batch size.
This criterion requires a method (conditional_predict_f) to compute the new predictive variance
given that query points are added to the data.
"""
def __init__(
self,
model: FastUpdateModel,
integration_points: TensorType,
threshold: Optional[Union[float, Sequence[float], TensorType]] = None,
):
"""
:param model: The model of the objective function.
:param integration_points: Points over which to integrate the objective prediction variance.
:param threshold: Either None, a float or a sequence of 1 or 2 float values.
See class docs for details.
:raise ValueError (or InvalidArgumentError): If ``threshold`` has more than 2 values.
"""
self._model = model
tf.debugging.assert_equal(
len(tf.shape(integration_points)),
2,
message="integration_points must be of shape [N, D]",
)
tf.debugging.assert_positive(
tf.shape(integration_points)[0],
message="integration_points should contain at least one point",
)
self._integration_points = integration_points
if threshold is None:
self._weights = tf.cast(1.0, integration_points.dtype)
else:
if isinstance(threshold, float):
t_threshold = tf.cast([threshold], integration_points.dtype)
else:
t_threshold = tf.cast(threshold, integration_points.dtype)
tf.debugging.assert_rank(
t_threshold,
1,
message=f"threshold should be a float, a sequence "
f"or a rank 1 tensor, received {tf.shape(t_threshold)}",
)
tf.debugging.assert_less_equal(
tf.size(t_threshold),
2,
message=f"threshold should have one or two values,"
f" received {tf.size(t_threshold)}",
)
tf.debugging.assert_greater_equal(
tf.size(t_threshold),
1,
message=f"threshold should have one or two values,"
f" received {tf.size(t_threshold)}",
)
if tf.size(t_threshold) > 1:
tf.debugging.assert_greater_equal(
t_threshold[1],
t_threshold[0],
message=f"threshold values should be in increasing order,"
f" received {t_threshold}",
)
if tf.size(t_threshold) == 1:
mean_old, var_old = self._model.predict(query_points=integration_points)
distr = tfp.distributions.Normal(mean_old, tf.sqrt(var_old))
self._weights = distr.prob(t_threshold[0])
else:
mean_old, var_old = self._model.predict(query_points=integration_points)
distr = tfp.distributions.Normal(mean_old, tf.sqrt(var_old))
self._weights = distr.cdf(t_threshold[1]) - distr.cdf(t_threshold[0])
@tf.function
def __call__(self, x: TensorType) -> TensorType:
additional_data = Dataset(x, tf.ones_like(x[..., 0:1]))
_, variance = self._model.conditional_predict_f(
query_points=self._integration_points, additional_data=additional_data
)
return -tf.reduce_mean(variance * self._weights, axis=-2)
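# Illustrative sketch (not part of the library): the targeted-learning weights used above are
# either a Gaussian pdf at a single threshold or the probability mass between two thresholds.
# The helper below shows both variants for made-up posterior moments.
def _example_imse_weights():
    mean = tf.constant([[0.1], [0.4]], dtype=tf.float64)
    var = tf.constant([[0.04], [0.09]], dtype=tf.float64)
    distr = tfp.distributions.Normal(mean, tf.sqrt(var))
    single = distr.prob(tf.constant(0.25, tf.float64))  # weight for a single threshold T
    interval = distr.cdf(tf.constant(0.5, tf.float64)) - distr.cdf(tf.constant(0.0, tf.float64))
    return single, interval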
class BayesianActiveLearningByDisagreement(SingleModelAcquisitionBuilder[ProbabilisticModel]):
"""
Builder for the *Bayesian Active Learning By Disagreement* acquisition function defined in
:cite:`houlsby2011bayesian`.
"""
def __init__(self, jitter: float = DEFAULTS.JITTER) -> None:
"""
:param jitter: The size of the jitter to avoid numerical problem caused by the
log operation if variance is close to zero.
"""
self._jitter = jitter
def __repr__(self) -> str:
""""""
return f"BayesianActiveLearningByDisagreement(jitter={self._jitter!r})"
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: Unused.
        :return: The Bayesian active learning by disagreement acquisition function.
"""
return bayesian_active_learning_by_disagreement(model, self._jitter)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: Unused.
"""
return function # no need to update anything
class bayesian_active_learning_by_disagreement(AcquisitionFunctionClass):
def __init__(self, model: ProbabilisticModel, jitter: float):
r"""
        The Bayesian active learning by disagreement acquisition function computes
        the information gain of the predictive entropy :cite:`houlsby2011bayesian`.
        The acquisition function is calculated as:
.. math::
\mathrm{h}\left(\Phi\left(\frac{\mu_{\boldsymbol{x}, \mathcal{D}}}
{\sqrt{\sigma_{\boldsymbol{x}, \mathcal{D}}^{2}+1}}\right)\right)
-\frac{C \exp \left(-\frac{\mu_{\boldsymbol{x}, \mathcal{D}}^{2}}
            {2\left(\sigma_{\boldsymbol{x}, \mathcal{D}}^{2}+C^{2}\right)}\right)}
{\sqrt{\sigma_{\boldsymbol{x}, \mathcal{D}}^{2}+C^{2}}}
Here :math:`\mathrm{h}(p)` is defined as:
.. math::
\mathrm{h}(p)=-p \log p-(1-p) \log (1-p)
        This acquisition function is intended to be used for binary Gaussian process
        classification models with a Bernoulli likelihood. It is designed for VGP, but other
        Gaussian approximations of the posterior can be used, SVGP for instance, or some other
        model that is not currently supported by Trieste. Integrating over nuisance parameters
        is currently not supported (see equation 6 of the paper).
:param model: The model of the objective function.
:param jitter: The size of the jitter to avoid numerical problem caused by the
log operation if variance is close to zero.
:return: The Bayesian Active Learning By Disagreement acquisition function.
"""
tf.debugging.assert_positive(jitter, message="Jitter must be positive.")
self._model = model
self._jitter = jitter
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
mean, variance = self._model.predict(tf.squeeze(x, -2))
variance = tf.maximum(variance, self._jitter)
normal = tfp.distributions.Normal(tf.cast(0, mean.dtype), tf.cast(1, mean.dtype))
p = normal.cdf((mean / tf.sqrt(variance + 1)))
C2 = (math.pi * tf.math.log(tf.cast(2, mean.dtype))) / 2
Ef = (tf.sqrt(C2) / tf.sqrt(variance + C2)) * tf.exp(-(mean**2) / (2 * (variance + C2)))
return -p * tf.math.log(p + self._jitter) - (1 - p) * tf.math.log(1 - p + self._jitter) - Ef
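# Illustrative sketch (not part of the library): the BALD score above written out for a single
# made-up latent mean and variance, using the same moment-matching constant C^2 = pi * ln(2) / 2.
def _example_bald_score(mean=0.5, variance=1.2, jitter=1e-7):
    mu = tf.constant(mean, tf.float64)
    var = tf.constant(variance, tf.float64)
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    p = normal.cdf(mu / tf.sqrt(var + 1.0))  # predictive Bernoulli probability
    C2 = (math.pi * tf.math.log(tf.constant(2.0, tf.float64))) / 2.0
    Ef = tf.sqrt(C2) / tf.sqrt(var + C2) * tf.exp(-(mu**2) / (2.0 * (var + C2)))
    entropy = -p * tf.math.log(p + jitter) - (1.0 - p) * tf.math.log(1.0 - p + jitter)
    return entropy - Ef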
| 20,764 | 39.320388 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/function/entropy.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains entropy-based acquisition function builders.
"""
from __future__ import annotations
from typing import List, Optional, TypeVar, cast, overload
import tensorflow as tf
import tensorflow_probability as tfp
from typing_extensions import Protocol, runtime_checkable
from ...data import Dataset, add_fidelity_column
from ...models import ProbabilisticModel
from ...models.gpflow.interface import SupportsCovarianceBetweenPoints
from ...models.interfaces import (
HasTrajectorySampler,
SupportsCovarianceWithTopFidelity,
SupportsGetObservationNoise,
)
from ...space import SearchSpace
from ...types import TensorType
from ..interface import (
AcquisitionFunction,
AcquisitionFunctionClass,
PenalizationFunction,
ProbabilisticModelType,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
UpdatablePenalizationFunction,
)
from ..sampler import ExactThompsonSampler, ThompsonSampler
CLAMP_LB = 1e-8
class MinValueEntropySearch(SingleModelAcquisitionBuilder[ProbabilisticModelType]):
r"""
Builder for the max-value entropy search acquisition function modified for objective
minimisation. :class:`MinValueEntropySearch` estimates the information in the distribution
of the objective minimum that would be gained by evaluating the objective at a given point.
This implementation largely follows :cite:`wang2017max` and samples the objective's minimum
:math:`y^*` across a large set of sampled locations via either a Gumbel sampler, an exact
Thompson sampler or an approximate random Fourier feature-based Thompson sampler, with the
Gumbel sampler being the cheapest but least accurate. Default behavior is to use the
exact Thompson sampler.
"""
@overload
def __init__(
self: "MinValueEntropySearch[ProbabilisticModel]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: None = None,
):
...
@overload
def __init__(
self: "MinValueEntropySearch[ProbabilisticModelType]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None,
):
...
def __init__(
self,
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[ProbabilisticModelType]] = None,
):
"""
:param search_space: The global search space over which the optimisation is defined.
:param num_samples: Number of samples to draw from the distribution over the minimum of the
objective function.
:param grid_size: Size of the grid from which to sample the min-values. We recommend
scaling this with search space dimension.
:param min_value_sampler: Sampler which samples minimum values.
:raise tf.errors.InvalidArgumentError: If
- ``num_samples`` or ``grid_size`` are negative.
"""
tf.debugging.assert_positive(num_samples)
tf.debugging.assert_positive(grid_size)
if min_value_sampler is not None:
if not min_value_sampler.sample_min_value:
raise ValueError(
"""
Minvalue Entropy Search requires a min_value_sampler that samples minimum
values, however the passed sampler has sample_min_value=False.
"""
)
else:
min_value_sampler = ExactThompsonSampler(sample_min_value=True)
self._min_value_sampler = min_value_sampler
self._search_space = search_space
self._num_samples = num_samples
self._grid_size = grid_size
def prepare_acquisition_function(
self,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer.
:return: The max-value entropy search acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
query_points = self._search_space.sample(num_samples=self._grid_size)
tf.debugging.assert_same_float_dtype([dataset.query_points, query_points])
query_points = tf.concat([dataset.query_points, query_points], 0)
min_value_samples = self._min_value_sampler.sample(model, self._num_samples, query_points)
return min_value_entropy_search(model, min_value_samples)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(isinstance(function, min_value_entropy_search), [tf.constant([])])
query_points = self._search_space.sample(num_samples=self._grid_size)
tf.debugging.assert_same_float_dtype([dataset.query_points, query_points])
query_points = tf.concat([dataset.query_points, query_points], 0)
min_value_samples = self._min_value_sampler.sample(model, self._num_samples, query_points)
function.update(min_value_samples) # type: ignore
return function
class min_value_entropy_search(AcquisitionFunctionClass):
def __init__(self, model: ProbabilisticModel, samples: TensorType):
r"""
Return the max-value entropy search acquisition function (adapted from :cite:`wang2017max`),
modified for objective minimisation. This function calculates the information gain (or
change in entropy) in the distribution over the objective minimum :math:`y^*`, if we were
to evaluate the objective at a given point.
:param model: The model of the objective function.
:param samples: Samples from the distribution over :math:`y^*`.
:return: The max-value entropy search acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise ValueError or tf.errors.InvalidArgumentError: If ``samples`` has rank less than two,
or is empty.
"""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
self._model = model
self._samples = tf.Variable(samples)
def update(self, samples: TensorType) -> None:
"""Update the acquisition function with new samples."""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
self._samples.assign(samples)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
fmean, fvar = self._model.predict(tf.squeeze(x, -2))
fsd = tf.math.sqrt(fvar)
fsd = tf.clip_by_value(
fsd, CLAMP_LB, fmean.dtype.max
) # clip below to improve numerical stability
normal = tfp.distributions.Normal(tf.cast(0, fmean.dtype), tf.cast(1, fmean.dtype))
gamma = (tf.squeeze(self._samples) - fmean) / fsd
log_minus_cdf = normal.log_cdf(-gamma)
ratio = tf.math.exp(normal.log_prob(gamma) - log_minus_cdf)
f_acqu_x = -gamma * ratio / 2 - log_minus_cdf
return tf.math.reduce_mean(f_acqu_x, axis=1, keepdims=True)
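# Illustrative sketch (not part of the library): the per-sample information gain used above,
# evaluated for one made-up posterior and two made-up minimum-value samples. gamma is the
# standardised gap to each sampled minimum and the result averages
# -gamma * ratio / 2 - log Phi(-gamma) over the samples, exactly as in the class above.
def _example_mes_information_gain():
    fmean = tf.constant([[0.2]], dtype=tf.float64)
    fsd = tf.constant([[0.4]], dtype=tf.float64)
    samples = tf.constant([-0.1, -0.3], dtype=tf.float64)  # hypothetical y* samples
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    gamma = (samples - fmean) / fsd  # [1, 2] via broadcasting
    log_minus_cdf = normal.log_cdf(-gamma)
    ratio = tf.math.exp(normal.log_prob(gamma) - log_minus_cdf)
    return tf.math.reduce_mean(-gamma * ratio / 2 - log_minus_cdf, axis=1, keepdims=True)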
@runtime_checkable
class SupportsCovarianceObservationNoise(
SupportsCovarianceBetweenPoints, SupportsGetObservationNoise, Protocol
):
"""A model that supports both covariance_between_points and get_observation_noise."""
pass
class SupportsCovarianceObservationNoiseTrajectory(
HasTrajectorySampler, SupportsCovarianceObservationNoise, Protocol
):
"""A model that supports covariance_between_points and get_observation_noise, and also
has an associated trajectory sampler."""
GIBBONModelType = TypeVar(
"GIBBONModelType", bound=SupportsCovarianceObservationNoise, contravariant=True
)
""" Type variable bound to :class:`~trieste.models.SupportsCovarianceObservationNoise`. """
class GIBBON(SingleModelGreedyAcquisitionBuilder[GIBBONModelType]):
r"""
The General-purpose Information-Based Bayesian Optimisation (GIBBON) acquisition function
of :cite:`Moss:2021`. :class:`GIBBON` provides a computationally cheap approximation of the
information gained about (i.e the change in entropy of) the objective function's minimum by
evaluating a batch of candidate points. Batches are built in a greedy manner.
This implementation follows :cite:`Moss:2021` but is modified for function
minimisation (rather than maximisation). We sample the objective's minimum
:math:`y^*` across a large set of sampled locations via either a Gumbel sampler, an exact
Thompson sampler or an approximate random Fourier feature-based Thompson sampler, with the
Gumbel sampler being the cheapest but least accurate. Default behavior is to use the
exact Thompson sampler.
"""
@overload
def __init__(
self: "GIBBON[SupportsCovarianceObservationNoise]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: None = None,
rescaled_repulsion: bool = True,
):
...
@overload
def __init__(
self: "GIBBON[GIBBONModelType]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[GIBBONModelType]] = None,
rescaled_repulsion: bool = True,
):
...
def __init__(
self,
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[GIBBONModelType]] = None,
rescaled_repulsion: bool = True,
):
"""
:param search_space: The global search space over which the optimisation is defined.
:param num_samples: Number of samples to draw from the distribution over the minimum of
the objective function.
:param grid_size: Size of the grid from which to sample the min-values. We recommend
scaling this with search space dimension.
:param min_value_sampler: Sampler which samples minimum values.
:param rescaled_repulsion: If True, then downweight GIBBON's repulsion term to improve
batch optimization performance.
:raise tf.errors.InvalidArgumentError: If
- ``num_samples`` is not positive, or
- ``grid_size`` is not positive.
"""
tf.debugging.assert_positive(num_samples)
tf.debugging.assert_positive(grid_size)
if min_value_sampler is not None:
if not min_value_sampler.sample_min_value:
raise ValueError(
"""
GIBBON requires a min_value_sampler that samples minimum values,
however the passed sampler has sample_min_value=False.
"""
)
else:
min_value_sampler = ExactThompsonSampler(sample_min_value=True)
self._min_value_sampler = min_value_sampler
self._search_space = search_space
self._num_samples = num_samples
self._grid_size = grid_size
self._rescaled_repulsion = rescaled_repulsion
self._min_value_samples: Optional[TensorType] = None
self._quality_term: Optional[gibbon_quality_term] = None
self._diversity_term: Optional[gibbon_repulsion_term] = None
self._gibbon_acquisition: Optional[AcquisitionFunction] = None
def prepare_acquisition_function(
self,
model: GIBBONModelType,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:param pending_points: The points we penalize with respect to.
:return: The GIBBON acquisition function modified for objective minimisation.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
if not isinstance(model, SupportsCovarianceObservationNoise):
raise NotImplementedError(
f"GIBBON only works with models that support "
f"covariance_between_points and get_observation_noise; received {model.__repr__()}"
)
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
acq = self._update_quality_term(dataset, model)
if pending_points is not None and len(pending_points) != 0:
acq = self._update_repulsion_term(acq, dataset, model, pending_points)
return acq
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: GIBBONModelType,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer. Must be populated.
:param pending_points: Points already chosen to be in the current batch (of shape [M,D]),
where M is the number of pending points and D is the search space dimension.
:param new_optimization_step: Indicates whether this call to update_acquisition_function
is to start of a new optimization step, or to continue collecting batch of points
for the current step. Defaults to ``True``.
:return: The updated acquisition function.
"""
tf.debugging.Assert(dataset is not None, [tf.constant([])])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
tf.debugging.Assert(self._quality_term is not None, [tf.constant([])])
if new_optimization_step:
self._update_quality_term(dataset, model)
if pending_points is None:
# no repulsion term required if no pending_points.
return cast(AcquisitionFunction, self._quality_term)
return self._update_repulsion_term(function, dataset, model, pending_points)
def _update_repulsion_term(
self,
function: Optional[AcquisitionFunction],
dataset: Dataset,
model: GIBBONModelType,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
tf.debugging.assert_rank(pending_points, 2)
if self._gibbon_acquisition is not None and isinstance(
self._diversity_term, gibbon_repulsion_term
):
# if possible, just update the repulsion term
self._diversity_term.update(pending_points)
return self._gibbon_acquisition
else:
# otherwise construct a new repulsion term and acquisition function
self._diversity_term = gibbon_repulsion_term(
model, pending_points, rescaled_repulsion=self._rescaled_repulsion
)
self._gibbon_acquisition = GibbonAcquisition(
cast(AcquisitionFunction, self._quality_term), self._diversity_term
)
return self._gibbon_acquisition
def _update_quality_term(self, dataset: Dataset, model: GIBBONModelType) -> AcquisitionFunction:
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
query_points = self._search_space.sample(num_samples=self._grid_size)
tf.debugging.assert_same_float_dtype([dataset.query_points, query_points])
query_points = tf.concat([dataset.query_points, query_points], 0)
self._min_value_samples = self._min_value_sampler.sample(
model, self._num_samples, query_points
)
if self._quality_term is not None: # if possible, just update the quality term
self._quality_term.update(self._min_value_samples)
else: # otherwise build quality term
self._quality_term = gibbon_quality_term(model, self._min_value_samples)
return cast(AcquisitionFunction, self._quality_term)
class GibbonAcquisition:
"""Class representing a GIBBON acquisition function."""
    # (note that this needs to be defined as a top level class to make it pickleable)
def __init__(self, quality_term: AcquisitionFunction, diversity_term: PenalizationFunction):
"""
:param quality_term: Quality term.
:param diversity_term: Diversity term.
"""
self._quality_term = quality_term
self._diversity_term = diversity_term
@tf.function
def __call__(self, x: TensorType) -> TensorType:
return self._diversity_term(x) + self._quality_term(x)
class gibbon_quality_term(AcquisitionFunctionClass):
def __init__(self, model: SupportsCovarianceObservationNoise, samples: TensorType):
"""
GIBBON's quality term measures the amount of information that each individual
batch element provides about the objective function's minimal value :math:`y^*` (ensuring
that evaluations are targeted in promising areas of the space).
:param model: The model of the objective function. GIBBON requires a model with
a :method:covariance_between_points method and so GIBBON only
supports :class:`GaussianProcessRegression` models.
:param samples: Samples from the distribution over :math:`y^*`.
:return: GIBBON's quality term. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise ValueError or tf.errors.InvalidArgumentError: If ``samples`` does not have rank two,
or is empty, or if ``model`` has no homoscedastic observation noise.
:raise AttributeError: If ``model`` doesn't implement covariance_between_points method.
"""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
try:
model.get_observation_noise()
except NotImplementedError:
raise ValueError(
"""
GIBBON only currently supports homoscedastic gpflow models
with a likelihood.variance attribute.
"""
)
self._model = model
self._samples = tf.Variable(samples)
def update(self, samples: TensorType) -> None:
"""Update the acquisition function with new samples."""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
self._samples.assign(samples)
@tf.function
def __call__(self, x: TensorType) -> TensorType: # [N, D] -> [N, 1]
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
fmean, fvar = self._model.predict(tf.squeeze(x, -2))
noise_variance = self._model.get_observation_noise()
yvar = fvar + tf.cast(noise_variance, fmean.dtype) # predictive variance of observations
rho_squared = fvar / yvar # squared correlation between observations and latent function
fsd = tf.clip_by_value(
tf.math.sqrt(fvar), CLAMP_LB, fmean.dtype.max
) # clip below to improve numerical stability
gamma = (tf.squeeze(self._samples) - fmean) / fsd
normal = tfp.distributions.Normal(tf.cast(0, fmean.dtype), tf.cast(1, fmean.dtype))
log_minus_cdf = normal.log_cdf(-gamma)
ratio = tf.math.exp(normal.log_prob(gamma) - log_minus_cdf)
inner_log = 1 + rho_squared * ratio * (gamma - ratio)
return -0.5 * tf.math.reduce_mean(tf.math.log(inner_log), axis=1, keepdims=True) # [N, 1]
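# Illustrative sketch (not part of the library): GIBBON's quality term differs from plain
# max-value entropy search by the squared observation/latent correlation rho^2. The helper
# below evaluates -0.5 * log(1 + rho^2 * ratio * (gamma - ratio)) for made-up values,
# mirroring the expression in the class above.
def _example_gibbon_quality(gamma_value=-0.5, rho_squared=0.8):
    normal = tfp.distributions.Normal(tf.constant(0.0, tf.float64), tf.constant(1.0, tf.float64))
    gamma = tf.constant(gamma_value, tf.float64)
    log_minus_cdf = normal.log_cdf(-gamma)
    ratio = tf.math.exp(normal.log_prob(gamma) - log_minus_cdf)
    return -0.5 * tf.math.log(1 + rho_squared * ratio * (gamma - ratio))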
class gibbon_repulsion_term(UpdatablePenalizationFunction):
def __init__(
self,
model: SupportsCovarianceObservationNoise,
pending_points: TensorType,
rescaled_repulsion: bool = True,
):
r"""
GIBBON's repulsion term encourages diversity within the batch
(achieving high values for points with low predictive correlation).
The repulsion term :math:`r=\log |C|` is given by the log determinant of the predictive
correlation matrix :math:`C` between the `m` pending points and the current candidate.
        The predictive covariance :math:`V` can be expressed as :math:`V = [[v, A], [A, B]]` for a
        tensor :math:`B` with shape [`m`,`m`] and so we can efficiently calculate :math:`|V|` using
        the formula for the determinant of block matrices, i.e.
:math:`|V| = (v - A^T * B^{-1} * A) * |B|`.
Note that when using GIBBON for purely sequential optimization, the repulsion term is
not required.
        As GIBBON's batches are built in a greedy manner, i.e. sequentially adding points to build
set of `m` pending points, we need only ever calculate the entropy reduction provided by
adding the current candidate point to the current pending points, not the full information
gain provided by evaluating all the pending points. This allows for a modest computational
saving.
When performing batch BO, GIBBON's approximation can sometimes become
less accurate as its repulsion term dominates. Therefore, we follow the
arguments of :cite:`Moss:2021` and divide GIBBON's repulsion term by :math:`B^{2}`. This
behavior can be deactivated by setting `rescaled_repulsion` to False.
:param model: The model of the objective function. GIBBON requires a model with
a :method:covariance_between_points method and so GIBBON only
supports :class:`GaussianProcessRegression` models.
:param pending_points: The points already chosen in the current batch.
:param rescaled_repulsion: If True, then downweight GIBBON's repulsion term to improve
batch optimization performance.
:return: GIBBON's repulsion term. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise ValueError or tf.errors.InvalidArgumentError: If ``pending_points`` does not have
rank two, or is empty, or if ``model`` has no homoscedastic observation noise.
:raise AttributeError: If ``model`` doesn't implement covariance_between_points method.
"""
tf.debugging.assert_rank(pending_points, 2)
tf.debugging.assert_positive(len(pending_points))
try:
model.get_observation_noise()
except NotImplementedError:
raise ValueError(
"""
GIBBON only currently supports homoscedastic gpflow models
with a likelihood.variance attribute.
"""
)
if not hasattr(model, "covariance_between_points"):
raise AttributeError(
"""
GIBBON only supports models with a covariance_between_points method.
"""
)
self._model = model
self._pending_points = tf.Variable(pending_points, shape=[None, *pending_points.shape[1:]])
self._rescaled_repulsion = rescaled_repulsion
def update(
self,
pending_points: TensorType,
lipschitz_constant: TensorType = None,
eta: TensorType = None,
) -> None:
"""Update the repulsion term with new variable values."""
self._pending_points.assign(pending_points)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This penalization function cannot be calculated for batches of points.",
)
fmean, fvar = self._model.predict(tf.squeeze(x, -2))
noise_variance = self._model.get_observation_noise()
yvar = fvar + noise_variance # need predictive variance of observations
_, B = self._model.predict_joint(self._pending_points) # [1, m, m]
B_shape = tf.shape(B)
noise = noise_variance * tf.eye(
B_shape[-2], batch_shape=B_shape[:-2], dtype=B.dtype
) # need predictive variance of observations
L = tf.linalg.cholesky(B + noise)
A = tf.squeeze(
tf.expand_dims(
self._model.covariance_between_points(tf.squeeze(x, -2), self._pending_points),
axis=-1,
),
axis=0,
) # [N, m, 1]
L_inv_A = tf.linalg.triangular_solve(L, A)
V_det = yvar - tf.squeeze(
tf.matmul(L_inv_A, L_inv_A, transpose_a=True), -1
) # equation for determinant of block matrices
repulsion = 0.5 * (tf.math.log(V_det) - tf.math.log(yvar))
if self._rescaled_repulsion:
batch_size = tf.cast(tf.shape(self._pending_points)[0], dtype=fmean.dtype)
repulsion_weight = (1 / batch_size) ** (2)
else:
repulsion_weight = 1.0
return repulsion_weight * repulsion
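# Illustrative sketch (not part of the library): the repulsion term relies on the block
# determinant identity |V| = (v - A^T B^{-1} A) |B|. The helper below verifies it numerically
# on a small hand-written positive-definite matrix.
def _example_block_determinant_identity():
    v = tf.constant([[2.0]], dtype=tf.float64)  # top-left 1x1 block
    A = tf.constant([[0.3], [0.1]], dtype=tf.float64)  # off-diagonal block, shape [2, 1]
    B = tf.constant([[1.5, 0.2], [0.2, 1.0]], dtype=tf.float64)  # bottom-right 2x2 block
    V = tf.concat([tf.concat([v, tf.transpose(A)], axis=1), tf.concat([A, B], axis=1)], axis=0)
    direct = tf.linalg.det(V)
    via_blocks = (v - tf.matmul(A, tf.linalg.solve(B, A), transpose_a=True)) * tf.linalg.det(B)
    return direct, via_blocks  # the two values agree up to numerical error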
MUMBOModelType = TypeVar(
"MUMBOModelType", bound=SupportsCovarianceWithTopFidelity, contravariant=True
)
""" Type variable bound to :class:`~trieste.models.SupportsCovarianceWithTopFidelity`. """
class MUMBO(MinValueEntropySearch[MUMBOModelType]):
r"""
    Builder for the MUlti-task Max-value Bayesian Optimization (MUMBO) acquisition function
    modified for objective minimisation. :class:`MUMBO` estimates the information in the
distribution of the objective minimum that would be gained by evaluating the objective at a
given point on a given fidelity level.
This implementation largely follows :cite:`moss2021mumbo` and samples the objective's minimum
:math:`y^*` across a large set of sampled locations via either a Gumbel sampler, an exact
Thompson sampler or an approximate random Fourier feature-based Thompson sampler, with the
Gumbel sampler being the cheapest but least accurate. Default behavior is to use the
exact Thompson sampler.
"""
@overload
def __init__(
self: "MUMBO[SupportsCovarianceWithTopFidelity]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: None = None,
):
...
@overload
def __init__(
self: "MUMBO[MUMBOModelType]",
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[MUMBOModelType]] = None,
):
...
def __init__(
self,
search_space: SearchSpace,
num_samples: int = 5,
grid_size: int = 1000,
min_value_sampler: Optional[ThompsonSampler[MUMBOModelType]] = None,
):
super().__init__(search_space, num_samples, grid_size, min_value_sampler)
def prepare_acquisition_function(
self,
model: MUMBOModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param model: The multifidelity model.
:param dataset: The data from the observer.
:return: The max-value entropy search acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise tf.errors.InvalidArgumentError: If ``dataset`` is empty.
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
min_value_samples = self.get_min_value_samples_on_top_fidelity(model, dataset)
return mumbo(model, min_value_samples)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: MUMBOModelType,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer.
"""
tf.debugging.Assert(dataset is not None, [])
dataset = cast(Dataset, dataset)
tf.debugging.assert_positive(len(dataset), message="Dataset must be populated.")
min_value_samples = self.get_min_value_samples_on_top_fidelity(model, dataset)
function.update(min_value_samples) # type: ignore
return function
def get_min_value_samples_on_top_fidelity(
self, model: MUMBOModelType, dataset: Dataset
) -> TensorType:
"""
:param model: The model.
:param dataset: The data from the observer.
"""
query_points = self._search_space.sample(num_samples=self._grid_size)
tf.debugging.assert_same_float_dtype([dataset.query_points, query_points])
query_points = tf.concat([dataset.query_points, query_points], 0)
query_points_on_top_fidelity = add_fidelity_column(
query_points[:, :-1], model.num_fidelities - 1
)
return self._min_value_sampler.sample(
model, self._num_samples, query_points_on_top_fidelity
)
class mumbo(AcquisitionFunctionClass):
def __init__(self, model: MUMBOModelType, samples: TensorType):
r"""
The MUMBO acquisition function of :cite:`moss2021mumbo`, modified for objective
minimisation. This function calculates the information gain (or change in entropy) in the
distribution over the objective minimum :math:`y^*`, if we were to evaluate the objective
at a given point on a given fidelity level.
To speed up calculations, we use a trick from :cite:`Moss:2021` and use moment-matching to
calculate MUMBO's entropy terms rather than numerical integration.
:param model: The model of the objective function.
:param samples: Samples from the distribution over :math:`y^*`.
:return: The MUMBO acquisition function modified for objective
minimisation. This function will raise :exc:`ValueError` or
:exc:`~tf.errors.InvalidArgumentError` if used with a batch size greater than one.
:raise ValueError or tf.errors.InvalidArgumentError: If ``samples`` has rank less than two,
or is empty.
"""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
self._model = model
self._samples = tf.Variable(samples)
def update(self, samples: TensorType) -> None:
"""Update the acquisition function with new samples."""
tf.debugging.assert_rank(samples, 2)
tf.debugging.assert_positive(len(samples))
self._samples.assign(samples)
@tf.function
def __call__(self, x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
x_squeezed = tf.squeeze(x, -2)
x_on_top_fidelity = add_fidelity_column(x_squeezed[:, :-1], self._model.num_fidelities - 1)
fmean, fvar = self._model.predict(x_on_top_fidelity)
fsd = tf.clip_by_value(
tf.math.sqrt(fvar), CLAMP_LB, fmean.dtype.max
) # clip below to improve numerical stability
ymean, yvar = self._model.predict_y(x_squeezed)
cov = self._model.covariance_with_top_fidelity(x_squeezed)
# calculate squared correlation between observations and high-fidelity latent function
rho_squared = (cov**2) / (fvar * yvar)
rho_squared = tf.clip_by_value(rho_squared, 0.0, 1.0)
normal = tfp.distributions.Normal(tf.cast(0, fmean.dtype), tf.cast(1, fmean.dtype))
gamma = (tf.squeeze(self._samples) - fmean) / fsd
log_minus_cdf = normal.log_cdf(-gamma)
ratio = tf.math.exp(normal.log_prob(gamma) - log_minus_cdf)
inner_log = 1 + rho_squared * ratio * (gamma - ratio)
return -0.5 * tf.math.reduce_mean(tf.math.log(inner_log), axis=1, keepdims=True) # [N, 1]
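# Illustrative sketch (not part of the library): MUMBO's moment matching only needs the squared
# correlation between the low-fidelity observation and the top-fidelity latent function,
# rho^2 = cov^2 / (fvar * yvar), clipped into [0, 1]. The helper below uses made-up moments.
def _example_mumbo_rho_squared():
    fvar = tf.constant([[0.25]], dtype=tf.float64)  # top-fidelity latent variance
    yvar = tf.constant([[0.40]], dtype=tf.float64)  # observed low-fidelity variance
    cov = tf.constant([[0.20]], dtype=tf.float64)  # cross-covariance with the top fidelity
    rho_squared = (cov**2) / (fvar * yvar)
    return tf.clip_by_value(rho_squared, 0.0, 1.0)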
class CostWeighting(SingleModelAcquisitionBuilder[ProbabilisticModel]):
def __init__(self, fidelity_costs: List[float]):
"""
Builder for a cost-weighted acquisition function which returns the reciprocal of the cost
associated with the fidelity of each input.
        Note that the fidelity level is assumed to be contained in the input's final dimension.
The primary use of this acquisition function is to be used as a product with
multi-fidelity acquisition functions.
"""
self._fidelity_costs = fidelity_costs
self._num_fidelities = len(self._fidelity_costs)
def prepare_acquisition_function(
self, model: ProbabilisticModel, dataset: Optional[Dataset] = None
) -> AcquisitionFunction:
"""
:param model: The model.
:param dataset: The data from the observer. Not actually used here.
:return: The reciprocal of the costs corresponding to the fidelity level of each input.
"""
@tf.function
def acquisition(x: TensorType) -> TensorType:
tf.debugging.assert_shapes(
[(x, [..., 1, None])],
message="This acquisition function only supports batch sizes of one.",
)
fidelities = x[..., -1] # [..., 1]
tf.debugging.assert_greater(
tf.cast(self._num_fidelities, fidelities.dtype),
tf.reduce_max(fidelities),
message="You are trying to use more fidelity levels than cost levels.",
)
costs = tf.gather(self._fidelity_costs, tf.cast(fidelities, tf.int32))
return tf.cast(1.0 / costs, x.dtype) # [N, 1]
return acquisition
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
"""
Nothing to do here, so just return previous cost function.
:param function: The acquisition function to update.
:param model: The model.
:param dataset: The data from the observer.
"""
return function
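# Illustrative sketch (not part of the library): the cost weighting above simply gathers a
# per-fidelity cost using the integer fidelity stored in the input's last column and returns
# its reciprocal. The helper below uses two hypothetical fidelity levels with costs 1 and 10.
def _example_cost_weighting_scores():
    fidelity_costs = tf.constant([1.0, 10.0], dtype=tf.float64)
    fidelities = tf.constant([0, 1, 1, 0])  # hypothetical fidelity column values
    costs = tf.gather(fidelity_costs, fidelities)
    return 1.0 / costs  # [1.0, 0.1, 0.1, 1.0]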
| 36,326 | 41.787986 | 100 | py |
trieste-develop | trieste-develop/trieste/acquisition/multi_objective/pareto.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This module contains functions and classes for Pareto based multi-objective optimization. """
from __future__ import annotations
try:
import cvxpy as cp
except ImportError: # pragma: no cover (tested but not by coverage)
cp = None
import numpy as np
import tensorflow as tf
from ...types import TensorType
from .dominance import non_dominated
from .partition import prepare_default_non_dominated_partition_bounds
class Pareto:
"""
    A :class:`Pareto` constructs and stores a Pareto set (the non-dominated front), and
    calculates the hypervolume of the Pareto set given a specified reference point.
"""
def __init__(self, observations: TensorType, already_non_dominated: bool = False):
"""
:param observations: The observations for all objectives, with shape [N, D].
:param already_non_dominated: Whether the observations are already non dominated
:raise ValueError (or InvalidArgumentError): If ``observations`` has an invalid shape.
"""
tf.debugging.assert_rank(observations, 2)
tf.debugging.assert_greater_equal(tf.shape(observations)[-1], 2)
if not already_non_dominated:
self.front = non_dominated(observations)[0]
else:
self.front = observations
def hypervolume_indicator(self, reference: TensorType) -> TensorType:
"""
Calculate the hypervolume indicator based on self.front and a reference point
The hypervolume indicator is the volume of the dominated region.
:param reference: a reference point to use, with shape [D].
Defines the upper bound of the hypervolume.
            Should be equal to or bigger than the anti-ideal point of the Pareto set.
For comparing results across runs, the same reference point must be used.
:return: hypervolume indicator, if reference point is less than all of the front
in any dimension, the hypervolume indicator will be zero.
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
shape.
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``self.front`` is empty
            (which can happen if the concentration point is too strict, so that no frontier
            exists after the screening)
"""
if tf.equal(tf.size(self.front), 0):
raise ValueError("empty front cannot be used to calculate hypervolume indicator")
helper_anti_reference = tf.reduce_min(self.front, axis=0) - tf.ones(
shape=1, dtype=self.front.dtype
)
lower, upper = prepare_default_non_dominated_partition_bounds(
reference, self.front, helper_anti_reference
)
non_dominated_hypervolume = tf.reduce_sum(tf.reduce_prod(upper - lower, 1))
hypervolume_indicator = (
tf.reduce_prod(reference - helper_anti_reference) - non_dominated_hypervolume
)
return hypervolume_indicator
def sample_diverse_subset(
self,
sample_size: int,
allow_repeats: bool = True,
bounds_delta_scale_factor: float = 0.2,
bounds_min_delta: float = 1e-9,
) -> tuple[TensorType, TensorType]:
"""
Sample a set of diverse points from the Pareto set using
Hypervolume Sharpe-Ratio Indicator
:param sample_size: The number of points to sample from the Pareto front
:param allow_repeats: Whether the sample may contain repeats
:param bounds_delta_scale_factor: The factor by which to grow the distance
between extrema when calculating lower and upper bounds
:param bounds_min_delta: The minimum value of the distance between extrema
:return: sample: Tensor of query points selected in the sample
and sample_ids: Tensor of indices of points selected from the Pareto set
"""
if cp is None:
raise ImportError(
"Pareto.sample method requires cvxpy, "
"this can be installed via `pip install trieste[qhsri]`"
)
if bounds_delta_scale_factor < 0:
raise ValueError(
"bounds_delta_scale_factor should be non-negative,"
f" got {bounds_delta_scale_factor}"
)
        if bounds_min_delta < 0:
            raise ValueError(f"bounds_min_delta should be non-negative, got {bounds_min_delta}")
front_size, front_dims = self.front.shape
if (front_size < sample_size) and allow_repeats is False:
raise ValueError(
f"Tried to sample {sample_size} points from a Pareto"
f" set of size {front_size}, please ensure sample size is smaller than"
" Pareto set size."
)
lower_bound, upper_bound = self._get_bounds(bounds_delta_scale_factor, bounds_min_delta)
p = self._calculate_p_matrix(lower_bound, upper_bound)
# Calculate q matrix
p_diag = np.expand_dims(np.diagonal(p), axis=1)
q = p - np.dot(p_diag, np.transpose(p_diag))
x_star = self._find_x_star(q, p)
if allow_repeats:
samples, sample_ids = self._choose_batch_with_repeats(x_star, sample_size)
else:
samples, sample_ids = self._choose_batch_no_repeats(x_star, sample_size)
return samples, sample_ids
def _choose_batch_with_repeats(
self, x_star: TensorType, sample_size: int
) -> tuple[TensorType, TensorType]:
# Calculate number of times each point is sampled
n_times_sampled = x_star * sample_size
# Round each number to an int
n_times_sampled = np.round(n_times_sampled)
# Check difference in number of samples and required sample size
n_samples_difference = sample_size - int(np.sum(n_times_sampled))
if n_samples_difference < 0:
# We need to randomly remove samples
# Get indices of point that were to be sampled >0 times
non_zero_indices = np.flatnonzero(n_times_sampled)
# Choose indices to decrement
samples_to_decr = np.random.choice(non_zero_indices, size=-n_samples_difference)
# Decrement indices
for idx in samples_to_decr:
n_times_sampled[idx] -= 1
elif n_samples_difference > 0:
# We need to randomly add samples
samples_to_incr = np.random.choice(
np.arange(len(n_times_sampled)), size=n_samples_difference
)
for idx in samples_to_incr:
n_times_sampled[idx] += 1
# Create a list of the sample indices
sample_ids = list()
for idx, repeats in enumerate(n_times_sampled):
for _ in range(int(repeats)):
sample_ids.append(idx)
# Convert to array for indexing and return
sample_ids_array = np.array(sample_ids)
# Create batch with each of the selected samples
samples = np.array(self.front)[sample_ids_array]
return samples, sample_ids_array
def _choose_batch_no_repeats(
self, x_star: TensorType, sample_size: int
) -> tuple[TensorType, TensorType]:
front_size = self.front.shape[0]
# Create id array to keep track of points
id_arr = np.expand_dims(np.arange(front_size), axis=1)
# Stitch id array, x_star and the front together
stitched_array = np.concatenate([id_arr, x_star, np.array(self.front)], axis=1)
# Sort array by x_star descending
sorted_array = stitched_array[stitched_array[:, 1].argsort()[::-1]]
samples = sorted_array[:sample_size, 2:]
sample_ids = sorted_array[:sample_size, 0].astype(int)
return samples, sample_ids
def _find_x_star(self, q: TensorType, p: TensorType) -> TensorType:
front_size = self.front.shape[0]
p_diag = np.expand_dims(np.diagonal(p), axis=1)
# Solve quadratic program for y*
P = cp.atoms.affine.wraps.psd_wrap(q)
G = np.eye(front_size)
h = np.zeros(front_size)
A = np.transpose(p_diag)
b = np.ones(1)
# Define and solve the CVXPY problem.
y = cp.Variable(front_size)
prob = cp.Problem(cp.Minimize((1 / 2) * cp.quad_form(y, P)), [G @ y >= h, A @ y == b])
prob.solve()
y_star = y.value
# Calculate x*
x_star = np.expand_dims(y_star, axis=1) / np.sum(y_star)
return x_star
def _calculate_p_matrix(self, lower_bound: TensorType, upper_bound: TensorType) -> TensorType:
front_size, front_dims = self.front.shape
p = np.zeros([front_size, front_size])
# Calculate denominator value for p matrix elements
denominator: float = 1
for i in range(front_dims):
if upper_bound[i] - lower_bound[i] == 0:
raise ValueError(
"Pareto set has identical upper and lower bounds"
" in a dimension, you can avoid this by setting a "
"nonzero value for bounds_min_delta"
)
denominator *= upper_bound[i] - lower_bound[i]
# Fill entries of p
for i in range(front_size):
for j in range(front_size):
pij = 1
for k in range(front_dims):
pij *= upper_bound[k] - max(self.front[i, k], self.front[j, k])
p[i, j] = pij
p = p / denominator
return p
def _get_bounds(
self, delta_scaling_factor: float, min_delta: float
) -> tuple[TensorType, TensorType]:
# Find min and max for each dimension in the front
dim_mins = np.min(self.front, axis=0)
dim_maxes = np.max(self.front, axis=0)
# Calculate the deltas to add to the min/max to get the upper and lower bounds
deltas = ((dim_maxes - dim_mins) * delta_scaling_factor) + min_delta
# Calculate the bounds
lower_bound = dim_mins - deltas
upper_bound = dim_maxes + deltas
return lower_bound, upper_bound
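# Illustrative sketch: computing the hypervolume indicator of a small, made-up two-objective
# front. The numbers below are hypothetical and purely for illustration.
def _example_hypervolume_indicator() -> TensorType:
    import tensorflow as tf
    front = tf.constant([[0.0, 2.0], [1.0, 1.0], [2.0, 0.0]], dtype=tf.float64)
    reference = tf.constant([4.0, 4.0], dtype=tf.float64)
    pareto = Pareto(front)
    # volume of the region dominated by the front, bounded above by the reference point
    return pareto.hypervolume_indicator(reference)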
def get_reference_point(
observations: TensorType,
) -> TensorType:
"""
Default reference point calculation method that calculates the reference
point according to a Pareto front extracted from set of observations.
:param observations: observations referred to calculate the reference
point, with shape [..., N, D]
:return: a reference point to use, with shape [..., D].
:raise ValueError: If ``observations`` is empty
"""
if tf.equal(tf.size(observations), 0):
raise ValueError("empty observations cannot be used to calculate reference point")
front = Pareto(observations).front
f = tf.math.reduce_max(front, axis=-2) - tf.math.reduce_min(front, axis=-2)
return tf.math.reduce_max(front, axis=-2) + 2 * f / tf.cast(tf.shape(front)[-2], f.dtype)
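# Illustrative sketch: obtaining a default reference point from made-up observations before
# computing hypervolume-based quantities. All values are hypothetical.
def _example_get_reference_point() -> TensorType:
    import tensorflow as tf
    observations = tf.constant([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0]], dtype=tf.float64)
    # the reference point lies beyond the worst observed value in each objective
    return get_reference_point(observations)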
| 11,521 | 39.006944 | 99 | py |
trieste-develop | trieste-develop/trieste/acquisition/multi_objective/dominance.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains functionality for computing the non-dominated set
given a set of data points."""
from __future__ import annotations
import tensorflow as tf
from ...types import TensorType
def non_dominated(observations: TensorType) -> tuple[TensorType, TensorType]:
"""
Computes the non-dominated set for a set of data points. Based on:
https://stackoverflow.com/questions/32791911/fast-calculation-of-pareto-front-in-python
    If there are duplicate point(s) in the non-dominated set, this function returns them
    as they are, without removing the duplicates.
:param observations: set of points with shape [N,D]
:return: tf.Tensor of the non-dominated set [P,D] and a non-dominated point mask [N],
P is the number of points in pareto front, the mask specifies whether each data point
is non-dominated or not.
"""
num_points = tf.shape(observations)[0]
# Reordering the observations beforehand speeds up the search:
mean = tf.reduce_mean(observations, axis=0)
std = tf.math.reduce_std(observations, axis=0)
weights = tf.reduce_sum(((observations - mean) / (std + 1e-7)), axis=1)
sorting_indices = tf.argsort(weights)
def cond(i: tf.Tensor, indices: tf.Tensor) -> tf.Tensor:
return i < len(indices)
def body(i: tf.Tensor, indices: tf.Tensor) -> tuple[tf.Tensor, tf.Tensor]:
obs = tf.gather(observations, indices)
nondominated = tf.reduce_any(obs < obs[i], axis=1) | tf.reduce_all(obs == obs[i], axis=1)
i = tf.reduce_sum(tf.cast(nondominated[:i], tf.int32)) + 1
indices = indices[nondominated]
return i, indices
_, indices = tf.while_loop(
cond,
body,
loop_vars=(
0, # i
tf.gather(tf.range(num_points), sorting_indices), # indices
),
shape_invariants=(
tf.TensorShape([]), # i
tf.TensorShape([None]), # indices
),
)
nondominated_observations = tf.gather(observations, indices)
trues = tf.ones(tf.shape(indices), tf.bool)
is_nondominated = tf.scatter_nd(indices[:, None], trues, [num_points])
return nondominated_observations, is_nondominated
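# Illustrative sketch: screening a small, made-up set of observations for the non-dominated
# subset. The values below are hypothetical.
def _example_non_dominated() -> tuple[TensorType, TensorType]:
    import tensorflow as tf
    observations = tf.constant([[1.0, 2.0], [2.0, 1.0], [3.0, 3.0]], dtype=tf.float64)
    front, is_nondominated = non_dominated(observations)
    # front contains [1., 2.] and [2., 1.] (possibly reordered);
    # is_nondominated is [True, True, False] in the original ordering
    return front, is_nondominated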
| 2,789 | 38.295775 | 97 | py |
trieste-develop | trieste-develop/trieste/acquisition/multi_objective/__init__.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This folder contains multi-objective optimization utilities. """
from .dominance import non_dominated
from .pareto import Pareto, get_reference_point
from .partition import (
DividedAndConquerNonDominated,
ExactPartition2dNonDominated,
prepare_default_non_dominated_partition_bounds,
)
| 890 | 39.5 | 74 | py |
trieste-develop | trieste-develop/trieste/acquisition/multi_objective/partition.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains functions of different methods for
partitioning the dominated/non-dominated region in multi-objective optimization problems."""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional
import tensorflow as tf
from ...types import TensorType
from ...utils.misc import DEFAULTS
from .dominance import non_dominated
def prepare_default_non_dominated_partition_bounds(
reference: TensorType,
observations: Optional[TensorType] = None,
anti_reference: Optional[TensorType] = None,
) -> tuple[TensorType, TensorType]:
"""
Prepare the default non-dominated partition boundary for acquisition function usage.
    The partition strategy is chosen according to the number of objectives: if the objective
    number is 2, an `ExactPartition2dNonDominated` will be used; if the objective
    number is larger than 2, a `DividedAndConquerNonDominated` will be used.
    :param observations: The observations for all objectives, with shape [N, D]. If not
        specified or empty, a single non-dominated partition cell constructed from the
        reference and anti_reference points will be returned.
:param anti_reference: a worst point to use with shape [D].
Defines the lower bound of the hypercell. If not specified, will use a default value:
-[1e10] * D.
:param reference: a reference point to use, with shape [D].
Defines the upper bound of the hypervolume.
Should be equal to or bigger than the anti-ideal point of the Pareto set.
For comparing results across runs, the same reference point must be used.
:return: lower, upper bounds of the partitioned cell, each with shape [N, D]
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
shape.
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``anti_reference`` has an invalid
shape.
"""
def is_empty_obs(obs: Optional[TensorType]) -> bool:
return obs is None or tf.equal(tf.size(observations), 0)
def specify_default_anti_reference_point(
ref: TensorType, obs: Optional[TensorType]
) -> TensorType:
anti_ref = -1e10 * tf.ones(shape=(tf.shape(reference)), dtype=reference.dtype)
tf.debugging.assert_greater_equal(
ref,
anti_ref,
message=f"reference point: {ref} containing at least one value below default "
"anti-reference point ([-1e10, ..., -1e10]), try specify a lower "
"anti-reference point.",
)
if not is_empty_obs(obs): # make sure given (valid) observations are larger than -1e10
tf.debugging.assert_greater_equal(
obs,
anti_ref,
message=f"observations: {obs} containing at least one value below default "
"anti-reference point ([-1e10, ..., -1e10]), try specify a lower "
"anti-reference point.",
)
return anti_ref
tf.debugging.assert_shapes([(reference, ["D"])])
if anti_reference is None:
# if anti_reference point is not specified, use a -1e10 as default (act as -inf)
anti_reference = specify_default_anti_reference_point(reference, observations)
else:
# anti_reference point is specified
tf.debugging.assert_shapes([(anti_reference, ["D"])])
if is_empty_obs(observations): # if no valid observations
        if not tf.reduce_all(tf.less_equal(anti_reference, reference)):
            raise ValueError(
                f"anti_reference point: {anti_reference} contains at least one value larger "
                f"than reference point: {reference}"
            )
return tf.expand_dims(anti_reference, 0), tf.expand_dims(reference, 0)
elif tf.shape(observations)[-1] > 2:
return DividedAndConquerNonDominated(observations).partition_bounds(
anti_reference, reference
)
else:
return ExactPartition2dNonDominated(observations).partition_bounds(
anti_reference, reference
)
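# Illustrative sketch: default partition bounds for a made-up two-objective front. The
# observation and reference values below are hypothetical.
def _example_default_partition_bounds() -> tuple[TensorType, TensorType]:
    import tensorflow as tf
    observations = tf.constant([[1.0, 2.0], [2.0, 1.0]], dtype=tf.float64)
    reference = tf.constant([3.0, 3.0], dtype=tf.float64)
    # each returned tensor has shape [N, 2]; together they bound the non-dominated region
    return prepare_default_non_dominated_partition_bounds(reference, observations)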
@dataclass(frozen=True)
class _BoundedVolumes:
    # stores the indices into the Pareto front that form the lower and upper
    # bounds of the pseudo-cell decomposition.
    # the lower-bound indices of the volumes
    lower_idx: TensorType
    # the upper-bound indices of the volumes
upper_idx: TensorType
def __post_init__(self) -> None:
tf.debugging.assert_shapes([(self.lower_idx, ["N", "D"]), (self.upper_idx, ["N", "D"])])
class _BoundIndexPartition:
"""
A collection of partition strategies that are based on storing the index of pareto fronts
& other auxiliary points
"""
front: TensorType
_bounds: _BoundedVolumes
def __new__(cls, *args: Any, **kwargs: Any) -> Any:
if cls is _BoundIndexPartition:
raise TypeError("BoundIndexPartition may not be instantiated directly")
return object.__new__(cls)
def partition_bounds(
self, anti_reference: TensorType, reference: TensorType
) -> tuple[TensorType, TensorType]:
"""
Get the partitioned hypercell's lower and upper bounds.
:param anti_reference: a worst point to use with shape [D].
Defines the lower bound of the hypercell
:param reference: a reference point to use, with shape [D].
Defines the upper bound of the hypervolume.
Should be equal to or bigger than the anti-ideal point of the Pareto set.
For comparing results across runs, the same reference point must be used.
:return: lower, upper bounds of the partitioned cell, each with shape [N, D]
:raise ValueError (or `tf.errors.InvalidArgumentError`): If ``reference`` has an invalid
shape.
"""
tf.debugging.assert_greater_equal(reference, self.front)
tf.debugging.assert_greater_equal(self.front, anti_reference)
tf.debugging.assert_type(anti_reference, self.front.dtype)
tf.debugging.assert_type(reference, self.front.dtype)
tf.debugging.assert_shapes(
[
(self._bounds.lower_idx, ["N", "D"]),
(self._bounds.upper_idx, ["N", "D"]),
(self.front, ["M", "D"]),
(reference, ["D"]),
(anti_reference, ["D"]),
]
)
# concatenate the pseudo front to have the same corresponding of bound index
pseudo_pfront = tf.concat((anti_reference[None], self.front, reference[None]), axis=0)
N = tf.shape(self._bounds.upper_idx)[0]
D = tf.shape(self._bounds.upper_idx)[1]
idx = tf.tile(tf.range(D), (N,))
lower_idx = tf.stack((tf.reshape(self._bounds.lower_idx, [-1]), idx), axis=1)
upper_idx = tf.stack((tf.reshape(self._bounds.upper_idx, [-1]), idx), axis=1)
lower = tf.reshape(tf.gather_nd(pseudo_pfront, lower_idx), [N, D])
upper = tf.reshape(tf.gather_nd(pseudo_pfront, upper_idx), [N, D])
return lower, upper
class ExactPartition2dNonDominated(_BoundIndexPartition):
"""
Exact partition of non-dominated space, used as a default option when the
objective number equals 2.
"""
def __init__(self, front: TensorType):
"""
:param front: non-dominated pareto front.
"""
tf.debugging.assert_equal(
tf.reduce_all(non_dominated(front)[1]),
True,
message=f"\ninput {front} " f"contains dominated points",
)
self.front = tf.gather_nd(front, tf.argsort(front[:, :1], axis=0)) # sort input front
self._bounds = self._get_bound_index()
def _get_bound_index(self) -> _BoundedVolumes:
# Compute the cells covering the non-dominated region for 2 dimension case
# this assumes the Pareto set has been sorted in ascending order on the first
# objective, which implies the second objective is sorted in descending order
len_front, number_of_objectives = self.front.shape
pseudo_front_idx = tf.concat(
[
tf.zeros([1, number_of_objectives], dtype=tf.int32),
tf.argsort(self.front, axis=0) + 1,
tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,
],
axis=0,
)
range_ = tf.range(len_front + 1)[:, None]
lower_result = tf.concat([range_, tf.zeros_like(range_)], axis=-1)
upper_result = tf.concat(
[range_ + 1, pseudo_front_idx[::-1, 1:][: pseudo_front_idx[-1, 0]]], axis=-1
)
return _BoundedVolumes(lower_result, upper_result)
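# Illustrative sketch: partitioning the non-dominated region of a made-up 2D front directly
# with ExactPartition2dNonDominated. All values below are hypothetical.
def _example_exact_partition_2d() -> tuple[TensorType, TensorType]:
    import tensorflow as tf
    front = tf.constant([[1.0, 3.0], [2.0, 2.0], [3.0, 1.0]], dtype=tf.float64)
    partition = ExactPartition2dNonDominated(front)
    anti_reference = tf.constant([0.0, 0.0], dtype=tf.float64)
    reference = tf.constant([4.0, 4.0], dtype=tf.float64)
    return partition.partition_bounds(anti_reference, reference)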
class DividedAndConquerNonDominated(_BoundIndexPartition):
"""
    Branch-and-bound procedure: a divide-and-conquer partition method introduced
in :cite:`Couckuyt2012`.
"""
def __init__(self, front: TensorType, threshold: TensorType | float = 0):
"""
:param front: non-dominated pareto front.
        :param threshold: a threshold used to screen out cells in the partition: when a cell's
            volume is below this threshold, it is rejected directly in order to be more
            computationally efficient. If set above 0, this partition strategy tends to return
            an approximate partition.
"""
tf.debugging.assert_equal(
tf.reduce_all(non_dominated(front)[1]),
True,
message=f"\ninput {front} " f"contains dominated points",
)
        self.front = front
self._bounds = self._get_bound_index(threshold)
def _get_bound_index(self, threshold: TensorType | float = 0) -> _BoundedVolumes:
len_front, number_of_objectives = self.front.shape
lower_result = tf.zeros([0, number_of_objectives], dtype=tf.int32)
upper_result = tf.zeros([0, number_of_objectives], dtype=tf.int32)
min_front = tf.reduce_min(self.front, axis=0, keepdims=True) - 1
max_front = tf.reduce_max(self.front, axis=0, keepdims=True) + 1
pseudo_front = tf.concat([min_front, self.front, max_front], axis=0)
pseudo_front_idx = tf.concat(
[
tf.zeros([1, number_of_objectives], dtype=tf.int32),
tf.argsort(self.front, axis=0)
+ 1, # +1 as index zero is reserved for the ideal point
tf.ones([1, number_of_objectives], dtype=tf.int32) * len_front + 1,
],
axis=0,
)
divide_conquer_cells = tf.stack(
[
tf.zeros(number_of_objectives, dtype=tf.int32),
(int(pseudo_front_idx.shape[0]) - 1)
* tf.ones(number_of_objectives, dtype=tf.int32),
],
axis=0,
)[None]
total_size = tf.reduce_prod(max_front - min_front)
def while_body(
divide_conquer_cells: TensorType,
lower_result: TensorType,
upper_result: TensorType,
) -> tuple[TensorType, TensorType, TensorType]:
divide_conquer_cells_unstacked = tf.unstack(divide_conquer_cells, axis=0)
cell = divide_conquer_cells_unstacked[-1]
divide_conquer_cells_new = tf.cond(
tf.not_equal(tf.size(divide_conquer_cells_unstacked[:-1]), 0),
lambda: tf.stack(divide_conquer_cells_unstacked[:-1]),
lambda: tf.zeros([0, 2, number_of_objectives], dtype=tf.int32),
)
arr = tf.range(number_of_objectives)
lower_idx = tf.gather_nd(pseudo_front_idx, tf.stack((cell[0], arr), -1))
upper_idx = tf.gather_nd(pseudo_front_idx, tf.stack((cell[1], arr), -1))
lower = tf.gather_nd(pseudo_front, tf.stack((lower_idx, arr), -1))
upper = tf.gather_nd(pseudo_front, tf.stack((upper_idx, arr), -1))
test_accepted = self._is_test_required((upper - DEFAULTS.JITTER) < self.front)
lower_result_final, upper_result_final = tf.cond(
test_accepted,
lambda: self._accepted_test_body(lower_result, upper_result, lower_idx, upper_idx),
lambda: (lower_result, upper_result),
)
test_rejected = self._is_test_required((lower + DEFAULTS.JITTER) < self.front)
divide_conquer_cells_final = tf.cond(
tf.logical_and(test_rejected, tf.logical_not(test_accepted)),
lambda: self._rejected_test_body(
cell, lower, upper, divide_conquer_cells_new, total_size, threshold
),
lambda: divide_conquer_cells_new,
)
return divide_conquer_cells_final, lower_result_final, upper_result_final
_, lower_result_final, upper_result_final = tf.while_loop(
lambda divide_conquer_cells, lower_result, upper_result: len(divide_conquer_cells) > 0,
while_body,
loop_vars=[divide_conquer_cells, lower_result, upper_result],
shape_invariants=[
tf.TensorShape([None, 2, number_of_objectives]),
tf.TensorShape([None, number_of_objectives]),
tf.TensorShape([None, number_of_objectives]),
],
)
return _BoundedVolumes(lower_result_final, upper_result_final)
@staticmethod
def _is_test_required(smaller: TensorType) -> TensorType:
idx_dom_augm = tf.reduce_any(smaller, axis=1)
is_dom_augm = tf.reduce_all(idx_dom_augm)
return is_dom_augm
@staticmethod
def _accepted_test_body(
lower_result: TensorType,
upper_result: TensorType,
lower_idx: TensorType,
upper_idx: TensorType,
) -> tuple[TensorType, TensorType]:
lower_result_accepted = tf.concat([lower_result, lower_idx[None]], axis=0)
upper_result_accepted = tf.concat([upper_result, upper_idx[None]], axis=0)
return lower_result_accepted, upper_result_accepted
@classmethod
def _rejected_test_body(
cls,
cell: TensorType,
lower: TensorType,
upper: TensorType,
divide_conquer_cells: TensorType,
total_size: TensorType,
threshold: TensorType,
) -> TensorType:
divide_conquer_cells_dist = cell[1] - cell[0]
hc_size = tf.math.reduce_prod(upper - lower, axis=0, keepdims=True)
not_unit_cell = tf.reduce_any(divide_conquer_cells_dist > 1)
vol_above_thresh = tf.reduce_all((hc_size[0] / total_size) > threshold)
divide_conquer_cells_rejected = tf.cond(
tf.logical_and(not_unit_cell, vol_above_thresh),
lambda: cls._divide_body(divide_conquer_cells, divide_conquer_cells_dist, cell),
lambda: tf.identity(divide_conquer_cells),
)
return divide_conquer_cells_rejected
@staticmethod
def _divide_body(
divide_conquer_cells: TensorType,
divide_conquer_cells_dist: TensorType,
cell: TensorType,
) -> TensorType:
edge_size = tf.reduce_max(divide_conquer_cells_dist)
idx = tf.argmax(divide_conquer_cells_dist)
edge_size1 = int(tf.round(tf.cast(edge_size, dtype=tf.float32) / 2.0))
edge_size2 = int(edge_size - edge_size1)
sparse_edge_size1 = tf.concat(
[tf.zeros([idx]), edge_size1 * tf.ones([1]), tf.zeros([len(cell[1]) - idx - 1])], axis=0
)
upper = tf.identity(cell[1]) - tf.cast(sparse_edge_size1, dtype=tf.int32)
divide_conquer_cells_new = tf.concat(
[divide_conquer_cells, tf.stack([tf.identity(cell[0]), upper], axis=0)[None]], axis=0
)
sparse_edge_size2 = tf.concat(
[tf.zeros([idx]), edge_size2 * tf.ones([1]), tf.zeros([len(cell[1]) - idx - 1])], axis=0
)
lower = tf.identity(cell[0]) + tf.cast(sparse_edge_size2, dtype=tf.int32)
divide_conquer_cells_final = tf.concat(
[divide_conquer_cells_new, tf.stack([lower, tf.identity(cell[1])], axis=0)[None]],
axis=0,
)
return divide_conquer_cells_final
| 16,741 | 41.492386 | 100 | py |
trieste-develop | trieste-develop/trieste/experimental/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/trieste/experimental/plotting/plotting_plotly.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Callable, Optional
import numpy as np
import plotly.graph_objects as go
import tensorflow as tf
from plotly.subplots import make_subplots
from trieste.models.interfaces import ProbabilisticModel
from trieste.types import TensorType
from trieste.utils import to_numpy
from .plotting import create_grid
def format_point_markers(
num_pts: int,
num_init: int,
idx_best: Optional[int] = None,
mask_fail: Optional[TensorType] = None,
m_init: str = "x",
m_add: str = "circle",
c_pass: str = "green",
c_fail: str = "red",
c_best: str = "darkmagenta",
) -> tuple[TensorType, TensorType]:
"""
Prepares point marker styles according to some BO factors
:param num_pts: total number of BO points
:param num_init: initial number of BO points
:param idx_best: index of the best BO point
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param m_init: marker for the initial BO points
:param m_add: marker for the other BO points
:param c_pass: color for the regular BO points
:param c_fail: color for the failed BO points
:param c_best: color for the best BO points
    :return: 2 string vectors col_pts, mark_pts containing colors and marker styles respectively
"""
col_pts = np.repeat(c_pass, num_pts).astype("<U15")
mark_pts = np.repeat(m_init, num_pts).astype("<U15")
mark_pts[num_init:] = m_add
if mask_fail is not None:
col_pts[mask_fail] = c_fail
if idx_best is not None:
col_pts[idx_best] = c_best
return col_pts, mark_pts
def add_surface_plotly(
xx: TensorType,
yy: TensorType,
f: TensorType,
fig: go.Figure,
alpha: float = 1.0,
figrow: int = 1,
figcol: int = 1,
) -> go.Figure:
"""
Adds a surface to an existing plotly subfigure
:param xx: [n, n] array (input)
:param yy: [n, n] array (input)
:param f: [n, n] array (output)
:param fig: the current plotly figure
:param alpha: transparency
:param figrow: row index of the subfigure
:param figcol: column index of the subfigure
:return: updated plotly figure
"""
z = f.reshape([xx.shape[0], yy.shape[1]])
fig.add_trace(
go.Surface(z=z, x=xx, y=yy, showscale=False, opacity=alpha, colorscale="viridis"),
row=figrow,
col=figcol,
)
return fig
def add_bo_points_plotly(
x: TensorType,
y: TensorType,
z: TensorType,
fig: go.Figure,
num_init: int,
idx_best: Optional[int] = None,
mask_fail: Optional[TensorType] = None,
figrow: int = 1,
figcol: int = 1,
) -> go.Figure:
"""
Adds scatter points to an existing subfigure. Markers and colors are chosen according to
BO factors.
:param x: [N] x inputs
:param y: [N] y inputs
:param z: [N] z outputs
:param fig: the current plotly figure
:param num_init: initial number of BO points
:param idx_best: index of the best BO point
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param figrow: row index of the subfigure
:param figcol: column index of the subfigure
:return: a plotly figure
"""
num_pts = x.shape[0]
col_pts, mark_pts = format_point_markers(num_pts, num_init, idx_best, mask_fail)
fig.add_trace(
go.Scatter3d(
x=x,
y=y,
z=z,
mode="markers",
marker=dict(size=4, color=col_pts, symbol=mark_pts, opacity=0.8),
),
row=figrow,
col=figcol,
)
return fig
def plot_model_predictions_plotly(
model: ProbabilisticModel,
mins: TensorType,
maxs: TensorType,
grid_density: int = 20,
num_samples: Optional[int] = None,
alpha: float = 0.85,
) -> go.Figure:
"""
    Plots a 2D view of the model's predictions. We first create a regular grid of points
and evaluate the model on these points. We then plot the mean and 2 standard deviations to
show epistemic uncertainty.
For ``DeepGaussianProcess`` models ``num_samples`` should be used
    and set to some positive number. This is needed as predictions from deep GPs are stochastic
and we need to take more than one sample to estimate the mean and variance.
:param model: A probabilistic model
:param mins: List of 2 lower bounds for creating a grid of points for model predictions.
:param maxs: List of 2 upper bounds for creating a grid of points for model predictions.
:param grid_density: Number of points per dimension. This will result in a grid size of
grid_density^2.
:param num_samples: Number of samples to use with deep GPs.
:param alpha: Transparency.
:return: A plotly figure.
"""
mins = to_numpy(mins)
maxs = to_numpy(maxs)
# Create a regular grid on the parameter space
Xplot, xx, yy = create_grid(mins=mins, maxs=maxs, grid_density=grid_density)
    # Evaluate the model; ``num_samples`` is only needed for models with stochastic predictions
if num_samples is None:
Fmean, Fvar = model.predict(Xplot)
else:
means = []
vars = []
for _ in range(num_samples):
Fmean_sample, Fvar_sample = model.predict(Xplot)
means.append(Fmean_sample)
vars.append(Fvar_sample)
Fmean = tf.reduce_mean(tf.stack(means), axis=0)
Fvar = tf.reduce_mean(tf.stack(vars) + tf.stack(means) ** 2, axis=0) - Fmean**2
n_output = Fmean.shape[1]
fig = make_subplots(rows=1, cols=n_output, specs=[[{"type": "surface"}] * n_output])
for k in range(n_output):
fmean = Fmean[:, k].numpy()
fvar = Fvar[:, k].numpy()
lcb = fmean - 2 * np.sqrt(fvar)
ucb = fmean + 2 * np.sqrt(fvar)
fig = add_surface_plotly(xx, yy, fmean, fig, alpha=alpha, figrow=1, figcol=k + 1)
fig = add_surface_plotly(xx, yy, lcb, fig, alpha=alpha - 0.35, figrow=1, figcol=k + 1)
fig = add_surface_plotly(xx, yy, ucb, fig, alpha=alpha - 0.35, figrow=1, figcol=k + 1)
fig.update_layout(height=600, width=600)
return fig
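# Illustrative sketch: visualising a trained two-input model. The `model` argument is a
# hypothetical placeholder for any trieste ProbabilisticModel trained on [0, 1]^2.
def _example_plot_model_predictions(model: ProbabilisticModel) -> go.Figure:
    import numpy as np
    fig = plot_model_predictions_plotly(
        model, mins=np.array([0.0, 0.0]), maxs=np.array([1.0, 1.0]), grid_density=25
    )
    return fig  # call fig.show() to render the mean and +/- 2 standard deviation surfaces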
def plot_function_plotly(
obj_func: Callable[[TensorType], TensorType],
mins: TensorType,
maxs: TensorType,
grid_density: int = 20,
title: Optional[str] = None,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
alpha: float = 1.0,
) -> go.Figure:
"""
    Plots a 2D view of an objective function. To illustrate the function we create a
regular grid of points and evaluate the function on these points.
:param obj_func: The vectorized objective function.
:param mins: List of 2 lower bounds for creating a grid of points for model predictions.
:param maxs: List of 2 upper bounds for creating a grid of points for model predictions.
:param grid_density: Number of points per dimension. This will result in a grid size of
grid_density^2.
:param title: optional titles
:param xlabel: optional xlabel
:param ylabel: optional ylabel
:param alpha: transparency
:return: A plotly figure.
"""
# Create a regular grid on the parameter space
Xplot, xx, yy = create_grid(mins=mins, maxs=maxs, grid_density=grid_density)
# Evaluate objective function
F = to_numpy(obj_func(Xplot))
if len(F.shape) == 1:
F = F.reshape(-1, 1)
n_output = F.shape[1]
fig = make_subplots(
rows=1,
cols=n_output,
specs=[[{"type": "surface"}] * n_output],
subplot_titles=title,
)
for k in range(n_output):
f = F[:, k]
fig = add_surface_plotly(xx, yy, f, fig, alpha=alpha, figrow=1, figcol=k + 1)
fig.update_xaxes(title_text=xlabel, row=1, col=k + 1)
fig.update_yaxes(title_text=ylabel, row=1, col=k + 1)
fig.update_layout(height=600, width=600)
return fig
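# Illustrative sketch: plotting a simple, made-up objective over the unit square. The
# quadratic below is a hypothetical stand-in for a real objective function.
def _example_plot_function_plotly() -> go.Figure:
    import numpy as np
    def quadratic(x: TensorType) -> TensorType:
        return np.sum((np.asarray(x) - 0.5) ** 2, axis=-1, keepdims=True)
    return plot_function_plotly(
        quadratic, mins=np.array([0.0, 0.0]), maxs=np.array([1.0, 1.0]), grid_density=25
    )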
| 8,489 | 31.653846 | 99 | py |
trieste-develop | trieste-develop/trieste/experimental/plotting/inequality_constraints.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from abc import abstractmethod
from typing import Optional, Type, cast
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.figure import Figure
from typing_extensions import Protocol
from ...space import SearchSpace
from ...types import TensorType
from .plotting import create_grid
class Simulation(Protocol):
"""A representation of a constrained objective used for plotting."""
threshold: float
@staticmethod
@abstractmethod
def objective(input_data: TensorType) -> TensorType:
"""Objective function."""
@staticmethod
@abstractmethod
def constraint(input_data: TensorType) -> TensorType:
"""Constraint function."""
def plot_objective_and_constraints(
search_space: SearchSpace, simulation: Type[Simulation]
) -> Figure:
"""
Plot constrained objective.
:param search_space: Search space
:param simulation: Constrained objective
:return: figure
"""
objective_fn = simulation.objective
constraint_fn = simulation.constraint
lower_bound = search_space.lower
upper_bound = search_space.upper
grid, xx, yy = create_grid(lower_bound, upper_bound, grid_density=30)
objective = objective_fn(grid).numpy()
constraint = constraint_fn(grid).numpy()
fig, (axes1, axes2) = plt.subplots(2, 2, sharex="all", sharey="all", figsize=(8, 8))
levels = 30
axes1[0].contourf(xx, yy, objective.reshape(*xx.shape), levels, alpha=0.9)
axes1[1].contourf(xx, yy, constraint.reshape(*xx.shape), levels, alpha=0.9)
axes1[0].set_title("Objective")
axes1[1].set_title("Constraint")
mask_ids = np.argwhere(constraint > simulation.threshold)
mask = np.zeros_like(objective, dtype=bool)
mask[mask_ids] = True
objective_masked = np.ma.array(objective, mask=mask)
constraint_masked = np.ma.array(constraint, mask=mask)
axes2[0].contourf(xx, yy, objective_masked.reshape(*xx.shape), levels, alpha=0.9)
axes2[1].contourf(xx, yy, constraint_masked.reshape(*xx.shape), levels, alpha=0.9)
axes2[0].set_title("Constrained objective")
axes2[1].set_title("Constraint mask")
for ax in np.ravel([axes1, axes2]):
ax.set_xlim(lower_bound[0], upper_bound[0])
ax.set_ylim(lower_bound[1], upper_bound[1])
return fig
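# Illustrative sketch: a made-up Simulation with a quadratic objective and a linear
# constraint, plotted over a two-dimensional search space supplied by the caller
# (e.g. a trieste Box over [0, 1]^2). All values below are hypothetical.
def _example_plot_objective_and_constraints(search_space: SearchSpace) -> Figure:
    import tensorflow as tf
    class _ToySimulation:
        threshold = 0.5
        @staticmethod
        def objective(input_data: TensorType) -> TensorType:
            return tf.reduce_sum((input_data - 0.5) ** 2, axis=-1, keepdims=True)
        @staticmethod
        def constraint(input_data: TensorType) -> TensorType:
            return tf.reduce_sum(input_data, axis=-1, keepdims=True) / 2.0
    return plot_objective_and_constraints(search_space, _ToySimulation)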
def plot_init_query_points(
search_space: SearchSpace,
simulation: Type[Simulation],
objective_data: TensorType,
constraint_data: TensorType,
new_constraint_data: Optional[TensorType] = None,
) -> Figure:
"""
Plot initial query points on constrained objective.
:param search_space: Search space
:param simulation: Constrained objective
:param objective_data: Objective data
:param constraint_data: Constraint data
:param new_constraint_data: Optional new constraint data
:return: figure
"""
objective_fn = simulation.objective
constraint_fn = simulation.constraint
lower_bound = search_space.lower
upper_bound = search_space.upper
levels = 30
psize = 15
cw, cb, co = "white", "tab:blue", "tab:orange"
grid, xx, yy = create_grid(lower_bound, upper_bound, grid_density=30)
objective = objective_fn(grid).numpy()
constraint = constraint_fn(grid).numpy()
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
constraint_points = constraint_data[-1].numpy()
x = objective_data[0].numpy()
mask_ids = constraint[:, 0] > simulation.threshold
mask = np.zeros_like(objective, dtype=bool)
mask[mask_ids, :] = True
objective_masked = np.ma.array(objective, mask=mask)
def in_out_points(
x: TensorType, constraint_points: TensorType
) -> tuple[TensorType, TensorType]:
ids_in = constraint_points[:, 0] <= simulation.threshold
ids_out = constraint_points[:, 0] > simulation.threshold
return x.T[..., ids_in], x.T[..., ids_out]
(x_in, y_in), (x_out, y_out) = in_out_points(x, constraint_points)
ax.contourf(xx, yy, objective_masked.reshape(*xx.shape), levels, alpha=0.9)
ax.scatter(x_in, y_in, s=psize, c=cb, edgecolors=cw, marker="o")
ax.scatter(x_out, y_out, s=psize, c=cw, edgecolors=cb, marker="o")
if new_constraint_data is not None:
x_new, constraint_points_new = new_constraint_data
(x_in_new, y_in_new), (x_out_new, y_out_new) = in_out_points(
x_new.numpy(), constraint_points_new.numpy()
)
ax.scatter(x_in_new, y_in_new, s=psize, c=co, edgecolors=cw, marker="o")
ax.scatter(x_out_new, y_out_new, s=psize, c=cw, edgecolors=co, marker="o")
ax.set_title("Constrained objective")
ax.set_xlim(lower_bound[0], upper_bound[0])
ax.set_ylim(lower_bound[1], upper_bound[1])
return fig
def plot_2obj_cst_query_points(
search_space: SearchSpace,
simulation: Type[Simulation],
objective_data: TensorType,
constraint_data: TensorType,
) -> None:
"""
    Plot 2-objective constrained query points.
:param search_space: Search space
:param simulation: Constrained objective
:param objective_data: Objective data
:param constraint_data: Constraint data
"""
class Sim1(simulation): # type: ignore
@staticmethod
def objective(input_data: TensorType) -> TensorType:
return simulation.objective(input_data)[:, 0:1]
class Sim2(simulation): # type: ignore
@staticmethod
def objective(input_data: TensorType) -> TensorType:
return simulation.objective(input_data)[:, 1:2]
for sim in [Sim1, Sim2]:
plot_init_query_points(
search_space,
cast(Type[Simulation], sim),
objective_data,
constraint_data,
)
| 6,314 | 32.590426 | 88 | py |
trieste-develop | trieste-develop/trieste/experimental/plotting/plotting.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Callable, Optional, Sequence
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from gpflow.models import GPModel
from matplotlib import cm
from matplotlib.axes import Axes
from matplotlib.collections import Collection
from matplotlib.contour import ContourSet
from matplotlib.figure import Figure
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.multi_objective.dominance import non_dominated
from trieste.types import TensorType
from trieste.utils import to_numpy
def create_grid(
mins: TensorType, maxs: TensorType, grid_density: int = 30
) -> tuple[TensorType, TensorType, TensorType]:
"""
Creates a regular 2D grid of size `grid_density^2` between mins and maxs.
:param mins: list of 2 lower bounds
:param maxs: list of 2 upper bounds
:param grid_density: scalar
:return: Xplot [grid_density**2, 2], xx, yy from meshgrid for the specific formatting of
contour / surface plots
"""
tf.debugging.assert_shapes([(mins, [2]), (maxs, [2])])
xspaced = np.linspace(mins[0], maxs[0], grid_density)
yspaced = np.linspace(mins[1], maxs[1], grid_density)
xx, yy = np.meshgrid(xspaced, yspaced)
Xplot = np.vstack((xx.flatten(), yy.flatten())).T
return Xplot, xx, yy
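# Illustrative sketch: building a 5x5 grid over the unit square. The bounds and grid density
# below are arbitrary example values.
def _example_create_grid() -> tuple[TensorType, TensorType, TensorType]:
    Xplot, xx, yy = create_grid(
        mins=np.array([0.0, 0.0]), maxs=np.array([1.0, 1.0]), grid_density=5
    )
    # Xplot has shape [25, 2]; xx and yy have shape [5, 5] for contour/surface plotting
    return Xplot, xx, yy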
def plot_surface(
xx: TensorType,
yy: TensorType,
f: TensorType,
ax: Axes,
contour: bool = False,
fill: bool = False,
alpha: float = 1.0,
) -> ContourSet | Collection:
"""
Adds either a contour or a surface to a given ax.
:param xx: input 1, from meshgrid
:param yy: input2, from meshgrid
:param f: output, from meshgrid
:param ax: plt axes object
:param contour: Boolean
:param fill: filled contour
:param alpha: transparency
:return: generated contour or surface
"""
if contour:
if fill:
return ax.contourf(xx, yy, f.reshape(*xx.shape), 80, alpha=alpha)
else:
return ax.contour(xx, yy, f.reshape(*xx.shape), 80, alpha=alpha)
else:
return ax.plot_surface(
xx,
yy,
f.reshape(*xx.shape),
cmap=cm.coolwarm,
linewidth=0,
antialiased=False,
alpha=alpha,
)
def plot_function_2d(
obj_func: Callable[[TensorType], TensorType],
mins: TensorType,
maxs: TensorType,
grid_density: int = 100,
contour: bool = False,
log: bool = False,
title: Optional[Sequence[str]] = None,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
figsize: Optional[tuple[float, float]] = (8, 6),
colorbar: bool = False,
alpha: float = 1.0,
fill: bool = False,
) -> tuple[Figure, Axes]:
"""
2D/3D plot of an obj_func for a grid of size grid_density**2 between mins and maxs
    :param obj_func: a function that returns an n-array given a [n, d] array
:param mins: 2 lower bounds
:param maxs: 2 upper bounds
:param grid_density: positive integer for the grid size
:param contour: Boolean. If False, a 3d plot is produced
:param log: Boolean. If True, the log transformation (log(f - min(f) + 0.1)) is applied
:param title: optional titles
:param xlabel: optional xlabel
:param ylabel: optional ylabel
:param figsize: optional figsize
:param colorbar: whether to use colorbar
:param alpha: transparency
:param fill: filled contour
:return: figure and axes
"""
mins = to_numpy(mins)
maxs = to_numpy(maxs)
# Create a regular grid on the parameter space
Xplot, xx, yy = create_grid(mins=mins, maxs=maxs, grid_density=grid_density)
# Evaluate objective function
F = to_numpy(obj_func(Xplot))
if len(F.shape) == 1:
F = F.reshape(-1, 1)
n_output = F.shape[1]
if contour:
fig, ax = plt.subplots(
1, n_output, squeeze=False, sharex="all", sharey="all", figsize=figsize
)
else:
fig = plt.figure(figsize=figsize)
for k in range(F.shape[1]):
# Apply log transformation
f = F[:, k]
if log:
f = np.log(f - np.min(f) + 1e-1)
# Either plot contour of surface
if contour:
axx = ax[0, k]
else:
ax = axx = fig.add_subplot(1, n_output, k + 1, projection="3d")
plt_obj = plot_surface(xx, yy, f, axx, contour=contour, alpha=alpha, fill=fill)
if title is not None:
axx.set_title(title[k])
if colorbar:
fig.colorbar(plt_obj, ax=axx)
axx.set_xlabel(xlabel)
axx.set_ylabel(ylabel)
axx.set_xlim(mins[0], maxs[0])
axx.set_ylim(mins[1], maxs[1])
return fig, ax
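# Illustrative sketch: a contour plot of a made-up quadratic objective over the unit square.
# The objective below is a hypothetical stand-in for a real function.
def _example_plot_function_2d() -> tuple[Figure, Axes]:
    def quadratic(x: TensorType) -> TensorType:
        return np.sum((np.asarray(x) - 0.5) ** 2, axis=-1, keepdims=True)
    return plot_function_2d(
        quadratic,
        mins=np.array([0.0, 0.0]),
        maxs=np.array([1.0, 1.0]),
        grid_density=50,
        contour=True,
        colorbar=True,
    )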
def plot_acq_function_2d(
acq_func: AcquisitionFunction,
mins: TensorType,
maxs: TensorType,
grid_density: int = 100,
contour: bool = False,
log: bool = False,
title: Optional[Sequence[str]] = None,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
figsize: Optional[tuple[float, float]] = (8, 6),
colorbar: bool = False,
alpha: float = 1.0,
fill: bool = False,
) -> tuple[Figure, Axes]:
"""
Wrapper to produce a 2D/3D plot of an acq_func for a grid of size grid_density**2 between
mins and maxs.
    :param acq_func: a function that returns an n-array given a [n, d] array
:param mins: 2 lower bounds
:param maxs: 2 upper bounds
:param grid_density: positive integer for the grid size
:param contour: Boolean. If False, a 3d plot is produced
:param log: Boolean. If True, the log transformation (log(f - min(f) + 0.1)) is applied
:param title: optional titles
:param xlabel: optional xlabel
:param ylabel: optional ylabel
:param figsize: optional figsize
:param colorbar: whether to use colorbar
:param alpha: transparency
:param fill: filled contour
:return: figure and axes
"""
def batched_func(x: TensorType) -> TensorType:
return acq_func(tf.expand_dims(x, axis=-2))
return plot_function_2d(
batched_func,
mins,
maxs,
grid_density,
contour,
log,
title,
xlabel,
ylabel,
figsize,
colorbar,
alpha,
fill,
)
def format_point_markers(
num_pts: int,
num_init: Optional[int] = None,
idx_best: Optional[TensorType] = None,
mask_fail: Optional[TensorType] = None,
m_init: str = "x",
m_add: str = "o",
c_pass: str = "tab:green",
c_fail: str = "tab:red",
c_best: str = "tab:purple",
) -> tuple[TensorType, TensorType]:
"""
Prepares point marker styles according to some BO factors.
:param num_pts: total number of BO points
:param num_init: initial number of BO points
:param idx_best: index of the best BO point(s)
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param m_init: marker for the initial BO points
:param m_add: marker for the other BO points
:param c_pass: color for the regular BO points
:param c_fail: color for the failed BO points
:param c_best: color for the best BO points
    :return: 2 string vectors col_pts, mark_pts containing colors and marker styles respectively
"""
if num_init is None:
num_init = num_pts
col_pts = np.repeat(c_pass, num_pts)
col_pts = col_pts.astype("<U15")
mark_pts = np.repeat(m_init, num_pts)
mark_pts[num_init:] = m_add
if mask_fail is not None:
col_pts[np.where(mask_fail)] = c_fail
if idx_best is not None:
col_pts[idx_best] = c_best
return col_pts, mark_pts
def plot_bo_points(
pts: TensorType,
ax: Axes,
num_init: Optional[int] = None,
idx_best: Optional[int] = None,
mask_fail: Optional[TensorType] = None,
obs_values: Optional[TensorType] = None,
m_init: str = "x",
m_add: str = "o",
c_pass: str = "tab:green",
c_fail: str = "tab:red",
c_best: str = "tab:purple",
) -> None:
"""
Adds scatter points to an existing subfigure. Markers and colors are chosen according to
BO factors.
:param pts: [N, 2] x inputs
:param ax: a plt axes object
:param num_init: initial number of BO points
:param idx_best: index of the best BO point
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param obs_values: optional [N] outputs (for 3d plots)
:param m_init: marker for the initial BO points
:param m_add: marker for the other BO points
:param c_pass: color for the regular BO points
:param c_fail: color for the failed BO points
:param c_best: color for the best BO points
"""
num_pts = pts.shape[0]
col_pts, mark_pts = format_point_markers(
num_pts, num_init, idx_best, mask_fail, m_init, m_add, c_pass, c_fail, c_best
)
if obs_values is None:
for i in range(pts.shape[0]):
ax.scatter(pts[i, 0], pts[i, 1], c=col_pts[i], marker=mark_pts[i])
else:
for i in range(pts.shape[0]):
ax.scatter(pts[i, 0], pts[i, 1], obs_values[i], c=col_pts[i], marker=mark_pts[i])
def plot_mobo_points_in_obj_space(
obs_values: TensorType,
num_init: Optional[int] = None,
mask_fail: Optional[TensorType] = None,
figsize: Optional[tuple[float, float]] = (8, 6),
xlabel: str = "Obj 1",
ylabel: str = "Obj 2",
zlabel: str = "Obj 3",
title: Optional[str] = None,
m_init: str = "x",
m_add: str = "o",
c_pass: str = "tab:green",
c_fail: str = "tab:red",
c_pareto: str = "tab:purple",
only_plot_pareto: bool = False,
) -> tuple[Figure, Axes]:
"""
Adds scatter points in objective space, used for multi-objective optimization (2 or 3
objectives only). Markers and colors are chosen according to BO factors.
:param obs_values: TF Tensor or numpy array of objective values, shape (N, 2) or (N, 3).
:param num_init: initial number of BO points
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param figsize: Size of the figure.
:param xlabel: Label of the X axis.
:param ylabel: Label of the Y axis.
:param zlabel: Label of the Z axis (in 3d case).
:param title: Title of the plot.
:param m_init: Marker for initial points.
:param m_add: Marker for the points observed during the BO loop.
:param c_pass: color for the regular BO points
:param c_fail: color for the failed BO points
:param c_pareto: color for the Pareto front points
:param only_plot_pareto: if set to `True`, only plot the pareto points. Default is `False`.
:return: figure and axes
"""
obj_num = obs_values.shape[-1]
assert obj_num == 2 or obj_num == 3, NotImplementedError(
f"Only support 2/3-objective functions but found: {obj_num}"
)
_, dom = non_dominated(obs_values)
idx_pareto = np.where(dom) if mask_fail is None else np.where(np.logical_and(dom, ~mask_fail))
pts = obs_values.numpy() if tf.is_tensor(obs_values) else obs_values
num_pts = pts.shape[0]
col_pts, mark_pts = format_point_markers(
num_pts, num_init, idx_pareto, mask_fail, m_init, m_add, c_pass, c_fail, c_pareto
)
if only_plot_pareto:
col_pts = col_pts[idx_pareto]
mark_pts = mark_pts[idx_pareto]
pts = pts[idx_pareto]
if obj_num == 2:
fig, ax = plt.subplots(figsize=figsize)
else:
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for i in range(pts.shape[0]):
ax.scatter(*pts[i], c=col_pts[i], marker=mark_pts[i])
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if obj_num == 3:
ax.set_zlabel(zlabel)
if title is not None:
ax.set_title(title)
return fig, ax
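# Illustrative sketch: plotting made-up two-objective observations, with the first two points
# treated as initial data. All values below are hypothetical.
def _example_plot_mobo_points() -> tuple[Figure, Axes]:
    obs_values = np.array([[1.0, 2.0], [3.0, 3.0], [2.0, 1.0], [2.5, 2.5]])
    # Pareto-optimal points are highlighted; crosses mark the initial observations
    return plot_mobo_points_in_obj_space(obs_values, num_init=2)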
def plot_mobo_history(
obs_values: TensorType,
metric_func: Callable[[TensorType], TensorType],
num_init: int,
mask_fail: Optional[TensorType] = None,
figsize: Optional[tuple[float, float]] = (8, 6),
) -> tuple[Figure, Axes]:
"""
Draw the performance measure for multi-objective optimization.
:param obs_values: TF Tensor or numpy array of objective values
:param metric_func: a callable function calculate metric score
:param num_init: initial number of BO points
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param figsize: Size of the figure.
:return: figure and axes
"""
fig, ax = plt.subplots(figsize=figsize)
size, obj_num = obs_values.shape
if mask_fail is not None:
obs_values[mask_fail] = [np.inf] * obj_num
_idxs = np.arange(1, size + 1)
ax.plot(_idxs, [metric_func(obs_values[:pts, :]) for pts in _idxs], color="tab:orange")
ax.axvline(x=num_init - 0.5, color="tab:blue")
return fig, ax
def plot_regret(
obs_values: TensorType,
ax: Axes,
num_init: int,
show_obs: bool = True,
mask_fail: Optional[TensorType] = None,
idx_best: Optional[int] = None,
m_init: str = "x",
m_add: str = "o",
c_pass: str = "tab:green",
c_fail: str = "tab:red",
c_best: str = "tab:purple",
) -> None:
"""
Draws the simple regret with same colors / markers as the other plots.
:param obs_values: TF Tensor or numpy array of objective values
:param ax: a plt axes object
:param show_obs: show observations
:param num_init: initial number of BO points
:param mask_fail: Bool vector, True if the corresponding observation violates the constraint(s)
:param idx_best: index of the best BO point
:param m_init: marker for the initial BO points
:param m_add: marker for the other BO points
:param c_pass: color for the regular BO points
:param c_fail: color for the failed BO points
:param c_best: color for the best BO points
"""
col_pts, mark_pts = format_point_markers(
obs_values.shape[0], num_init, idx_best, mask_fail, m_init, m_add, c_pass, c_fail, c_best
)
safe_obs_values = obs_values.copy()
if mask_fail is not None:
safe_obs_values[mask_fail] = np.max(obs_values)
ax.plot(np.minimum.accumulate(safe_obs_values), color="tab:orange")
if show_obs:
for i in range(obs_values.shape[0]):
ax.scatter(i, obs_values[i], c=col_pts[i], marker=mark_pts[i])
ax.axvline(x=num_init - 0.5, color="tab:blue")
def plot_gp_2d(
model: GPModel,
mins: TensorType,
maxs: TensorType,
grid_density: int = 100,
contour: bool = False,
xlabel: Optional[str] = None,
ylabel: Optional[str] = None,
figsize: Optional[tuple[float, float]] = (8, 6),
predict_y: bool = False,
) -> tuple[Figure, Axes]:
"""
2D/3D plot of a gp model for a grid of size grid_density**2 between mins and maxs
:param model: a gpflow model
:param mins: 2 lower bounds
:param maxs: 2 upper bounds
:param grid_density: positive integer for the grid size
:param contour: Boolean. If False, a 3d plot is produced
:param xlabel: optional string
:param ylabel: optional string
:param figsize: optional figsize
:param predict_y: predict_y or predict_f
:return: figure and axes
"""
mins = to_numpy(mins)
maxs = to_numpy(maxs)
# Create a regular grid on the parameter space
Xplot, xx, yy = create_grid(mins=mins, maxs=maxs, grid_density=grid_density)
# Evaluate objective function
if predict_y:
Fmean, Fvar = model.predict_y(Xplot)
else:
Fmean, Fvar = model.predict_f(Xplot)
n_output = Fmean.shape[1]
if contour:
fig, ax = plt.subplots(
n_output, 2, squeeze=False, sharex="all", sharey="all", figsize=figsize
)
ax[0, 0].set_xlim(mins[0], maxs[0])
ax[0, 0].set_ylim(mins[1], maxs[1])
else:
fig = plt.figure(figsize=figsize)
for k in range(n_output):
# Apply log transformation
fmean = Fmean[:, k].numpy()
fvar = Fvar[:, k].numpy()
# Either plot contour of surface
if contour:
axx = ax[k, 0]
plot_surface(xx, yy, fmean, ax[k, 0], contour=contour, alpha=1.0)
plot_surface(xx, yy, fvar, ax[k, 1], contour=contour, alpha=1.0)
ax[k, 0].set_title("mean")
ax[k, 1].set_title("variance")
ax[k, 0].set_xlabel(xlabel)
ax[k, 0].set_ylabel(ylabel)
ax[k, 1].set_xlabel(xlabel)
ax[k, 1].set_ylabel(ylabel)
else:
ax = axx = fig.add_subplot(1, n_output, k + 1, projection="3d")
plot_surface(xx, yy, fmean, axx, contour=contour, alpha=0.5)
ucb = fmean + 2.0 * np.sqrt(fvar)
lcb = fmean - 2.0 * np.sqrt(fvar)
plot_surface(xx, yy, ucb, axx, contour=contour, alpha=0.1)
plot_surface(xx, yy, lcb, axx, contour=contour, alpha=0.1)
axx.set_xlabel(xlabel)
axx.set_ylabel(ylabel)
axx.set_xlim(mins[0], maxs[0])
axx.set_ylim(mins[1], maxs[1])
return fig, ax
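# Illustrative sketch: plotting the posterior of a GPflow model over the unit square. The
# `model` argument is a hypothetical placeholder for any trained two-input GPModel.
def _example_plot_gp_2d(model: GPModel) -> tuple[Figure, Axes]:
    return plot_gp_2d(
        model,
        mins=np.array([0.0, 0.0]),
        maxs=np.array([1.0, 1.0]),
        grid_density=50,
        contour=True,
        xlabel="$x_1$",
        ylabel="$x_2$",
    )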
| 17,758 | 32.070764 | 99 | py |
trieste-develop | trieste-develop/trieste/experimental/plotting/__init__.py | # Copyright 2022 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Experimental plotting package. Not intended for production code, as it is not yet fully
tested and may change quickly.
"""
try:
from .inequality_constraints import (
plot_2obj_cst_query_points,
plot_init_query_points,
plot_objective_and_constraints,
)
from .plotting import (
plot_acq_function_2d,
plot_bo_points,
plot_function_2d,
plot_gp_2d,
plot_mobo_history,
plot_mobo_points_in_obj_space,
plot_regret,
)
from .plotting_plotly import (
add_bo_points_plotly,
plot_function_plotly,
plot_model_predictions_plotly,
)
except Exception as e:
print(
"trieste.experimental.plotting requires matplotlib and plotly to be installed."
"\nOne way to do this is to install 'trieste[plotting]'."
)
raise e
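# Note (assumption based on the message above): installing the optional extra,
# e.g. `pip install "trieste[plotting]"`, pulls in matplotlib and plotly so
# that these imports succeed.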
| 1,444 | 31.111111 | 87 | py |
trieste-develop | trieste-develop/tests/conftest.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import pytest
from _pytest.config import Config
from _pytest.config.argparsing import Parser
def pytest_addoption(parser: Parser) -> None:
parser.addoption(
"--runslow",
action="store",
default="no",
choices=("yes", "no", "only"),
help="whether to run slow tests",
)
parser.addoption(
"--qhsri",
action="store",
default="no",
choices=("yes", "no", "only"),
help="whether to run qhsri tests",
)
parser.addoption(
"--eager",
action="store_true",
default=False,
help="whether to run all functions eagerly",
)
def pytest_configure(config: Config) -> None:
config.addinivalue_line("markers", "slow: mark test as slow to run")
config.addinivalue_line("markers", "qhsri: mark test as requiring qhsri dependencies")
def pytest_collection_modifyitems(config: Config, items: list[pytest.Item]) -> None:
if config.getoption("--runslow") == "no":
skip_slow = pytest.mark.skip(reason="need --runslow option to run")
for item in items:
if "slow" in item.keywords:
item.add_marker(skip_slow)
elif config.getoption("--runslow") == "only":
        skip_fast = pytest.mark.skip(reason="--runslow=only option set")
for item in items:
if "slow" not in item.keywords:
item.add_marker(skip_fast)
if config.getoption("--qhsri") == "no":
skip_qhsri = pytest.mark.skip(reason="need --qhsri option to run")
for item in items:
if "qhsri" in item.keywords:
item.add_marker(skip_qhsri)
if config.getoption("--qhsri") == "only":
skip_non_qhsri = pytest.mark.skip(reason="--qhsri only option set")
for item in items:
if "qhsri" not in item.keywords:
item.add_marker(skip_non_qhsri)
if config.getoption("--eager"):
import tensorflow as tf
tf.config.experimental_run_functions_eagerly(True)
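# Illustrative command lines for the options defined above (paths are
# placeholders for a local checkout):
#
#     pytest tests --runslow yes    # include tests marked @pytest.mark.slow
#     pytest tests --runslow only   # run only the slow tests
#     pytest tests --qhsri yes      # include tests needing qhsri dependencies
#     pytest tests --eager          # run TensorFlow functions eagerly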
| 2,636 | 33.246753 | 90 | py |
trieste-develop | trieste-develop/tests/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/test_setup.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import json
import re
from pathlib import Path
from typing import Any
import pytest
import yaml
BASE_PATH = Path(__file__).parents[2]
VERSION = BASE_PATH / "trieste" / "VERSION"
VERSIONS = BASE_PATH / "versions.json"
CITATION = BASE_PATH / "CITATION.cff"
REDIRECT = BASE_PATH / "redirect.html"
REDIRECT_AUTOAPI = BASE_PATH / "redirect_autoapi.html"
REDIRECT_TUTORIALS = BASE_PATH / "redirect_tutorials.html"
@pytest.fixture(name="version")
def _version() -> str:
print(__file__)
return VERSION.read_text().strip()
@pytest.fixture(name="versions")
def _versions() -> list[dict[str, Any]]:
with open(VERSIONS) as f:
return json.load(f)
@pytest.fixture(name="citation")
def _citation() -> list[dict[str, Any]]:
with open(CITATION) as f:
return yaml.safe_load(f)
@pytest.fixture(name="redirect")
def _redirect() -> str:
return REDIRECT.read_text()
@pytest.fixture(name="redirect_autoapi")
def _redirect_autoapi() -> str:
return REDIRECT_AUTOAPI.read_text()
@pytest.fixture(name="redirect_tutorials")
def _redirect_tutorials() -> str:
return REDIRECT_TUTORIALS.read_text()
def test_version_is_valid(version: str) -> None:
assert re.match(r"\d+\.\d+\.\d+", version)
def test_versions_is_valid(versions: list[dict[str, Any]]) -> None:
assert isinstance(versions, list)
for v in versions:
assert isinstance(v, dict)
assert set(v.keys()) == {"version", "url"}
assert all(isinstance(value, str) for value in v.values())
assert v["url"] == f"https://secondmind-labs.github.io/trieste/{v['version']}/"
def test_version_in_versions(version: str, versions: list[dict[str, Any]]) -> None:
assert any(v["version"] == version for v in versions)
def test_citation_version(version: str, citation: dict[str, Any]) -> None:
assert citation["version"] == version
def test_redirect_version(
version: str, redirect: str, redirect_autoapi: str, redirect_tutorials: str
) -> None:
assert "$VERSION/index.html" in redirect
assert "$VERSION/autoapi/trieste/index.html" in redirect_autoapi
assert "$VERSION/tutorials.html" in redirect_tutorials
| 2,776 | 28.860215 | 87 | py |
trieste-develop | trieste-develop/tests/unit/test_logging.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import tempfile
import unittest.mock
from collections.abc import Mapping
from itertools import zip_longest
from time import sleep
from typing import Optional
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import FixedAcquisitionRule, mk_dataset
from tests.util.models.gpflow.models import PseudoTrainableProbModel, QuadraticMeanAndRBFKernel
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.logging import (
SummaryFilter,
default_summary_filter,
get_current_name_scope,
get_step_number,
get_summary_filter,
get_tensorboard_writer,
histogram,
include_summary,
scalar,
set_step_number,
set_summary_filter,
set_tensorboard_writer,
step_number,
tensorboard_writer,
text,
)
from trieste.models import ProbabilisticModel
from trieste.space import Box, SearchSpace
from trieste.types import Tag, TensorType
class _PseudoTrainableQuadratic(QuadraticMeanAndRBFKernel, PseudoTrainableProbModel):
pass
def test_get_tensorboard_writer_default() -> None:
assert get_tensorboard_writer() is None
def test_set_get_tensorboard_writer() -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
set_tensorboard_writer(summary_writer)
assert get_tensorboard_writer() is summary_writer
set_tensorboard_writer(None)
assert get_tensorboard_writer() is None
def test_tensorboard_writer() -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
assert get_tensorboard_writer() is None
with tensorboard_writer(summary_writer):
assert get_tensorboard_writer() is summary_writer
with tensorboard_writer(None):
assert get_tensorboard_writer() is None
assert get_tensorboard_writer() is summary_writer
assert get_tensorboard_writer() is None
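# The pattern these tests exercise, in sketch form (`logdir` is a placeholder):
#
#     writer = tf.summary.create_file_writer(logdir)
#     with tensorboard_writer(writer):
#         scalar("loss/value", 0.5, step=42)   # written via the active writer
#
# On exiting the context manager the previously active writer (or None) is
# restored, which is what the assertions above check.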
@pytest.mark.parametrize("step", [0, 1, 42])
def test_set_get_step_number(step: int) -> None:
set_step_number(step)
assert get_step_number() == step
set_step_number(0)
assert get_step_number() == 0
@pytest.mark.parametrize("step", [0, 1, 42])
def test_step_number(step: int) -> None:
assert get_step_number() == 0
with step_number(step):
assert get_step_number() == step
with step_number(0):
assert get_step_number() == 0
assert get_step_number() == step
assert get_step_number() == 0
@pytest.mark.parametrize("fn", [lambda name: True, lambda name: False, lambda name: "a" in name])
def test_set_get_summary_filter(fn: SummaryFilter) -> None:
try:
set_summary_filter(fn)
assert get_summary_filter() is fn
finally:
set_summary_filter(default_summary_filter)
def test_get_current_name_scope() -> None:
assert get_current_name_scope() == ""
with tf.name_scope("outer"):
assert get_current_name_scope() == "outer"
with tf.name_scope("inner"):
assert get_current_name_scope() == "outer/inner"
assert get_current_name_scope() == "outer"
assert get_current_name_scope() == ""
def test_include_summary() -> None:
try:
set_summary_filter(lambda name: "foo" in name)
assert include_summary("foo")
assert not include_summary("bar")
with tf.name_scope("foo"):
assert include_summary("bar")
finally:
set_summary_filter(default_summary_filter)
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
def test_scalar(mocked_summary_scalar: unittest.mock.MagicMock) -> None:
scalar("this", 1, step=1)
scalar("_that", 2, step=2)
with tf.name_scope("foo"):
scalar("this", lambda: 3, step=3)
scalar("_that", lambda: 4, step=4)
scalar("broken", lambda: 1 / 0, step=5)
assert len(mocked_summary_scalar.call_args_list) == 2
for i, j in enumerate([1, 3]):
assert mocked_summary_scalar.call_args_list[i][0] == ("this", j)
assert mocked_summary_scalar.call_args_list[i][1] == {"step": j}
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
def test_histogram(mocked_summary_histogram: unittest.mock.MagicMock) -> None:
histogram("this", tf.constant(1), step=1)
histogram("_that", tf.constant(2), step=2)
with tf.name_scope("foo"):
histogram("this", lambda: tf.constant(3), step=3)
histogram("_that", lambda: tf.constant(4), step=4)
histogram("broken", lambda: tf.constant(1 / 0), step=5)
assert len(mocked_summary_histogram.call_args_list) == 2
for i, j in enumerate([1, 3]):
assert mocked_summary_histogram.call_args_list[i][0] == ("this", tf.constant(j))
assert mocked_summary_histogram.call_args_list[i][1] == {"step": j}
@unittest.mock.patch("trieste.logging.tf.summary.text")
def test_text(mocked_summary_histogram: unittest.mock.MagicMock) -> None:
text("this", "1", step=1)
text("_that", "2", step=2)
with tf.name_scope("foo"):
text("this", lambda: "3", step=3)
text("_that", lambda: "4", step=4)
text("broken", lambda: f"{1/0}", step=5)
assert len(mocked_summary_histogram.call_args_list) == 2
for i, j in enumerate([1, 3]):
assert mocked_summary_histogram.call_args_list[i][0] == ("this", str(j))
assert mocked_summary_histogram.call_args_list[i][1] == {"step": j}
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
def test_tensorboard_logging(mocked_summary_scalar: unittest.mock.MagicMock) -> None:
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
tag: Tag = "A"
data, models = {tag: mk_dataset([[0.0]], [[0.0]])}, {tag: _PseudoTrainableQuadratic()}
steps = 5
rule = FixedAcquisitionRule([[0.0]])
BayesianOptimizer(lambda x: {tag: Dataset(x, x**2)}, Box([-1], [1])).optimize(
steps, data, models, rule
)
ordered_scalar_names = [
"A.observation/best_new_observation",
"A.observation/best_overall",
"wallclock/model_fitting",
"query_point/[0]",
"wallclock/query_point_generation",
"wallclock/step",
]
for call_arg, scalar_name in zip_longest(
mocked_summary_scalar.call_args_list,
["wallclock/model_fitting"] + steps * ordered_scalar_names,
):
assert call_arg[0][0] == scalar_name
assert isinstance(call_arg[0][1], float)
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
@pytest.mark.parametrize("fit_model", ["all", "all_but_init", "never"])
def test_wallclock_time_logging(
mocked_summary_scalar: unittest.mock.MagicMock,
fit_model: str,
) -> None:
model_fit_time = 0.2
acq_time = 0.1
class _PseudoTrainableQuadraticWithWaiting(QuadraticMeanAndRBFKernel, PseudoTrainableProbModel):
def optimize(self, dataset: Dataset) -> None:
sleep(model_fit_time)
class _FixedAcquisitionRuleWithWaiting(FixedAcquisitionRule):
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
sleep(acq_time)
return self._qp
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
tag: Tag = "A"
data, models = {tag: mk_dataset([[0.0]], [[0.0]])}, {
tag: _PseudoTrainableQuadraticWithWaiting()
}
steps = 3
rule = _FixedAcquisitionRuleWithWaiting([[0.0]])
BayesianOptimizer(lambda x: {tag: Dataset(x, x**2)}, Box([-1], [1])).optimize(
steps,
data,
models,
rule,
fit_model=fit_model in ["all", "all_but_init"],
fit_initial_model=fit_model in ["all"],
)
other_scalars = 0
for i, call_arg in enumerate(mocked_summary_scalar.call_args_list):
name = call_arg[0][0]
value = call_arg[0][1]
if fit_model == "all" and i == 0:
assert name == "wallclock/model_fitting"
if name.startswith("wallclock"):
assert value > 0 # want positive wallclock times
if name == "wallclock/query_point_generation":
npt.assert_allclose(value, acq_time, rtol=0.01)
elif name == "wallclock/step":
total_time = acq_time if fit_model == "never" else model_fit_time + acq_time
npt.assert_allclose(value, total_time, rtol=0.1)
elif name == "wallclock/model_fitting":
model_time = 0.0 if fit_model == "never" else model_fit_time
npt.assert_allclose(value, model_time, atol=0.01)
else:
other_scalars += 1
# check that we processed all the wallclocks we were expecting
total_wallclocks = other_scalars + 3 * steps
if fit_model == "all":
total_wallclocks += 1
assert len(mocked_summary_scalar.call_args_list) == total_wallclocks
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
def test_tensorboard_logging_ask_tell(mocked_summary_scalar: unittest.mock.MagicMock) -> None:
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
tag: Tag = "A"
data, models = {tag: mk_dataset([[0.0]], [[0.0]])}, {tag: _PseudoTrainableQuadratic()}
rule = FixedAcquisitionRule([[0.0]])
ask_tell = AskTellOptimizer(Box([-1], [1]), data, models, rule)
with step_number(3):
new_point = ask_tell.ask()
ask_tell.tell({tag: Dataset(new_point, new_point**2)})
ordered_scalar_names = [
"wallclock/model_fitting",
"query_point/[0]",
"wallclock/query_point_generation",
"A.observation/best_new_observation",
"A.observation/best_overall",
"wallclock/model_fitting",
]
for call_arg, scalar_name in zip_longest(
mocked_summary_scalar.call_args_list, ordered_scalar_names
):
assert call_arg[0][0] == scalar_name
assert isinstance(call_arg[0][1], float)
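# Summary of the ask/tell flow exercised above (same toy objects as the test,
# restated here only for readability):
#
#     ask_tell = AskTellOptimizer(Box([-1], [1]), data, models, rule)
#     with step_number(3):
#         new_point = ask_tell.ask()
#         ask_tell.tell({tag: Dataset(new_point, new_point**2)})
#
# The step_number context sets the TensorBoard step used for any summaries
# logged while asking/telling.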
| 10,994 | 36.271186 | 100 | py |
trieste-develop | trieste-develop/tests/unit/test_data.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import ShapeLike, assert_datasets_allclose
from trieste.data import (
Dataset,
add_fidelity_column,
check_and_extract_fidelity_query_points,
get_dataset_for_fidelity,
split_dataset_by_fidelity,
)
from trieste.utils import shapes_equal
@pytest.mark.parametrize(
"query_points, observations",
[
(tf.constant([[]]), tf.constant([[]])),
(tf.constant([[0.0], [1.0], [2.0]]), tf.constant([[], [], []])),
(tf.constant([[], [], []]), tf.constant([[0.0], [1.0], [2.0]])),
],
)
def test_dataset_raises_for_zero_dimensional_data(
query_points: tf.Tensor, observations: tf.Tensor
) -> None:
with pytest.raises(ValueError):
Dataset(query_points, observations)
@pytest.mark.parametrize(
"query_points_leading_shape, observations_leading_shape",
[
((1,), (2,)),
((2,), (1,)),
((5, 6), (5, 4)),
((5, 6), (4, 6)),
((5, 6), (4, 4)),
],
)
@pytest.mark.parametrize("last_dim_size", [1, 5])
def test_dataset_raises_for_different_leading_shapes(
query_points_leading_shape: tuple[int, ...],
observations_leading_shape: tuple[int, ...],
last_dim_size: int,
) -> None:
query_points = tf.zeros(query_points_leading_shape + (last_dim_size,))
observations = tf.ones(observations_leading_shape + (last_dim_size,))
with pytest.raises(ValueError, match="(L|l)eading"):
Dataset(query_points, observations)
@pytest.mark.parametrize(
"query_points_shape, observations_shape",
[
((1, 2), (1,)),
((1, 2), (1, 2, 3)),
],
)
def test_dataset_raises_for_different_ranks(
query_points_shape: ShapeLike, observations_shape: ShapeLike
) -> None:
query_points = tf.zeros(query_points_shape)
observations = tf.ones(observations_shape)
with pytest.raises(ValueError):
Dataset(query_points, observations)
@pytest.mark.parametrize(
"query_points_shape, observations_shape",
[
((), ()),
((), (10,)),
((10,), (10,)),
((1, 2), (1,)),
((1, 2), (1, 2, 3)),
],
)
def test_dataset_raises_for_invalid_ranks(
query_points_shape: ShapeLike, observations_shape: ShapeLike
) -> None:
query_points = tf.zeros(query_points_shape)
observations = tf.ones(observations_shape)
with pytest.raises(ValueError):
Dataset(query_points, observations)
def test_dataset_getters() -> None:
query_points, observations = tf.constant([[0.0]]), tf.constant([[1.0]])
dataset = Dataset(query_points, observations)
assert dataset.query_points.dtype == query_points.dtype
assert dataset.observations.dtype == observations.dtype
assert shapes_equal(dataset.query_points, query_points)
assert shapes_equal(dataset.observations, observations)
assert tf.reduce_all(dataset.query_points == query_points)
assert tf.reduce_all(dataset.observations == observations)
@pytest.mark.parametrize(
"lhs, rhs, expected",
[
( # lhs and rhs populated
Dataset(tf.constant([[1.2, 3.4], [5.6, 7.8]]), tf.constant([[1.1], [2.2]])),
Dataset(tf.constant([[5.0, 6.0], [7.0, 8.0]]), tf.constant([[-1.0], [-2.0]])),
Dataset(
# fmt: off
tf.constant([[1.2, 3.4], [5.6, 7.8], [5.0, 6.0], [7.0, 8.0]]),
tf.constant([[1.1], [2.2], [-1.0], [-2.0]]),
# fmt: on
),
),
( # lhs populated
Dataset(tf.constant([[1.2, 3.4], [5.6, 7.8]]), tf.constant([[1.1], [2.2]])),
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
Dataset(tf.constant([[1.2, 3.4], [5.6, 7.8]]), tf.constant([[1.1], [2.2]])),
),
( # rhs populated
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
Dataset(tf.constant([[1.2, 3.4], [5.6, 7.8]]), tf.constant([[1.1], [2.2]])),
Dataset(tf.constant([[1.2, 3.4], [5.6, 7.8]]), tf.constant([[1.1], [2.2]])),
),
( # both empty
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
),
],
)
def test_dataset_concatenation(lhs: Dataset, rhs: Dataset, expected: Dataset) -> None:
assert_datasets_allclose(lhs + rhs, expected)
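# Minimal sketch of the behaviour covered above (values are arbitrary):
#
#     data = Dataset(tf.constant([[0.0], [1.0]]), tf.constant([[0.0], [1.0]]))
#     more = Dataset(tf.constant([[2.0]]), tf.constant([[4.0]]))
#     combined = data + more        # concatenates along the leading axis
#     assert len(combined) == 3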
@pytest.mark.parametrize(
"lhs, rhs",
[
( # incompatible query points shape
Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
Dataset(tf.constant([[1.0, 1.0]]), tf.constant([[1.0]])),
),
( # incompatible observations shape
Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
Dataset(tf.constant([[1.0]]), tf.constant([[1.0, 1.0]])),
),
( # incompatible query points dtype
Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
Dataset(tf.constant([[1.0]], dtype=tf.float64), tf.constant([[1.0]])),
),
( # incompatible observations dtype
Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
Dataset(tf.constant([[1.0]]), tf.constant([[1.0]], dtype=tf.float64)),
),
],
)
def test_dataset_concatenation_raises_for_incompatible_data(lhs: Dataset, rhs: Dataset) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
lhs + rhs
with pytest.raises(tf.errors.InvalidArgumentError):
rhs + lhs
@pytest.mark.parametrize(
"data, length",
[
(Dataset(tf.ones((7, 8, 10)), tf.ones((7, 8, 13))), 7),
(Dataset(tf.ones([0, 2]), tf.ones([0, 1])), 0),
(Dataset(tf.ones([1, 0, 2]), tf.ones([1, 0, 1])), 1),
],
)
def test_dataset_length(data: Dataset, length: int) -> None:
assert len(data) == length
def test_dataset_deepcopy() -> None:
data = Dataset(tf.constant([[0.0, 1.0]]), tf.constant([[2.0]]))
assert_datasets_allclose(data, copy.deepcopy(data))
def test_dataset_astuple() -> None:
qp, obs = tf.constant([[0.0]]), tf.constant([[1.0]])
qp_from_astuple, obs_from_astuple = Dataset(qp, obs).astuple()
assert qp_from_astuple is qp
assert obs_from_astuple is obs
@pytest.mark.parametrize(
"query_points,is_valid,problem",
(
(tf.constant([[0.456, 0.0]]), True, "None"),
(tf.constant([[0.456, 0.001]]), False, "bad_fidelity"),
(tf.constant([[0.456]]), False, "no_fidelity"),
),
)
def test_check_fidelity_query_points(query_points: tf.Tensor, is_valid: bool, problem: str) -> None:
if is_valid:
check_and_extract_fidelity_query_points(query_points)
else:
if problem == "bad_fidelity":
with pytest.raises(tf.errors.InvalidArgumentError):
check_and_extract_fidelity_query_points(query_points)
elif problem == "no_fidelity":
with pytest.raises(ValueError):
check_and_extract_fidelity_query_points(query_points)
def test_multifidelity_split_dataset_by_fidelity() -> None:
fidelity_0 = Dataset(tf.constant([[0.456, 0.0], [0.789, 0.0]]), tf.constant([[0.2], [0.3]]))
fidelity_1 = Dataset(tf.constant([[0.123, 1.0]]), tf.constant([[0.1]]))
fidelity_0_out_truth = Dataset(fidelity_0.query_points[:, :-1], fidelity_0.observations)
fidelity_1_out_truth = Dataset(fidelity_1.query_points[:, :-1], fidelity_1.observations)
data = fidelity_1 + fidelity_0
fidelity_0_out, fidelity_1_out = split_dataset_by_fidelity(data, 2)
assert_datasets_allclose(fidelity_0_out, fidelity_0_out_truth)
assert_datasets_allclose(fidelity_1_out, fidelity_1_out_truth)
def test_multifidelity_split_dataset_by_fidelity_with_fidelity_gap() -> None:
fidelity_0 = Dataset(tf.constant([[0.456, 0.0], [0.789, 0.0]]), tf.constant([[0.2], [0.3]]))
fidelity_2 = Dataset(tf.constant([[0.123, 2.0]]), tf.constant([[0.1]]))
fidelity_0_out_truth = Dataset(fidelity_0.query_points[:, :-1], fidelity_0.observations)
fidelity_2_out_truth = Dataset(fidelity_2.query_points[:, :-1], fidelity_2.observations)
data = fidelity_2 + fidelity_0
fidelity_0_out, fidelity_1_out, fidelity_2_out = split_dataset_by_fidelity(data, 3)
assert tf.equal(tf.size(fidelity_1_out.query_points), 0)
assert tf.equal(tf.size(fidelity_1_out.observations), 0)
assert_datasets_allclose(fidelity_0_out, fidelity_0_out_truth)
assert_datasets_allclose(fidelity_2_out, fidelity_2_out_truth)
def test_multifidelity_split_dataset_by_fidelity_raises_for_bad_fidelity() -> None:
fidelity_0 = Dataset(tf.constant([[0.456, 0.0], [0.789, 0.0]]), tf.constant([[0.2], [0.3]]))
fidelity_1 = Dataset(tf.constant([[0.123, 1.0]]), tf.constant([[0.1]]))
data = fidelity_1 + fidelity_0
with pytest.raises(ValueError):
split_dataset_by_fidelity(data, -1)
def test_multifidelity_get_dataset_for_fidelity() -> None:
mixed_fidelity_dataset = Dataset(
tf.constant([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 0.0], [4.0, 2.0]]),
tf.constant([[0.1], [0.2], [0.3], [0.4], [0.5]]),
)
fidelity_zero_truth_dataset = Dataset(tf.constant([[0.0], [3.0]]), tf.constant([[0.1], [0.4]]))
fidelity_zero_out_dataset = get_dataset_for_fidelity(mixed_fidelity_dataset, fidelity=0)
assert_datasets_allclose(fidelity_zero_out_dataset, fidelity_zero_truth_dataset)
def test_multifidelity_add_fidelity_column() -> None:
fidelity_zero_query_points = tf.constant([[0.0, 0.0], [1.0, 0.0]])
fidelity_removed_query_points = fidelity_zero_query_points[:, :-1]
fidelity_zero_out_query_points = add_fidelity_column(fidelity_removed_query_points, fidelity=0)
npt.assert_allclose(fidelity_zero_out_query_points, fidelity_zero_query_points)
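# Sketch of the multi-fidelity helpers used in these tests (illustrative only;
# `dataset` is a placeholder for any Dataset whose query points carry a
# trailing fidelity column):
#
#     qp = add_fidelity_column(tf.constant([[0.456], [0.789]]), fidelity=0)
#     # qp == [[0.456, 0.0], [0.789, 0.0]] -- the last column holds the fidelity
#     check_and_extract_fidelity_query_points(qp)   # raises if that column is invalid
#     low, high = split_dataset_by_fidelity(dataset, 2)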
| 10,398 | 36.814545 | 99 | py |
trieste-develop | trieste-develop/tests/unit/test_space.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import itertools
import operator
from functools import reduce
from typing import Container, Optional, Sequence
import numpy.testing as npt
import pytest
import tensorflow as tf
from typing_extensions import Final
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, various_shapes
from trieste.space import (
Box,
Constraint,
DiscreteSearchSpace,
LinearConstraint,
NonlinearConstraint,
SearchSpace,
TaggedProductSearchSpace,
)
from trieste.types import TensorType
class Integers(SearchSpace):
def __init__(self, exclusive_limit: int):
assert exclusive_limit > 0
self.limit: Final[int] = exclusive_limit
@property
def lower(self) -> None:
pass
@property
def upper(self) -> None:
pass
def sample(self, num_samples: int, seed: Optional[int] = None) -> tf.Tensor:
return tf.random.shuffle(tf.range(self.limit), seed=seed)[:num_samples]
def _contains(self, point: tf.Tensor) -> bool | TensorType:
tf.debugging.assert_integer(point)
return 0 <= point < self.limit
def product(self, other: Integers) -> Integers:
return Integers(self.limit * other.limit)
@property
def dimension(self) -> TensorType:
pass
def __eq__(self, other: object) -> bool:
if not isinstance(other, Integers):
return NotImplemented
return self.limit == other.limit
@pytest.mark.parametrize("exponent", [0, -2])
def test_search_space___pow___raises_for_non_positive_exponent(exponent: int) -> None:
space = Integers(3)
with pytest.raises(tf.errors.InvalidArgumentError):
space**exponent
def test_search_space___pow___multiplies_correct_number_of_search_spaces() -> None:
assert (Integers(5) ** 7).limit == 5**7
def _points_in_2D_search_space() -> tf.Tensor:
return tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4], [0.0, 0.6], [1.0, 0.4], [1.0, 0.6]])
@pytest.mark.parametrize("shape", various_shapes(excluding_ranks=[2]))
def test_discrete_search_space_raises_for_invalid_shapes(shape: ShapeLike) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
DiscreteSearchSpace(tf.random.uniform(shape))
def test_discrete_search_space_points() -> None:
space = DiscreteSearchSpace(_points_in_2D_search_space())
npt.assert_array_equal(space.points, _points_in_2D_search_space())
@pytest.mark.parametrize("point", list(_points_in_2D_search_space()))
def test_discrete_search_space_contains_all_its_points(point: tf.Tensor) -> None:
space = DiscreteSearchSpace(_points_in_2D_search_space())
assert point in space
assert space.contains(point)
def test_discrete_search_space_contains_all_its_points_at_once() -> None:
points = _points_in_2D_search_space()
space = DiscreteSearchSpace(points)
contains = space.contains(points)
assert len(contains) == len(points)
assert tf.reduce_all(contains)
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.0, -0.4]),
tf.constant([-1.0, 0.5]),
tf.constant([-2.0, 0.4]),
tf.constant([-2.0, 0.7]),
],
)
def test_discrete_search_space_does_not_contain_other_points(point: tf.Tensor) -> None:
space = DiscreteSearchSpace(_points_in_2D_search_space())
assert point not in space
assert not space.contains(point)
def test_discrete_search_space_contains_some_points_but_not_others() -> None:
points = tf.constant([[-1.0, -0.4], [-1.0, 0.4], [-1.0, 0.5]])
space = DiscreteSearchSpace(_points_in_2D_search_space())
contains = space.contains(points)
assert list(contains) == [False, True, False]
@pytest.mark.parametrize(
"test_points, contains",
[
(tf.constant([[0.0, 0.0], [1.0, 1.0]]), tf.constant([True, False])),
(tf.constant([[[0.0, 0.0]]]), tf.constant([[True]])),
],
)
def test_discrete_search_space_contains_handles_broadcast(
test_points: tf.Tensor, contains: tf.Tensor
) -> None:
space = DiscreteSearchSpace(tf.constant([[0.0, 0.0]]))
tf.assert_equal(contains, space.contains(test_points))
# point in space raises (because python insists on a bool)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = test_points in space
@pytest.mark.parametrize(
"space, dimension",
[
(DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])), 1), # 1d
(DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])), 2), # 2d
],
)
def test_discrete_search_space_returns_correct_dimension(
space: DiscreteSearchSpace, dimension: int
) -> None:
assert space.dimension == dimension
@pytest.mark.parametrize(
"space, lower, upper",
[
(
DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])),
tf.constant([-0.5]),
tf.constant([1.7]),
), # 1d
(
DiscreteSearchSpace(tf.constant([[-0.5, 0.3], [1.2, -0.4]])),
tf.constant([-0.5, -0.4]),
tf.constant([1.2, 0.3]),
), # 2d
],
)
def test_discrete_search_space_returns_correct_bounds(
space: DiscreteSearchSpace, lower: tf.Tensor, upper: tf.Tensor
) -> None:
npt.assert_array_equal(space.lower, lower)
npt.assert_array_equal(space.upper, upper)
@pytest.mark.parametrize(
"points, test_point",
[
(tf.constant([[0.0]]), tf.constant([0.0, 0.0])),
(tf.constant([[0.0, 0.0]]), tf.constant(0.0)),
(tf.constant([[0.0, 0.0]]), tf.constant([0.0])),
(tf.constant([[0.0, 0.0]]), tf.constant([0.0, 0.0, 0.0])),
],
)
def test_discrete_search_space_contains_raises_for_invalid_shapes(
points: tf.Tensor, test_point: tf.Tensor
) -> None:
space = DiscreteSearchSpace(points)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = test_point in space
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = space.contains(test_point)
@pytest.mark.parametrize("num_samples", [0, 1, 3, 5, 6, 10, 20])
def test_discrete_search_space_sampling(num_samples: int) -> None:
search_space = DiscreteSearchSpace(_points_in_2D_search_space())
samples = search_space.sample(num_samples)
assert all(sample in search_space for sample in samples)
assert len(samples) == num_samples
@pytest.mark.parametrize("seed", [1, 42, 123])
def test_discrete_search_space_sampling_returns_same_points_for_same_seed(seed: int) -> None:
search_space = DiscreteSearchSpace(_points_in_2D_search_space())
random_samples_1 = search_space.sample(num_samples=100, seed=seed)
random_samples_2 = search_space.sample(num_samples=100, seed=seed)
npt.assert_allclose(random_samples_1, random_samples_2)
def test_discrete_search_space_sampling_returns_different_points_for_different_call() -> None:
search_space = DiscreteSearchSpace(_points_in_2D_search_space())
random_samples_1 = search_space.sample(num_samples=100)
random_samples_2 = search_space.sample(num_samples=100)
npt.assert_raises(AssertionError, npt.assert_allclose, random_samples_1, random_samples_2)
def test_discrete_search_space___mul___points_is_the_concatenation_of_original_points() -> None:
dss1 = DiscreteSearchSpace(tf.constant([[-1.0, -1.4], [-1.5, -3.6], [-0.5, -0.6]]))
dss2 = DiscreteSearchSpace(tf.constant([[1.0, 1.4], [1.5, 3.6]]))
product = dss1 * dss2
all_expected_points = tf.constant(
[
[-1.0, -1.4, 1.0, 1.4],
[-1.0, -1.4, 1.5, 3.6],
[-1.5, -3.6, 1.0, 1.4],
[-1.5, -3.6, 1.5, 3.6],
[-0.5, -0.6, 1.0, 1.4],
[-0.5, -0.6, 1.5, 3.6],
]
)
assert len(product.points) == len(all_expected_points)
assert all(point in product for point in all_expected_points)
def test_discrete_search_space___mul___for_empty_search_space() -> None:
dss = DiscreteSearchSpace(tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))
empty = DiscreteSearchSpace(tf.zeros([0, 1]))
npt.assert_array_equal((empty * dss).points, tf.zeros([0, 3]))
npt.assert_array_equal((dss * empty).points, tf.zeros([0, 3]))
def test_discrete_search_space___mul___for_identity_space() -> None:
dss = DiscreteSearchSpace(tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]))
identity = DiscreteSearchSpace(tf.zeros([1, 0]))
npt.assert_array_equal((dss * identity).points, dss.points)
npt.assert_array_equal((identity * dss).points, dss.points)
def test_discrete_search_space___mul___raises_if_points_have_different_types() -> None:
dss1 = DiscreteSearchSpace(_points_in_2D_search_space())
dss2 = DiscreteSearchSpace(tf.constant([[1.0, 1.4], [-1.5, 3.6]], tf.float64))
with pytest.raises(TypeError):
_ = dss1 * dss2
def test_discrete_search_space_deepcopy() -> None:
dss = DiscreteSearchSpace(_points_in_2D_search_space())
npt.assert_allclose(copy.deepcopy(dss).points, _points_in_2D_search_space())
@pytest.mark.parametrize(
"lower, upper",
[
pytest.param([0.0, 1.0], [1.0, 2.0], id="lists"),
pytest.param((0.0, 1.0), (1.0, 2.0), id="tuples"),
pytest.param(range(2), range(1, 3), id="ranges"),
],
)
def test_box_converts_sequences_to_float64_tensors(
lower: Sequence[float], upper: Sequence[float]
) -> None:
box = Box(lower, upper)
assert tf.as_dtype(box.lower.dtype) is tf.float64
assert tf.as_dtype(box.upper.dtype) is tf.float64
npt.assert_array_equal(box.lower, [0.0, 1.0])
npt.assert_array_equal(box.upper, [1.0, 2.0])
def _pairs_of_shapes(
*, excluding_ranks: Container[int] = ()
) -> frozenset[tuple[ShapeLike, ShapeLike]]:
shapes = various_shapes(excluding_ranks=excluding_ranks)
return frozenset(itertools.product(shapes, shapes))
@pytest.mark.parametrize(
"lower_shape, upper_shape", _pairs_of_shapes(excluding_ranks={1}) | {((1,), (2,)), ((0,), (0,))}
)
def test_box_raises_if_bounds_have_invalid_shape(
lower_shape: ShapeLike, upper_shape: ShapeLike
) -> None:
lower, upper = tf.zeros(lower_shape), tf.ones(upper_shape)
if lower_shape == upper_shape == (0,):
Box(lower, upper) # empty box is ok
else:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
Box(lower, upper)
def test_box___mul___for_empty_search_space() -> None:
empty = Box(tf.zeros(0, dtype=tf.float64), tf.zeros(0, dtype=tf.float64))
cube = Box([0, 0, 0], [1, 1, 1])
npt.assert_array_equal((cube * empty).lower, cube.lower)
npt.assert_array_equal((cube * empty).upper, cube.upper)
npt.assert_array_equal((empty * cube).lower, cube.lower)
npt.assert_array_equal((empty * cube).upper, cube.upper)
@pytest.mark.parametrize(
"lower_dtype, upper_dtype",
[
(tf.uint32, tf.uint32), # same dtypes
(tf.int8, tf.uint16), # different dtypes ...
(tf.uint32, tf.float32),
(tf.float32, tf.float64),
(tf.float64, tf.bfloat16),
],
)
def test_box_raises_if_bounds_have_invalid_dtypes(
lower_dtype: tf.DType, upper_dtype: tf.DType
) -> None:
lower, upper = tf.zeros([3], dtype=lower_dtype), tf.ones([3], dtype=upper_dtype)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
Box(lower, upper)
@pytest.mark.parametrize(
"lower, upper",
[
(tf.ones((3,)), tf.ones((3,))), # all equal
(tf.ones((3,)) + 1, tf.ones((3,))), # lower all higher than upper
( # one lower higher than upper
tf.constant([2.3, -0.1, 8.0]),
tf.constant([3.0, -0.2, 8.0]),
),
(tf.constant([2.3, -0.1, 8.0]), tf.constant([3.0, -0.1, 8.0])), # one lower equal to upper
],
)
def test_box_raises_if_any_lower_bound_is_not_less_than_upper_bound(
lower: tf.Tensor, upper: tf.Tensor
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
Box(lower, upper)
@pytest.mark.parametrize(
"space, dimension",
[
(Box([-1], [2]), 1), # 1d
(Box([-1, -2], [1.5, 2.5]), 2), # 2d
(Box([-1, -2, -3], [1.5, 2.5, 3.5]), 3), # 3d
],
)
def test_box_returns_correct_dimension(space: Box, dimension: int) -> None:
assert space.dimension == dimension
def test_box_bounds_attributes() -> None:
lower, upper = tf.zeros([2]), tf.ones([2])
box = Box(lower, upper)
npt.assert_array_equal(box.lower, lower)
npt.assert_array_equal(box.upper, upper)
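# Quick illustration of the Box API exercised in the tests below (bounds and
# points are arbitrary):
#
#     box = Box([-1.0, 0.0], [2.0, 1.0])                        # float64 bounds
#     box.contains(tf.constant([0.5, 0.5], dtype=tf.float64))   # -> True
#     samples = box.sample(10)                                   # 10 points in the box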
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.0, 0.0, -2.0]), # lower bound
tf.constant([2.0, 1.0, -0.5]), # upper bound
tf.constant([0.5, 0.5, -1.5]), # approx centre
tf.constant([-1.0, 0.0, -1.9]), # near the edge
],
)
def test_box_contains_point(point: tf.Tensor) -> None:
box = Box(tf.constant([-1.0, 0.0, -2.0]), tf.constant([2.0, 1.0, -0.5]))
assert point in box
assert box.contains(point)
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.1, 0.0, -2.0]), # just outside
tf.constant([-0.5, -0.5, 1.5]), # negative of a contained point
tf.constant([10.0, -10.0, 10.0]), # well outside
],
)
def test_box_does_not_contain_point(point: tf.Tensor) -> None:
box = Box(tf.constant([-1.0, 0.0, -2.0]), tf.constant([2.0, 1.0, -0.5]))
assert point not in box
assert not box.contains(point)
@pytest.mark.parametrize(
"points, contains",
[
(tf.constant([[-1.0, 0.0, -2.0], [-1.1, 0.0, -2.0]]), tf.constant([True, False])),
(tf.constant([[[0.5, 0.5, -1.5]]]), tf.constant([[True]])),
],
)
def test_box_contains_broadcasts(points: tf.Tensor, contains: tf.Tensor) -> None:
box = Box(tf.constant([-1.0, 0.0, -2.0]), tf.constant([2.0, 1.0, -0.5]))
npt.assert_array_equal(contains, box.contains(points))
# point in space raises (because python insists on a bool)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = points in box
@pytest.mark.parametrize(
"bound_shape, point_shape",
(
(bs, ps)
for bs, ps in _pairs_of_shapes()
if bs[-1:] != ps[-1:] and len(bs) == 1 and bs != (0,)
),
)
def test_box_contains_raises_on_point_of_different_shape(
bound_shape: ShapeLike,
point_shape: ShapeLike,
) -> None:
box = Box(tf.zeros(bound_shape), tf.ones(bound_shape))
point = tf.zeros(point_shape)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = point in box
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = box.contains(point)
def _assert_correct_number_of_unique_constrained_samples(
num_samples: int, search_space: SearchSpace, samples: tf.Tensor
) -> None:
assert all(sample in search_space for sample in samples)
assert len(samples) == num_samples
unique_samples = set(tuple(sample.numpy().tolist()) for sample in samples)
assert len(unique_samples) == len(samples)
def _box_sampling_constraints() -> Sequence[LinearConstraint]:
return [LinearConstraint(A=tf.eye(3), lb=tf.zeros((3)) + 0.3, ub=tf.ones((3)) - 0.3)]
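# With A = I this constraint keeps each coordinate of x within [0.3, 0.7],
# since LinearConstraint encodes lb <= A @ x <= ub. The tests below then build
# a constrained box and draw feasible samples roughly like:
#
#     box = Box(tf.zeros((3,)), tf.ones((3,)), _box_sampling_constraints())
#     samples = box.sample_feasible(100)
#     assert all(box.is_feasible(samples))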
@pytest.mark.parametrize("num_samples", [0, 1, 10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sampling_returns_correct_shape(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
samples = box.sample_feasible(num_samples)
_assert_correct_number_of_unique_constrained_samples(num_samples, box, samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sobol_sampling_returns_correct_shape(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
sobol_samples = box.sample_sobol_feasible(num_samples)
_assert_correct_number_of_unique_constrained_samples(num_samples, box, sobol_samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_halton_sampling_returns_correct_shape(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
halton_samples = box.sample_halton_feasible(num_samples)
_assert_correct_number_of_unique_constrained_samples(num_samples, box, halton_samples)
@pytest.mark.parametrize("num_samples", [-1, -10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sampling_raises_for_invalid_sample_size(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
box.sample_feasible(num_samples)
@pytest.mark.parametrize("num_samples", [-1, -10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sobol_sampling_raises_for_invalid_sample_size(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
box.sample_sobol_feasible(num_samples)
@pytest.mark.parametrize("num_samples", [-1, -10])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_halton_sampling_raises_for_invalid_sample_size(
num_samples: int,
constraints: Sequence[LinearConstraint],
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
box.sample_halton_feasible(num_samples)
@pytest.mark.parametrize("seed", [1, 42, 123])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sampling_returns_same_points_for_same_seed(
seed: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
random_samples_1 = box.sample_feasible(num_samples=100, seed=seed)
random_samples_2 = box.sample_feasible(num_samples=100, seed=seed)
npt.assert_allclose(random_samples_1, random_samples_2)
@pytest.mark.parametrize("skip", [1, 10, 100])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sobol_sampling_returns_same_points_for_same_skip(
skip: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
sobol_samples_1 = box.sample_sobol_feasible(num_samples=100, skip=skip)
sobol_samples_2 = box.sample_sobol_feasible(num_samples=100, skip=skip)
npt.assert_allclose(sobol_samples_1, sobol_samples_2)
@pytest.mark.parametrize("seed", [1, 42, 123])
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_halton_sampling_returns_same_points_for_same_seed(
seed: int,
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
halton_samples_1 = box.sample_halton_feasible(num_samples=100, seed=seed)
halton_samples_2 = box.sample_halton_feasible(num_samples=100, seed=seed)
npt.assert_allclose(halton_samples_1, halton_samples_2)
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sampling_returns_different_points_for_different_call(
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
random_samples_1 = box.sample_feasible(num_samples=100)
random_samples_2 = box.sample_feasible(num_samples=100)
npt.assert_raises(AssertionError, npt.assert_allclose, random_samples_1, random_samples_2)
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_sobol_sampling_returns_different_points_for_different_call(
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
sobol_samples_1 = box.sample_sobol_feasible(num_samples=100)
sobol_samples_2 = box.sample_sobol_feasible(num_samples=100)
npt.assert_raises(AssertionError, npt.assert_allclose, sobol_samples_1, sobol_samples_2)
@pytest.mark.parametrize("constraints", [None, _box_sampling_constraints()])
def test_box_halton_sampling_returns_different_points_for_different_call(
constraints: Sequence[LinearConstraint],
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), constraints)
halton_samples_1 = box.sample_halton_feasible(num_samples=100)
halton_samples_2 = box.sample_halton_feasible(num_samples=100)
npt.assert_raises(AssertionError, npt.assert_allclose, halton_samples_1, halton_samples_2)
def test_box_sampling_with_constraints_returns_feasible_points() -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_sampling_constraints())
samples = box.sample_feasible(num_samples=100)
assert all(box.is_feasible(samples))
def test_box_sobol_sampling_with_constraints_returns_feasible_points() -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_sampling_constraints())
samples = box.sample_sobol_feasible(num_samples=100)
assert all(box.is_feasible(samples))
def test_box_halton_sampling_with_constraints_returns_feasible_points() -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_sampling_constraints())
samples = box.sample_halton_feasible(num_samples=100)
assert all(box.is_feasible(samples))
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_box_discretize_returns_search_space_with_only_points_contained_within_box(
num_samples: int,
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)))
dss = box.discretize(num_samples)
samples = dss.sample(num_samples)
assert all(sample in box for sample in samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_box_discretize_returns_search_space_with_correct_number_of_points(
num_samples: int,
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)))
dss = box.discretize(num_samples)
samples = dss.sample(num_samples)
assert len(samples) == num_samples
def test_box___mul___bounds_are_the_concatenation_of_original_bounds() -> None:
box1 = Box(tf.constant([0.0, 1.0]), tf.constant([2.0, 3.0]))
box2 = Box(tf.constant([4.1, 5.1, 6.1]), tf.constant([7.2, 8.2, 9.2]))
product = box1 * box2
npt.assert_allclose(product.lower, [0, 1, 4.1, 5.1, 6.1])
npt.assert_allclose(product.upper, [2, 3, 7.2, 8.2, 9.2])
def test_box___mul___raises_if_bounds_have_different_types() -> None:
box1 = Box(tf.constant([0.0, 1.0]), tf.constant([2.0, 3.0]))
box2 = Box(tf.constant([4.0, 5.0], tf.float64), tf.constant([6.0, 7.0], tf.float64))
with pytest.raises(TypeError):
_ = box1 * box2
def test_box_deepcopy() -> None:
box = Box(tf.constant([1.2, 3.4]), tf.constant([5.6, 7.8]))
box_copy = copy.deepcopy(box)
npt.assert_allclose(box.lower, box_copy.lower)
npt.assert_allclose(box.upper, box_copy.upper)
def test_product_space_raises_for_non_unique_subspace_names() -> None:
space_A = Box([-1, -2], [2, 3])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
TaggedProductSearchSpace(spaces=[space_A, space_B], tags=["A", "A"])
def test_product_space_raises_for_length_mismatch_between_spaces_and_tags() -> None:
space_A = Box([-1, -2], [2, 3])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
TaggedProductSearchSpace(spaces=[space_A, space_B], tags=["A", "B", "C"])
def test_product_space_subspace_tags_attribute() -> None:
decision_space = Box([-1, -2], [2, 3])
context_space = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
product_space = TaggedProductSearchSpace(
spaces=[context_space, decision_space], tags=["context", "decision"]
)
npt.assert_array_equal(product_space.subspace_tags, ["context", "decision"])
def test_product_space_subspace_tags_default_behaviour() -> None:
decision_space = Box([-1, -2], [2, 3])
context_space = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
product_space = TaggedProductSearchSpace(spaces=[context_space, decision_space])
npt.assert_array_equal(product_space.subspace_tags, ["0", "1"])
@pytest.mark.parametrize(
"spaces, dimension",
[
([DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]]))], 2),
([DiscreteSearchSpace(tf.constant([[-0.5]])), Box([-1], [2])], 2),
([Box([-1, -2], [2, 3]), DiscreteSearchSpace(tf.constant([[-0.5]]))], 3),
([Box([-1, -2], [2, 3]), Box([-1, -2], [2, 3]), Box([-1], [2])], 5),
],
)
def test_product_search_space_returns_correct_dimension(
spaces: Sequence[SearchSpace], dimension: int
) -> None:
for space in (TaggedProductSearchSpace(spaces=spaces), reduce(operator.mul, spaces)):
assert space.dimension == dimension
@pytest.mark.parametrize(
"spaces, lower, upper",
[
(
[DiscreteSearchSpace(tf.constant([[-0.5, 0.4], [1.2, -0.3]]))],
tf.constant([-0.5, -0.3]),
tf.constant([1.2, 0.4]),
),
(
[DiscreteSearchSpace(tf.constant([[-0.5]], dtype=tf.float64)), Box([-1.0], [2.0])],
tf.constant([-0.5, -1.0]),
tf.constant([-0.5, 2.0]),
),
(
[Box([-1, -2], [2, 3]), DiscreteSearchSpace(tf.constant([[-0.5]], dtype=tf.float64))],
tf.constant([-1.0, -2.0, -0.5]),
tf.constant([2.0, 3.0, -0.5]),
),
(
[Box([-1, -2], [2, 3]), Box([-1, -2], [2, 3]), Box([-1], [2])],
tf.constant([-1.0, -2.0, -1.0, -2.0, -1.0]),
tf.constant([2.0, 3.0, 2.0, 3.0, 2.0]),
),
],
)
def test_product_space_returns_correct_bounds(
spaces: Sequence[SearchSpace], lower: tf.Tensor, upper: tf.Tensor
) -> None:
for space in (TaggedProductSearchSpace(spaces=spaces), reduce(operator.mul, spaces)):
npt.assert_array_equal(space.lower, lower)
npt.assert_array_equal(space.upper, upper)
def test_product_space_get_subspace_raises_for_invalid_tag() -> None:
space_A = Box([-1, -2], [2, 3])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B], tags=["A", "B"])
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
product_space.get_subspace("dummy")
def test_product_space_get_subspace() -> None:
space_A = Box([-1, -2], [2, 3])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]]))
space_C = Box([-1], [2])
product_space = TaggedProductSearchSpace(
spaces=[space_A, space_B, space_C], tags=["A", "B", "C"]
)
subspace_A = product_space.get_subspace("A")
assert isinstance(subspace_A, Box)
npt.assert_array_equal(subspace_A.lower, [-1, -2])
npt.assert_array_equal(subspace_A.upper, [2, 3])
subspace_B = product_space.get_subspace("B")
assert isinstance(subspace_B, DiscreteSearchSpace)
npt.assert_array_equal(subspace_B.points, tf.constant([[-0.5, 0.5]]))
subspace_C = product_space.get_subspace("C")
assert isinstance(subspace_C, Box)
npt.assert_array_equal(subspace_C.lower, [-1])
npt.assert_array_equal(subspace_C.upper, [2])
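# Compact sketch of the tagged product-space pattern used throughout these
# tests (tags and bounds are arbitrary):
#
#     decision = Box([-1.0, -2.0], [2.0, 3.0])
#     context = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]], dtype=tf.float64))
#     space = TaggedProductSearchSpace([decision, context], tags=["decision", "context"])
#     space.dimension                    # 4: two box dimensions + two discrete ones
#     space.get_subspace("context")      # returns the DiscreteSearchSpace above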
@pytest.mark.parametrize(
"points",
[
tf.ones((1, 5), dtype=tf.float64),
tf.ones((2, 3), dtype=tf.float64),
],
)
def test_product_space_fix_subspace_fixes_desired_subspace(points: tf.Tensor) -> None:
spaces = [
Box([-1, -2], [2, 3]),
DiscreteSearchSpace(tf.constant([[-0.5]], dtype=tf.float64)),
Box([-1], [2]),
]
tags = ["A", "B", "C"]
product_space = TaggedProductSearchSpace(spaces=spaces, tags=tags)
for tag in tags:
product_space_with_fixed_subspace = product_space.fix_subspace(tag, points)
new_subspace = product_space_with_fixed_subspace.get_subspace(tag)
assert isinstance(new_subspace, DiscreteSearchSpace)
npt.assert_array_equal(new_subspace.points, points)
@pytest.mark.parametrize(
"points",
[
tf.ones((1, 5), dtype=tf.float64),
tf.ones((2, 3), dtype=tf.float64),
],
)
def test_product_space_fix_subspace_doesnt_fix_undesired_subspace(points: tf.Tensor) -> None:
spaces = [
Box([-1, -2], [2, 3]),
DiscreteSearchSpace(tf.constant([[-0.5]], dtype=tf.float64)),
Box([-1], [2]),
]
tags = ["A", "B", "C"]
product_space = TaggedProductSearchSpace(spaces=spaces, tags=tags)
for tag in tags:
product_space_with_fixed_subspace = product_space.fix_subspace(tag, points)
for other_tag in tags:
if other_tag != tag:
assert isinstance(
product_space_with_fixed_subspace.get_subspace(other_tag),
type(product_space.get_subspace(other_tag)),
)
@pytest.mark.parametrize(
"spaces, tags, subspace_dim_range",
[
([DiscreteSearchSpace(tf.constant([[-0.5]]))], ["A"], {"A": [0, 1]}),
(
[
DiscreteSearchSpace(tf.constant([[-0.5]])),
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
],
["A", "B"],
{"A": [0, 1], "B": [1, 3]},
),
(
[
Box([-1, -2], [2, 3]),
DiscreteSearchSpace(tf.constant([[-0.5]])),
Box([-1], [2]),
],
["A", "B", "C"],
{"A": [0, 2], "B": [2, 3], "C": [3, 4]},
),
],
)
def test_product_space_can_get_subspace_components(
spaces: list[SearchSpace],
tags: list[str],
subspace_dim_range: dict[str, list[int]],
) -> None:
space = TaggedProductSearchSpace(spaces, tags)
points = tf.random.uniform([10, space.dimension])
for tag in space.subspace_tags:
subspace_points = points[:, subspace_dim_range[tag][0] : subspace_dim_range[tag][1]]
npt.assert_array_equal(space.get_subspace_component(tag, points), subspace_points)
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.0, 0.0, -0.5, 0.5], dtype=tf.float64),
tf.constant([2.0, 3.0, -0.5, 0.5], dtype=tf.float64),
],
)
def test_product_space_contains_point(point: tf.Tensor) -> None:
space_A = Box([-1.0, -2.0], [2.0, 3.0])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
assert point in product_space
assert product_space.contains(point)
@pytest.mark.parametrize(
"point",
[
tf.constant([-1.1, 0.0, -0.5, 0.5], dtype=tf.float64), # just outside context space
tf.constant([-10, 10.0, -0.5, 0.5], dtype=tf.float64), # well outside context space
tf.constant([2.0, 3.0, 2.0, 7.0], dtype=tf.float64), # outside decision space
tf.constant([-10.0, -10.0, -10.0, -10.0], dtype=tf.float64), # outside both
tf.constant([-0.5, 0.5, 1.0, 2.0], dtype=tf.float64), # swap order of components
],
)
def test_product_space_does_not_contain_point(point: tf.Tensor) -> None:
space_A = Box([-1.0, -2.0], [2.0, 3.0])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
assert point not in product_space
assert not product_space.contains(point)
def test_product_space_contains_broadcasts() -> None:
space_A = Box([-1.0, -2.0], [2.0, 3.0])
space_B = DiscreteSearchSpace(tf.constant([[-0.5, 0.5]], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
points = tf.constant([[-1.1, 0.0, -0.5, 0.5], [-1.0, 0.0, -0.5, 0.5]], dtype=tf.float64)
tf.assert_equal(product_space.contains(points), [False, True])
# point in space raises (because python insists on a bool)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = points in product_space
@pytest.mark.parametrize(
"spaces",
[
[DiscreteSearchSpace(tf.constant([[-0.5]]))],
[
DiscreteSearchSpace(tf.constant([[-0.5]])),
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
],
[
Box([-1, -2], [2, 3]),
DiscreteSearchSpace(tf.constant([[-0.5]])),
Box([-1], [2]),
],
],
)
def test_product_space_contains_raises_on_point_of_different_shape(
spaces: Sequence[SearchSpace],
) -> None:
space = TaggedProductSearchSpace(spaces=spaces)
dimension = space.dimension
for wrong_input_shape in [dimension - 1, dimension + 1]:
point = tf.zeros([wrong_input_shape])
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = point in space
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
_ = space.contains(point)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_product_space_sampling_returns_correct_shape(num_samples: int) -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
for product_space in (TaggedProductSearchSpace(spaces=[space_A, space_B]), space_A * space_B):
samples = product_space.sample(num_samples)
npt.assert_array_equal(tf.shape(samples), [num_samples, 3])
@pytest.mark.parametrize("num_samples", [-1, -10])
def test_product_space_sampling_raises_for_invalid_sample_size(num_samples: int) -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
product_space.sample(num_samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_product_space_discretize_returns_search_space_with_only_points_contained_within_space(
num_samples: int,
) -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
dss = product_space.discretize(num_samples)
samples = dss.sample(num_samples)
assert all(sample in product_space for sample in samples)
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_product_space_discretize_returns_search_space_with_correct_number_of_points(
num_samples: int,
) -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
dss = product_space.discretize(num_samples)
samples = dss.sample(num_samples)
assert len(samples) == num_samples
@pytest.mark.parametrize("seed", [1, 42, 123])
def test_product_space_sampling_returns_same_points_for_same_seed(seed: int) -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.random.uniform([100, 2], dtype=tf.float64, seed=42))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
random_samples_1 = product_space.sample(num_samples=100, seed=seed)
random_samples_2 = product_space.sample(num_samples=100, seed=seed)
npt.assert_allclose(random_samples_1, random_samples_2)
def test_product_space_sampling_returns_different_points_for_different_call() -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.random.uniform([100, 2], dtype=tf.float64, seed=42))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B])
random_samples_1 = product_space.sample(num_samples=100)
random_samples_2 = product_space.sample(num_samples=100)
npt.assert_raises(AssertionError, npt.assert_allclose, random_samples_1, random_samples_2)
def test_product_space___mul___() -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
product_space_1 = TaggedProductSearchSpace(spaces=[space_A, space_B], tags=["A", "B"])
space_C = Box([-2, -2], [2, 3])
space_D = DiscreteSearchSpace(tf.ones([5, 3], dtype=tf.float64))
product_space_2 = TaggedProductSearchSpace(spaces=[space_C, space_D], tags=["C", "D"])
product_of_product_spaces = product_space_1 * product_space_2
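    # nested product spaces are auto-tagged with their index: "0" for product_space_1, "1" for product_space_2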
subspace_0 = product_of_product_spaces.get_subspace("0")
subspace_0_A = subspace_0.get_subspace("A") # type: ignore
assert isinstance(subspace_0_A, Box)
npt.assert_array_equal(subspace_0_A.lower, [-1])
npt.assert_array_equal(subspace_0_A.upper, [2])
subspace_0_B = subspace_0.get_subspace("B") # type: ignore
assert isinstance(subspace_0_B, DiscreteSearchSpace)
npt.assert_array_equal(subspace_0_B.points, tf.ones([100, 2], dtype=tf.float64))
subspace_1 = product_of_product_spaces.get_subspace("1")
subspace_1_C = subspace_1.get_subspace("C") # type: ignore
assert isinstance(subspace_1_C, Box)
npt.assert_array_equal(subspace_1_C.lower, [-2, -2])
npt.assert_array_equal(subspace_1_C.upper, [2, 3])
subspace_1_D = subspace_1.get_subspace("D") # type: ignore
assert isinstance(subspace_1_D, DiscreteSearchSpace)
npt.assert_array_equal(subspace_1_D.points, tf.ones([5, 3], dtype=tf.float64))
def test_product_search_space_deepcopy() -> None:
space_A = Box([-1], [2])
space_B = DiscreteSearchSpace(tf.ones([100, 2], dtype=tf.float64))
product_space = TaggedProductSearchSpace(spaces=[space_A, space_B], tags=["A", "B"])
copied_space = copy.deepcopy(product_space)
npt.assert_allclose(copied_space.get_subspace("A").lower, space_A.lower)
npt.assert_allclose(copied_space.get_subspace("A").upper, space_A.upper)
npt.assert_allclose(copied_space.get_subspace("B").points, space_B.points) # type: ignore
def test_product_space_handles_empty_spaces() -> None:
space_A = Box([-1, -2], [2, 3])
tag_A = TaggedProductSearchSpace(spaces=[space_A], tags=["A"])
tag_B = TaggedProductSearchSpace(spaces=[], tags=[])
tag_C = TaggedProductSearchSpace(spaces=[tag_A, tag_B], tags=["AA", "BB"])
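    # the empty product contributes no dimensions, so tag_C takes its bounds from space_A alone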
assert tag_C.dimension == 2
npt.assert_array_equal(tag_C.lower, [-1, -2])
npt.assert_array_equal(tag_C.upper, [2, 3])
npt.assert_array_equal(tag_C.subspace_tags, ["AA", "BB"])
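# scalar nonlinear constraint function used by the tests below: f(x) = x[0] - sin(x[1]), with an explicit trailing output dimension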
def _nlc_func(x: TensorType) -> TensorType:
c0 = x[..., 0] - tf.sin(x[..., 1])
c0 = tf.expand_dims(c0, axis=-1)
return c0
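# search-space equality ignores the ordering of discrete points but distinguishes differing bounds, tags and constraint definitions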
@pytest.mark.parametrize(
"a, b, equal",
[
(Box([-1], [2]), Box([-1], [2]), True),
(Box([-1], [2]), Box([0], [2]), False),
(Box([-1], [2]), DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])), False),
(
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
True,
),
(
DiscreteSearchSpace(tf.constant([[-0.5, -0.3]])),
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
False,
),
(
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [1.2, 0.4]])),
DiscreteSearchSpace(tf.constant([[1.2, 0.4], [-0.5, -0.3]])),
True,
),
(
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])]),
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])]),
True,
),
(
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])]),
TaggedProductSearchSpace([Box([-1], [1]), Box([3], [4])]),
False,
),
(
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])], tags=["A", "B"]),
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])], tags=["B", "A"]),
False,
),
(
TaggedProductSearchSpace([Box([-1], [1]), Box([1], [2])], tags=["A", "B"]),
TaggedProductSearchSpace([Box([1], [2]), Box([-1], [1])], tags=["B", "A"]),
False,
),
(
Box(
[-1],
[2],
[
NonlinearConstraint(_nlc_func, -1.0, 0.0),
LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2))),
],
),
Box(
[-1],
[2],
[
NonlinearConstraint(_nlc_func, -1.0, 0.0),
LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2))),
],
),
True,
),
(
Box(
[-1],
[2],
[
NonlinearConstraint(_nlc_func, -1.0, 0.0),
LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2))),
],
),
Box(
[-1],
[2],
[
NonlinearConstraint(_nlc_func, -1.0, 0.1),
LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2))),
],
),
False,
),
],
)
def test___eq___search_spaces(a: SearchSpace, b: SearchSpace, equal: bool) -> None:
assert (a == b) is equal
assert (a != b) is (not equal)
assert (a == a) and (b == b)
def test_linear_constraints_residual() -> None:
points = tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]])
lc = LinearConstraint(
A=tf.constant([[-1.0, 1.0], [1.0, 0.0]]),
lb=tf.constant([-0.4, 0.5]),
ub=tf.constant([-0.2, 0.9]),
)
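    # residual layout per point: (A @ x - lb) followed by (ub - A @ x), flattened into a single row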
got = lc.residual(points)
expected = tf.constant([[1.8, -1.5, -1.6, 1.9], [2.0, -1.5, -1.8, 1.9], [0.8, -0.5, -0.6, 0.9]])
npt.assert_allclose(expected, got)
def test_nonlinear_constraints_residual() -> None:
points = tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]])
nlc = NonlinearConstraint(
lambda x: tf.expand_dims(x[..., 0] - tf.math.sin(x[..., 1]), -1), -1.4, 1.9
)
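    # residual layout per point: (f(x) - lb) followed by (ub - f(x)) for the scalar constraint function f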
got = nlc.residual(points)
expected = tf.constant(
[[0.01058163, 3.28941832], [-0.1646425, 3.46464245], [1.01058163, 2.28941832]]
)
npt.assert_allclose(expected, got, atol=1e-7)
@pytest.mark.parametrize(
"constraints, points",
[
(
[
LinearConstraint(
A=tf.constant([[-1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]),
lb=tf.constant([-0.4, 0.15, 0.2]),
ub=tf.constant([0.6, 0.9, 0.9]),
),
NonlinearConstraint(_nlc_func, tf.constant(-1.0), tf.constant(0.0)),
LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2))),
],
tf.constant([[0.820, 0.057], [0.3, 0.4], [0.582, 0.447], [0.15, 0.75]]),
),
],
)
def test_box_constraints_residuals_and_feasibility(
constraints: Sequence[Constraint], points: tf.Tensor
) -> None:
space = Box(tf.constant([0.0, 0.0]), tf.constant([1.0, 1.0]), constraints)
got = space.constraints_residuals(points)
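    # 12 residuals per point: 3 + 3 for the first linear constraint, 1 + 1 for the nonlinear constraint, 2 + 2 for the identity-matrix constraint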
expected = tf.constant(
[
[
-0.363,
0.66999996,
-0.143,
1.363,
0.07999998,
0.843,
1.7630308,
-0.7630308,
0.82,
0.057,
0.18,
0.943,
],
[
0.5,
0.15,
0.2,
0.5,
0.59999996,
0.49999997,
0.9105817,
0.08941832,
0.3,
0.4,
0.7,
0.6,
],
[
0.265,
0.432,
0.247,
0.735,
0.31799996,
0.45299998,
1.1497378,
-0.14973778,
0.582,
0.447,
0.41799998,
0.553,
],
[
1.0,
0.0,
0.55,
0.0,
0.75,
0.14999998,
0.46836126,
0.53163874,
0.15,
0.75,
0.85,
0.25,
],
]
)
npt.assert_array_equal(expected, got)
npt.assert_array_equal(tf.constant([False, True, False, True]), space.is_feasible(points))
def test_box_discretize_raises_if_has_constraints() -> None:
space = Box(
tf.zeros((2)),
tf.ones((2)),
[LinearConstraint(A=tf.eye(2), lb=tf.zeros((2)), ub=tf.ones((2)))],
)
with pytest.raises(NotImplementedError):
_ = space.discretize(2)
def test_nonlinear_constraints_multioutput_raises() -> None:
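    # constraint functions must be scalar-valued (a single output column); residual() rejects multi-output functions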
points = tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]])
nlc = NonlinearConstraint(
lambda x: tf.broadcast_to(tf.expand_dims(x[..., 0] - x[..., 1], -1), (x.shape[0], 2)),
-1.4,
1.9,
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
nlc.residual(points)
| 45,771 | 34.843383 | 100 | py |
trieste-develop | trieste-develop/tests/unit/test_ask_tell_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Mapping, Optional
import pytest
import tensorflow as tf
from tests.util.misc import FixedAcquisitionRule, assert_datasets_allclose, mk_dataset
from tests.util.models.gpflow.models import GaussianProcess, PseudoTrainableProbModel, rbf
from trieste.acquisition.rule import AcquisitionRule
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.bayesian_optimizer import OptimizationResult, Record
from trieste.data import Dataset
from trieste.models.interfaces import ProbabilisticModel, TrainableProbabilisticModel
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import State, Tag, TensorType
# tags
TAG1: Tag = "1"
TAG2: Tag = "2"
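# minimal trainable model stub: linear mean function 2 * x, an RBF kernel, and a counter recording how many times optimize() was called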
class LinearWithUnitVariance(GaussianProcess, PseudoTrainableProbModel):
def __init__(self) -> None:
super().__init__([lambda x: 2 * x], [rbf()])
self._optimize_count = 0
def optimize(self, dataset: Dataset) -> None:
self._optimize_count += 1
@property
def optimize_count(self) -> int:
return self._optimize_count
@pytest.fixture
def search_space() -> Box:
return Box([-1], [1])
@pytest.fixture
def init_dataset() -> Dataset:
return mk_dataset([[0.0]], [[0.0]])
@pytest.fixture
def acquisition_rule() -> AcquisitionRule[TensorType, Box, ProbabilisticModel]:
return FixedAcquisitionRule([[0.0]])
@pytest.fixture
def model() -> TrainableProbabilisticModel:
return LinearWithUnitVariance()
def test_ask_tell_optimizer_suggests_new_point(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
new_point = ask_tell.ask()
assert len(new_point) == 1
def test_ask_tell_optimizer_with_default_acquisition_suggests_new_point(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model)
new_point = ask_tell.ask()
assert len(new_point) == 1
@pytest.mark.parametrize("copy", [True, False])
def test_ask_tell_optimizer_returns_complete_state(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
copy: bool,
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
state_record: Record[None] = ask_tell.to_record(copy=copy)
assert_datasets_allclose(state_record.dataset, init_dataset)
assert isinstance(state_record.model, type(model))
assert state_record.acquisition_state is None
@pytest.mark.parametrize("copy", [True, False])
def test_ask_tell_optimizer_loads_from_state(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
copy: bool,
) -> None:
old_state: Record[None] = Record({OBJECTIVE: init_dataset}, {OBJECTIVE: model}, None)
ask_tell = AskTellOptimizer.from_record(old_state, search_space, acquisition_rule)
new_state: Record[None] = ask_tell.to_record(copy=copy)
assert_datasets_allclose(old_state.dataset, new_state.dataset)
assert isinstance(new_state.model, type(old_state.model))
@pytest.mark.parametrize("copy", [True, False])
def test_ask_tell_optimizer_returns_optimization_result(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
copy: bool,
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
result: OptimizationResult[None] = ask_tell.to_result(copy=copy)
assert_datasets_allclose(result.try_get_final_dataset(), init_dataset)
assert isinstance(result.try_get_final_model(), type(model))
def test_ask_tell_optimizer_updates_state_with_new_data(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
new_data = mk_dataset([[1.0]], [[1.0]])
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
ask_tell.tell(new_data)
state_record: Record[None] = ask_tell.to_record()
assert_datasets_allclose(state_record.dataset, init_dataset + new_data)
@pytest.mark.parametrize("copy", [True, False])
def test_ask_tell_optimizer_copies_state(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
copy: bool,
) -> None:
new_data = mk_dataset([[1.0]], [[1.0]])
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
state_start: Record[None] = ask_tell.to_record(copy=copy)
ask_tell.tell(new_data)
state_end: Record[None] = ask_tell.to_record(copy=copy)
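    # copy=True takes a snapshot of the data and model; copy=False returns live references that reflect the later tell()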
assert_datasets_allclose(state_start.dataset, init_dataset if copy else init_dataset + new_data)
assert_datasets_allclose(state_end.dataset, init_dataset + new_data)
assert state_start.model is not model if copy else state_start.model is model
def test_ask_tell_optimizer_datasets_property(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
assert_datasets_allclose(ask_tell.datasets[OBJECTIVE], init_dataset)
assert_datasets_allclose(ask_tell.dataset, init_dataset)
def test_ask_tell_optimizer_models_property(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
assert ask_tell.models[OBJECTIVE] is model
assert ask_tell.model is model
def test_ask_tell_optimizer_models_setter(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
model2 = LinearWithUnitVariance()
ask_tell.models = {OBJECTIVE: model2}
assert ask_tell.models[OBJECTIVE] is model2 is not model
def test_ask_tell_optimizer_models_setter_errors(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
with pytest.raises(ValueError):
ask_tell.models = {}
with pytest.raises(ValueError):
ask_tell.models = {OBJECTIVE: LinearWithUnitVariance(), "X": LinearWithUnitVariance()}
with pytest.raises(ValueError):
ask_tell.models = {"CONSTRAINT": LinearWithUnitVariance()}
def test_ask_tell_optimizer_model_setter(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
model2 = LinearWithUnitVariance()
ask_tell.model = model2
assert ask_tell.models[OBJECTIVE] is model2 is not model
def test_ask_tell_optimizer_model_setter_errors(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
one_model = AskTellOptimizer(search_space, {"X": init_dataset}, {"X": model}, acquisition_rule)
with pytest.raises(ValueError):
one_model.model = model
two_models = AskTellOptimizer(
search_space,
{OBJECTIVE: init_dataset, "X": init_dataset},
{OBJECTIVE: model, "X": model},
acquisition_rule,
)
with pytest.raises(ValueError):
two_models.model = model
def test_ask_tell_optimizer_trains_model(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
new_data = mk_dataset([[1.0]], [[1.0]])
ask_tell = AskTellOptimizer(
search_space, init_dataset, model, acquisition_rule, fit_model=False
)
ask_tell.tell(new_data)
state_record: Record[None] = ask_tell.to_record()
assert state_record.model.optimize_count == 1 # type: ignore
@pytest.mark.parametrize("fit_initial_model", [True, False])
def test_ask_tell_optimizer_optimizes_initial_model(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
fit_initial_model: bool,
) -> None:
ask_tell = AskTellOptimizer(
search_space, init_dataset, model, acquisition_rule, fit_model=fit_initial_model
)
state_record: Record[None] = ask_tell.to_record()
if fit_initial_model:
assert state_record.model.optimize_count == 1 # type: ignore
else:
assert state_record.model.optimize_count == 0 # type: ignore
def test_ask_tell_optimizer_from_state_does_not_train_model(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
old_state: Record[None] = Record({OBJECTIVE: init_dataset}, {OBJECTIVE: model}, None)
ask_tell = AskTellOptimizer.from_record(old_state, search_space, acquisition_rule)
state_record: Record[None] = ask_tell.to_record()
assert state_record.model.optimize_count == 0 # type: ignore
@pytest.mark.parametrize(
"starting_state, expected_state",
[(None, 1), (0, 1), (3, 4)],
)
def test_ask_tell_optimizer_uses_specified_acquisition_state(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
starting_state: int | None,
expected_state: int,
) -> None:
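    # a minimal stateful acquisition rule: its state is an integer counter, treated as 0 when None and incremented on every acquisition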
class Rule(AcquisitionRule[State[Optional[int], TensorType], Box, ProbabilisticModel]):
def __init__(self) -> None:
self.states_received: list[int | None] = []
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> State[int | None, TensorType]:
def go(state: int | None) -> tuple[int | None, TensorType]:
self.states_received.append(state)
if state is None:
state = 0
return state + 1, tf.constant([[0.0]], tf.float64)
return go
rule = Rule()
ask_tell = AskTellOptimizer(
search_space, init_dataset, model, rule, acquisition_state=starting_state
)
_ = ask_tell.ask()
state_record: Record[State[int, TensorType]] = ask_tell.to_record()
# mypy cannot see that this is in fact int
assert state_record.acquisition_state == expected_state # type: ignore
assert ask_tell.acquisition_state == expected_state
def test_ask_tell_optimizer_does_not_accept_empty_datasets_or_models(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
with pytest.raises(ValueError):
AskTellOptimizer(search_space, {}, model, acquisition_rule) # type: ignore
with pytest.raises(ValueError):
AskTellOptimizer(search_space, init_dataset, {}, acquisition_rule) # type: ignore
def test_ask_tell_optimizer_validates_keys(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
dataset_with_key_1 = {TAG1: init_dataset}
model_with_key_2 = {TAG2: model}
with pytest.raises(ValueError):
AskTellOptimizer(search_space, dataset_with_key_1, model_with_key_2, acquisition_rule)
def test_ask_tell_optimizer_tell_validates_keys(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
dataset_with_key_1 = {TAG1: init_dataset}
model_with_key_1 = {TAG1: model}
new_data_with_key_2 = {TAG2: mk_dataset([[1.0]], [[1.0]])}
ask_tell = AskTellOptimizer(
search_space, dataset_with_key_1, model_with_key_1, acquisition_rule
)
with pytest.raises(ValueError):
ask_tell.tell(new_data_with_key_2)
def test_ask_tell_optimizer_default_acquisition_requires_objective_tag(
search_space: Box,
init_dataset: Dataset,
model: TrainableProbabilisticModel,
) -> None:
wrong_tag: Tag = f"{OBJECTIVE}_WRONG"
wrong_datasets = {wrong_tag: init_dataset}
wrong_models = {wrong_tag: model}
with pytest.raises(ValueError):
AskTellOptimizer(search_space, wrong_datasets, wrong_models)
def test_ask_tell_optimizer_for_uncopyable_model(
search_space: Box,
init_dataset: Dataset,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
) -> None:
class _UncopyableModel(LinearWithUnitVariance):
def __deepcopy__(self, memo: dict[int, object]) -> _UncopyableModel:
raise MemoryError
model = _UncopyableModel()
ask_tell = AskTellOptimizer(search_space, init_dataset, model, acquisition_rule)
with pytest.raises(NotImplementedError):
ask_tell.to_result()
assert ask_tell.to_result(copy=False).final_result.is_ok
ask_tell.tell(mk_dataset([[1.0]], [[1.0]]))
with pytest.raises(NotImplementedError):
ask_tell.to_result()
assert ask_tell.to_result(copy=False).final_result.is_ok
| 15,024 | 33.94186 | 100 | py |
trieste-develop | trieste-develop/tests/unit/test_observer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tests.util.misc import ShapeLike, assert_datasets_allclose
from trieste.data import Dataset
from trieste.observer import filter_finite, map_is_finite
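# filter_finite drops query points whose observations are not finite; map_is_finite keeps every query point and maps observations to a 0/1 finiteness indicator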
def _sum_with_nan_at_origin(t: tf.Tensor) -> tf.Tensor:
"""
Example:
>>> _sum_with_nan_at_origin(tf.constant([[0.0, 0.0], [0.1, 0.5]])).numpy()
array([[nan],
[0.6]], dtype=float32)
:param t: A tensor of N two-dimensional points.
:return: A tensor of N one-dimensional points. For each of the N points, if the point in ``t``
        is the origin ``[0.0, 0.0]``, the result is ``np.nan``. Otherwise, it is the sum across dimensions
of the point in ``t``.
"""
is_at_origin = tf.reduce_all(t == [[0.0, 0.0]], axis=-1, keepdims=True)
sums = tf.reduce_sum(t, axis=-1, keepdims=True)
return tf.where(is_at_origin, [[np.nan]], sums)
@pytest.mark.parametrize(
"query_points, expected",
[
( # one failure point
tf.constant([[-1.0, 0.0], [1.0, 0.0], [0.0, 2.0], [0.0, 0.0], [1.0, 3.0]]),
Dataset(
tf.constant([[-1.0, 0.0], [1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]),
tf.constant([[-1.0], [1.0], [2.0], [4.0]]),
),
),
( # no failure points
tf.constant([[-1.0, 0.0], [1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]),
Dataset(
tf.constant([[-1.0, 0.0], [1.0, 0.0], [0.0, 2.0], [1.0, 3.0]]),
tf.constant([[-1.0], [1.0], [2.0], [4.0]]),
),
),
( # only failure points
tf.constant([[0.0, 0.0]]),
Dataset(tf.zeros([0, 2]), tf.zeros([0, 1])),
),
(tf.zeros([0, 2]), Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))), # empty data
],
)
def test_filter_finite(query_points: tf.Tensor, expected: Dataset) -> None:
observations = _sum_with_nan_at_origin(query_points)
assert_datasets_allclose(filter_finite(query_points, observations), expected)
@pytest.mark.parametrize(
"qp_shape, obs_shape",
[
([3, 4], [3, 2]), # observations not N x 1
([3, 4], [4, 1]), # different leading dims
([3], [3, 1]), # query_points missing a dimension
([3, 4, 2], [3, 1]), # query_points have too many dimensions
],
)
def test_filter_finite_raises_for_invalid_shapes(qp_shape: ShapeLike, obs_shape: ShapeLike) -> None:
with pytest.raises(ValueError):
filter_finite(tf.ones(qp_shape), tf.ones(obs_shape))
def test_map_is_finite() -> None:
query_points = tf.constant([[-1.0, 0.0], [1.0, 0.0], [0.0, 2.0], [0.0, 0.0], [1.0, 3.0]])
observations = _sum_with_nan_at_origin(query_points)
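    # the observation at the origin is NaN, so it maps to 0; every finite observation maps to 1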
expected = Dataset(query_points, tf.constant([[1], [1], [1], [0], [1]], tf.uint8))
assert_datasets_allclose(map_is_finite(query_points, observations), expected)
def test_map_is_finite_with_empty_data() -> None:
query_points, observations = tf.zeros([0, 2]), tf.zeros([0, 1])
expected = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1], tf.uint8))
assert_datasets_allclose(map_is_finite(query_points, observations), expected)
| 3,726 | 38.231579 | 100 | py |