response | instruction
---|---|
Creates a constrained GP. | def _create_constrained_gp(features: np.ndarray, labels: np.ndarray):
"""Creates a constrained GP."""
# This logging is too chatty because paramz transformations do not implement
# log jacobians. Silence it.
logging.logging.getLogger('paramz.transformations').setLevel(
logging.logging.CRITICAL)
class LogGaussian:
"""Multi-variate version of Log-Gaussian.
GPy surprisingly doesn't have this. The expected APIs of lnpdf and lnpdf_grad
are not precisely defined, so this handwaves a lot of stuff based on how
MultiVariateGaussian is implemented.
"""
domain = 'positive'
def __init__(self, mu, sigma):
self.mu = mu
self.sigma = sigma
self.inv, _, self.hld, _ = linalg.pdinv(self.sigma)
self.sigma2 = np.square(self.sigma)
self.constant = -0.5 * (self.mu.size * np.log(2 * np.pi) + self.hld)
def lnpdf(self, x):
x = np.array(x).flatten()
d = np.log(x) - self.mu
# Constant is dropped. Exact value doesn't really matter. Hopefully.
return -0.5 * np.dot(d.T, np.dot(self.inv, d))
def lnpdf_grad(self, x):
x = np.array(x).flatten()
d = np.log(x) - self.mu
return -np.dot(self.inv, d)
def rvs(self, n):
return np.exp(
np.random.randn(int(n), self.sigma.shape[0]) * self.sigma + self.mu)
# Use heavy tailed priors, but start with small values.
kernel = kern.Matern52(features.shape[1], variance=.04, ARD=True)
kernel.unconstrain()
loggaussian = LogGaussian(
np.zeros(features.shape[1:]),
sigma=np.diag(np.ones(features.shape[1:]) * 4.6))
kernel.lengthscale.set_prior(loggaussian)
kernel.lengthscale.constrain_bounded(1e-2, 1e2)
kernel.variance.set_prior(priors.LogGaussian(-3.2, 4.6))
kernel.variance.constrain_bounded(1e-3, 1e1)
gpy_model = models.GPRegression(features, labels, kernel, noise_var=0.0039)
gpy_model.likelihood.unconstrain()
gpy_model.likelihood.variance.set_prior(priors.LogGaussian(-5.5, sigma=4.6))
gpy_model.likelihood.variance.constrain_bounded(1e-10, 1.)
gpy_model.optimize_restarts(20, robust=True, optimizer='lbfgsb')
logging.info('After train: %s, %s', gpy_model, gpy_model.kern.lengthscale)
return gpy_model |
No-op.
Marks functions that can be easily overridden for experimentation.
Args:
fun: The function to mark.
Returns:
fun: The unmodified input function. | def _experimental_override_allowed(fun):
"""No-op.
Marks functions that can be easily overridden for experimentation.
Args:
fun: The function to mark.
Returns:
fun: The unmodified input function.
"""
return fun |
Sets up a GP designer and outputs completed studies for `f`.
Args:
f: 1D objective to be optimized, i.e. f(x), where x is a scalar in [-5., 5.)
num_trials: Number of mock "evaluated" trials to return.
Returns:
A GP designer set up for the problem of optimizing the objective, without any
data updated.
Evaluated trials against `f`. | def _setup_lambda_search(
f: Callable[[float], float], num_trials: int = 100
) -> tuple[gp_bandit.VizierGPBandit, list[vz.Trial], vz.ProblemStatement]:
"""Sets up a GP designer and outputs completed studies for `f`.
Args:
f: 1D objective to be optimized, i.e. f(x), where x is a scalar in [-5., 5.)
num_trials: Number of mock "evaluated" trials to return.
Returns:
A GP designer set up for the problem of optimizing the objective, without any
data updated.
Evaluated trials against `f`.
"""
assert (
num_trials > 0
), f'Must provide a positive number of trials. Got {num_trials}.'
search_space = vz.SearchSpace()
search_space.root.add_float_param('x0', -5.0, 5.0)
problem = vz.ProblemStatement(
search_space=search_space,
metric_information=vz.MetricsConfig(
metrics=[
vz.MetricInformation('obj', goal=vz.ObjectiveMetricGoal.MAXIMIZE),
]
),
)
suggestions = quasi_random.QuasiRandomDesigner(
problem.search_space, seed=1
).suggest(num_trials)
obs_trials = []
for idx, suggestion in enumerate(suggestions):
trial = suggestion.to_trial(idx)
x = suggestions[idx].parameters['x0'].value
trial.complete(vz.Measurement(metrics={'obj': f(x)}))
obs_trials.append(trial)
gp_designer = gp_bandit.VizierGPBandit(problem, ard_optimizer=ard_optimizer)
return gp_designer, obs_trials, problem |
Evaluate the designer's accuracy on the test set.
Args:
designer: The GP bandit designer to predict from.
test_trials: The trials of the test set
y_test: The results of the test set
Returns:
The MSE of `designer` on `test_trials` and `y_test` | def _compute_mse(
designer: gp_bandit.VizierGPBandit,
test_trials: list[vz.Trial],
y_test: list[float],
) -> float:
"""Evaluate the designer's accuracy on the test set.
Args:
designer: The GP bandit designer to predict from.
test_trials: The trials of the test set
y_test: The results of the test set
Returns:
The MSE of `designer` on `test_trials` and `y_test`
"""
preds = designer.predict(test_trials)
return np.sum(np.square(preds.mean - y_test)) |
Returns True iff there are newer completed trials than active trials.
Args:
completed_trials: Completed trials.
active_trials: Active trials.
Returns:
True if `completed_trials` is non-empty and:
- `active_trials` is empty, or
- The latest `completion_time` among `completed_trials` is
later than the latest `creation_time` among `active_trials`.
False: otherwise. | def _has_new_completed_trials(
completed_trials: Sequence[vz.Trial], active_trials: Sequence[vz.Trial]
) -> bool:
"""Returns True iff there are newer completed trials than active trials.
Args:
completed_trials: Completed trials.
active_trials: Active trials.
Returns:
True if `completed_trials` is non-empty and:
- `active_trials` is empty, or
- The latest `completion_time` among `completed_trials` is
later than the latest `creation_time` among `active_trials`.
False: otherwise.
"""
if not completed_trials:
return False
if not active_trials:
return True
completed_completion_times = [t.completion_time for t in completed_trials]
active_creation_times = [t.creation_time for t in active_trials]
if not all(completed_completion_times):
raise ValueError('All completed trials must have completion times.')
if not all(active_creation_times):
raise ValueError('All active trials must have creation times.')
return max(completed_completion_times) > max(active_creation_times) |
Computes a threshold on UCB values.
A promising evaluation point has UCB value no less than the threshold
computed here. The threshold is the predicted mean of the feature array
with the maximum UCB value among the points `gprm.index_points`.
Args:
gprm: A GP regression model for a set of predictive index points.
is_missing: A 1-d boolean array indicating whether the corresponding
predictive index points are missing.
ucb_coefficient: The UCB coefficient.
Returns:
The predicted mean of the feature array with the maximum UCB among `gprm.index_points`. | def _compute_ucb_threshold(
gprm: tfd.Distribution,
is_missing: jt.Bool[jt.Array, ''],
ucb_coefficient: jt.Float[jt.Array, ''],
) -> jax.Array:
"""Computes a threshold on UCB values.
A promising evaluation point has UCB value no less than the threshold
computed here. The threshold is the predicted mean of the feature array
with the maximum UCB value among the points `gprm.index_points`.
Args:
gprm: A GP regression model for a set of predictive index points.
is_missing: A 1-d boolean array indicating whether the corresponding
predictive index points are missing.
ucb_coefficient: The UCB coefficient.
Returns:
The predicted mean of the feature array with the maximum UCB among `gprm.index_points`.
"""
pred_mean = gprm.mean()
ucb_values = jnp.where(
is_missing, -jnp.inf, pred_mean + ucb_coefficient * gprm.stddev()
)
return pred_mean[jnp.argmax(ucb_values)] |
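A minimal numpy sketch of the thresholding rule above (illustrative values only; the real implementation operates on a TFP distribution): the threshold is the predicted mean at the index point with the largest UCB, with missing points masked out.
import numpy as np

pred_mean = np.array([0.1, 0.5, 0.3])
pred_stddev = np.array([0.2, 0.01, 0.4])
is_missing = np.array([False, False, True])
ucb_coefficient = 1.8
# UCB values are [0.46, 0.518, -inf]; the argmax is index 1, so the
# threshold is pred_mean[1] == 0.5.
ucb_values = np.where(is_missing, -np.inf, pred_mean + ucb_coefficient * pred_stddev)
threshold = pred_mean[np.argmax(ucb_values)]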
Applies the trust region to acquisition function values.
Args:
tr: Trust region.
xs: Predictive index points.
acq_values: Acquisition function values at predictive index points.
Returns:
Acquisition function values with trust region applied. | def _apply_trust_region(
tr: acquisitions.TrustRegion, xs: types.ModelInput, acq_values: jax.Array
) -> jax.Array:
"""Applies the trust region to acquisition function values.
Args:
tr: Trust region.
xs: Predictive index points.
acq_values: Acquisition function values at predictive index points.
Returns:
Acquisition function values with trust region applied.
"""
distance = tr.min_linf_distance(xs)
# Due to output normalization, acquisition values can't be as low as -1e12.
# We use a bad value that decreases with the distance to the trust region so
# that the acquisition optimizer can follow the gradient and escape untrusted regions.
return jnp.where(
(distance < tr.trust_radius) | (tr.trust_radius > 0.5),
acq_values,
-1e12 - distance,
) |
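An illustrative numpy version of the masking rule above (hypothetical values): points outside the trust region get a large negative value that keeps decreasing with distance, so a gradient-following optimizer is pulled back toward the trusted region; once the radius exceeds 0.5 the mask is disabled.
import numpy as np

acq_values = np.array([1.2, 0.7, 0.9])
distance = np.array([0.05, 0.4, 0.1])  # L-inf distances to the trust region.
trust_radius = 0.2
masked = np.where(
    (distance < trust_radius) | (trust_radius > 0.5),
    acq_values,
    -1e12 - distance,
)
# masked == [1.2, -1e12 - 0.4, 0.9]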
Gets the shapes of continuous/categorical features for logging. | def _get_features_shape(
features: types.ModelInput,
) -> types.ContinuousAndCategorical:
"""Gets the shapes of continuous/categorical features for logging."""
return types.ContinuousAndCategorical(
features.continuous.shape,
features.categorical.shape,
) |
Outputs all possible binary vectors from {-1, 1}^{dim} where only positions from `indices` are changed. | def _binary_subset_enumeration(
dim: int, indices: Sequence[int], default_value: float = 1.0
) -> np.ndarray:
"""Outputs all possible binary vectors from {-1, 1}^{dim} where only positions from `indices` are changed."""
output = default_value * np.ones(
shape=(2 ** len(indices), dim), dtype=np.float32
)
for i, binary in enumerate(
itertools.product([-1.0, 1.0], repeat=len(indices))
):
output[i, indices] = binary
return output |
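A small usage sketch (assuming `_binary_subset_enumeration` above is in scope): for dim=3 and indices=[0, 2], positions 0 and 2 take every sign combination while position 1 keeps the default value 1.0.
out = _binary_subset_enumeration(dim=3, indices=[0, 2])
# out == [[-1.,  1., -1.],
#         [-1.,  1.,  1.],
#         [ 1.,  1., -1.],
#         [ 1.,  1.,  1.]]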
Factory for an Ensemble of Gaussian weighted scalarized Designers. | def create_gaussian_scalarizing_designer(
problem_statement: vz.ProblemStatement,
designer_factory: vza.DesignerFactory[vza.Designer],
scalarization_factory: scalarization.ScalarizationFromWeights,
num_ensemble: int,
*,
seed: Optional[int] = None,
) -> vza.Designer:
"""Factory for an Ensemble of Gaussian weighted scalarized Designers."""
objectives = problem_statement.metric_information.of_type(
vz.MetricType.OBJECTIVE
)
if len(objectives) <= 1:
raise ValueError(
'Problem should be multi-objective to apply scalarization '
f'ensembling. Got objectives: {objectives}'
)
key = jax_random.PRNGKey(seed or random.getrandbits(32))
weights = abs(
jax_random.normal(key=key, shape=(num_ensemble, len(objectives)))
)
weights /= jnp.linalg.norm(weights, axis=1)[..., jnp.newaxis]
ensemble_dict = {}
for weight in weights:
ensemble_dict[f'scalarized_weight: {weight}'] = ScalarizingDesigner(
problem_statement=problem_statement,
designer_factory=designer_factory,
scalarizer=scalarization_factory(weight),
seed=seed,
)
return ensemble_designer.EnsembleDesigner(ensemble_dict) |
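A numpy sketch of how the ensemble weights above are drawn (the code above uses jax.random; this is an assumed-equivalent illustration): absolute values of standard normals, normalized so each ensemble member has a unit-L2-norm weight vector.
import numpy as np

rng = np.random.default_rng(0)
num_ensemble, num_objectives = 4, 2
weights = np.abs(rng.normal(size=(num_ensemble, num_objectives)))
weights /= np.linalg.norm(weights, axis=1, keepdims=True)
# Each row is a non-negative weight vector with unit L2 norm.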
Iterates over the search space parameters to find parameter by name. | def _get_parameter_config(search_space: vz.SearchSpace,
param_name: str) -> Optional[vz.ParameterConfig]:
"""Iterates over the search space parameters to find parameter by name."""
for param_config in search_space.parameters:
if param_config.name == param_name:
return param_config |
Tests that the pool size doesn't change when adding an infeasible fly. | def test_pool_size_with_infeasible(self):
"""Tests that the pool size doesn't change when adding an infeasible fly."""
firefly_pool = testing.create_fake_populated_firefly_pool(
x_values=[1, 2, 5, -1], obj_values=[2, 10, -2, 8], capacity=5
)
infeasible_firefly_id = firefly_pool.generate_new_fly_id()
infeasible_trial = testing.create_fake_trial(
parent_fly_id=infeasible_firefly_id, x_value=-1, obj_value=None
)
self.assertEqual(firefly_pool.size, 4)
firefly_pool.create_or_update_fly(
infeasible_trial, parent_fly_id=infeasible_firefly_id
)
self.assertEqual(firefly_pool.capacity, 5)
# Test that adding the infeasible trial doesn't change the pool size.
self.assertEqual(firefly_pool.size, 4)
self.assertEqual(firefly_pool._infeasible_count, 1) |
Serialize parts of the FireflyPool. | def partially_serialize_firefly_pool(firefly_pool: FireflyPool) -> str:
"""Serialize parts of the FireflyPool."""
return json.dumps(firefly_pool, cls=PartialFireflyPoolEncoder) |
Fully restore the FireflyPool. | def restore_firefly_pool(utils: EagleStrategyUtils, obj: str) -> FireflyPool:
"""Fully restore the FireflyPool."""
return FireflyPoolDecoder(utils).decode(obj) |
Serialize Numpy Random Generator. | def serialize_rng(rng: np.random.Generator) -> str:
"""Serialize Numpy Random Generator."""
return json.dumps(rng.bit_generator.state) |
Restore Numpy Random Generator. | def restore_rng(obj: str) -> np.random.Generator:
"""Restore Numpy Random Generator."""
rng = np.random.default_rng()
rng.bit_generator.state = json.loads(obj)
return rng |
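A minimal round-trip sketch for the two helpers above: the bit generator's state dict is JSON-serializable, so restoring it reproduces the same random stream.
import json
import numpy as np

rng = np.random.default_rng(42)
state_json = json.dumps(rng.bit_generator.state)

restored = np.random.default_rng()
restored.bit_generator.state = json.loads(state_json)
assert rng.uniform() == restored.uniform()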
Create a fake completed trial ('obj_value' = None means infeasible trial). | def create_fake_trial(
parent_fly_id: int,
x_value: float,
obj_value: Optional[float],
) -> vz.Trial:
"""Create a fake completed trial ('obj_value' = None means infeasible trial)."""
trial = vz.Trial()
measurement = vz.Measurement(
metrics={
eagle_strategy_utils.OBJECTIVE_NAME: vz.Metric(
value=obj_value if obj_value is not None else float('inf')
)
}
)
trial.parameters['x'] = x_value
trial.complete(
measurement,
inplace=True,
infeasibility_reason='infeasible' if obj_value is None else None,
)
trial.metadata.ns('eagle')['parent_fly_id'] = str(parent_fly_id)
return trial |
Create a fake problem statement. | def create_fake_problem_statement() -> vz.ProblemStatement:
"""Create a fake problem statement."""
problem = vz.ProblemStatement()
problem.search_space.root.add_float_param('x', 0.0, 10.0)
problem.metric_information.append(
vz.MetricInformation(
name=eagle_strategy_utils.OBJECTIVE_NAME,
goal=vz.ObjectiveMetricGoal.MAXIMIZE,
)
)
return problem |
Create a fake firefly with a fake completed trial. | def create_fake_fly(
parent_fly_id: int,
x_value: float,
obj_value: Optional[float],
) -> Firefly:
"""Create a fake firefly with a fake completed trial."""
trial = create_fake_trial(parent_fly_id, x_value, obj_value)
return Firefly(id_=parent_fly_id, perturbation=1.0, generation=1, trial=trial) |
Create a fake empty Firefly pool. | def create_fake_empty_firefly_pool(capacity: int = 10) -> FireflyPool:
"""Create a fake empty Firefly pool."""
problem = create_fake_problem_statement()
# By default incorporating infeasible trials is disabled; setting it manually.
config = FireflyAlgorithmConfig(infeasible_force_factor=0.1)
rng = np.random.default_rng(0)
utils = EagleStrategyUtils(problem_statement=problem, config=config, rng=rng)
return FireflyPool(utils, capacity) |
Create a fake populated Firefly pool with a given capacity. | def create_fake_populated_firefly_pool(
*,
capacity: int,
x_values: Optional[list[float]] = None,
obj_values: Optional[list[Optional[float]]] = None,
parent_fly_ids: Optional[list[int]] = None,
) -> FireflyPool:
"""Create a fake populated Firefly pool with a given capacity."""
firefly_pool = create_fake_empty_firefly_pool(capacity=capacity)
rng = np.random.default_rng(0)
if not x_values:
x_values = [float(x) for x in rng.uniform(low=0, high=10, size=(5,))]
if not obj_values:
obj_values = [
float(o) for o in rng.uniform(low=-1.5, high=1.5, size=(len(x_values),))
]
if not parent_fly_ids:
parent_fly_ids = list(range(len(obj_values)))
if not len(obj_values) == len(x_values) == len(parent_fly_ids):
raise ValueError('Lengths of obj_values, x_values and parent_fly_ids must be equal.')
for parent_fly_id, x_value, obj_value in zip(
parent_fly_ids, x_values, obj_values
):
firefly = create_fake_fly(
parent_fly_id=parent_fly_id, x_value=x_value, obj_value=obj_value
)
# pylint: disable=protected-access
firefly_pool._pool[parent_fly_id] = firefly
# pylint: disable=protected-access
firefly_pool._max_fly_id = capacity
return firefly_pool |
Create a fake empty eagle designer. | def create_fake_empty_eagle_designer() -> EagleStrategyDesiger:
"""Create a fake empty eagle designer."""
problem = create_fake_problem_statement()
return EagleStrategyDesiger(problem_statement=problem) |
Create a fake populated eagle designer. | def create_fake_populated_eagle_designer(
*,
x_values: Optional[list[float]] = None,
obj_values: Optional[list[Optional[float]]] = None,
parent_fly_ids: Optional[list[int]] = None,
) -> EagleStrategyDesiger:
"""Create a fake populated eagle designer."""
problem = create_fake_problem_statement()
eagle_designer = EagleStrategyDesiger(problem_statement=problem)
# pylint: disable=protected-access
pool_capacity = eagle_designer._firefly_pool._capacity
# Override the eagle designer's firefly pool with a populated firefly pool.
eagle_designer._firefly_pool = create_fake_populated_firefly_pool(
capacity=pool_capacity,
x_values=x_values,
obj_values=obj_values,
parent_fly_ids=parent_fly_ids,
)
return eagle_designer |
Returns the maximum values of labels.
A note on "labels" in TFP acquisition functions: TFP acquisition functions
(EI, PI, qEI, qUCB) take the maximum of `"observations"` (labels) over the
rightmost axis, which is assumed to correspond to the number of observations.
`best_labels` has a (singleton) rightmost dimension corresponding to the
number of metrics. The shapes therefore work out correctly, although the
semantics are different.
Args:
labels: Observed labels with padded shape `(num_observations, num_metrics)`.
Returns: Maximum label values for each metric. | def get_best_labels(labels: types.PaddedArray) -> jax.Array:
"""Returns the maximum values of labels.
A note on "labels" in TFP acquisition functions: TFP acquisition functions
(EI, PI, qEI, qUCB) take the maximum of `"observations"` (labels) over the
rightmost axis, which is assumed to correspond to the number of observations.
`best_labels` has a (singleton) rightmost dimension corresponding to the
number of metrics. The shapes therefore work out correctly, although the
semantics are different.
Args:
labels: Observed labels with padded shape `(num_observations, num_metrics)`.
Returns: Maximum label values for each metric.
"""
if jnp.size(labels.padded_array) == 0:
return -np.inf
return jnp.max(labels.replace_fill_value(-np.inf).padded_array, axis=-2) |
Applies the trust region to acquisition values. | def _apply_trust_region(
region: 'TrustRegion',
xs: types.ModelInput,
acquisition: jax.Array,
pred: tfd.Distribution,
aux: chex.ArrayTree,
) -> tuple[jax.Array, chex.ArrayTree]:
"""Applies the trust region to acquisition values."""
distance = region.min_linf_distance(xs)
raw_acquisition = acquisition
acquisition = jnp.where(
((distance <= region.trust_radius) | (region.trust_radius > 0.5)),
acquisition,
-1e12 - distance,
)
aux = aux | {
'mean': pred.mean(),
'stddev': pred.stddev(),
'raw_acquisition': raw_acquisition,
'linf_distance': distance,
'radius': jnp.ones_like(distance) * region.trust_radius,
}
return acquisition, aux |
Builds a ScoringFunctionFactory. | def bayesian_scoring_function_factory(
acquisition_fn_factory: Callable[[types.ModelData], AcquisitionFunction],
) -> ScoringFunctionFactory:
"""Builds a ScoringFunctionFactory."""
def f(
data: types.ModelData,
predictive: Predictive,
use_trust_region: bool = False,
) -> ScoreFunction:
acquisition_fn = acquisition_fn_factory(data)
trust_region = TrustRegion(data.features) if use_trust_region else None
return BayesianScoringFunction(predictive, acquisition_fn, trust_region)
return f |
Gets a GP model coroutine.
Args:
data: The data used to train the GP model.
linear_coef: If non-zero, uses a linear kernel with `linear_coef`
hyperparameter.
Returns:
The model coroutine. | def get_vizier_gp_coroutine(
data: types.ModelData,
*,
linear_coef: float = 0.0,
) -> sp.ModelCoroutine:
"""Gets a GP model coroutine.
Args:
data: The data used to train the GP model.
linear_coef: If non-zero, uses a linear kernel with `linear_coef`
hyperparameter.
Returns:
The model coroutine.
"""
# Construct the multi-task GP.
labels_shape = data.labels.shape
if labels_shape[-1] > 1:
gp_coroutine = multitask_tuned_gp_models.VizierMultitaskGaussianProcess(
_feature_dim=types.ContinuousAndCategorical[int](
data.features.continuous.padded_array.shape[-1],
data.features.categorical.padded_array.shape[-1],
),
_num_tasks=labels_shape[-1],
)
return sp.StochasticProcessModel(gp_coroutine).coroutine
if linear_coef:
return tuned_gp_models.VizierLinearGaussianProcess.build_model(
features=data.features, linear_coef=linear_coef
).coroutine
return tuned_gp_models.VizierGaussianProcess.build_model(
data.features
).coroutine |
Trains a Gaussian Process model.
1. Performs ARD to find the best model parameters.
2. Pre-computes the Cholesky decomposition for the model.
Args:
spec: Spec required to train the GP. See `GPTrainingSpec` for more info.
data: Data on which to train the GP.
Returns:
The trained GP model. | def _train_gp(spec: GPTrainingSpec, data: types.ModelData) -> GPState:
"""Trains a Gaussian Process model.
1. Performs ARD to find the best model parameters.
2. Pre-computes the Cholesky decomposition for the model.
Args:
spec: Spec required to train the GP. See `GPTrainingSpec` for more info.
data: Data on which to train the GP.
Returns:
The trained GP model.
"""
jax.monitoring.record_event(
'/vizier/jax/designers/gp_bandit/train_gp', scope=profiler.current_scope()
)
jax.monitoring.record_event(
'/vizier/jax/train_gp_with_data_shapes',
**{
'num_rows': data.features.categorical.shape[0],
'num_categoricals': data.features.categorical.shape[1],
'num_continuous': data.features.continuous.shape[1],
'num_labels': (
data.labels.shape[1] if data.labels.padded_array.ndim == 2 else 1
),
},
)
model = sp.CoroutineWithData(spec.coroutine, data)
# Optimize the parameters
ard_rngs = jax.random.split(spec.ard_rng, spec.ard_random_restarts + 1)
best_n = spec.ensemble_size or 1
best_params, _ = spec.ard_optimizer(
eqx.filter_jit(eqx.filter_vmap(model.setup))(ard_rngs[1:]),
model.loss_with_aux,
ard_rngs[0],
constraints=model.constraints(),
best_n=best_n,
)
if best_n == 1 and all(x.shape[0] == 1 for x in best_params.values()):
best_params = jax.tree_util.tree_map(
lambda x: jnp.squeeze(x, axis=0), best_params
)
best_models = sp.StochasticProcessWithCoroutine(
coroutine=spec.coroutine, params=best_params
)
# Logging for debugging purposes.
logging.info(
'Best models: %s', eqx.tree_pformat(best_models, short_arrays=False)
)
predictive = sp.UniformEnsemblePredictive(
eqx.filter_jit(best_models.precompute_predictive)(data)
)
return GPState(predictive=predictive, data=data) |
Returns the mean of the predictions from `pred` on `features`.
Workaround while `eqx.filter_jit(pred.predict_with_aux)(features)` is broken
due to a bug in tensorflow probability.
Args:
pred: `Predictive` to predict with.
features: Xs to predict on.
Returns:
Means of the predictions from `pred` on `features`. | def _pred_mean(
pred: acquisitions.Predictive, features: types.ModelInput
) -> types.Array:
"""Returns the mean of the predictions from `pred` on `features`.
Workaround while `eqx.filter_jit(pred.predict_with_aux)(features)` is broken
due to a bug in tensorflow probability.
Args:
pred: `Predictive` to predict with.
features: Xs to predict on.
Returns:
Means of the predictions from `pred` on `features`.
"""
return pred.predict_with_aux(features)[0].mean() |
Trains a `StackedResidualGP`.
Completes the following steps in order:
1. Uses `base_gp` to predict on the `data`
2. Computes the residuals from the above predictions
3. Trains a top-level GP on the above residuals
4. Returns a `StackedResidualGP` combining the base GP and newly-trained
GP.
Args:
base_gp: The predictive to use as the base GP for the `StackedResidualGP`
training.
spec: Training spec for the top level GP.
data: Training data for the top level GP.
Returns:
The trained `StackedResidualGP`. | def train_stacked_residual_gp(
base_gp: GPState,
spec: GPTrainingSpec,
data: types.ModelData,
) -> StackedResidualGP:
"""Trains a `StackedResidualGP`.
Completes the following steps in order:
1. Uses `base_gp` to predict on the `data`
2. Computes the residuals from the above predictions
3. Trains a top-level GP on the above residuals
4. Returns a `StackedResidualGP` combining the base GP and newly-trained
GP.
Args:
base_gp: The predictive to use as the base GP for the `StackedResidualGP`
training.
spec: Training spec for the top level GP.
data: Training data for the top level GP.
Returns:
The trained `StackedResidualGP`.
"""
# Compute the residuals of `data` as predicted by `base_gp`
pred_means = _pred_mean(base_gp, data.features)
has_no_padding = ~(
data.features.continuous.is_missing[0]
| data.features.categorical.is_missing[0]
| data.labels.is_missing[0]
)
# Scope this to non-padded predictions only.
pred_means_no_padding = pred_means[has_no_padding]
residuals = (
data.labels.unpad().reshape(pred_means_no_padding.shape)
- pred_means_no_padding
)
# Train on the re-padded residuals
residual_labels = types.PaddedArray.from_array(
array=residuals,
target_shape=data.labels.shape,
fill_value=data.labels.fill_value,
)
data_with_residuals = types.ModelData(
features=data.features, labels=residual_labels
)
top_gp = _train_gp(spec=spec, data=data_with_residuals)
return StackedResidualGP(
predictive=top_gp.predictive,
data=top_gp.data,
base_gp=base_gp,
) |
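A toy numpy illustration of the residual-stacking idea above (not the GP code itself; the base and top models here are stand-ins): predict with the base model, fit a second model to the residuals, and sum the two predictions.
import numpy as np

x = np.linspace(-1.0, 1.0, 20)
y = 2.0 * x + 0.3 * np.sin(6.0 * x)

base_pred = 2.0 * x                           # Stand-in for the base GP's mean.
residuals = y - base_pred                     # Residuals of the base model.
top_coeffs = np.polyfit(x, residuals, deg=5)  # Stand-in for the top-level GP.
stacked_pred = base_pred + np.polyval(top_coeffs, x)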
Trains a Gaussian Process model.
If `spec` contains multiple elements, each will be used to train a
`StackedResidualGP`, sequentially. The first entry will be used to train the
first GP, and then subsequent GPs will be trained on the residuals from the
previous GP. This process completes in the order that `spec` and `data` are
provided, such that `spec[0]` is the first GP trained and `spec[-1]` is the
last GP trained.
`spec[-1]` and `data[-1]` make up the top-level GP, and `spec[:-1]` and
`data[:-1]` define the priors in the context of transfer learning.
Args:
spec: Specification for how to train a GP model. If multiple specs are
provided, transfer learning will train multiple models and combine into a
single GP.
data: Data on which to train GPs. NOTE: `spec` and `data` must be of the
same shape. Trains a GP on `data[i]` with `spec[i]`.
Returns:
The trained GP model. | def train_gp(
spec: Union[GPTrainingSpec, Iterable[GPTrainingSpec]],
data: Union[types.ModelData, Iterable[types.ModelData]],
) -> GPState:
"""Trains a Gaussian Process model.
If `spec` contains multiple elements, each will be used to train a
`StackedResidualGP`, sequentially. The first entry will be used to train the
first GP, and then subsequent GPs will be trained on the residuals from the
previous GP. This process completes in the order that `spec` and `data` are
provided, such that `spec[0]` is the first GP trained and `spec[-1]` is the
last GP trained.
`spec[-1]` and `data[-1]` make up the top-level GP, and `spec[:-1]` and
`data[:-1]` define the priors in the context of transfer learning.
Args:
spec: Specification for how to train a GP model. If multiple specs are
provided, transfer learning will train multiple models and combine into a
single GP.
data: Data on which to train GPs. NOTE: `spec` and `data` must be of the
same shape. Trains a GP on `data[i]` with `spec[i]`.
Returns:
The trained GP model.
"""
is_singleton_spec = isinstance(spec, GPTrainingSpec)
is_singleton_data = isinstance(data, types.ModelData)
if is_singleton_spec != is_singleton_data:
raise ValueError(
'`train_gp` expected the shapes of `spec` and `data` to be identical.'
f' Instead got `data` {data} but `spec` {spec}.'
)
if is_singleton_spec and is_singleton_data:
return _train_gp(spec=spec, data=data)
if len(spec) != len(data):
raise ValueError(
'`train_gp` expected the shapes of `spec` and `data` to be identical.'
f' Instead got `spec` of length {len(spec)} but `data` of length'
f' {len(data)}. `spec` was {spec} and `data` was {data}.'
)
curr_gp: Optional[GPState] = None
for curr_spec, curr_data in zip(spec, data):
if curr_gp is None:
# We are on the first iteration.
curr_gp = _train_gp(spec=curr_spec, data=curr_data)
else:
# Otherwise, we have a base GP to use - the GP trained on the last
# iteration.
curr_gp = train_stacked_residual_gp(
base_gp=curr_gp,
spec=curr_spec,
data=curr_data,
)
if curr_gp is None:
raise ValueError(
f'Failed to train a GP with provided training spec: {spec} and'
f' data: {data}. `curr_gp` was never updated. This should never happen.'
)
return curr_gp |
Sets up training state for a GP and outputs a test set for `f`.
Args:
f: 1D objective to be optimized, i.e. f(x), where x is a scalar in [-5., 5.)
num_train: Number of training samples to generate.
num_test: Number of testing samples to generate.
linear_coef: If set, uses a linear kernel with coef `linear_coef` for the GP
ensemble_size: Ensembles together `ensemble_size` GPs.
Returns:
A GP training spec.
A generated train set.
A generated test set. | def _setup_lambda_search(
f: Callable[[float], float],
num_train: int = 100,
num_test: int = 100,
linear_coef: float = 0.0,
ensemble_size: int = 1,
) -> tuple[gp_models.GPTrainingSpec, types.ModelData, types.ModelData]:
"""Sets up training state for a GP and outputs an test set for `f`.
Args:
f: 1D objective to be optimized, i.e. f(x), where x is a scalar in [-5., 5.)
num_train: Number of training samples to generate.
num_test: Number of testing samples to generate.
linear_coef: If set, uses a linear kernel with coef `linear_coef` for the GP
ensemble_size: Ensembles together `ensemble_size` GPs.
Returns:
A GP training spec.
A generated train set.
A generated test set.
"""
assert num_train > 0 and num_test > 0, (
f'Must provide a positive number of trials. Got {num_train} training and'
f' {num_test} testing.'
)
search_space = vz.SearchSpace()
search_space.root.add_float_param('x0', -5.0, 5.0)
problem = vz.ProblemStatement(
search_space=search_space,
metric_information=vz.MetricsConfig(
metrics=[
vz.MetricInformation('obj', goal=vz.ObjectiveMetricGoal.MAXIMIZE),
]
),
)
converter = converters.TrialToModelInputConverter.from_problem(problem)
quasi_random_designer = quasi_random.QuasiRandomDesigner(
problem.search_space, seed=1
)
def create_model_data(
num_entries: int,
) -> tuple[types.ModelData, list[vz.Trial]]:
suggestions = quasi_random_designer.suggest(num_entries)
obs_trials: list[vz.Trial] = []
for idx, suggestion in enumerate(suggestions):
trial = suggestion.to_trial(idx)
x = suggestions[idx].parameters['x0'].value
trial.complete(vz.Measurement(metrics={'obj': f(x)}))
obs_trials.append(trial)
model_data = converter.to_xy(obs_trials)
return model_data, obs_trials
train_data, _ = create_model_data(num_entries=num_train)
train_spec = gp_models.GPTrainingSpec(
ard_optimizer=optimizers.default_optimizer(),
ard_rng=jax.random.PRNGKey(0),
coroutine=gp_models.get_vizier_gp_coroutine(
data=train_data, linear_coef=linear_coef
),
ensemble_size=ensemble_size,
ard_random_restarts=optimizers.DEFAULT_RANDOM_RESTARTS,
)
test_data, _ = create_model_data(num_entries=num_test)
return train_spec, train_data, test_data |
Computes the mean-squared error of `predictive` on `test_data`. | def _compute_mse(
predictive: acquisitions.Predictive, test_data: types.ModelData
) -> float:
"""Computes the mean-squared error of `predictive` on `test_data."""
pred_dist, _ = predictive.predict_with_aux(test_data.features)
# We need this reshape to prevent broadcasting (num_samples,) against
# (num_samples, 1), which would yield (num_samples, num_samples) and break
# this calculation.
test_labels_reshaped = np.asarray(test_data.labels.unpad()).reshape(-1)
mse = np.sum(np.square(pred_dist.mean() - test_labels_reshaped))
return mse |
Checks and modifies the shape and values of the labels. | def _validate_labels(labels_arr: types.Array) -> types.Array:
"""Checks and modifies the shape and values of the labels."""
labels_arr = labels_arr.astype(float)
if not (labels_arr.ndim == 2 and labels_arr.shape[-1] == 1):
raise ValueError(
'Labels need to be an array of shape (num_points, 1).'
f'Got shape: {labels_arr.shape}'
)
if np.isposinf(labels_arr).any():
raise ValueError('Infinity metric value is not valid.')
if np.isneginf(labels_arr).any():
labels_arr[np.isneginf(labels_arr)] = np.nan
return labels_arr |
Creates an output warper pipeline.
Args:
half_rank_warp: boolean indicating if half-rank warping to be performed.
log_warp: boolean indicating if log warping to be performed.
infeasible_warp: boolean indicating if infeasible warping to be performed.
Returns:
an instance of OutputWarperPipeline. | def create_default_warper(
*,
half_rank_warp: bool = True,
log_warp: bool = True,
infeasible_warp: bool = True,
) -> OutputWarperPipeline:
"""Creates an output warper pipeline.
Args:
half_rank_warp: boolean indicating if half-rank warping to be performed.
log_warp: boolean indicating if log warping to be performed.
infeasible_warp: boolean indicating if infeasible warping to be performed.
Returns:
an instance of OutputWarperPipeline.
"""
if not half_rank_warp and not log_warp and not infeasible_warp:
raise ValueError(
'At least one of "half_rank_warp", "log_warp" or "infeasible_warp" '
'must be True.'
)
warpers = []
if half_rank_warp:
warpers.append(HalfRankComponent())
if log_warp:
warpers.append(LogWarperComponent())
if infeasible_warp:
warpers.append(InfeasibleWarperComponent())
return OutputWarperPipeline(warpers) |
Creates an output warper pipeline which detects outliers and warps them. | def create_warp_outliers_warper(
*,
warp_outliers: bool = True,
infeasible_warp: bool = True,
transform_gaussian: bool = True,
) -> OutputWarperPipeline:
"""Creates an output warper outline which detects outliers and warps them."""
warpers = []
if warp_outliers:
warpers.append(DetectOutliers())
if infeasible_warp:
warpers.append(InfeasibleWarperComponent())
if transform_gaussian:
warpers.append(TransformToGaussian())
return OutputWarperPipeline(warpers) |
Computes the DOF of a `Predictive`.
This is a maximum of two measures of DOF.
The first represents the DOF associated with a log likelihood computation,
after optimizing the hyperparameters of the kernel, i.e. the
degrees-of-freedom (dof) of a finite linear regression problem.
The second represents the fact we know something about the standard
deviation even when there are more hyperparameters than training data.
Args:
training_data_count: Number of samples used to train the `Predictive`.
num_hyperparameters: Number of hyperparameters in the `Predictive`.
Returns:
The DOF of the predictive. | def _compute_dof(training_data_count: int, num_hyperparameters: int) -> float:
"""Computes the DOF of a `Predictive`.
This is a maximum of two measures of DOF.
The first represents the DOF associated with a log likelihood computation,
after optimizing the hyperparameters of the kernel, i.e. the
degrees-of-freedom (dof) of a finite linear regression problem.
The second represents the fact we know something about the standard
deviation even when there are more hyperparameters than training data.
Args:
training_data_count: Number of samples used to train the `Predictive`.
num_hyperparameters: Number of hyperparameters in the `Predictive`.
Returns:
The DOF of the predictive.
"""
return max(
training_data_count - num_hyperparameters,
training_data_count / (1 + num_hyperparameters),
) |
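A worked example (assuming `_compute_dof` above is in scope): with 5 training points and 8 hyperparameters the linear-regression DOF would be negative, so the fallback 5 / (1 + 8) is returned; with 100 points the first measure dominates.
assert _compute_dof(training_data_count=5, num_hyperparameters=8) == 5 / 9
assert _compute_dof(training_data_count=100, num_hyperparameters=8) == 92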
Combines two predictions from transfer learning.
The means are combined as a simple sum.
The standard deviations are combined using a geometric mean, with a
weighting coefficient `alpha` that sets their relative importance.
See the below code for the exact computation of `alpha`, which is a function
of the ratio of the uncertainty of the base model to the uncertainty of the
top-level prediction.
Args:
top_pred: Prediction from the top model (trained on the base's residuals)
base_pred: Prediction from the base model (trained on the full data)
expected_base_stddev_mismatch: Used for combining a base standard deviation
with the top-level model's standard deviation estimate. Formally, it is
the expected RMS fractional mismatch in standard deviation between a
typical base and a typical top-level model (averaged over the feasible
region). Allowable values are [0, 1] with (0.1, 0.8) being more likely.
Assumes that the value is allowable, due to compatibility with `jax` and
avoiding `jax.checkify`. Unexpected results may occur if value is set
out-of-bounds.
Returns:
The combined distribution, assumed to be Normal, and auxiliary information. | def combine_predictions_with_aux(
top_pred: TransferPredictionState,
base_pred: TransferPredictionState,
*,
expected_base_stddev_mismatch: float = 1.0
) -> tuple[tfd.Distribution, chex.ArrayTree]:
"""Combines two predictions from transfer learning.
The means are combined as a simple sum.
The standard deviations are combined using a geometric mean, with a
weighting coefficient `alpha` that sets their relative importance.
See the below code for the exact computation of `alpha`, which is a function
of the ratio of the uncertainty of the base model to the uncertainty of the
top-level prediction.
Args:
top_pred: Prediction from the top model (trained on the base's residuals)
base_pred: Prediction from the base model (trained on the full data)
expected_base_stddev_mismatch: Used for combining a base standard deviation
with the top-level model's standard deviation estimate. Formally, it is
the expected RMS fractional mismatch in standard deviation between a
typical base and a typical top-level model (averaged over the feasible
region). Allowable values are [0, 1] with (0.1, 0.8) being more likely.
Assumes that the value is allowable, due to compatibility with `jax` and
avoiding `jax.checkify`. Unexpected results may occur if value is set
out-of-bounds.
Returns:
The combined distribution, assumed to be Normal, and auxiliary information.
"""
dof_base = _compute_dof(
training_data_count=base_pred.training_data_count,
num_hyperparameters=base_pred.num_hyperparameters,
)
dof_top = _compute_dof(
training_data_count=top_pred.training_data_count,
num_hyperparameters=top_pred.num_hyperparameters,
)
# `beta_squared` is the ratio of uncertainty of the base to the uncertainty
# in the top-level model. More precisely, it is the
# variance{ log { stddev returned by the base}} /
# variance{ log { stddev returned by the top model}}.
# This is a large number when the top-level stddev is more trustworthy, and
# small when the base stddev is relatively trustworthy.
beta_squared = (dof_top / dof_base) * (
1 + dof_base + (expected_base_stddev_mismatch**2)
)
# Finally, compute the geometric mean weight, `alpha`.
alpha = beta_squared / (1 + beta_squared)
# Combine the means.
comb_mean = top_pred.pred.mean() + base_pred.pred.mean()
# Use `alpha` to combine the stddevs in a weighted geometric mean.
comb_stddev = jnp.power(top_pred.pred.stddev(), alpha) * jnp.power(
base_pred.pred.stddev(), (1 - alpha)
)
prev_aux = {
'base_aux': base_pred.aux,
'top_aux': top_pred.aux,
}
# Entries in `aux` must have the same batch shape as the predictions.
batch_shape = comb_mean.shape[0]
aux = {
'prev_aux': prev_aux,
'mean': comb_mean,
'stddev': comb_stddev,
'alpha': jnp.ones(batch_shape) * alpha,
'expected_base_stddev_mismatch': (
jnp.ones(batch_shape) * expected_base_stddev_mismatch
),
'beta_squared': jnp.ones(batch_shape) * beta_squared,
}
# Assume a multivariate normal distribution with diagonal covariance as output
return tfd.MultivariateNormalDiag(loc=comb_mean, scale_diag=comb_stddev), aux |
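A scalar, dependency-free sketch of the combination math above, with made-up data counts and predictions (illustrative only): the means add, and the stddevs are blended in a geometric mean weighted by `alpha`.
def _dof(n, p):
  return max(n - p, n / (1 + p))

dof_base, dof_top = _dof(200, 10), _dof(20, 10)
mismatch = 0.1
beta_squared = (dof_top / dof_base) * (1 + dof_base + mismatch**2)
alpha = beta_squared / (1 + beta_squared)

top_mean, top_stddev = 0.3, 0.5
base_mean, base_stddev = 1.1, 0.2
comb_mean = top_mean + base_mean                            # Means are summed.
comb_stddev = top_stddev**alpha * base_stddev**(1 - alpha)  # Weighted geometric mean.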
Returns the power transformation with optimal parameterization.
The optimal parameterization makes the transformed data as "normal"-esque
as possible.
Args:
data: 1-D or 2-D array. If 1-D, then the bijector has batch_shape = []. If
2-D, then the bijector has batch shape equal to the last dimension.
method: 'yeo-johnson' or 'box-cox'. Box-Cox can only be used for
positive-only data.
standardize: If True, returns a bijector that applies a power transform and
then normalizes so that the data maps to a zero-mean, unit-stddev normal. 1e-6
is added to the stddev so that division by zero never happens.
Returns:
Bijector that maps data such that it follows a normal distribution.
(standard normal if standardize=True). | def optimal_transformation(
data: np.ndarray,
method: Literal['yeo-johnson', 'box-cox'] = 'yeo-johnson',
*,
standardize: bool = True) -> tfb.AutoCompositeTensorBijector:
"""Returns the power transformation with optimal parameterization.
The optimal parameterization makes the transformed data as "normal"-esque
as possible.
Args:
data: 1-D or 2-D array. If 1-D, then the bijector has batch_shape = []. If
2-D, then the bijector has batch shape equal to the last dimension.
method: 'yeo-johnson' or 'box-cox'. Box-Cox can only be used for
positive-only data.
standardize: If True, returns a bijector that applies a power transform and
then normalizes so that the data maps to a zero-mean, unit-stddev normal. 1e-6
is added to the stddev so that division by zero never happens.
Returns:
Bijector that maps data such that it follows a normal distribution.
(standard normal if standardize=True).
"""
dtype = data.dtype
dimension = len(data.shape)
if dimension not in {1, 2}:
raise ValueError('Data must be 1-D or 2-D array')
if dimension == 1:
# PowerTransformer.fit() expects 2D array.
data = data[:, np.newaxis]
reduce_axis = None
else:
reduce_axis = 0
if method == 'yeo-johnson':
# For yeo-johnson, center the median to zero.
# In the long run, we should consider identifying outliers that are very
# far away from the optimum, and softclipping them to reasonable numbers.
# This will help prevent them from having too much influence in deciding
# the warping parameters.
medians = np.median(data, axis=reduce_axis)
shift1 = tfb.Shift(-medians)
data = shift1(data)
else:
shift1 = tfb.Identity()
lambdas = preprocessing.PowerTransformer(
method, standardize=False).fit(data).lambdas_.astype(dtype)
logging.info('Optimal lambda was: %s', lambdas)
if dimension == 1:
# Make it a scalar, so we don't end up with batch_shape = [1] in the
# bijector.
lambdas = lambdas.item()
if method == 'yeo-johnson':
warp = tfsb.YeoJohnson(lambdas)
elif method == 'box-cox':
warp = tfsb.YeoJohnson(lambdas, shift=.0)
else:
raise ValueError(f'Unknown method: {method}')
if standardize:
transformed = warp(data) # 2-D array.
shift2 = tfb.Shift(-np.mean(transformed, axis=reduce_axis))
scale = tfb.Scale(1.0 / (np.std(transformed, axis=reduce_axis) + 1e-6))
return tfb.Chain([scale, shift2, warp, shift1])
else:
return tfb.Chain([warp, shift1]) |
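A minimal sketch of the underlying lambda fit (assuming scikit-learn is available, as the function above does); only the optimal Yeo-Johnson lambdas are shown here, without the TFP bijector chain.
import numpy as np
from sklearn import preprocessing

data = np.random.default_rng(0).exponential(size=(100, 1))
pt = preprocessing.PowerTransformer('yeo-johnson', standardize=False).fit(data)
lambdas = pt.lambdas_  # One optimal lambda per column of `data`.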
Returns the meta eagle search space. | def meta_eagle_search_space() -> vz.SearchSpace:
"""Returns the meta eagle search space."""
search_space = vz.SearchSpace()
# Perturbation
search_space.root.add_float_param(
name='perturbation',
min_value=1e-4,
max_value=1e2,
default_value=1e-1,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='perturbation_lower_bound',
min_value=1e-5,
max_value=1e-1,
default_value=1e-3,
scale_type=vz.ScaleType.LOG,
)
# Gravity
search_space.root.add_float_param(
name='gravity',
min_value=1e-2,
max_value=1e2,
default_value=1.0,
scale_type=vz.ScaleType.LOG,
)
# Visibility
search_space.root.add_float_param(
name='visibility',
min_value=3 * 1e-2,
max_value=3 * 1e2,
default_value=3.0,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='categorical_visibility',
min_value=2.0 * 1e-3,
max_value=2.0 * 1e1,
default_value=2.0 * 1e-1,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='discrete_visibility',
min_value=1e-2,
max_value=1e2,
default_value=1.0,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='categorical_perturbation_factor',
min_value=2.5 * 1e-1,
max_value=2.5 * 1e3,
default_value=2.5 * 1e1,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='discrete_perturbation_factor',
min_value=1e-1,
max_value=1e3,
default_value=1e1,
scale_type=vz.ScaleType.LOG,
)
# Pool size.
search_space.root.add_float_param(
name='pool_size_factor',
min_value=1.0,
max_value=2.0,
default_value=1.2,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='negative_gravity',
min_value=2.0 * 1e-4,
max_value=2.0,
default_value=2 * 1e-2,
scale_type=vz.ScaleType.LOG,
)
search_space.root.add_float_param(
name='pure_categorical_perturbation',
min_value=1e-3,
max_value=1e1,
default_value=1e-1,
scale_type=vz.ScaleType.LOG,
)
return search_space |
Creates an EagleStrategyDesigner with hyper-parameters and seed. | def _eagle_designer_factory(
problem: vz.ProblemStatement, seed: Optional[int], **kwargs
):
"""Creates an EagleStrategyDesigner with hyper-parameters and seed."""
config = eagle_strategy.FireflyAlgorithmConfig()
# Unpack the hyperparameters into the Eagle config class.
for param_name, param_value in kwargs.items():
if param_name not in attrs.asdict(config):
raise ValueError(f"'{param_name}' is not in FireflyAlgorithmConfig!")
setattr(config, param_name, param_value)
return eagle_strategy.EagleStrategyDesigner(
problem_statement=problem,
seed=seed,
config=config,
) |
Creates a QuasiRandomDesigner with seed. | def _quasirandom_designer_factory(
problem: vz.ProblemStatement, seed: Optional[int] = None
):
"""Creates a QuasiRandomDesigner with seed."""
return quasi_random.QuasiRandomDesigner(problem.search_space, seed=seed) |
Compute softmax values for x. | def softmax(x: np.ndarray) -> np.ndarray:
"""Compute softmax values for x."""
e_x = np.exp(x - np.max(x))
return e_x / np.sum(e_x) |
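A quick numeric check of the helper above (assuming `softmax` and numpy are in scope):
x = np.array([1.0, 2.0, 3.0])
softmax(x)  # ≈ [0.0900, 0.2447, 0.6652]; the entries sum to 1.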
Computes the Pareto rank of each point, i.e. the number of points dominating it.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) integer array. | def _pareto_rank(ys: np.ndarray) -> np.ndarray:
"""Pareto rank, which is the number of points dominating it.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) integer array.
"""
if ys.shape[0] == 0:
return np.zeros([0])
dominated = [np.all(ys <= r, axis=-1) & np.any(r > ys, axis=-1) for r in ys]
return np.sum(np.stack(dominated), axis=0) |
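A worked example (assuming maximization, as in `_pareto_rank` above): [2, 2] and [0, 3] are not dominated by any other point, while [1, 1] is dominated only by [2, 2], so its rank is 1.
ys = np.array([[1.0, 1.0], [2.0, 2.0], [0.0, 3.0]])
_pareto_rank(ys)  # == [1, 0, 0]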
Crowding distance.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) float32 array. Higher numbers mean less crowding
and more desirable. | def _crowding_distance(ys: np.ndarray) -> np.ndarray:
"""Crowding distance.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) float32 array. Higher numbers mean less crowding
and more desirable.
"""
scores = np.zeros([ys.shape[0]], dtype=np.float32)
for m in range(ys.shape[1]):
# Sort by the m-th metric.
yy = ys[:, m] # Shape: (num_population,)
sid = np.argsort(yy)
# Boundary points are assigned infinity.
scores[sid[0]] += np.inf
scores[sid[-1]] += np.inf
# Compute the crowding distance.
yrange = yy[sid[-1]] - yy[sid[0]] + np.finfo(np.float32).eps
scores[sid[1:-1]] += (yy[sid[2:]] - yy[sid[:-2]]) / yrange
return scores |
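A worked single-metric example for the function above: the two boundary points get infinity and the middle point's distance is its neighbor gap divided by the metric range.
ys = np.array([[0.0], [1.0], [3.0]])
_crowding_distance(ys)  # == [inf, (3 - 0) / (3 - 0), inf] == [inf, 1.0, inf]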
Counts the constraints violated.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) array of integers. | def _constraint_violation(ys: np.ndarray) -> np.ndarray:
"""Counts the constraints violated.
Args:
ys: (number of population) x (number of metrics) array.
Returns:
(number of population) array of integers.
"""
return np.sum(ys < 0, axis=1) |
Returns a boolean index array for the top `target` elements of ys.
This method is tough to parse. Please improve the API if you see a better
design!
Args:
ys: Array of shape [M]. Entries are expected to have a small set of unique
values.
target: Count to return.
Returns:
A tuple of two boolean index arrays `top` and `border`.
* `ys[top]` has length less than or equal to `target`. They are within
top `target`.
* `ys[top | border]` has length greater than or equal to `target`.
* `ys[border]` have all-identical entries. Callers should break ties
among them.
* `top & border` is all False. | def _select_by(ys: np.ndarray, target: int) -> Tuple[np.ndarray, np.ndarray]:
"""Returns a boolean index array for the top `target` elements of ys.
This method is tough to parse. Please improve the API if you see a better
design!
Args:
ys: Array of shape [M]. Entries are expected to have a small set of unique
values.
target: Count to return.
Returns:
A tuple of two boolean index arrays `top` and `border`.
* `ys[top]` has length less than or equal to `target`. They are within
top `target`.
* `ys[top | border]` has length greater than or equal to `target`.
* `ys[border]` have all-identical entries. Callers should break ties
among them.
* `top & border` is all False.
"""
if ys.shape[0] <= target:
return (
np.ones(ys.shape[:1], dtype=np.bool_),
np.zeros(ys.shape[:1], dtype=np.bool_),
)
unique, counts = np.unique(ys, return_counts=True)
cutoffidx = np.argmax(np.cumsum(counts) > target)
cutoffnumber = unique[cutoffidx]
return ys < cutoffnumber, ys == cutoffnumber |
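A worked example of the helper above with target=3: the unique values [0, 1, 2] have cumulative counts [2, 5, 6], so the cutoff value is 1; the two zeros form `top` and the three ones form `border`, whose ties the caller breaks.
ys = np.array([0, 0, 1, 1, 1, 2])
top, border = _select_by(ys, target=3)
# top    == [ True,  True, False, False, False, False]
# border == [False, False,  True,  True,  True, False]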
Choose objective and safety metrics and split.
Args:
metrics: Metric configurations to split by type.
Returns:
Tuple of objective and safety metrics. | def _filter_and_split(
metrics: Collection[vz.MetricInformation],
) -> Tuple[List[vz.MetricInformation], List[vz.MetricInformation]]:
"""Choose objective and safety metrics and split.
Args:
metrics: Metric configurations to split by type.
Returns:
Tuple of objective and safety metrics.
"""
metrics_by_type = collections.defaultdict(list)
for metric in metrics:
metrics_by_type[metric.type].append(metric)
return (
metrics_by_type[vz.MetricType.OBJECTIVE],
metrics_by_type[vz.MetricType.SAFETY],
) |
Creates a shape validator for attr.
For example, _shape_equals(lambda s : [3, None]) validates that the shape has
length 2 and its first element is 3.
Args:
instance_to_shape: Takes instance as input and returns the desired shape for
the instance. `None` is treated as "any number".
Returns:
A validator that can be passed into attr.ib or attr.field. | def _shape_equals(
instance_to_shape: Callable[[Any], Collection[Optional[int]]]
):
"""Creates a shape validator for attr.
For example, _shape_equals(lambda s : [3, None]) validates that the shape has
length 2 and its first element is 3.
Args:
instance_to_shape: Takes instance as input and returns the desired shape for
the instance. `None` is treated as "any number".
Returns:
A validator that can be passed into attr.ib or attr.field.
"""
def validator(instance, attribute, value) -> None:
shape = instance_to_shape(instance)
def _validator_boolean():
if len(value.shape) != len(shape):
return False
for s1, s2 in zip(value.shape, shape):
if (s2 is not None) and (s1 != s2):
return False
return True
if not _validator_boolean():
raise ValueError(
f'{attribute.name} has shape {value.shape} '
f'which does not match the expected shape {shape}'
)
return validator |
Returns parameter converters. | def _create_parameter_converters(
search_space: vz.SearchSpace,
) -> Collection[converters.DefaultModelInputConverter]:
"""Returns parameter converters."""
if search_space.is_conditional:
raise ValueError('Cannot handle conditional search space!')
def create_input_converter(
pc: vz.ParameterConfig,
) -> converters.DefaultModelInputConverter:
return converters.DefaultModelInputConverter(
pc, scale=True, max_discrete_indices=0, onehot_embed=True
)
return [create_input_converter(pc) for pc in search_space.parameters] |
Computes distance between features (or parallel feature batches). | def _compute_features_dist(
x_batch: vb.VectorizedOptimizerInput, x_pool: vb.VectorizedOptimizerInput
) -> jax.Array:
"""Computes distance between features (or parallel feature batches)."""
dist = jnp.zeros([], dtype=x_batch.continuous.dtype)
if x_batch.continuous.size > 0:
x_batch_cont = jnp.reshape(
x_batch.continuous, (x_batch.continuous.shape[0], -1)
)
x_pool_cont = jnp.reshape(
x_pool.continuous, (x_pool.continuous.shape[0], -1)
)
continuous_dists = (
jnp.sum(x_batch_cont**2, axis=-1, keepdims=True)
+ jnp.sum(x_pool_cont**2, axis=-1)
- 2.0 * jnp.matmul(x_batch_cont, x_pool_cont.T)
) # shape (batch_size, pool_size)
dist = dist + continuous_dists
if x_batch.categorical.size > 0:
x_batch_cat = jnp.reshape(
x_batch.categorical, (x_batch.categorical.shape[0], -1)
)
x_pool_cat = jnp.reshape(
x_pool.categorical, (x_pool.categorical.shape[0], -1)
)
categorical_diffs = (x_batch_cat[..., jnp.newaxis, :] != x_pool_cat).astype(
x_batch.continuous.dtype
)
dist = dist + jnp.sum(categorical_diffs, axis=-1)
return dist |
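An illustrative numpy check of the squared-distance expansion used for the continuous features above: ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, computed for all batch/pool pairs at once.
import numpy as np

a = np.random.default_rng(0).normal(size=(4, 3))  # A batch of 4 points.
b = np.random.default_rng(1).normal(size=(5, 3))  # A pool of 5 points.
dists = (
    np.sum(a**2, axis=-1, keepdims=True)
    + np.sum(b**2, axis=-1)
    - 2.0 * a @ b.T
)  # Shape (4, 5).
expected = np.sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
assert np.allclose(dists, expected)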
Flips the ordering of the elements in `prior_rewards` and `prior_features`.
Args:
prior_features: Prior features to be flipped.
prior_rewards: Prior rewards to be flipped.
Returns:
A tuple of flipped prior features and prior rewards such that all elements
corresponding to -inf entries in `prior_rewards` are at the end, while all
other elements have the opposite order. For example, if `prior_rewards` is
[1, -jnp.inf, 3, -jnp.inf ,2], `flipped_prior_rewards` will be
[2, 3, 1, -jnp.inf, -jnp.inf]. | def _mask_flip(
prior_features: vb.VectorizedOptimizerInput, prior_rewards: types.Array
) -> Tuple[vb.VectorizedOptimizerInput, types.Array]:
"""Flips the ordering of the elements in `prior_rewards` and `prior_features`.
Args:
prior_features: Prior features to be flipped.
prior_rewards: Prior rewards to be flipped.
Returns:
A tuple of flipped prior features and prior rewards such that all elements
corresponding to -inf entries in `prior_rewards` are at the end, while all
other elements have the opposite order. For example, if `prior_rewards` is
[1, -jnp.inf, 3, -jnp.inf ,2], `flipped_prior_rewards` will be
[2, 3, 1, -jnp.inf, -jnp.inf].
"""
mask = jnp.invert(jnp.isneginf(prior_rewards))
indices = jnp.flip(
jnp.argsort(jnp.where(mask, jnp.arange(prior_rewards.shape[0]), -1))
)
flipped_prior_features = jax.tree_util.tree_map(
lambda x: x[indices], prior_features
)
flipped_prior_rewards = prior_rewards[indices]
return flipped_prior_features, flipped_prior_rewards |
A version of `_create_features` that materializes large intermediates. | def _create_features_simple(
features,
rewards,
features_batch,
rewards_batch,
config,
n_features,
categorical_sizes,
max_categorical_size,
seed,
):
"""A version of `_create_features` that materializes large intermediates."""
# Only works with no parallel batch dimension.
continuous_features_diffs = (
features.continuous - features_batch.continuous[:, jnp.newaxis, :]
)
categorical_features_diffs = (
features.categorical != features_batch.categorical[:, jnp.newaxis, :]
)
features_diffs = vb.VectorizedOptimizerInput(
continuous=continuous_features_diffs,
categorical=categorical_features_diffs,
)
dists = jax.tree_util.tree_map(
lambda x: jnp.sum(jnp.square(x), axis=-1), features_diffs
)
directions = rewards - rewards_batch[:, jnp.newaxis]
scaled_directions = jnp.where(
directions >= 0.0, config.gravity, -config.negative_gravity
)
# Handle removed fireflies without updated rewards.
finite_ind = jnp.isfinite(rewards).astype(directions.dtype)
# Ignore fireflies that were removed from the pool.
scale = jax.tree_util.tree_map(
lambda x: finite_ind # pylint: disable=g-long-lambda
* scaled_directions
* jnp.exp(-config.visibility * x / n_features * 10.0),
dists,
)
# Separate forces to pull and push so as to normalize them separately.
new_continuous_features = features_batch.continuous + jnp.sum(
features_diffs.continuous * scale.continuous[..., jnp.newaxis], axis=1
)
categorical_features_logits = _create_logits_vector_simple(
features.categorical,
features_batch.categorical,
scale.categorical,
categorical_sizes,
max_categorical_size,
config,
)
new_categorical_features = tfd.Categorical(
logits=categorical_features_logits
).sample(seed=seed)
return vb.VectorizedOptimizerInput(
new_continuous_features, new_categorical_features
) |
Creates a new vectorized strategy based on the Protocol. | def random_strategy_factory(
converter: converters.TrialToModelInputConverter,
suggestion_batch_size: int,
) -> vb.VectorizedStrategy:
"""Creates a new vectorized strategy based on the Protocol."""
return RandomVectorizedStrategy(
converter=converter,
suggestion_batch_size=suggestion_batch_size,
) |
Creates a random optimizer. | def create_random_optimizer(
converter: converters.TrialToModelInputConverter,
max_evaluations: int,
suggestion_batch_size: int,
) -> vb.VectorizedOptimizer:
"""Creates a random optimizer."""
return vb.VectorizedOptimizerFactory(
strategy_factory=random_strategy_factory,
max_evaluations=max_evaluations,
suggestion_batch_size=suggestion_batch_size,
)(converter=converter) |
Creates a random optimizer factory. | def create_random_optimizer_factory(
max_evaluations: int, suggestion_batch_size: int
) -> vb.VectorizedOptimizerFactory:
"""Creates a random optimizer factory."""
return vb.VectorizedOptimizerFactory(
strategy_factory=random_strategy_factory,
max_evaluations=max_evaluations,
suggestion_batch_size=suggestion_batch_size,
) |
Reshapes a padded array into batches of `parallel_dim` points and returns a mask of valid batches. | def _reshape_to_parallel_batches(
x: types.PaddedArray, parallel_dim: int
) -> tuple[jax.Array, jax.Array]:
"""Docstring."""
new_batch_dim = x.shape[0] // parallel_dim
new_padded_array = jnp.reshape(
x.padded_array[: new_batch_dim * parallel_dim],
(new_batch_dim, parallel_dim, x.shape[-1]),
)
valid_batch_mask = (
jnp.arange(new_batch_dim) < x._original_shape[0] // parallel_dim # pylint: disable=protected-access
)
return new_padded_array, valid_batch_mask |
Returns the best candidate trials in the original search space. | def best_candidates_to_trials(
best_results: VectorizedStrategyResults,
converter: converters.TrialToModelInputConverter,
) -> list[vz.Trial]:
"""Returns the best candidate trials in the original search space."""
best_features = best_results.features
trials = []
sorted_ind = jnp.argsort(-best_results.rewards)
for i in range(len(best_results.rewards)):
# Create trials and convert the strategy features back to parameters.
ind = sorted_ind[i]
suggested_features = VectorizedOptimizerInput(
best_features.continuous[ind], best_features.categorical[ind]
)
reward = best_results.rewards[ind]
# Loop over the number of candidates per batch (which will be one, unless a
# parallel acquisition function is used).
for j in range(suggested_features.continuous.shape[0]):
features = VectorizedOptimizerInput(
continuous=jnp.expand_dims(suggested_features.continuous[j], axis=0),
categorical=jnp.expand_dims(
suggested_features.categorical[j], axis=0
),
)
trial = vz.Trial(
parameters=converter.to_parameters(
_optimizer_to_model_input(
features,
n_features=types.ContinuousAndCategorical(
len(converter.output_specs.continuous),
len(converter.output_specs.categorical),
),
)
)[0]
)
metadata = trial.metadata.ns('devinfo')
metadata['acquisition_optimization'] = json.dumps(
{'acquisition': best_results.rewards[ind]}
          | jax.tree_util.tree_map(
lambda x, ind=ind: np.asarray(x[ind]), best_results.aux
),
cls=json_utils.NumpyEncoder,
)
trial.complete(vz.Measurement({'acquisition': reward}))
trials.append(trial)
return trials |
Sorts trials by the order they were created and converts to array. | def trials_to_sorted_array(
prior_trials: list[vz.Trial],
converter: converters.TrialToModelInputConverter,
) -> Optional[types.ModelInput]:
"""Sorts trials by the order they were created and converts to array."""
if prior_trials:
prior_trials = sorted(prior_trials, key=lambda x: x.creation_time)
prior_features = converter.to_features(prior_trials)
else:
prior_features = None
return prior_features |
Creates the default runner with completed and active trials. | def _create_runner() -> pythia.InRamPolicySupporter:
"""Creates the default runner with completed and active trials."""
runner = pythia.InRamPolicySupporter(vz.ProblemStatement())
runner.AddTrials(
[
vz.Trial().complete(vz.Measurement())
for _ in range(_NUM_INITIAL_COMPLETED_TRIALS)
]
)
# The default status is ACTIVE.
runner.AddTrials([vz.Trial() for _ in range(_NUM_INITIAL_ACTIVE_TRIALS)])
return runner |
Samples a uniform value. | def sample_uniform(rng: np.random.Generator, min_value=0, max_value=1) -> float:
  """Samples a uniform value."""
return float(rng.uniform(low=min_value, high=max_value)) |
Samples value1 with probability prob1. | def sample_bernoulli(
rng: np.random.Generator,
prob1: float,
value1: _T = 0,
value2: _T = 1,
) -> _T:
"""Samples value1 with probability prob1."""
return value1 if rng.binomial(1, p=prob1) else value2 |
Samples a random integer. | def sample_integer(
rng: np.random.Generator,
min_value: float,
max_value: float,
) -> int:
"""Samples a random integer."""
val = sample_uniform(rng, min_value, max_value)
return round(val) |
Samples a random categorical value. | def sample_categorical(rng: np.random.Generator, categories: List[str]) -> str:
"""Samples a random categorical value."""
return rng.choice(categories) |
Samples random discrete value.
To sample a discrete value, we uniformly sample a decimal value between the
minimum and maximum feasible points and return the closest feasible point.
Args:
rng:
feasible_points:
Returns:
The feasible point closest to the sampled value. | def sample_discrete(rng: np.random.Generator,
feasible_points: List[float]) -> float:
"""Samples random discrete value.
  To sample a discrete value, we uniformly sample a decimal value between the
  minimum and maximum feasible points and return the closest feasible point.
Args:
rng:
feasible_points:
Returns:
    The feasible point closest to the sampled value.
"""
min_value = min(feasible_points)
max_value = max(feasible_points)
value = sample_uniform(rng, min_value, max_value)
closest_element = get_closest_element(feasible_points, value)
return closest_element |
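A minimal usage sketch of the discrete sampler above; the feasible points here are illustrative:
```python
import numpy as np

rng = np.random.default_rng(0)
feasible_points = [0.1, 0.5, 1.0, 2.0]  # Illustrative values only.
sampled = sample_discrete(rng, feasible_points)
# The result is always one of the feasible points, snapped from a uniform draw.
assert sampled in feasible_points
```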
Finds closest element in array to value. | def get_closest_element(array: List[float], value: float) -> float:
"""Finds closest element in array to value."""
gaps = [abs(x - value) for x in array]
closest_idx = min(enumerate(gaps), key=lambda x: x[1])[0]
return array[closest_idx] |
Samples random value based on the parameter type. | def _sample_value(
rng: np.random.Generator,
param_config: vz.ParameterConfig,
) -> vz.ParameterValueTypes:
"""Samples random value based on the parameter type."""
if param_config.type == vz.ParameterType.CATEGORICAL:
return sample_categorical(rng, param_config.feasible_values)
elif param_config.type == vz.ParameterType.DISCRETE:
return sample_discrete(rng, param_config.feasible_values)
else:
min_value, max_value = param_config.bounds
if param_config.type == vz.ParameterType.INTEGER:
return sample_integer(rng, min_value, max_value)
elif param_config.type == vz.ParameterType.DOUBLE:
return sample_uniform(rng, min_value, max_value)
else:
      logging.error('Invalid parameter config type: %s; defaults to DOUBLE.',
param_config.type)
return sample_uniform(rng, min_value, max_value) |
Randomly samples parameter values from the search space. | def sample_parameters(rng: np.random.Generator,
search_space: vz.SearchSpace) -> vz.ParameterDict:
"""Randomly samples parameter values from the search space."""
sampled_parameters: Dict[str, vz.ParameterValue] = {}
parameter_configs: List[vz.ParameterConfig] = search_space.parameters
for param_config in parameter_configs:
sample_param_value = _sample_value(rng, param_config)
sampled_parameters[param_config.name] = vz.ParameterValue(
sample_param_value)
return vz.ParameterDict(sampled_parameters) |
Create a new list of shuffled items. Original list remains the same. | def shuffle_list(rng: np.random.Generator, items: List[_T]) -> List[_T]:
"""Create a new list of shuffled items. Original list remains the same."""
shuffled_indices = np.array(range(len(items)))
rng.shuffle(shuffled_indices)
shuffled_items = [items[i] for i in shuffled_indices]
return shuffled_items |
Generates an interpolation function from a trial's measurement data.
Since different trials have evaluations at different step numbers,
we need to be able to interpolate the objective value between steps
in order to compare trials and regress against trial data. This function
converts a trial into a function suitable for this use.
Args:
steps: list of integers indicating the x-axis of the input data points.
values: list of floats indicating the y-axis of the input data points. steps
and values list contains the same number of elements.
Returns:
interpolation function that takes input a number t and returns
interpolated value of objective function for this trial at t steps. | def _generate_interpolation_fn_from_trial(
steps: list[int], values: list[float]
) -> Callable[[int], float]:
"""Generates an interpolation function from a trial's measurement data.
Since different trials have evaluations at different step numbers,
we need to be able to interpolate the objective value between steps
in order to compare trials and regress against trial data. This function
converts a trial into a function suitable for this use.
Args:
steps: list of integers indicating the x-axis of the input data points.
values: list of floats indicating the y-axis of the input data points. steps
and values list contains the same number of elements.
Returns:
interpolation function that takes input a number t and returns
interpolated value of objective function for this trial at t steps.
"""
return InterpolatedUnivariateSpline(steps, values, k=1) |
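A short sketch of how the interpolation function might be used; the step/value pairs are made up:
```python
# Linear (k=1) interpolation between measured steps.
steps = [0, 10, 20, 40]
values = [0.10, 0.40, 0.55, 0.70]
interp = _generate_interpolation_fn_from_trial(steps, values)
print(float(interp(15)))  # Value between the measurements at steps 10 and 20 (~0.475).
```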
Sort and remove duplicates in the trial's measurements.
Args:
steps: a list of integer measurement steps for a given trial.
values: a list of objective values corresponding to the steps for a given
trial.
Returns:
steps: a list of integer measurement steps after dedupe.
values: a list of objective values corresponding to the steps after dedupe. | def _sort_dedupe_measurements(
steps: list[Union[int, float]], values: list[float]
) -> Tuple[list[Union[int, float]], list[float]]:
"""Sort and remove duplicates in the trial's measurements.
Args:
steps: a list of integer measurement steps for a given trial.
values: a list of objective values corresponding to the steps for a given
trial.
Returns:
steps: a list of integer measurement steps after dedupe.
values: a list of objective values corresponding to the steps after dedupe.
"""
if isinstance(steps[0], float):
# Dedupe is skipped when steps are not integers.
return steps, values
step_obj_dict = {}
updated_steps = []
updated_values = []
for index in range(len(steps)):
step_obj_dict[steps[index]] = values[index]
last_step = None
for step, value in sorted(six.iteritems(step_obj_dict)):
if last_step is None or step > last_step:
updated_steps.append(step)
updated_values.append(value)
last_step = step
return updated_steps, updated_values |
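A small worked example of the sort-and-dedupe behavior (for a duplicated integer step, the last value seen wins):
```python
steps = [3, 1, 2, 2, 5]
values = [0.3, 0.1, 0.2, 0.25, 0.5]
print(_sort_dedupe_measurements(steps, values))
# -> ([1, 2, 3, 5], [0.1, 0.25, 0.3, 0.5]); the duplicate step 2 keeps its last value.
```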
Smoke test on random score. | def assert_passes_on_random_single_metric_function(
self, search_space: vz.SearchSpace, optimizer: vza.GradientFreeOptimizer, *,
np_random_seed: int):
"""Smoke test on random score."""
rng = np.random.default_rng(np_random_seed)
logging.info('search space: %s', search_space)
problem = vz.ProblemStatement(
search_space=search_space,
metric_information=[
vz.MetricInformation(
'acquisition', goal=vz.ObjectiveMetricGoal.MAXIMIZE)
])
def mock_score(trials):
return {'acquisition': rng.uniform(size=[len(trials), 1])}
suggestions = optimizer.optimize(mock_score, problem, count=5)
self.assertNotEmpty(suggestions)
logging.info('suggestions: %s', suggestions)
for suggestion in suggestions:
problem.search_space.assert_contains(suggestion.parameters) |
Bi-objective test on random score. | def assert_passes_on_random_multi_metric_function(
self,
search_space: vz.SearchSpace,
optimizer: vza.GradientFreeOptimizer,
*,
np_random_seed: int
):
"""Bi-objective test on random score."""
rng = np.random.default_rng(np_random_seed)
logging.info('search space: %s', search_space)
problem = vz.ProblemStatement(
search_space=search_space,
metric_information=[
vz.MetricInformation(
'acquisition_1', goal=vz.ObjectiveMetricGoal.MAXIMIZE
),
vz.MetricInformation(
'acquisition_2', goal=vz.ObjectiveMetricGoal.MAXIMIZE
),
],
)
def mock_score(trials):
return {
'acquisition_1': rng.uniform(size=[len(trials), 1]),
'acquisition_2': rng.uniform(size=[len(trials), 1]),
}
suggestions = optimizer.optimize(mock_score, problem, count=5)
self.assertNotEmpty(suggestions)
logging.info('suggestions: %s', suggestions)
for suggestion in suggestions:
problem.search_space.assert_contains(suggestion.parameters) |
DO NOT USE. DEPRECATED. Use RandomMetricsRunner.run_designer(). | def run_with_random_metrics(
designer: vza.Designer,
problem: vz.ProblemStatement,
iters: int = 5,
*,
batch_size: Optional[int] = 1,
seed: Any = None,
verbose: int = 0,
validate_parameters: bool = False,
) -> Sequence[vz.Trial]:
"""DO NOT USE. DEPRECATED. Use RandomMetricsRunner.run_designer()."""
return RandomMetricsRunner(
problem,
iters,
batch_size=batch_size,
seed=seed,
verbose=verbose,
validate_parameters=validate_parameters,
).run_designer(designer) |
Builds a relative convergence curve (see returns for definition).
Finds the smallest index j for each element i in 'baseline_curve'
such that baseline_curve[i] <= compared_curve[j]. The function uses the
'bisect_left' function to efficiently perform binary search under the
assumption that 'baseline_curve' and 'compared_curve' are sorted in
non-decreasing order.
Args:
baseline_curve: Baseline maximization convergence curve.
compared_curve: Compared maximization convergence curve.
Returns:
A list of numbers where i-th (zero-index) element is the smallest "j" such
that baseline_curve[i] <= compared_curve[j] | def build_convergence_curve(
baseline_curve: Sequence[float], compared_curve: Sequence[float]
) -> List[float]:
"""Builds a relative convergence curve (see returns for definition).
Finds the smallest index j for each element i in 'baseline_curve'
such that baseline_curve[i] <= compared_curve[j]. The function uses the
'bisect_left' function to efficiently perform binary search under the
assumption that 'baseline_curve' and 'compared_curve' are sorted in
non-decreasing order.
Args:
baseline_curve: Baseline maximization convergence curve.
compared_curve: Compared maximization convergence curve.
Returns:
A list of numbers where i-th (zero-index) element is the smallest "j" such
that baseline_curve[i] <= compared_curve[j]
"""
convergence_curve = []
for value in baseline_curve:
j = bisect.bisect_left(compared_curve, value)
convergence_curve.append(j if j != len(compared_curve) else float('inf'))
return convergence_curve |
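A worked example of the relative convergence curve; both inputs must be non-decreasing:
```python
baseline = [0.1, 0.3, 0.5, 0.9]
compared = [0.2, 0.4, 0.6]
print(build_convergence_curve(baseline, compared))
# -> [0, 1, 2, inf]: e.g. the compared curve first reaches 0.5 at index 2,
# and never reaches 0.9, hence inf.
```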
Returns trials where trials[i] has empty metric name equal to values[i]. | def _gen_trials(values):
"""Returns trials where trials[i] has empty metric name equal to values[i]."""
trials = []
for v in values:
trial = pyvizier.Trial()
trials.append(
trial.complete(
pyvizier.Measurement(metrics={'': pyvizier.Metric(value=v)})))
return trials |
Computes the entropy of parameter values.
Args:
parameter_config: The parameter config.
parameter_values: Values of a parameter.
WARNING: Entropy estimation accuracy depends on the sample size, so to compare
the entropies of two `parameter_values`, make sure they have the same size.
Returns:
The entropy of parameter values. | def compute_parameter_entropy(
parameter_config: vz.ParameterConfig,
parameter_values: Iterable[Optional[vz.ParameterValue]],
) -> float:
"""Computes the entropy of parameter values.
Args:
parameter_config: The parameter config.
parameter_values: Values of a parameter.
WARNING: Entropy estimation accuracy depends on the sample size, so to compare
the entropies of two `parameter_values`, make sure they have the same size.
Returns:
The entropy of parameter values.
"""
values = [pv.value for pv in parameter_values if pv is not None]
if not values:
return 0.0
if parameter_config.type in [
vz.ParameterType.CATEGORICAL,
vz.ParameterType.DISCRETE,
] and hasattr(parameter_config, 'feasible_values'):
if any([value not in parameter_config.feasible_values for value in values]):
raise ValueError(
f'Parameter values: {parameter_values} contain out-of-bound values.'
f' Feasible values: {parameter_config.feasible_values}'
)
_, counts = np.unique(values, return_counts=True)
elif hasattr(parameter_config, 'bounds'):
min_val = parameter_config.bounds[0]
max_val = parameter_config.bounds[1]
if any([value < min_val or value > max_val for value in values]):
raise ValueError(
f'Parameter values: {parameter_values} contain out-of-bound values.'
f' Bound: [{min_val}, {max_val}]'
)
if parameter_config.type == vz.ParameterType.INTEGER:
_, counts = np.unique(values, return_counts=True)
else:
# Sets the number of fixed-width bins as c * sample_size ** (1.0 / 3.0).
# The cubic-root dependency on the sample size appears in several common
# bin-size selection strategies, e.g.
# https://en.wikipedia.org/wiki/Histogram#Number_of_bins_and_width
# The multiplier `c` is chosen such that for a small sample size, say 100,
# we still get a reasonable number of bins, say 30.
alpha = 1.0 / 3.0
c = 30.0 / (100**alpha)
n_sample = len(values)
# We also ensure that the number of bins is at most the sample size.
num_bins = np.min((int(c * n_sample**alpha), n_sample))
counts, _ = np.histogram(
values,
bins=np.linspace(
min_val,
max_val,
num=num_bins + 1,
dtype=np.float32,
),
)
else:
raise ValueError(
'Invalid parameter config: either `feasible_values` or'
'`bounds` is expected to be set, but both are unset. '
f'Parameter config: {parameter_config}'
)
return float(scipy.stats.entropy(counts)) |
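A quick sanity check of the bin-count heuristic used above for continuous parameters, i.e. num_bins = min(c * n ** (1/3), n) with c = 30 / 100 ** (1/3):
```python
c = 30.0 / (100 ** (1.0 / 3.0))
for n_sample in (100, 1000, 10000):
    num_bins = min(int(c * n_sample ** (1.0 / 3.0)), n_sample)
    print(n_sample, num_bins)
# Roughly 30, 64, and 139 bins respectively, growing with the cube root of the sample size.
```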
Computes the average marginal parameter entropy across results.
Computes the marginal entropy of every parameter in every study, and then
returns the average marginal entropy over all parameters and all studies.
Args:
results: Benchmark results.
Returns:
Average marginal parameter entropy. | def compute_average_marginal_parameter_entropy(
results: BenchmarkResults,
) -> float:
"""Computes the average marginal parameter entropy across results.
Computes the marginal entropy of every parameter in every study, and then
returns the average marginal entropy over all parameters and all studies.
Args:
results: Benchmark results.
Returns:
Average marginal parameter entropy.
"""
marginal_param_entropies = []
for _, spec_gen_results in results.items():
for _, spec_results in spec_gen_results.items():
for _, study in spec_results.items():
for param_config in study.problem.search_space.parameters:
param_values = [
trial.parameters.get(param_config.name) for trial in study.trials
]
marginal_param_entropies.append(
compute_parameter_entropy(
parameter_config=param_config, parameter_values=param_values
)
)
return np.mean(marginal_param_entropies) |
Generates two studies with zero and large parameter entropies. | def _generate_min_and_max_ent_studies() -> (
Tuple[vz.ProblemAndTrials, vz.ProblemAndTrials]
):
"""Generates two studies with zero and large parameter entropies."""
space = vz.SearchSpace()
root = space.root
root.add_float_param('continuous', -5.0, 5.0)
root.add_int_param('integer', -5, 5)
root.add_categorical_param(
'categorical', [str(v) for v in np.linspace(-5, 5, 11)]
)
root.add_discrete_param('discrete', list(np.linspace(-5, 5, 11)))
problem = vz.ProblemStatement(
search_space=space,
metric_information=[
vz.MetricInformation('x1', goal=vz.ObjectiveMetricGoal.MAXIMIZE),
],
)
max_ent_trials = []
min_ent_trials = []
values = list(np.linspace(-5, 5, 11)) * 10
for idx, value in enumerate(values):
# Generates trials with large marginal parameter entropies by looping
# through the feasible values of each parameter.
max_ent_trials.append(
vz.Trial(
id=idx + 1,
parameters={
'continuous': vz.ParameterValue(value),
'integer': vz.ParameterValue(int(value)),
'categorical': vz.ParameterValue(str(value)),
'discrete': vz.ParameterValue(value),
},
)
)
# Generates trials with zero marginal parameter entropies by setting every
# parameter to the same value.
min_ent_trials.append(
vz.Trial(
id=idx + 1,
parameters={
'continuous': vz.ParameterValue(values[55]),
'integer': vz.ParameterValue(int(values[17])),
'categorical': vz.ParameterValue(str(values[96])),
'discrete': vz.ParameterValue(values[34]),
},
)
)
return vz.ProblemAndTrials(
problem=problem, trials=min_ent_trials
), vz.ProblemAndTrials(problem=problem, trials=max_ent_trials) |
Aggregates multiple convergence curves into a plot with confidence bounds.
Example usage:
```python
fig, ax = plt.subplots(1, 1, figsize=(12,8))
plot_median_convergence(ax,
[[1,1,2,3,4], [1,1,1,2,nan]],
percentiles=((40, 60), (30, 70)),
alphas=(0.4, 0.2),
xs=np.arange(1,6),
color='r')
```
Args:
ax: matplotlib axis to plot on.
curves: Expected to have shape (Number of studies, points), where rows are
convergence curves from repeated studies from the same algorithm and
settings. May contain NaNs, which will be excluded from plotting.
percentiles: Each pair defines (lower_percentile, upper_percentile).
alphas: Must have the same length as percentiles. Defines the color strength
for the confidence bounds. Alphas should decrease as the distance between
the lower and upper percentiles increases. (See example above.)
xs: x values for the plot. If not provided, uses np.arange(curves.shape[1]).
Must have shape (curves.shape[1],).
**kwargs: Forwarded to ax.plot(). | def plot_median_convergence(
ax: mpl.axes.Axes,
curves: 'np.ndarray',
*,
percentiles: Sequence[Tuple[int, int]] = ((40, 60),),
alphas: Sequence[float] = (0.2,),
xs: Optional['np.ndarray'] = None,
**kwargs,
):
"""Aggregates multiple convergence curves into a plot with confidence bounds.
Example usage:
```python
fig, ax = plt.subplots(1, 1, figsize=(12,8))
plot_median_convergence(ax,
[[1,1,2,3,4], [1,1,1,2,nan]],
percentiles=((40, 60), (30, 70)),
alphas=(0.4, 0.2),
xs=np.arange(1,6),
color='r')
```
Args:
ax: matplotlib axis to plot on.
curves: Expected to have shape (Number of studies, points), where rows are
convergence curves from repeated studies from the same algorithm and
settings. May contain NaNs, which will be excluded from plotting.
percentiles: Each pair defines (lower_percentile, upper_percentile).
    alphas: Must have the same length as percentiles. Defines the color strength
      for the confidence bounds. Alphas should decrease as the distance between
      the lower and upper percentiles increases. (See example above.)
    xs: x values for the plot. If not provided, uses np.arange(curves.shape[1]).
      Must have shape (curves.shape[1],).
    **kwargs: Forwarded to ax.plot().
"""
if xs is None:
xs = np.arange(curves.shape[1])
line = ax.plot(xs, np.nanmedian(curves, axis=0), **kwargs)
for (lower, upper), alpha in zip(percentiles, alphas):
ax.fill_between(
xs,
np.nanpercentile(curves, lower, axis=0),
np.nanpercentile(curves, upper, axis=0),
alpha=alpha,
color=line[0].get_color(),
) |
Aggregates multiple convergence curves into a plot with standard error bounds.
Example usage:
```python
fig, ax = plt.subplots(1, 1, figsize=(12,8))
plot_mean_convergence(ax,
[[1,1,2,3,4], [1,1,1,2,nan]],
alpha=0.3,
xs=np.arange(1,6),
color='r')
```
Args:
ax: matplotlib axis to plot on.
curves: Expected to have shape (Number of studies, points), where rows are
convergence curves from repeated studies from the same algorithm and
settings. May contain NaNs, which will be excluded from plotting.
alpha: Defines the color strength for the standard error bounds.
xs: x values for the plot. If not provided, uses np.arange(curves.shape[1]).
Must have shape (curves.shape[1],).
**kwargs: Forwarded to ax.plot(). | def plot_mean_convergence(
ax: mpl.axes.Axes,
curves: 'np.ndarray',
*,
alpha: float = 0.2,
xs: Optional['np.ndarray'] = None,
**kwargs,
):
"""Aggregates multiple convergence curves into a plot with standard error bounds.
Example usage:
```python
fig, ax = plt.subplots(1, 1, figsize=(12,8))
plot_mean_convergence(ax,
[[1,1,2,3,4], [1,1,1,2,nan]],
alpha=0.3,
xs=np.arange(1,6),
color='r')
```
Args:
ax: matplotlib axis to plot on.
curves: Expected to have shape (Number of studies, points), where rows are
convergence curves from repeated studies from the same algorithm and
settings. May contain NaNs, which will be excluded from plotting.
alpha: Defines the color strength for the standard error bounds.
xs: x values for the plot. If not provided, uses np.arange(curves.shape[1]).
      Must have shape (curves.shape[1],).
    **kwargs: Forwarded to ax.plot().
"""
if xs is None:
xs = np.arange(curves.shape[1])
curves_mean = np.nanmean(curves, axis=0)
curves_std_error = np.nanstd(curves, axis=0) / np.sqrt(curves.shape[0])
line = ax.plot(xs, curves_mean, **kwargs)
ax.fill_between(
xs,
curves_mean + 1.5 * curves_std_error,
curves_mean - 1.5 * curves_std_error,
alpha=alpha,
color=line[0].get_color(),
) |
Generates a grid of algorithm comparison plots.
Generates one plot for each Experimenter x Metrics in records. Note that
each row = Experimenter and each column = Metrics.
Args:
records: All BenchmarkRecords used for plotting.
metrics: Keys in the plot_elements dict in BenchmarkRecord used for plot. If
not supplied, all keys are plotted.
fig_title: Title of the entire grid plot.
title_maxlen: Maximum length of title of each Experimenter.
col_figsize: Size of the column of each subfigure.
row_figsize: Size of the row of each subfigure.
**kwargs: Additional keyword args forwarded to pyplot.
Raises:
ValueError: When plot type is not supported. | def plot_from_records(
records: Sequence[state_analyzer.BenchmarkRecord],
metrics: Optional[Sequence[str]] = None,
*,
fig_title: str = 'All Plot Elements',
title_maxlen: int = 50,
col_figsize: float = 6.0,
row_figsize: float = 6.0,
**kwargs,
):
"""Generates a grid of algorithm comparison plots.
Generates one plot for each Experimenter x Metrics in records. Note that
each row = Experimenter and each column = Metrics.
Args:
records: All BenchmarkRecords used for plotting.
metrics: Keys in the plot_elements dict in BenchmarkRecord used for plot. If
not supplied, all keys are plotted.
fig_title: Title of the entire grid plot.
title_maxlen: Maximum length of title of each Experimenter.
col_figsize: Size of the column of each subfigure.
row_figsize: Size of the row of each subfigure.
**kwargs: Additional keyword args forwarded to pyplot.
Raises:
ValueError: When plot type is not supported.
"""
def _metadata_to_str(metadata: vz.Metadata) -> str:
visual_dict = {}
for _, key, value in metadata.all_items():
try:
loaded = json.loads(value, cls=json_utils.NumpyDecoder)
assert isinstance(loaded, dict)
visual_dict = visual_dict | {k: v for k, v in loaded.items() if v}
except Exception as e: # pylint: disable=broad-except
del e
visual_dict[key] = value
return str(visual_dict)
records_list = [
(rec.algorithm, _metadata_to_str(rec.experimenter_metadata), rec)
for rec in records
]
df = pd.DataFrame(
records_list, columns=['algorithm', 'experimenter', 'record']
)
algorithms = df.algorithm.unique()
colors = {
algorithm: plt.get_cmap('tab10')(i)
for i, algorithm in enumerate(algorithms)
}
total_rows = len(df.groupby('experimenter'))
if metrics is None:
metrics = set()
for record in df.record:
metrics = metrics.union(set(record.plot_elements.keys()))
print(f'All inferred metrics {metrics}')
fig, axes = plt.subplots(
total_rows,
len(metrics),
figsize=(col_figsize * len(metrics), row_figsize * total_rows),
squeeze=False,
)
fig.suptitle(fig_title, fontsize=16)
fig_idx = 0
for experimenter_key, group_by_experimenter in df.groupby('experimenter'):
for metric_idx, metric in enumerate(metrics):
ax = axes[fig_idx, metric_idx]
subplot_title = (
str(experimenter_key)[:title_maxlen] if experimenter_key else metric
)
ax.set_title(subplot_title)
ax.set_ylabel(metric)
for algorithm_name, group in group_by_experimenter.groupby('algorithm'):
if not group.size:
continue
if len(group) != 1:
print(
f'Found more records than expected in {algorithm_name} for'
f' {group}'
)
elems = group.record.iloc[0].plot_elements
if metric not in elems:
print(f'metric {metric} not found in {group.record.iloc[0]}')
continue
elem_for_metric = elems[metric]
plot_type = elem_for_metric.plot_type
if plot_type == 'error-bar':
plot_median_convergence(
ax,
elem_for_metric.curve.ys,
xs=elem_for_metric.curve.xs,
label=f'{algorithm_name}',
color=colors[algorithm_name],
percentiles=(elem_for_metric.percentile_error_bar,),
**kwargs,
)
ax.set_xlabel('# of Trials')
elif plot_type == 'scatter':
plot = elem_for_metric.plot_array
ax.scatter(
plot[:, 0],
plot[:, 1],
label=f'{algorithm_name}',
color=colors[algorithm_name],
**kwargs,
)
elif plot_type == 'histogram':
plot = elem_for_metric.plot_array
linewidth = (
len(algorithms)
+ 1
- float(list(algorithms).index(algorithm_name))
)
ax.hist(
plot,
histtype='step',
density=True,
fill=False,
linewidth=linewidth,
label=f'{algorithm_name}',
color=colors[algorithm_name],
**kwargs,
)
else:
raise ValueError(f'{plot_type} plot not yet supported!')
ax.set_yscale(elem_for_metric.yscale)
ax.yaxis.set_major_locator(mpl.ticker.LinearLocator(20))
ax.yaxis.set_minor_locator(mpl.ticker.LinearLocator(100))
ax.yaxis.set_major_formatter(mpl.ticker.ScalarFormatter())
ax.legend()
fig_idx += 1 |
Computes the one-sided T-test score.
In case of a maximization (minimization) problem, it scores the confidence
that the mean of 'baseline_mean_values' is less (greater) than the mean of
'candidate_mean_values'.
The lower the score the higher the confidence that it's the case.
One-sample
----------
The test assumes a t-distribution with mean given by 'candidate' and computes
the probability of observing the sample mean of 'baseline' or less.
Two-sample
----------
The test assumes 'baseline' and 'candidate' have the same mean and computes
the probability that the 'baseline' sample mean is less than the 'candidate'
sample mean.
The lower the T-test p-value score the more confidence we have that
'candidate' is indeed "better" than 'baseline'.
Arguments:
baseline_mean_values: List of baseline simple regret values.
candidate_mean_values: List of candidate simple regret values.
objective_goal: The optimization problem type (MAXIMIZE or MINIMIZE).
Returns:
The p-value score of the one-sided T test. | def t_test_mean_score(baseline_mean_values: Union[list[float], np.ndarray],
candidate_mean_values: Union[list[float], np.ndarray],
objective_goal: vz.ObjectiveMetricGoal) -> float:
"""Computes the one-sided T-test score.
  In case of a maximization (minimization) problem, it scores the confidence
that the mean of 'baseline_mean_values' is less (greater) than the mean of
'candidate_mean_values'.
The lower the score the higher the confidence that it's the case.
One-sample
----------
  The test assumes a t-distribution with mean given by 'candidate' and computes
  the probability of observing the sample mean of 'baseline' or less.
Two-sample
----------
The test assumes 'baseline' and 'candidate' have the same mean and computes
the probability that the 'baseline' sample mean is less than the 'candidate'
sample mean.
The lower the T-test p-value score the more confidence we have that
'candidate' is indeed "better" than 'baseline'.
Arguments:
baseline_mean_values: List of baseline simple regret values.
candidate_mean_values: List of candidate simple regret values.
objective_goal: The optimization problem type (MAXIMIZE or MINIMIZE).
Returns:
The p-value score of the one-sided T test.
"""
if objective_goal == vz.ObjectiveMetricGoal.MAXIMIZE:
alternative = 'less'
else:
alternative = 'greater'
if len(candidate_mean_values) == 1:
return stats.ttest_1samp(
a=baseline_mean_values,
popmean=candidate_mean_values[0],
alternative=alternative).pvalue
else:
# use Welch’s t-test
return stats.ttest_ind(
baseline_mean_values,
candidate_mean_values,
equal_var=False,
alternative=alternative).pvalue |
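A hedged usage sketch; the regret values below are hypothetical and the `vz` alias is assumed to be the pyvizier import used elsewhere in this module:
```python
# Assumes `from vizier import pyvizier as vz`.
baseline = [0.52, 0.55, 0.50, 0.53]   # Hypothetical simple-regret samples.
candidate = [0.61, 0.63, 0.60]
p_value = t_test_mean_score(baseline, candidate, vz.ObjectiveMetricGoal.MAXIMIZE)
# A small p-value indicates high confidence that the candidate mean exceeds
# the baseline mean (two-sample Welch's t-test since len(candidate) > 1).
print(p_value)
```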
Helper function for creating full rainbow-based Atari 100k agent. | def create_agent_fn(
sess, # pylint: disable=unused-argument
environment,
seed: Optional[int] = None,
summary_writer=None) -> atari_100k_rainbow_agent.Atari100kRainbowAgent:
"""Helper function for creating full rainbow-based Atari 100k agent."""
return atari_100k_rainbow_agent.Atari100kRainbowAgent(
num_actions=environment.action_space.n,
seed=seed,
summary_writer=summary_writer) |
Produces a reasonable SearchSpace for tuning the Rainbow training process. | def default_search_space() -> pyvizier.SearchSpace:
"""Produces a reasonable SearchSpace for tuning the Rainbow training process."""
ss = pyvizier.SearchSpace()
ss.root.add_float_param(
'JaxDQNAgent.gamma',
0.7,
0.999999,
scale_type=pyvizier.ScaleType.REVERSE_LOG)
ss.root.add_int_param('JaxDQNAgent.update_horizon', 1, 20)
ss.root.add_int_param('JaxDQNAgent.update_period', 1, 10)
ss.root.add_int_param('JaxDQNAgent.target_update_period', 1, 10000)
ss.root.add_int_param('JaxDQNAgent.min_replay_history', 100, 100000)
ss.root.add_float_param(
'JaxDQNAgent.epsilon_train',
0.0000001,
1.0,
scale_type=pyvizier.ScaleType.LOG)
ss.root.add_int_param('JaxDQNAgent.epsilon_decay_period', 1000, 10000)
ss.root.add_bool_param('JaxFullRainbowAgent.noisy')
ss.root.add_bool_param('JaxFullRainbowAgent.dueling')
ss.root.add_bool_param('JaxFullRainbowAgent.double_dqn')
ss.root.add_int_param('JaxFullRainbowAgent.num_atoms', 1, 100)
ss.root.add_bool_param('Atari100kRainbowAgent.data_augmentation')
ss.root.add_float_param(
'create_optimizer.learning_rate',
0.0000001,
1.0,
scale_type=pyvizier.ScaleType.LOG)
ss.root.add_float_param(
'create_optimizer.eps', 0.0000001, 1.0, scale_type=pyvizier.ScaleType.LOG)
return ss |
Surrogate function bounds. | def _surrogate_bounds(handler: handler_lib.HPOBHandler, search_space_id: str,
dataset_id: str) -> Tuple[float, float]:
"""Surrogate function bounds."""
surrogate_name = 'surrogate-' + search_space_id + '-' + dataset_id
y_min = handler.surrogates_stats[surrogate_name]['y_min']
y_max = handler.surrogates_stats[surrogate_name]['y_max']
return y_min, y_max |
Generates all test cases. Must be called after InitGoogle(). | def generate_test_class():
"""Generates all test cases. Must be called after InitGoogle()."""
handler = handler_lib.HPOBHandler(
root_dir=hpob_experimenter.ROOT_DIR,
mode=hpob_experimenter.DEFAULT_TEST_MODE,
surrogates_dir=hpob_experimenter.SURROGATES_DIR)
class HpobTest(parameterized.TestCase):
@absltest.skip("Files must be installed manually.")
@parameterized.parameters(
dict(normalize_y=True), dict(normalize_y=False),
dict(normalize_y=True, use_surrogate_values=True),
dict(normalize_y=False, use_surrogate_values=True),
*tuple(dict(na_policy=x) for x in hpob_experimenter.NaPolicy))
def test_all(self, **kwargs):
container = hpob_experimenter.HPOBContainer(handler, **kwargs)
for search_space_id, dataset_id in container.dataset_keys(
hpob_experimenter.TEST):
problem_and_trials = container.get_problem_and_trials(
search_space_id, dataset_id)
experimenter = container.get_experimenter(search_space_id, dataset_id)
_ = experimenter.problem_statement()
# TODO: Add more validations on the problem statement.
objective = experimenter.EvaluateContinuous(
problem_and_trials.trials[0])
experimenter.evaluate([problem_and_trials.trials[0]])
logging.info(
'First five trial metrics: %s',
[
t.final_measurement_or_die.metrics[
hpob_experimenter.METRIC_NAME
].value
for t in problem_and_trials.trials[:5]
],
)
logging.info('objective: %f', objective)
logging.info('evaluated: %s', problem_and_trials.trials[0])
self.assertAlmostEqual(
problem_and_trials.trials[0]
.final_measurement_or_die.metrics[hpob_experimenter.METRIC_NAME]
.value,
objective,
places=1,
)
break
global class_to_test
class_to_test = HpobTest |
Converts ops and nodes to a string format recognized by NASBENCH-201. | def _model_tss_spc(ops: Sequence[str], num_nodes: int) -> str:
"""Converts ops and nodes to a string format recognized by NASBENCH-201."""
nodes, k = [], 0
for i in range(1, num_nodes):
xstrs = []
for j in range(i):
xstrs.append('{:}~{:}'.format(ops[k], j))
k += 1
nodes.append('|' + '|'.join(xstrs) + '|')
return '+'.join(nodes) |
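A small sketch of the resulting string format; the operation names below are the usual NASBENCH-201 ops and are only illustrative:
```python
ops = ['nor_conv_3x3', 'skip_connect', 'nor_conv_1x1',
       'avg_pool_3x3', 'none', 'nor_conv_3x3']  # 1 + 2 + 3 edges for 4 nodes.
print(_model_tss_spc(ops, num_nodes=4))
# -> '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_1x1~1|+|avg_pool_3x3~0|none~1|nor_conv_3x3~2|'
```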
Creates a noise function via NumPy.
See https://bee22.com/resources/bbob%20noisy%20functions.pdf
Args:
noise: Noise specification
dimension: Dimensionality of bbob function that the noise is applied to.
target_value: The noise does not apply to values less than this.
seed:
Returns:
Callable that returns the noisy version of the input.
Raises:
ValueError: if noise is not supported. | def _create_noise_fn(
noise: str,
dimension: int,
target_value: float = 1e-8,
seed: Optional[int] = None,
) -> Callable[[float], float]:
"""Creates a noise function via NumPy.
See https://bee22.com/resources/bbob%20noisy%20functions.pdf
Args:
noise: Noise specification
dimension: Dimensionality of bbob function that the noise is applied to.
target_value: The noise does not apply to values less than this.
seed:
Returns:
Callable that returns the noisy version of the input.
Raises:
ValueError: if noise is not supported.
"""
rng = np.random.default_rng(seed or 0)
if noise == 'NO_NOISE':
noise_fn = lambda v: v
elif noise == 'MODERATE_GAUSSIAN':
noise_fn = lambda v: v * rng.lognormal(0, 0.01)
elif noise == 'SEVERE_GAUSSIAN':
noise_fn = lambda v: v * rng.lognormal(0, 0.1)
elif noise == 'MODERATE_UNIFORM':
noise_fn = functools.partial(
_uniform_noise,
rng=rng,
amplifying_exponent=0.01 * (0.49 + 1.0 / dimension),
shrinking_exponent=0.01,
)
elif noise == 'SEVERE_UNIFORM':
noise_fn = functools.partial(
_uniform_noise,
rng=rng,
amplifying_exponent=0.1 * (0.49 + 1.0 / dimension),
shrinking_exponent=0.1,
)
elif noise == 'MODERATE_SELDOM_CAUCHY':
noise_fn = functools.partial(
_cauchy_noise, rng=rng, noise_strength=0.01, noise_frequency=0.05
)
elif noise == 'SEVERE_SELDOM_CAUCHY':
noise_fn = functools.partial(
_cauchy_noise, rng=rng, noise_strength=0.1, noise_frequency=0.25
)
elif noise == 'LIGHT_ADDITIVE_GAUSSIAN':
return functools.partial(_additive_normal_noise, rng=rng, stddev=0.01)
elif noise == 'MODERATE_ADDITIVE_GAUSSIAN':
return functools.partial(_additive_normal_noise, rng=rng, stddev=0.1)
elif noise == 'SEVERE_ADDITIVE_GAUSSIAN':
return functools.partial(_additive_normal_noise, rng=rng, stddev=1.0)
else:
raise ValueError('Noise was not supported: {}'.format(noise))
return lambda v: _stabilized_noise(v, noise_fn, target_value) |
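A minimal usage sketch of the noise factory, using one of the specification strings handled above:
```python
noise_fn = _create_noise_fn('MODERATE_GAUSSIAN', dimension=10, seed=42)
print(noise_fn(3.7))    # Roughly 3.7 * lognormal(0, 0.01), plus a tiny stabilizer term.
print(noise_fn(1e-12))  # Below target_value (1e-8), so returned unchanged.
```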
Uniform noise model for bbob-noisy benchmark.
The noise strength increases when value is small.
Args:
value: Function value to apply noise to.
amplifying_exponent: "alpha" in the paper. The higher this number is, the
more likely it is for the noisy value to be greater than the input value.
0 or less means the noise never amplifies the function value.
shrinking_exponent: "beta" in the paper. The higher this number is, the more
likely it is for the noisy value to be less than the input value. 0 or
less means the noise never shrinks the function value.
rng: Rng.
epsilon: "epsilon" in the paper. Prevents division by zero.
Returns:
Noisy version of value. | def _uniform_noise(
value: float,
amplifying_exponent: float,
shrinking_exponent: float,
rng: np.random.Generator,
epsilon: float = 1e-99,
) -> float:
"""Uniform noise model for bbob-noisy benchmark.
The noise strength increases when value is small.
Args:
value: Function value to apply noise to.
amplifying_exponent: "alpha" in the paper. The higher this number is, the
more likely it is for the noisy value to be greater than the input value.
0 or less means the noise never amplifies the function value.
shrinking_exponent: "beta" in the paper. The higher this number is, the more
likely it is for the noisy value to be less than the input value. 0 or
less means the noise never shrinks the function value.
rng: Rng.
epsilon: "epsilon" in the paper. Prevents division by zero.
Returns:
Noisy version of value.
"""
f1 = np.power(rng.uniform(), np.max([0.0, shrinking_exponent]))
f2 = np.power(1e9 / (value + epsilon), amplifying_exponent * rng.uniform())
return value * f1 * np.max([1.0, f2]) |
Additive normal noise. | def _additive_normal_noise(
value: float, stddev: float, rng: np.random.Generator
) -> float:
"""Additive normal noise."""
return value + rng.normal(0.0, stddev) |
Cauchy noise model for bbob-noisy benchmark.
The noise is infrequent and difficult to analyze due to large outliers.
Args:
value: Function value to apply noise to.
noise_strength: "alpha" in the paper. Its absolute value determines the
noise strength. The recommended setup as in the paper is to use a positive
number.
noise_frequency: "p" in the paper. Determines the probability of the noisy
evaluation. Clipped (not explicitly but effectively) to [0, 1] range.
rng:
Returns:
Noisy version of value. | def _cauchy_noise(
value: float,
noise_strength: float,
noise_frequency: float,
rng: np.random.Generator,
) -> float:
"""Cauchy noise model for bbob-noisy benchmark.
The noise is infrequent and difficult to analyze due to large outliers.
Args:
value: Function value to apply noise to.
noise_strength: "alpha" in the paper. Its absolute value determines the
noise strength. The recommended setup as in the paper is to use a positive
number.
noise_frequency: "p" in the paper. Determines the probability of the noisy
evaluation. Clipped (not explicitly but effectively) to [0, 1] range.
rng:
Returns:
Noisy version of value.
"""
noise = (rng.uniform() < noise_frequency) * rng.standard_cauchy()
return value + noise_strength * np.max([0.0, 1000.0 + noise]) |
Post processing of noise for bbob-noisy benchmark.
We do not apply noise if the value is close to the global optimum. This keeps
the optimal value intact.
Args:
value: Function value to apply noise to.
noisy_fn: "f_XX" in the paper. It applies noise to the input.
target_value: If value is less than this number, then we do not apply the
noise.
Returns:
value, if it is less than target_value. Otherwise, noisy version of
value. | def _stabilized_noise(value: float,
noisy_fn: Callable[[float], float],
target_value: float = 1e-8) -> float:
"""Post processing of noise for bbob-noisy benchmark.
  We do not apply noise if the value is close to the global optimum. This keeps
the optimal value intact.
Args:
value: Function value to apply noise to.
noisy_fn: "f_XX" in the paper. It applies noise to the input.
target_value: If value is less than this number, then we do not apply the
noise.
Returns:
value, if it is less than target_value. Otherwise, noisy version of
value.
"""
if value >= target_value:
return noisy_fn(value) + 1.01 * target_value
else:
return value |
Gets the name of underlying objects. | def _get_name(f):
"""Gets the name of underlying objects."""
if hasattr(f, '__name__'):
return f.__name__
# Next clause handles functools.partial objects.
if hasattr(f, 'func') and hasattr(f.func, '__name__'):
return f.func.__name__
return repr(f) |
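A quick illustration of the three name-resolution branches:
```python
import functools

print(_get_name(print))                            # -> 'print' via __name__.
print(_get_name(functools.partial(print, 'x')))    # -> 'print' via .func.__name__.
print(_get_name(42))                               # -> '42' via the repr() fallback.
```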
Returns default BBOB ProblemStatement for given dimension. | def DefaultBBOBProblemStatement(
dimension: int,
*,
metric_name="bbob_eval",
min_value: float = -5.0,
max_value: float = 5.0,
scale_type=None,
) -> pyvizier.ProblemStatement:
"""Returns default BBOB ProblemStatement for given dimension."""
problem_statement = pyvizier.ProblemStatement()
space = problem_statement.search_space
for dim in range(dimension):
space.root.add_float_param(
name=f"x{dim}",
min_value=min_value,
max_value=max_value,
scale_type=scale_type,
)
problem_statement.metric_information.append(
pyvizier.MetricInformation(
name=metric_name, goal=pyvizier.ObjectiveMetricGoal.MINIMIZE))
return problem_statement |
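A brief usage sketch of the default problem statement builder; iterating over `metric_information` is assumed to yield the metric configs:
```python
problem = DefaultBBOBProblemStatement(dimension=3)
print([p.name for p in problem.search_space.parameters])  # -> ['x0', 'x1', 'x2']
print([m.name for m in problem.metric_information])       # -> ['bbob_eval']
```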
The BBOB LambdaAlpha matrix creation function.
Args:
alpha: Function parameter.
dim: Dimension of matrix created.
Returns:
Diagonal matrix of dimension dim with values determined by alpha. | def LambdaAlpha(alpha: float, dim: int) -> np.ndarray:
"""The BBOB LambdaAlpha matrix creation function.
Args:
alpha: Function parameter.
dim: Dimension of matrix created.
Returns:
Diagonal matrix of dimension dim with values determined by alpha.
"""
lambda_alpha = np.zeros([dim, dim])
for i in range(dim):
exp = (0.5 * (float(i) / (dim - 1))) if dim > 1 else 0.5
lambda_alpha[i, i] = alpha**exp
return lambda_alpha |
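A worked example of the diagonal scaling matrix for dim = 4:
```python
import numpy as np

print(np.diag(LambdaAlpha(10.0, 4)))
# -> [1.0, ~1.468, ~2.154, ~3.162], i.e. 10 ** (0.5 * i / 3) for i = 0..3.
```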
Create a new array by mapping fn() to each element of the original array.
Args:
vector: ndarray to be mapped.
fn: scalar function for mapping.
Returns:
New ndarray with values mapped by fn. | def ArrayMap(vector: np.ndarray, fn: Callable[[float], float]) -> np.ndarray:
"""Create a new array by mapping fn() to each element of the original array.
Args:
vector: ndarray to be mapped.
fn: scalar function for mapping.
Returns:
    New ndarray with values mapped by fn.
"""
results = np.zeros(vector.shape)
for i, v in enumerate(vector.flat):
results.flat[i] = fn(v)
return results |
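A tiny usage sketch of the element-wise mapping helper:
```python
import numpy as np

print(ArrayMap(np.array([[1.0, -4.0], [9.0, -16.0]]), abs))
# -> [[ 1.  4.]
#     [ 9. 16.]]
```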
The BBOB T_osz function.
Args:
element: float input.
Returns:
Tosz(input). | def Tosz(element: float) -> float:
"""The BBOB T_osz function.
Args:
element: float input.
Returns:
Tosz(input).
"""
x_carat = 0.0 if element == 0 else math.log(abs(element))
c1 = 10.0 if element > 0 else 5.5
c2 = 7.9 if element > 0 else 3.1
return np.sign(element) * math.exp(
x_carat + 0.049 * (math.sin(c1 * x_carat) + math.sin(c2 * x_carat))) |
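A quick check of the oscillation transform; note the asymmetry between positive and negative inputs:
```python
print(Tosz(1.0), Tosz(-1.0))  # -> 1.0 -1.0 (x_carat = 0 at |x| = 1, so no distortion).
print(Tosz(2.0), Tosz(-2.0))  # Distorted magnitudes; the +/- branches use different constants.
```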
The BBOB Tasy function.
Args:
vector: ndarray
beta: Function parameter
Returns:
ndarray with values determined by beta. | def Tasy(vector: np.ndarray, beta: float) -> np.ndarray:
"""The BBOB Tasy function.
Args:
vector: ndarray
beta: Function parameter
Returns:
ndarray with values determined by beta.
"""
dim = len(vector)
result = np.zeros([dim, 1])
for i, val in enumerate(vector.flat):
if val > 0:
t = i / (dim - 1.0) if dim > 1 else 1
exp = 1 + beta * t * (val**0.5)
else:
exp = 1
result[i] = val**exp
return result |