repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
trieste-develop | trieste-develop/tests/unit/test_version.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import trieste
def test_trieste_version() -> None:
assert re.match(r"\d+\.\d+\.\d+", trieste.__version__)
| 711 | 32.904762 | 74 | py |
trieste-develop | trieste-develop/tests/unit/test_bayesian_optimizer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import tempfile
from collections.abc import Mapping
from pathlib import Path
from typing import NoReturn, Optional
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import (
FixedAcquisitionRule,
assert_datasets_allclose,
empty_dataset,
mk_dataset,
quadratic,
)
from tests.util.models.gpflow.models import (
GaussianProcess,
PseudoTrainableProbModel,
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
rbf,
)
from trieste.acquisition.rule import AcquisitionRule
from trieste.bayesian_optimizer import BayesianOptimizer, FrozenRecord, OptimizationResult, Record
from trieste.data import Dataset
from trieste.models import ProbabilisticModel, TrainableProbabilisticModel
from trieste.observer import OBJECTIVE, Observer
from trieste.space import Box, SearchSpace
from trieste.types import State, Tag, TensorType
from trieste.utils import Err, Ok
# tags
FOO: Tag = "foo"
BAR: Tag = "bar"
NA: Tag = ""
def _quadratic_observer(x: tf.Tensor) -> Mapping[Tag, Dataset]:
return {NA: Dataset(x, quadratic(x))}
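# An observer maps query points to one or more tagged datasets; most tests below
# use a single tag, while FOO/BAR exercise the multi-dataset code paths.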
class _PseudoTrainableQuadratic(QuadraticMeanAndRBFKernel, PseudoTrainableProbModel):
pass
class _PseudoTrainableQuadraticWithSamplers(
QuadraticMeanAndRBFKernelWithSamplers, PseudoTrainableProbModel
):
pass
class _Whoops(Exception):
pass
def test_optimization_result_astuple() -> None:
opt_result: OptimizationResult[None] = OptimizationResult(
Err(_Whoops()), [Record({}, {}, None)]
)
final_result, history = opt_result.astuple()
assert final_result is opt_result.final_result
assert history is opt_result.history
def test_optimization_result_try_get_final_datasets_for_successful_optimization() -> None:
data = {FOO: empty_dataset([1], [1])}
result: OptimizationResult[None] = OptimizationResult(
Ok(Record(data, {FOO: _PseudoTrainableQuadratic()}, None)), []
)
assert result.try_get_final_datasets() is data
assert result.try_get_final_dataset() is data[FOO]
def test_optimization_result_status_for_successful_optimization() -> None:
data = {FOO: empty_dataset([1], [1])}
result: OptimizationResult[None] = OptimizationResult(
Ok(Record(data, {FOO: _PseudoTrainableQuadratic()}, None)), []
)
assert result.is_ok
assert not result.is_err
def test_optimization_result_try_get_final_datasets_for_multiple_datasets() -> None:
data = {FOO: empty_dataset([1], [1]), BAR: empty_dataset([2], [2])}
models = {FOO: _PseudoTrainableQuadratic(), BAR: _PseudoTrainableQuadratic()}
result: OptimizationResult[None] = OptimizationResult(Ok(Record(data, models, None)), [])
assert result.try_get_final_datasets() is data
with pytest.raises(ValueError):
result.try_get_final_dataset()
def test_optimization_result_try_get_final_datasets_for_failed_optimization() -> None:
result: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
with pytest.raises(_Whoops):
result.try_get_final_datasets()
def test_optimization_result_status_for_failed_optimization() -> None:
result: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
assert result.is_err
assert not result.is_ok
def test_optimization_result_try_get_final_models_for_successful_optimization() -> None:
models = {FOO: _PseudoTrainableQuadratic()}
result: OptimizationResult[None] = OptimizationResult(
Ok(Record({FOO: empty_dataset([1], [1])}, models, None)), []
)
assert result.try_get_final_models() is models
assert result.try_get_final_model() is models[FOO]
def test_optimization_result_try_get_final_models_for_multiple_models() -> None:
data = {FOO: empty_dataset([1], [1]), BAR: empty_dataset([2], [2])}
models = {FOO: _PseudoTrainableQuadratic(), BAR: _PseudoTrainableQuadratic()}
result: OptimizationResult[None] = OptimizationResult(Ok(Record(data, models, None)), [])
assert result.try_get_final_models() is models
with pytest.raises(ValueError):
result.try_get_final_model()
def test_optimization_result_try_get_final_models_for_failed_optimization() -> None:
result: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
with pytest.raises(_Whoops):
result.try_get_final_models()
def test_optimization_result_try_get_optimal_point_for_successful_optimization() -> None:
data = {FOO: mk_dataset([[0.25, 0.25], [0.5, 0.4]], [[0.8], [0.7]])}
result: OptimizationResult[None] = OptimizationResult(
Ok(Record(data, {FOO: _PseudoTrainableQuadratic()}, None)), []
)
x, y, idx = result.try_get_optimal_point()
npt.assert_allclose(x, [0.5, 0.4])
npt.assert_allclose(y, [0.7])
npt.assert_allclose(idx, 1)
def test_optimization_result_try_get_optimal_point_for_multiple_objectives() -> None:
data = {FOO: mk_dataset([[0.25], [0.5]], [[0.8, 0.5], [0.7, 0.4]])}
result: OptimizationResult[None] = OptimizationResult(
Ok(Record(data, {FOO: _PseudoTrainableQuadratic()}, None)), []
)
with pytest.raises(ValueError):
result.try_get_optimal_point()
def test_optimization_result_try_get_optimal_point_for_failed_optimization() -> None:
result: OptimizationResult[object] = OptimizationResult(Err(_Whoops()), [])
with pytest.raises(_Whoops):
result.try_get_optimal_point()
def test_optimization_result_from_path() -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
opt_result: OptimizationResult[None] = OptimizationResult(
Err(_Whoops()), [Record({}, {}, None)] * 10
)
opt_result.save(tmpdirname)
result, history = OptimizationResult[None].from_path(tmpdirname).astuple()
assert result.is_err
with pytest.raises(_Whoops):
result.unwrap()
assert len(history) == 10
assert all(isinstance(record, FrozenRecord) for record in history)
    assert all(
        r2.load() == r1
        for r1, r2 in zip(opt_result.history, history)
        if isinstance(r2, FrozenRecord)
    )
def test_optimization_result_from_path_partial_result() -> None:
with tempfile.TemporaryDirectory() as tmpdirname:
opt_result: OptimizationResult[None] = OptimizationResult(
Err(_Whoops()), [Record({}, {}, None)] * 10
)
opt_result.save(tmpdirname)
(Path(tmpdirname) / OptimizationResult.RESULTS_FILENAME).unlink()
(Path(tmpdirname) / OptimizationResult.step_filename(9, 10)).unlink()
result, history = OptimizationResult[None].from_path(tmpdirname).astuple()
assert result.is_err
with pytest.raises(FileNotFoundError):
result.unwrap()
assert len(history) == 9
assert all(isinstance(record, FrozenRecord) for record in history)
    assert all(
        r2.load() == r1
        for r1, r2 in zip(opt_result.history, history)
        if isinstance(r2, FrozenRecord)
    )
def test_bayesian_optimizer_optimize_raises_if_invalid_model_training_args() -> None:
data, models = {NA: empty_dataset([1], [1])}, {NA: _PseudoTrainableQuadratic()}
bo = BayesianOptimizer(lambda x: x[:1], Box([-1], [1]))
    with pytest.raises(ValueError):  # fit_model=False conflicts with the default fit_initial_model=True
bo.optimize(1, data, models, fit_model=False)
@pytest.mark.parametrize("steps", [0, 1, 2, 5])
def test_bayesian_optimizer_calls_observer_once_per_iteration(steps: int) -> None:
class _CountingObserver:
call_count = 0
def __call__(self, x: tf.Tensor) -> Dataset:
self.call_count += 1
return Dataset(x, tf.reduce_sum(x**2, axis=-1, keepdims=True))
observer = _CountingObserver()
optimizer = BayesianOptimizer(observer, Box([-1], [1]))
data = mk_dataset([[0.5]], [[0.25]])
optimizer.optimize(steps, data, _PseudoTrainableQuadratic()).final_result.unwrap()
assert observer.call_count == steps
@pytest.mark.parametrize("mode", ["early", "fail", "full"])
def test_bayesian_optimizer_continue_optimization(mode: str) -> None:
class _CountingObserver:
call_count = 0
def __call__(self, x: tf.Tensor) -> Dataset:
self.call_count += 1
if self.call_count == 2 and mode == "fail":
raise ValueError
return Dataset(x, tf.reduce_sum(x**2, axis=-1, keepdims=True))
observer = _CountingObserver()
optimizer = BayesianOptimizer(observer, Box([-1], [1]))
data = mk_dataset([[0.5]], [[0.25]])
def early_stop_callback(
_datasets: Mapping[Tag, Dataset],
_models: Mapping[Tag, TrainableProbabilisticModel],
_acquisition_state: object,
) -> bool:
return mode == "early" and observer.call_count == 2
# perform a BO, stopping after 2 steps (for one of three reasons)
num_steps = 5
result = optimizer.optimize(
2 if "full" else num_steps,
data,
_PseudoTrainableQuadratic(),
early_stop_callback=early_stop_callback,
)
assert result.is_err if mode == "fail" else result.is_ok
assert len(result.history) == 2
assert observer.call_count == 2
# continue BO
new_result = optimizer.continue_optimization(num_steps, result)
assert new_result.is_ok
assert len(new_result.history) == num_steps
    assert observer.call_count == (num_steps + 1 if mode == "fail" else num_steps)
def test_bayesian_optimizer_continue_optimization_raises_for_empty_result() -> None:
search_space = Box([-1], [1])
optimizer = BayesianOptimizer(lambda x: {FOO: Dataset(x, x)}, search_space)
rule = FixedAcquisitionRule([[0.0]])
opt_result: OptimizationResult[None] = OptimizationResult(Err(_Whoops()), [])
with pytest.raises(ValueError):
optimizer.continue_optimization(10, opt_result, rule)
@pytest.mark.parametrize("fit_model", ["all", "all_but_init", "never"])
def test_bayesian_optimizer_optimizes_initial_model(fit_model: str) -> None:
class _CountingOptimizerModel(_PseudoTrainableQuadratic):
_optimize_count = 0
def optimize(self, dataset: Dataset) -> None:
self._optimize_count += 1
rule = FixedAcquisitionRule([[0.0]])
model = _CountingOptimizerModel()
final_opt_state, _ = (
BayesianOptimizer(_quadratic_observer, Box([0], [1]))
.optimize(
1,
{NA: mk_dataset([[0.0]], [[0.0]])},
{NA: model},
rule,
fit_model=(fit_model in ["all", "all_but_init"]),
fit_initial_model=(fit_model in ["all"]),
)
.astuple()
)
final_model = final_opt_state.unwrap().model
if fit_model == "all": # optimized at start and end of first BO step
assert final_model._optimize_count == 2 # type: ignore
elif fit_model == "all_but_init": # optimized just at end of first BO step
assert final_model._optimize_count == 1 # type: ignore
else: # never optimized
assert final_model._optimize_count == 0 # type: ignore
@pytest.mark.parametrize(
"datasets, models",
[
({}, {}),
({FOO: empty_dataset([1], [1])}, {}),
({FOO: empty_dataset([1], [1])}, {BAR: _PseudoTrainableQuadratic()}),
(
{FOO: empty_dataset([1], [1])},
{FOO: _PseudoTrainableQuadratic(), BAR: _PseudoTrainableQuadratic()},
),
],
)
def test_bayesian_optimizer_optimize_raises_for_invalid_keys(
datasets: dict[Tag, Dataset], models: dict[Tag, TrainableProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
optimizer = BayesianOptimizer(lambda x: {FOO: Dataset(x, x)}, search_space)
rule = FixedAcquisitionRule([[0.0]])
with pytest.raises(ValueError):
optimizer.optimize(10, datasets, models, rule)
def test_bayesian_optimizer_optimize_raises_for_invalid_rule_keys_and_default_acquisition() -> None:
optimizer = BayesianOptimizer(lambda x: x[:1], Box([-1], [1]))
data, models = {FOO: empty_dataset([1], [1])}, {FOO: _PseudoTrainableQuadratic()}
with pytest.raises(ValueError):
optimizer.optimize(3, data, models)
@pytest.mark.parametrize(
"starting_state, expected_states_received, final_acquisition_state",
[(None, [None, 1, 2], 3), (3, [3, 4, 5], 6)],
)
def test_bayesian_optimizer_uses_specified_acquisition_state(
starting_state: int | None,
expected_states_received: list[int | None],
final_acquisition_state: int | None,
) -> None:
class Rule(AcquisitionRule[State[Optional[int], TensorType], Box, ProbabilisticModel]):
def __init__(self) -> None:
self.states_received: list[int | None] = []
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> State[int | None, TensorType]:
def go(state: int | None) -> tuple[int | None, TensorType]:
self.states_received.append(state)
if state is None:
state = 0
return state + 1, tf.constant([[0.0]], tf.float64)
return go
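    # `acquire` returns a state-transition function rather than query points: the
    # optimizer feeds the previous acquisition state (None on the first step) back
    # into each call, which is what `expected_states_received` checks below.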
rule = Rule()
data, models = {NA: mk_dataset([[0.0]], [[0.0]])}, {NA: _PseudoTrainableQuadratic()}
final_state, history = (
BayesianOptimizer(lambda x: {NA: Dataset(x, x**2)}, Box([-1], [1]))
.optimize(3, data, models, rule, starting_state)
.astuple()
)
assert rule.states_received == expected_states_received
assert final_state.unwrap().acquisition_state == final_acquisition_state
assert [record.acquisition_state for record in history] == expected_states_received
def test_bayesian_optimizer_optimize_for_uncopyable_model() -> None:
class _UncopyableModel(_PseudoTrainableQuadratic):
_optimize_count = 0
def optimize(self, dataset: Dataset) -> None:
self._optimize_count += 1
def __deepcopy__(self, memo: dict[int, object]) -> _UncopyableModel:
if self._optimize_count >= 3:
raise _Whoops
return self
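    # State tracking deep-copies the models after each step, so the deliberately
    # failing __deepcopy__ above should abort the run on the third step, leaving
    # three records in the history.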
rule = FixedAcquisitionRule([[0.0]])
result, history = (
BayesianOptimizer(_quadratic_observer, Box([0], [1]))
.optimize(
10,
{NA: mk_dataset([[0.0]], [[0.0]])},
{NA: _UncopyableModel()},
rule,
fit_initial_model=False,
)
.astuple()
)
with pytest.raises(NotImplementedError):
result.unwrap()
assert len(history) == 3
def _broken_observer(x: tf.Tensor) -> NoReturn:
raise _Whoops
class _BrokenModel(_PseudoTrainableQuadratic):
def optimize(self, dataset: Dataset) -> NoReturn:
raise _Whoops
class _BrokenRule(AcquisitionRule[NoReturn, SearchSpace, ProbabilisticModel]):
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> NoReturn:
raise _Whoops
@pytest.mark.parametrize(
"observer, model, rule",
[
(_broken_observer, _PseudoTrainableQuadratic(), FixedAcquisitionRule([[0.0]])),
(_quadratic_observer, _BrokenModel(), FixedAcquisitionRule([[0.0]])),
(_quadratic_observer, _PseudoTrainableQuadratic(), _BrokenRule()),
],
)
def test_bayesian_optimizer_optimize_for_failed_step(
observer: Observer,
model: TrainableProbabilisticModel,
rule: AcquisitionRule[None, Box, ProbabilisticModel],
) -> None:
optimizer = BayesianOptimizer(observer, Box([0], [1]))
data, models = {NA: mk_dataset([[0.0]], [[0.0]])}, {NA: model}
result, history = optimizer.optimize(3, data, models, rule).astuple()
with pytest.raises(_Whoops):
result.unwrap()
assert len(history) == 1
@pytest.mark.parametrize("num_steps", [-3, -1])
def test_bayesian_optimizer_optimize_raises_for_negative_steps(num_steps: int) -> None:
optimizer = BayesianOptimizer(_quadratic_observer, Box([-1], [1]))
data, models = {NA: empty_dataset([1], [1])}, {NA: _PseudoTrainableQuadratic()}
with pytest.raises(ValueError, match="num_steps"):
optimizer.optimize(num_steps, data, models)
def test_bayesian_optimizer_optimize_is_noop_for_zero_steps() -> None:
class _UnusableModel(TrainableProbabilisticModel):
def predict(self, query_points: TensorType) -> NoReturn:
assert False
def predict_joint(self, query_points: TensorType) -> NoReturn:
assert False
def sample(self, query_points: TensorType, num_samples: int) -> NoReturn:
assert False
def update(self, dataset: Dataset) -> NoReturn:
assert False
def optimize(self, dataset: Dataset) -> NoReturn:
assert False
class _UnusableRule(AcquisitionRule[NoReturn, Box, ProbabilisticModel]):
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> NoReturn:
assert False
def _unusable_observer(x: tf.Tensor) -> NoReturn:
assert False
data = {NA: mk_dataset([[0.0]], [[0.0]])}
result, history = (
BayesianOptimizer(_unusable_observer, Box([-1], [1]))
.optimize(0, data, {NA: _UnusableModel()}, _UnusableRule())
.astuple()
)
assert history == []
final_data = result.unwrap().datasets
assert len(final_data) == 1
assert_datasets_allclose(final_data[NA], data[NA])
def test_bayesian_optimizer_can_use_two_gprs_for_objective_defined_by_two_dimensions() -> None:
class ExponentialWithUnitVariance(GaussianProcess, PseudoTrainableProbModel):
def __init__(self) -> None:
super().__init__([lambda x: tf.exp(-x)], [rbf()])
class LinearWithUnitVariance(GaussianProcess, PseudoTrainableProbModel):
def __init__(self) -> None:
super().__init__([lambda x: 2 * x], [rbf()])
LINEAR = "linear"
EXPONENTIAL = "exponential"
class AdditionRule(AcquisitionRule[State[Optional[int], TensorType], Box, ProbabilisticModel]):
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> State[int | None, TensorType]:
def go(previous_state: int | None) -> tuple[int | None, TensorType]:
if previous_state is None:
previous_state = 1
candidate_query_points = search_space.sample(previous_state)
linear_predictions, _ = models[LINEAR].predict(candidate_query_points)
exponential_predictions, _ = models[EXPONENTIAL].predict(candidate_query_points)
target = linear_predictions + exponential_predictions
optimum_idx = tf.argmin(target, axis=0)[0]
next_query_points = tf.expand_dims(candidate_query_points[optimum_idx, ...], axis=0)
return previous_state * 2, next_query_points
return go
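    # The rule scores candidates by summing the two single-output models'
    # predictions and doubles its stored state, so each step samples twice as
    # many candidates as the previous one.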
def linear_and_exponential(query_points: tf.Tensor) -> dict[Tag, Dataset]:
return {
LINEAR: Dataset(query_points, 2 * query_points),
EXPONENTIAL: Dataset(query_points, tf.exp(-query_points)),
}
data: Mapping[Tag, Dataset] = {
LINEAR: Dataset(tf.constant([[0.0]]), tf.constant([[0.0]])),
EXPONENTIAL: Dataset(tf.constant([[0.0]]), tf.constant([[1.0]])),
}
models: Mapping[Tag, TrainableProbabilisticModel] = {
LINEAR: LinearWithUnitVariance(),
EXPONENTIAL: ExponentialWithUnitVariance(),
}
data = (
BayesianOptimizer(linear_and_exponential, Box(tf.constant([-2.0]), tf.constant([2.0])))
.optimize(20, data, models, AdditionRule())
.try_get_final_datasets()
)
objective_values = data[LINEAR].observations + data[EXPONENTIAL].observations
min_idx = tf.argmin(objective_values, axis=0)[0]
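    # The combined objective 2x + exp(-x) has derivative 2 - exp(-x), which
    # vanishes at x = -ln(2): the minimizer that the assertion below checks for.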
npt.assert_allclose(data[LINEAR].query_points[min_idx], -tf.math.log(2.0), rtol=0.01)
def test_bayesian_optimizer_optimize_doesnt_track_state_if_told_not_to() -> None:
class _UncopyableModel(_PseudoTrainableQuadratic):
def __deepcopy__(self, memo: dict[int, object]) -> NoReturn:
assert False
data, models = {OBJECTIVE: empty_dataset([1], [1])}, {OBJECTIVE: _UncopyableModel()}
history = (
BayesianOptimizer(_quadratic_observer, Box([-1], [1]))
.optimize(5, data, models, track_state=False)
.history
)
assert len(history) == 0
class _DecreasingVarianceModel(QuadraticMeanAndRBFKernel, TrainableProbabilisticModel):
def __init__(self, data: Dataset):
super().__init__()
self._data = data
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
mean, var = super().predict(query_points)
return mean, var / len(self._data)
def update(self, dataset: Dataset) -> None:
self._data = dataset
def optimize(self, dataset: Dataset) -> None:
pass
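# The predictive variance above shrinks as 1 / len(data), so after k observed
# points a saved copy of this model should report variance 1 / k anywhere.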
@pytest.mark.parametrize("save_to_disk", [False, True])
def test_bayesian_optimizer_optimize_tracked_state(save_to_disk: bool) -> None:
class _CountingRule(AcquisitionRule[State[Optional[int], TensorType], Box, ProbabilisticModel]):
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> State[int | None, TensorType]:
def go(state: int | None) -> tuple[int | None, TensorType]:
new_state = 0 if state is None else state + 1
return new_state, tf.constant([[10.0]], tf.float64) + new_state
return go
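    # The rule proposes query points 10.0, 11.0 and 12.0 on successive steps; each
    # record below holds the tagged dataset as it stood at the start of that step.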
with tempfile.TemporaryDirectory() as tmpdirname:
initial_data = mk_dataset([[0.0]], [[0.0]])
model = _DecreasingVarianceModel(initial_data)
_, history = (
BayesianOptimizer(_quadratic_observer, Box([0], [1]))
.optimize(
3,
{NA: initial_data},
{NA: model},
_CountingRule(),
track_path=Path(tmpdirname) if save_to_disk else None,
)
.astuple()
)
assert all(
isinstance(record, FrozenRecord if save_to_disk else Record) for record in history
)
assert [record.acquisition_state for record in history] == [None, 0, 1]
assert_datasets_allclose(history[0].datasets[NA], initial_data)
assert_datasets_allclose(
history[1].datasets[NA], mk_dataset([[0.0], [10.0]], [[0.0], [100.0]])
)
assert_datasets_allclose(
history[2].datasets[NA], mk_dataset([[0.0], [10.0], [11.0]], [[0.0], [100.0], [121.0]])
)
for step in range(3):
record = history[step].load() if save_to_disk else history[step] # type: ignore
assert record.model == record.models[NA]
assert record.dataset == record.datasets[NA]
_, variance_from_saved_model = (
history[step].models[NA].predict(tf.constant([[0.0]], tf.float64))
)
npt.assert_allclose(variance_from_saved_model, 1.0 / (step + 1))
| 24,084 | 35.382175 | 100 | py |
trieste-develop | trieste-develop/tests/unit/__init__.py |  | 0 | 0 | 0 | py |
trieste-develop | trieste-develop/tests/unit/test_utils.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Mapping
import pytest
from trieste.utils import K, U, V, map_values
@pytest.mark.parametrize(
"f, mapping, expected",
[
(abs, {}, {}),
(abs, {1: -1, -2: 2}, {1: 1, -2: 2}),
(len, {"a": [1, 2, 3], "b": [4, 5]}, {"a": 3, "b": 2}),
],
)
def test_map_values(f: Callable[[U], V], mapping: Mapping[K, U], expected: Mapping[K, V]) -> None:
assert map_values(f, mapping) == expected
| 1,036 | 32.451613 | 98 | py |
trieste-develop | trieste-develop/tests/unit/objectives/test_multi_objectives.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES
from trieste.objectives.multi_objectives import DTLZ1, DTLZ2, VLMOP2, MultiObjectiveTestProblem
from trieste.types import TensorType
@pytest.mark.parametrize(
"test_x, expected",
[
(
tf.constant([[0.0, 0.0]]),
tf.constant([[0.63212055, 0.63212055]]),
),
(
tf.constant([[0.5, 1.0]]),
tf.constant([[0.12074441, 0.9873655]]),
),
(
tf.constant([[[0.5, 1.0]], [[0.0, 0.0]]]),
tf.constant([[[0.12074441, 0.9873655]], [[0.63212055, 0.63212055]]]),
),
(
tf.constant([[[0.5, 1.0], [0.0, 0.0]]]),
tf.constant([[[0.12074441, 0.9873655], [0.63212055, 0.63212055]]]),
),
],
)
def test_vlmop2_has_expected_output(test_x: TensorType, expected: TensorType) -> None:
f = VLMOP2(2).objective
npt.assert_allclose(f(test_x), expected, rtol=1e-5)
@pytest.mark.parametrize(
"test_x, input_dim, num_obj, expected",
[
(tf.constant([[0.0, 0.2, 0.4]]), 3, 2, tf.constant([[0.0, 5.5]])),
(
tf.constant([[[0.0, 0.2, 0.4]], [[0.0, 0.2, 0.4]]]),
3,
2,
tf.constant([[[0.0, 5.5]], [[0.0, 5.5]]]),
),
(tf.constant([[0.8, 0.6, 0.4, 0.2]]), 4, 2, tf.constant([[4.8, 1.2]])),
(tf.constant([[0.1, 0.2, 0.3, 0.4]]), 4, 3, tf.constant([[0.06, 0.24, 2.7]])),
(
tf.constant([[[0.1, 0.2, 0.3, 0.4], [0.1, 0.2, 0.3, 0.4]]]),
4,
3,
tf.constant([[[0.06, 0.24, 2.7], [0.06, 0.24, 2.7]]]),
),
],
)
def test_dtlz1_has_expected_output(
test_x: TensorType, input_dim: int, num_obj: int, expected: TensorType
) -> None:
f = DTLZ1(input_dim, num_obj).objective
npt.assert_allclose(f(test_x), expected, rtol=1e-5)
@pytest.mark.parametrize(
"test_x, input_dim, num_obj, expected",
[
(tf.constant([[0.0, 0.2, 0.4]]), 3, 2, tf.constant([[1.1, 0.0]])),
(
tf.constant([[[0.0, 0.2, 0.4]], [[0.0, 0.2, 0.4]]]),
3,
2,
tf.constant([[[1.1, 0.0]], [[1.1, 0.0]]]),
),
(tf.constant([[0.8, 0.6, 0.4, 0.2]]), 4, 2, tf.constant([[0.3430008637, 1.055672733]])),
(
tf.constant([[[0.8, 0.6, 0.4, 0.2], [0.8, 0.6, 0.4, 0.2]]]),
4,
2,
tf.constant([[[0.3430008637, 1.055672733], [0.3430008637, 1.055672733]]]),
),
(
tf.constant([[0.1, 0.2, 0.3, 0.4]]),
4,
3,
tf.constant([[0.9863148, 0.3204731, 0.16425618]]),
),
],
)
def test_dtlz2_has_expected_output(
test_x: TensorType, input_dim: int, num_obj: int, expected: TensorType
) -> None:
f = DTLZ2(input_dim, num_obj).objective
npt.assert_allclose(f(test_x), expected, rtol=1e-4)
@pytest.mark.parametrize(
"obj_type, input_dim, num_obj, gen_pf_num",
[
(DTLZ1, 3, 2, 1000),
(DTLZ1, 5, 3, 1000),
(DTLZ2, 3, 2, 1000),
(DTLZ2, 12, 6, 1000),
],
)
def test_gen_pareto_front_is_equal_to_math_defined(
obj_type: Callable[[int, int], MultiObjectiveTestProblem],
input_dim: int,
num_obj: int,
gen_pf_num: int,
) -> None:
obj_inst = obj_type(input_dim, num_obj)
pfs = obj_inst.gen_pareto_optimal_points(gen_pf_num, None)
if obj_type == DTLZ1:
tf.assert_equal(tf.reduce_sum(pfs, axis=1), tf.cast(0.5, pfs.dtype))
else:
assert obj_type == DTLZ2
tf.debugging.assert_near(tf.norm(pfs, axis=1), tf.cast(1.0, pfs.dtype), rtol=1e-6)
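# These checks use the analytic Pareto fronts: DTLZ1's lies on the hyperplane
# where the objectives sum to 0.5, while DTLZ2's lies on the positive part of
# the unit hypersphere, where the objective vector has unit norm.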
@pytest.mark.parametrize(
"obj_inst, actual_x",
[
(VLMOP2(2), tf.constant([[0.4, 0.2, 0.5]])),
(DTLZ1(3, 2), tf.constant([[0.3, 0.1]])),
(DTLZ2(5, 2), tf.constant([[0.3, 0.1]])),
],
)
def test_func_raises_specified_input_dim_not_align_with_actual_input_dim(
obj_inst: MultiObjectiveTestProblem, actual_x: TensorType
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
obj_inst.objective(actual_x)
@pytest.mark.parametrize(
"problem, input_dim, num_obj",
[
(VLMOP2(2), 2, 2),
(VLMOP2(10), 10, 2),
(DTLZ1(3, 2), 3, 2),
(DTLZ1(10, 5), 10, 5),
(DTLZ2(3, 2), 3, 2),
(DTLZ2(10, 5), 10, 5),
],
)
@pytest.mark.parametrize("num_obs", [1, 5, 10])
@pytest.mark.parametrize("dtype", [tf.float32, tf.float64])
def test_objective_has_correct_shape_and_dtype(
problem: MultiObjectiveTestProblem,
input_dim: int,
num_obj: int,
num_obs: int,
dtype: tf.DType,
) -> None:
x = problem.search_space.sample(num_obs)
assert x.dtype == tf.float64 # default dtype
x = tf.cast(x, dtype)
y = problem.objective(x)
pf = problem.gen_pareto_optimal_points(num_obs * 2)
assert y.dtype == x.dtype
tf.debugging.assert_shapes([(x, [num_obs, input_dim])])
tf.debugging.assert_shapes([(y, [num_obs, num_obj])])
assert pf.dtype == tf.float64 # default dtype
tf.debugging.assert_shapes([(pf, [num_obs * 2, num_obj])])
| 5,786 | 30.796703 | 96 | py |
trieste-develop | trieste-develop/tests/unit/objectives/test_single_objectives.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import numpy.testing as npt
import pytest
import tensorflow as tf
from trieste.objectives import (
Ackley5,
Branin,
GramacyLee,
Hartmann3,
Hartmann6,
Levy8,
LogarithmicGoldsteinPrice,
Michalewicz2,
Michalewicz5,
Michalewicz10,
Rosenbrock4,
ScaledBranin,
Shekel4,
SimpleQuadratic,
SingleObjectiveTestProblem,
Trid10,
)
@pytest.fixture(
name="problem",
params=[
Branin,
ScaledBranin,
SimpleQuadratic,
GramacyLee,
Michalewicz2,
Michalewicz5,
Michalewicz10,
LogarithmicGoldsteinPrice,
Hartmann3,
Rosenbrock4,
Shekel4,
Ackley5,
Hartmann6,
Trid10,
Levy8,
],
)
def _problem_fixture(request: Any) -> SingleObjectiveTestProblem:
    return request.param
def test_objective_maps_minimizers_to_minimum(
problem: SingleObjectiveTestProblem,
) -> None:
objective = problem.objective
minimizers = problem.minimizers
minimum = problem.minimum
objective_values_at_minimizers = objective(minimizers)
tf.debugging.assert_shapes([(objective_values_at_minimizers, [len(minimizers), 1])])
npt.assert_allclose(objective_values_at_minimizers, tf.squeeze(minimum), atol=1e-4)
def test_no_function_values_are_less_than_global_minimum(
problem: SingleObjectiveTestProblem,
) -> None:
objective = problem.objective
space = problem.search_space
minimum = problem.minimum
samples = space.sample_sobol(100_000 * len(space.lower), skip=0)
npt.assert_array_less(tf.squeeze(minimum) - 1e-6, objective(samples))
@pytest.mark.parametrize("num_obs", [5, 1])
@pytest.mark.parametrize("dtype", [tf.float32, tf.float64])
def test_objective_has_correct_shape_and_dtype(
problem: SingleObjectiveTestProblem,
num_obs: int,
dtype: tf.DType,
) -> None:
x = problem.search_space.sample(num_obs)
x = tf.cast(x, dtype)
y = problem.objective(x)
assert y.dtype == x.dtype
tf.debugging.assert_shapes([(y, [num_obs, 1])])
@pytest.mark.parametrize(
"problem, input_dim",
[
(Branin, 2),
(ScaledBranin, 2),
(SimpleQuadratic, 2),
(GramacyLee, 1),
(Michalewicz2, 2),
(Michalewicz5, 5),
(Michalewicz10, 10),
(LogarithmicGoldsteinPrice, 2),
(Hartmann3, 3),
(Rosenbrock4, 4),
(Shekel4, 4),
(Ackley5, 5),
(Hartmann6, 6),
(Trid10, 10),
(Levy8, 8),
],
)
@pytest.mark.parametrize("num_obs", [5, 1])
def test_search_space_has_correct_shape_and_default_dtype(
problem: SingleObjectiveTestProblem,
input_dim: int,
num_obs: int,
) -> None:
x = problem.search_space.sample(num_obs)
assert x.dtype == tf.float64
tf.debugging.assert_shapes([(x, [num_obs, input_dim])])
| 3,480 | 25.572519 | 88 | py |
trieste-develop | trieste-develop/tests/unit/objectives/__init__.py |  | 0 | 0 | 0 | py |
trieste-develop | trieste-develop/tests/unit/objectives/test_utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy.testing as npt
import tensorflow as tf
from trieste.objectives.utils import mk_multi_observer, mk_observer
def test_mk_observer() -> None:
def foo(x: tf.Tensor) -> tf.Tensor:
return x + 1
x_ = tf.constant([[3.0]])
ys = mk_observer(foo, "bar")(x_)
assert ys.keys() == {"bar"}
npt.assert_array_equal(ys["bar"].query_points, x_)
npt.assert_array_equal(ys["bar"].observations, x_ + 1)
def test_mk_observer_unlabelled() -> None:
def foo(x: tf.Tensor) -> tf.Tensor:
return x + 1
x_ = tf.constant([[3.0]])
ys = mk_observer(foo)(x_)
npt.assert_array_equal(ys.query_points, x_)
npt.assert_array_equal(ys.observations, x_ + 1)
def test_mk_multi_observer() -> None:
x_ = tf.constant([[3.0]])
ys = mk_multi_observer(foo=lambda x: x + 1, bar=lambda x: x - 1)(x_)
assert ys.keys() == {"foo", "bar"}
npt.assert_array_equal(ys["foo"].query_points, x_)
npt.assert_array_equal(ys["foo"].observations, x_ + 1)
npt.assert_array_equal(ys["bar"].query_points, x_)
npt.assert_array_equal(ys["bar"].observations, x_ - 1)
| 1,698 | 31.673077 | 74 | py |
trieste-develop | trieste-develop/tests/unit/models/test_optimizer.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import numpy as np
import pytest
import tensorflow as tf
from tests.util.models.models import fnc_3x_plus_10
from trieste.data import Dataset
from trieste.models.optimizer import create_loss_function
def test_create_loss_function_raises_on_none() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
with pytest.raises(NotImplementedError):
create_loss_function(None, data) # type: ignore
| 1,120 | 34.03125 | 78 | py |
trieste-develop | trieste-develop/tests/unit/models/conftest.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Iterable
from typing import Any, Callable
import pytest
import tensorflow as tf
from gpflow.models import GPModel
from gpflux.models import DeepGP
from tests.util.models.gpflow.models import (
ModelFactoryType,
gpr_model,
sgpr_model,
svgp_model,
vgp_model,
)
from tests.util.models.gpflux.models import (
separate_independent_kernel_two_layer_dgp_model,
simple_two_layer_dgp_model,
two_layer_dgp_model,
)
from trieste.data import Dataset
from trieste.models.gpflow import (
GaussianProcessRegression,
GPflowPredictor,
SparseGaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
)
from trieste.models.optimizer import DatasetTransformer, Optimizer
from trieste.types import TensorType
@pytest.fixture(
name="gpflow_interface_factory",
params=[
(GaussianProcessRegression, gpr_model),
(SparseGaussianProcessRegression, sgpr_model),
(VariationalGaussianProcess, vgp_model),
(SparseVariational, svgp_model),
],
ids=lambda mf: mf[1].__name__,
)
def _gpflow_interface_factory(request: Any) -> ModelFactoryType:
def model_interface_factory(
x: TensorType, y: TensorType, optimizer: Optimizer | None = None
) -> tuple[GPflowPredictor, Callable[[TensorType, TensorType], GPModel]]:
model_interface: Callable[..., GPflowPredictor] = request.param[0]
base_model: GaussianProcessRegression = request.param[1](x, y)
reference_model: Callable[[TensorType, TensorType], GPModel] = request.param[1]
return model_interface(base_model, optimizer=optimizer), reference_model
return model_interface_factory
@pytest.fixture(name="dim", params=[1, 10])
def _dim_fixture(request: Any) -> int:
return request.param
def _batcher_bs_100(dataset: Dataset, batch_size: int) -> Iterable[tuple[TensorType, TensorType]]:
ds = tf.data.Dataset.from_tensor_slices(dataset.astuple())
ds = ds.shuffle(100)
ds = ds.batch(batch_size)
ds = ds.repeat()
return iter(ds)
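# The batcher above yields an endless iterator of shuffled minibatches; the
# full-batch variant below instead hands the optimizer the whole dataset at once.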
def _batcher_full_batch(dataset: Dataset, batch_size: int) -> tuple[TensorType, TensorType]:
return dataset.astuple()
@pytest.fixture(name="batcher", params=[_batcher_bs_100, _batcher_full_batch])
def _batcher_fixture(request: Any) -> DatasetTransformer:
return request.param
@pytest.fixture(name="compile", params=[True, False])
def _compile_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(
name="two_layer_model",
params=[
two_layer_dgp_model,
simple_two_layer_dgp_model,
separate_independent_kernel_two_layer_dgp_model,
],
)
def _two_layer_model_fixture(request: Any) -> Callable[[TensorType], DeepGP]:
return request.param
| 3,394 | 30.435185 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/test_interfaces.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable, Sequence
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import assert_datasets_allclose, quadratic, random_seed
from tests.util.models.gpflow.models import (
GaussianProcessWithBatchSamplers,
PseudoTrainableProbModel,
QuadraticMeanAndRBFKernel,
gpr_model,
sgpr_model,
)
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset
from trieste.models import TrainableModelStack, TrainableProbabilisticModel
from trieste.models.interfaces import (
TrainablePredictJointReparamModelStack,
TrainableSupportsPredictJoint,
TrainableSupportsPredictJointHasReparamSampler,
)
from trieste.types import TensorType
class _QuadraticModel(
GaussianProcessWithBatchSamplers, PseudoTrainableProbModel, TrainableSupportsPredictJoint
):
def __init__(
self,
mean_shifts: list[float],
kernel_amplitudes: list[float],
observations_noise: float = 1.0,
):
super().__init__(
[(lambda y: lambda x: quadratic(x) + y)(shift) for shift in mean_shifts],
[tfp.math.psd_kernels.ExponentiatedQuadratic(x) for x in kernel_amplitudes],
observations_noise,
)
def _model_stack() -> (
tuple[
TrainablePredictJointReparamModelStack,
tuple[TrainableSupportsPredictJointHasReparamSampler, ...],
]
):
model01 = _QuadraticModel([0.0, 0.5], [1.0, 0.3])
model2 = _QuadraticModel([2.0], [2.0])
model3 = _QuadraticModel([-1.0], [0.1])
return TrainablePredictJointReparamModelStack((model01, 2), (model2, 1), (model3, 1)), (
model01,
model2,
model3,
)
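# The stack concatenates 2 + 1 + 1 = 4 output dimensions, which the shape and
# output-slicing assertions in the tests below rely on.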
def test_model_stack_predict() -> None:
stack, (model01, model2, model3) = _model_stack()
query_points = tf.random.uniform([5, 7, 3])
mean, var = stack.predict(query_points)
assert mean.shape == [5, 7, 4]
assert var.shape == [5, 7, 4]
mean01, var01 = model01.predict(query_points)
mean2, var2 = model2.predict(query_points)
mean3, var3 = model3.predict(query_points)
npt.assert_allclose(mean[..., :2], mean01)
npt.assert_allclose(mean[..., 2:3], mean2)
npt.assert_allclose(mean[..., 3:], mean3)
npt.assert_allclose(var[..., :2], var01)
npt.assert_allclose(var[..., 2:3], var2)
npt.assert_allclose(var[..., 3:], var3)
def test_model_stack_predict_joint() -> None:
stack, (model01, model2, model3) = _model_stack()
query_points = tf.random.uniform([5, 7, 3])
mean, cov = stack.predict_joint(query_points)
assert mean.shape == [5, 7, 4]
assert cov.shape == [5, 4, 7, 7]
mean01, cov01 = model01.predict_joint(query_points)
mean2, cov2 = model2.predict_joint(query_points)
mean3, cov3 = model3.predict_joint(query_points)
npt.assert_allclose(mean[..., :2], mean01)
npt.assert_allclose(mean[..., 2:3], mean2)
npt.assert_allclose(mean[..., 3:], mean3)
npt.assert_allclose(cov[..., :2, :, :], cov01)
npt.assert_allclose(cov[..., 2:3, :, :], cov2)
npt.assert_allclose(cov[..., 3:, :, :], cov3)
def test_model_missing_predict_y() -> None:
model = _QuadraticModel([-1.0], [0.1])
x_predict = tf.constant([[0]], gpflow.default_float())
with pytest.raises(NotImplementedError):
model.predict_y(x_predict)
def test_model_stack_missing_predict_y() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model1 = gpr_model(x, fnc_3x_plus_10(x))
model2 = _QuadraticModel([1.0], [2.0])
stack = TrainableModelStack((model1, 1), (model2, 1))
x_predict = tf.constant([[0]], gpflow.default_float())
with pytest.raises(NotImplementedError):
stack.predict_y(x_predict)
def test_model_stack_predict_y() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model1 = gpr_model(x, fnc_3x_plus_10(x))
model2 = sgpr_model(x, fnc_2sin_x_over_3(x))
stack = TrainableModelStack((model1, 1), (model2, 1))
mean, variance = stack.predict_y(x)
npt.assert_allclose(mean[:, 0:1], model1.predict_y(x)[0])
npt.assert_allclose(mean[:, 1:2], model2.predict_y(x)[0])
npt.assert_allclose(variance[:, 0:1], model1.predict_y(x)[1])
npt.assert_allclose(variance[:, 1:2], model2.predict_y(x)[1])
@random_seed
def test_model_stack_sample() -> None:
query_points = tf.random.uniform([5, 7, 3], maxval=10.0)
stack, (model01, model2, model3) = _model_stack()
samples = stack.sample(query_points, 10_000)
assert samples.shape == [5, 10_000, 7, 4]
mean = tf.reduce_mean(samples, axis=1)
var = tf.math.reduce_variance(samples, axis=1)
mean01, var01 = model01.predict(query_points)
mean2, var2 = model2.predict(query_points)
mean3, var3 = model3.predict(query_points)
npt.assert_allclose(mean[..., :2], mean01, rtol=0.01)
npt.assert_allclose(mean[..., 2:3], mean2, rtol=0.01)
npt.assert_allclose(mean[..., 3:], mean3, rtol=0.01)
npt.assert_allclose(var[..., :2], var01, rtol=0.04)
npt.assert_allclose(var[..., 2:3], var2, rtol=0.04)
npt.assert_allclose(var[..., 3:], var3, rtol=0.04)
def test_model_stack_training() -> None:
class Model(GaussianProcessWithBatchSamplers, TrainableProbabilisticModel):
def __init__(
self,
mean_functions: Sequence[Callable[[TensorType], TensorType]],
kernels: Sequence[tfp.math.psd_kernels.PositiveSemidefiniteKernel],
output_dims: slice,
):
super().__init__(mean_functions, kernels)
self._output_dims = output_dims
def _assert_data(self, dataset: Dataset) -> None:
qp, obs = dataset.astuple()
expected_obs = data.observations[..., self._output_dims]
assert_datasets_allclose(dataset, Dataset(qp, expected_obs))
optimize = _assert_data
update = _assert_data
rbf = tfp.math.psd_kernels.ExponentiatedQuadratic()
model01 = Model([quadratic, quadratic], [rbf, rbf], slice(0, 2))
model2 = Model([quadratic], [rbf], slice(2, 3))
model3 = Model([quadratic], [rbf], slice(3, 4))
stack = TrainableModelStack((model01, 2), (model2, 1), (model3, 1))
data = Dataset(tf.random.uniform([5, 7, 3]), tf.random.uniform([5, 7, 4]))
stack.update(data)
stack.optimize(data)
def test_model_stack_reparam_sampler_raises_for_submodels_without_reparam_sampler() -> None:
model01 = _QuadraticModel([0.0, 0.5], [1.0, 0.3])
model2 = QuadraticMeanAndRBFKernel()
model_stack = TrainableModelStack((model01, 2), (model2, 1)) # type: ignore
with pytest.raises(AttributeError):
model_stack.reparam_sampler(1) # type: ignore
def test_model_stack_reparam_sampler() -> None:
query_points = tf.random.uniform([5, 7, 3], maxval=10.0)
stack, (model01, model2, model3) = _model_stack()
sampler = stack.reparam_sampler(10_000)
samples = sampler.sample(query_points)
assert samples.shape == [5, 10_000, 7, 4]
mean = tf.reduce_mean(samples, axis=1)
var = tf.math.reduce_variance(samples, axis=1)
mean01, var01 = model01.predict(query_points)
mean2, var2 = model2.predict(query_points)
mean3, var3 = model3.predict(query_points)
npt.assert_allclose(mean[..., :2], mean01, rtol=0.01)
npt.assert_allclose(mean[..., 2:3], mean2, rtol=0.01)
npt.assert_allclose(mean[..., 3:], mean3, rtol=0.01)
npt.assert_allclose(var[..., :2], var01, rtol=0.04)
npt.assert_allclose(var[..., 2:3], var2, rtol=0.04)
npt.assert_allclose(var[..., 3:], var3, rtol=0.04)
| 8,333 | 35.234783 | 93 | py |
trieste-develop | trieste-develop/tests/unit/models/__init__.py |  | 0 | 0 | 0 | py |
trieste-develop | trieste-develop/tests/unit/models/test_utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest.mock
from typing import Tuple
import gpflow
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.models.models import fnc_3x_plus_10
from trieste.data import Dataset
from trieste.models import TrainableProbabilisticModel
from trieste.models.utils import (
write_summary_data_based_metrics,
write_summary_kernel_parameters,
write_summary_likelihood_parameters,
)
from trieste.types import TensorType
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
@pytest.mark.parametrize(
"kernel, names, values",
[
pytest.param(
gpflow.kernels.Matern32(),
["kernel.Matern32.variance", "kernel.Matern32.lengthscales"],
[1, 1],
id="Matern32, Default",
),
pytest.param(
gpflow.kernels.Matern52(variance=2.0, lengthscales=[0.2, 0.2]),
[
"kernel.Matern52.variance",
"kernel.Matern52.lengthscales[0]",
"kernel.Matern52.lengthscales[1]",
],
[2, 0.2, 0.2],
id="Matern52, ARD",
),
pytest.param(
gpflow.kernels.Matern12() * gpflow.kernels.Linear(),
[
"kernel.Product.kernels[0].variance",
"kernel.Product.kernels[0].lengthscales",
"kernel.Product.kernels[1].variance",
],
[1, 1, 1],
id="product kernel",
),
],
)
def test_write_summary_kernel_parameters(
mocked_summary_scalar: unittest.mock.MagicMock,
kernel: gpflow.kernels.Kernel,
names: list[str],
values: list[float],
) -> None:
write_summary_kernel_parameters(kernel)
assert mocked_summary_scalar.call_count == len(names)
for i, (n, v) in enumerate(zip(names, values)):
assert mocked_summary_scalar.call_args_list[i][0][0] == n
assert mocked_summary_scalar.call_args_list[i][0][1].numpy() == v
@unittest.mock.patch("trieste.models.gpflow.interface.tf.summary.scalar")
@pytest.mark.parametrize(
"likelihood, names, values",
[
pytest.param(
gpflow.likelihoods.Gaussian(),
["likelihood.Gaussian.variance"],
[1],
id="Gaussian, Default",
),
pytest.param(
gpflow.likelihoods.Gaussian(scale=0.2),
["likelihood.Gaussian.scale"],
[0.2],
id="Gaussian, scale",
),
pytest.param(
gpflow.likelihoods.Gaussian(scale=gpflow.functions.Polynomial(degree=2)),
["likelihood.Gaussian.scale.w"],
[[1, 0, 0]],
id="Gaussian, polynomial",
),
pytest.param(
gpflow.likelihoods.Gaussian(
variance=gpflow.functions.SwitchedFunction(
[
gpflow.functions.Constant(1.0),
gpflow.functions.Constant(1.0),
]
)
),
[
"likelihood.Gaussian.variance.functions[0].c",
"likelihood.Gaussian.variance.functions[1].c",
],
[1, 1],
id="Gaussian, grouped noise variance",
),
pytest.param(
gpflow.likelihoods.Beta(),
["likelihood.Beta.scale"],
[1],
id="Beta, default",
),
pytest.param(
gpflow.likelihoods.HeteroskedasticTFPConditional(
distribution_class=tfp.distributions.Normal,
scale_transform=tfp.bijectors.Exp(),
),
[],
[],
id="HeteroskedasticTFPConditional",
),
],
)
def test_write_summary_likelihood_parameters(
mocked_summary_scalar: unittest.mock.MagicMock,
likelihood: gpflow.likelihoods.Likelihood,
names: list[str],
values: list[float],
) -> None:
write_summary_likelihood_parameters(likelihood)
assert mocked_summary_scalar.call_count == len(names)
for i, (n, v) in enumerate(zip(names, values)):
assert mocked_summary_scalar.call_args_list[i][0][0] == n
assert tf.reduce_all(np.isclose(mocked_summary_scalar.call_args_list[i][0][1].numpy(), v))
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("prefix", ["", "dummy_"])
def test_write_summary_data_based_metrics(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
prefix: str,
) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
dataset = Dataset(x, y)
def _mocked_predict(query_points: TensorType) -> Tuple[TensorType, TensorType]:
return (
y,
tf.math.abs(y),
)
mock_model: TrainableProbabilisticModel = unittest.mock.MagicMock(
spec=TrainableProbabilisticModel
)
mock_model.predict = _mocked_predict # type: ignore
write_summary_data_based_metrics(dataset=dataset, model=mock_model, prefix=prefix)
scalar_names_values = [
(f"{prefix}accuracy/predict_mean__mean", tf.reduce_mean(y)),
(f"{prefix}accuracy/predict_variance__mean", tf.reduce_mean(tf.math.abs(y))),
(f"{prefix}accuracy/observations_mean", tf.reduce_mean(y)),
(f"{prefix}accuracy/observations_variance", tf.math.reduce_variance(y)),
(f"{prefix}accuracy/root_mean_square_error", 0.0),
(f"{prefix}accuracy/mean_absolute_error", 0.0),
(f"{prefix}accuracy/z_residuals_std", 0.0),
(
f"{prefix}accuracy/root_mean_variance_error",
tf.math.sqrt(tf.reduce_mean(tf.math.abs(y) ** 2)),
),
]
assert mocked_summary_scalar.call_count == len(scalar_names_values)
for i, (n, v) in enumerate(scalar_names_values):
assert mocked_summary_scalar.call_args_list[i][0][0] == n
assert mocked_summary_scalar.call_args_list[i][0][1].numpy() == v
histogram_names_values = [
(f"{prefix}accuracy/predict_mean", y),
(f"{prefix}accuracy/predict_variance", tf.math.abs(y)),
(f"{prefix}accuracy/observations", y),
(f"{prefix}accuracy/absolute_error", y - y),
(f"{prefix}accuracy/z_residuals", y - y),
(f"{prefix}accuracy/variance_error", tf.math.abs(y)),
]
assert mocked_summary_histogram.call_count == len(histogram_names_values)
for i, (n, v) in enumerate(histogram_names_values):
assert mocked_summary_histogram.call_args_list[i][0][0] == n
assert tf.reduce_all(mocked_summary_histogram.call_args_list[i][0][1] == v)
| 7,394 | 34.724638 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflow.conditionals.util import sample_mvn
from gpflux.helpers import construct_basic_inducing_variables, construct_basic_kernel
from gpflux.layers import GPLayer
from gpflux.models import DeepGP
from tests.util.misc import random_seed
from trieste.data import Dataset
from trieste.models.gpflux import GPfluxPredictor
from trieste.types import TensorType
class _QuadraticPredictor(GPfluxPredictor):
def __init__(
self,
optimizer: tf.optimizers.Optimizer | None = None,
likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01),
):
super().__init__(optimizer=optimizer)
if optimizer is None:
self._optimizer = tf.optimizers.Adam()
else:
self._optimizer = optimizer
self._model_gpflux = _QuadraticGPModel(likelihood=likelihood)
self._model_keras = self._model_gpflux.as_training_model()
@property
def model_gpflux(self) -> DeepGP:
return self._model_gpflux
@property
def model_keras(self) -> tf.keras.Model:
return self._model_keras
@property
def optimizer(self) -> tf.keras.optimizers.Optimizer:
return self._optimizer
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
# Taken from GPflow implementation of `GPModel.predict_f_samples` in gpflow.models.model
mean, cov = self._model_gpflux.predict_f(query_points, full_cov=True)
mean_for_sample = tf.linalg.adjoint(mean)
samples = sample_mvn(mean_for_sample, cov, True, num_samples=num_samples)
samples = tf.linalg.adjoint(samples)
return samples
def update(self, dataset: Dataset) -> None:
return
class _QuadraticGPModel(DeepGP):
def __init__(
self, likelihood: gpflow.likelihoods.Likelihood = gpflow.likelihoods.Gaussian(0.01)
) -> None:
kernel = construct_basic_kernel(
gpflow.kernels.SquaredExponential(), output_dim=1, share_hyperparams=True
)
inducing_var = construct_basic_inducing_variables(
num_inducing=5,
input_dim=1,
share_variables=True,
z_init=tf.random.normal([5, 1], dtype=gpflow.default_float()),
)
gp_layer = GPLayer(kernel, inducing_var, 10)
super().__init__(
[gp_layer], # not actually used
likelihood,
)
def predict_f(
self, Xnew: tf.Tensor, full_cov: bool = False, full_output_cov: bool = False
) -> tuple[tf.Tensor, tf.Tensor]:
assert not full_output_cov, "Test utility not implemented for full output covariance"
mean = tf.reduce_sum(Xnew**2, axis=1, keepdims=True)
*leading, x_samples, y_dims = mean.shape
var_shape = [*leading, y_dims, x_samples, x_samples] if full_cov else mean.shape
return mean, tf.ones(var_shape, dtype=mean.dtype)
def test_gpflux_predictor_predict() -> None:
model = _QuadraticPredictor()
mean, variance = model.predict(tf.constant([[2.5]], gpflow.default_float()))
assert mean.shape == [1, 1]
assert variance.shape == [1, 1]
npt.assert_allclose(mean, [[6.25]], rtol=0.01)
npt.assert_allclose(variance, [[1.0]], rtol=0.01)
@random_seed
def test_gpflux_predictor_sample() -> None:
model = _QuadraticPredictor()
num_samples = 20_000
samples = model.sample(tf.constant([[2.5]], gpflow.default_float()), num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
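    # Monte Carlo moment estimates converge at rate O(1 / sqrt(num_samples)),
    # hence the sample-size-dependent tolerances used here.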
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=2 * linear_error)
def test_gpflux_predictor_sample_0_samples() -> None:
samples = _QuadraticPredictor().sample(tf.constant([[50.0]], gpflow.default_float()), 0)
assert samples.shape == (0, 1, 1)
def test_gpflux_predictor_get_observation_noise() -> None:
noise_var = 0.1
likelihood = gpflow.likelihoods.Gaussian(noise_var)
model = _QuadraticPredictor(likelihood=likelihood)
npt.assert_allclose(model.get_observation_noise(), noise_var)
def test_gpflux_predictor_get_observation_noise_raises_for_non_gaussian_likelihood() -> None:
likelihood = gpflow.likelihoods.StudentT()
model = _QuadraticPredictor(likelihood=likelihood)
with pytest.raises(NotImplementedError):
model.get_observation_noise()
| 5,238 | 34.639456 | 96 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test that we are wrapping GPflux architectures correctly, leading to the same
model.
"""
from __future__ import annotations
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.models import DeepGP
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, mk_dataset, quadratic
from trieste.models.gpflux.builders import (
LIKELIHOOD_VARIANCE,
MAX_NUM_INDUCING_POINTS,
NUM_INDUCING_POINTS_PER_DIM,
NUM_LAYERS,
_get_data_stats,
build_vanilla_deep_gp,
)
from trieste.space import Box
def test_build_vanilla_deep_gp_returns_correct_defaults() -> None:
search_space = Box([0.0], [1.0]) ** 4
x = search_space.sample(100)
data = mk_dataset(x, quadratic(x))
empirical_mean, empirical_variance, _ = _get_data_stats(data)
num_inducing = min(
MAX_NUM_INDUCING_POINTS, NUM_INDUCING_POINTS_PER_DIM * search_space.dimension
)
vanilla_deep_gp = build_vanilla_deep_gp(data, search_space)
# basics
assert isinstance(vanilla_deep_gp, DeepGP)
assert len(vanilla_deep_gp.f_layers) == NUM_LAYERS
# check mean function
assert isinstance(vanilla_deep_gp.f_layers[-1].mean_function, gpflow.mean_functions.Constant)
npt.assert_allclose(vanilla_deep_gp.f_layers[-1].mean_function.parameters[0], empirical_mean)
# check kernel
assert isinstance(vanilla_deep_gp.f_layers[-1].kernel.kernel, gpflow.kernels.RBF)
npt.assert_allclose(vanilla_deep_gp.f_layers[-1].kernel.kernel.variance, empirical_variance)
# check likelihood
assert isinstance(vanilla_deep_gp.likelihood_layer.likelihood, gpflow.likelihoods.Gaussian)
npt.assert_allclose(
tf.constant(vanilla_deep_gp.likelihood_layer.likelihood.variance), LIKELIHOOD_VARIANCE
)
assert isinstance(vanilla_deep_gp.likelihood_layer.likelihood.variance, gpflow.Parameter)
assert vanilla_deep_gp.likelihood_layer.likelihood.variance.trainable
    # check the number of inducing variables in each layer
for layer in vanilla_deep_gp.f_layers:
assert layer.inducing_variable.num_inducing == num_inducing
@pytest.mark.parametrize("num_layers", [1, 3])
@pytest.mark.parametrize("likelihood_variance", [1e-5, 10.0])
@pytest.mark.parametrize("trainable_likelihood", [True, False])
@pytest.mark.parametrize("inner_layer_sqrt_factor", [1e-5, 10.0])
def test_build_vanilla_deep_gp_returns_correct_model(
num_layers: int,
likelihood_variance: float,
trainable_likelihood: bool,
    inner_layer_sqrt_factor: float,
) -> None:
num_data = 10
x = np.arange(num_data).reshape(-1, 1).astype(np.double)
data = mk_dataset(x.tolist(), quadratic(x))
search_space = Box([0.0], [10.0])
num_inducing = num_data
vanilla_deep_gp = build_vanilla_deep_gp(
data,
search_space,
num_layers,
num_inducing,
inner_layer_sqrt_factor=inner_layer_sqrt_factor,
likelihood_variance=likelihood_variance,
trainable_likelihood=trainable_likelihood,
)
# check likelihood
npt.assert_allclose(vanilla_deep_gp.likelihood_layer.likelihood.variance, likelihood_variance)
assert vanilla_deep_gp.likelihood_layer.likelihood.variance.trainable == trainable_likelihood
# comparison to the gpflux builder
config = Config(
num_inducing,
inner_layer_sqrt_factor,
likelihood_variance,
)
ref_deep_gp = build_constant_input_dim_deep_gp(x, num_layers=num_layers, config=config)
npt.assert_equal(len(vanilla_deep_gp.f_layers), len(ref_deep_gp.f_layers))
for i, layer in enumerate(vanilla_deep_gp.f_layers):
ref_layer = ref_deep_gp.f_layers[i]
npt.assert_allclose(
tf.sort(layer.inducing_variable.inducing_variable.Z, axis=0),
tf.sort(ref_layer.inducing_variable.inducing_variable.Z, axis=0),
)
npt.assert_allclose(layer.q_sqrt, ref_layer.q_sqrt)
npt.assert_allclose(
vanilla_deep_gp.likelihood_layer.likelihood.variance,
ref_deep_gp.likelihood_layer.likelihood.variance,
)
def test_build_vanilla_deep_gp_raises_for_incorrect_args() -> None:
x = np.arange(10).reshape(-1, 1).astype(np.double)
data = mk_dataset(x.tolist(), quadratic(x))
search_space = Box([0.0], [10.0])
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_vanilla_deep_gp(data, search_space, 0)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_vanilla_deep_gp(data, search_space, num_inducing_points=0)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_vanilla_deep_gp(data, search_space, inner_layer_sqrt_factor=0)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_vanilla_deep_gp(data, search_space, likelihood_variance=0)
@pytest.mark.parametrize("multiplier", [1, 2, 5])
def test_build_vanilla_deep_gp_gives_correct_num_inducing_points_and_num_data(
multiplier: int,
) -> None:
num_data = 5
x = np.arange(num_data).reshape(-1, 1).astype(np.double)
data = mk_dataset(x.tolist(), quadratic(x))
search_space = Box([0.0], [10.0])
num_inducing_points = num_data * multiplier
vanilla_deep_gp = build_vanilla_deep_gp(
data, search_space, num_inducing_points=num_inducing_points
)
# correct num_inducing_points
for layer in vanilla_deep_gp.f_layers:
npt.assert_equal(layer.q_mu.shape[0], num_inducing_points)
# correct num_data
npt.assert_equal(vanilla_deep_gp.num_data, num_data)
for layer in vanilla_deep_gp.f_layers:
npt.assert_equal(layer.num_data, num_data)
| 6,253 | 35.150289 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of Trieste models against reference GPflux models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflux models are used as the underlying model in a Trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflux models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
Trieste model).
"""
from __future__ import annotations
import copy
import operator
import tempfile
import unittest.mock
from functools import partial
from typing import Callable
import gpflow
import gpflux.encoders
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflux.models import DeepGP
from gpflux.models.deep_gp import sample_dgp
from tensorflow.python.keras.callbacks import Callback
from tests.util.misc import random_seed
from tests.util.models.gpflux.models import single_layer_dgp_model
from tests.util.models.keras.models import keras_optimizer_weights
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models.gpflux import DeepGaussianProcess
from trieste.models.interfaces import HasTrajectorySampler
from trieste.models.optimizer import KerasOptimizer
from trieste.types import TensorType
def test_deep_gaussian_process_raises_for_non_tf_optimizer(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
dgp = two_layer_model(x)
optimizer = KerasOptimizer(gpflow.optimizers.Scipy())
with pytest.raises(ValueError):
DeepGaussianProcess(dgp, optimizer)
def test_deep_gaussian_process_raises_for_keras_layer() -> None:
keras_layer_1 = tf.keras.layers.Dense(50, activation="relu")
keras_layer_2 = tf.keras.layers.Dense(2, activation="relu")
kernel = gpflow.kernels.SquaredExponential()
num_inducing = 5
inducing_variable = gpflow.inducing_variables.InducingPoints(
np.concatenate(
[
np.random.randn(num_inducing, 2),
],
axis=1,
)
)
gp_layer = gpflux.layers.GPLayer(
kernel,
inducing_variable,
num_data=5,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
likelihood_layer = gpflux.layers.LikelihoodLayer(gpflow.likelihoods.Gaussian(0.01))
dgp = DeepGP([keras_layer_1, keras_layer_2, gp_layer], likelihood_layer)
with pytest.raises(ValueError):
DeepGaussianProcess(dgp)
def test_deep_gaussian_process_model_attribute(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
assert model.model_gpflux is dgp
def test_deep_gaussian_process_update(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.zeros([1, 4], dtype=tf.float64)
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
assert model.model_gpflux.num_data == 1
for layer in model.model_gpflux.f_layers:
assert layer.num_data == 1
model.update(Dataset(tf.zeros([5, 4]), tf.zeros([5, 1])))
assert model.model_gpflux.num_data == 5
for layer in model.model_gpflux.f_layers:
assert layer.num_data == 5
@pytest.mark.parametrize(
"new_data",
[Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))],
)
def test_deep_gaussian_process_update_raises_for_invalid_shapes(
two_layer_model: Callable[[TensorType], DeepGP], new_data: Dataset
) -> None:
x = tf.zeros([1, 4], dtype=tf.float64)
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
with pytest.raises(ValueError):
model.update(new_data)
def test_deep_gaussian_process_optimize_with_defaults(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
model = DeepGaussianProcess(two_layer_model(x_observed))
elbo = model.model_gpflux.elbo(data)
model.optimize(dataset)
assert model.model_gpflux.elbo(data) > elbo
@pytest.mark.parametrize("batch_size", [10, 100])
def test_deep_gaussian_process_optimize(
two_layer_model: Callable[[TensorType], DeepGP], batch_size: int
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
fit_args = {"batch_size": batch_size, "epochs": 10, "verbose": 0}
optimizer = KerasOptimizer(tf.optimizers.Adam(), fit_args)
model = DeepGaussianProcess(two_layer_model(x_observed), optimizer)
elbo = model.model_gpflux.elbo(data)
model.optimize(dataset)
assert model.model_gpflux.elbo(data) > elbo
def test_deep_gaussian_process_loss(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
reference_model = two_layer_model(x)
model = DeepGaussianProcess(two_layer_model(x))
internal_model = model.model_gpflux
npt.assert_allclose(internal_model.elbo((x, y)), reference_model.elbo((x, y)), rtol=1e-6)
def test_deep_gaussian_process_predict() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
reference_model = single_layer_dgp_model(x)
model = DeepGaussianProcess(single_layer_dgp_model(x))
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
ref_mean, ref_var = reference_model.predict_f(test_x)
f_mean, f_var = model.predict(test_x)
npt.assert_allclose(f_mean, ref_mean)
npt.assert_allclose(f_var, ref_var)
def test_deep_gaussian_process_predict_broadcasts() -> None:
x = tf.constant(np.arange(6).reshape(3, 2), dtype=gpflow.default_float())
reference_model = single_layer_dgp_model(x)
model = DeepGaussianProcess(single_layer_dgp_model(x))
test_x = tf.constant(np.arange(12).reshape(1, 2, 3, 2), dtype=gpflow.default_float())
ref_mean, ref_var = reference_model.predict_f(test_x)
f_mean, f_var = model.predict(test_x)
assert f_mean.shape == (1, 2, 3, 1)
assert f_var.shape == (1, 2, 3, 1)
npt.assert_allclose(f_mean, ref_mean)
npt.assert_allclose(f_var, ref_var)
@random_seed
def test_deep_gaussian_process_sample(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(two_layer_model(x))
num_samples = 100
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
samples = model.sample(test_x, num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
reference_model = two_layer_model(x)
def get_samples(query_points: TensorType, num_samples: int) -> TensorType:
samples = []
for _ in range(num_samples):
samples.append(sample_dgp(reference_model)(query_points))
return tf.stack(samples)
ref_samples = get_samples(test_x, num_samples)
ref_mean = tf.reduce_mean(ref_samples, axis=0)
ref_variance = tf.reduce_mean((ref_samples - ref_mean) ** 2)
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2 * error)
npt.assert_allclose(sample_mean, 0, atol=error)
npt.assert_allclose(sample_variance, ref_variance, atol=4 * error)
def test_deep_gaussian_process_resets_lr_with_lr_schedule(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
epochs = 2
init_lr = 0.01
    def scheduler(epoch: int, lr: float) -> float:
        if epoch == epochs // 2:  # reduce the learning rate halfway through training
            return lr * 0.1
        else:
            return lr
fit_args = {
"epochs": epochs,
"batch_size": 100,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args)
model = DeepGaussianProcess(two_layer_model(x), optimizer)
npt.assert_allclose(model.model_keras.optimizer.lr.numpy(), init_lr, rtol=1e-6)
model.optimize(Dataset(x, y))
npt.assert_allclose(model.model_keras.optimizer.lr.numpy(), init_lr, rtol=1e-6)
def test_deep_gaussian_process_with_lr_scheduler(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
epochs = 2
init_lr = 1.0
fit_args = {
"epochs": epochs,
"batch_size": 20,
"verbose": 0,
}
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5
)
optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args)
model = DeepGaussianProcess(two_layer_model(x), optimizer)
model.optimize(Dataset(x, y))
assert len(model.model_keras.history.history["loss"]) == epochs
def test_deep_gaussian_process_default_optimizer_is_correct(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(two_layer_model(x))
model_fit_args = dict(model.optimizer.fit_args)
model_fit_args.pop("callbacks")
fit_args = {
"verbose": 0,
"epochs": 400,
"batch_size": 1000,
}
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model_fit_args == fit_args
def test_deep_gaussian_process_subclass_default_optimizer_is_correct(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
class DummySubClass(DeepGaussianProcess):
"""Dummy subclass"""
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DummySubClass(two_layer_model(x))
model_fit_args = dict(model.optimizer.fit_args)
model_fit_args.pop("callbacks")
fit_args = {
"verbose": 0,
"epochs": 400,
"batch_size": 1000,
}
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model_fit_args == fit_args
def test_deepgp_deep_copyable() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
model_copy = copy.deepcopy(model)
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
assert model.model_gpflux.inputs.dtype == model_copy.model_gpflux.inputs.dtype
assert model.model_gpflux.targets.dtype == model_copy.model_gpflux.targets.dtype
mean_f, variance_f = model.predict(test_x)
mean_f_copy, variance_f_copy = model_copy.predict(test_x)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
# check that updating the original doesn't break or change the deepcopy
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
mean_f_updated, variance_f_updated = model.predict(test_x)
mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(test_x)
npt.assert_allclose(mean_f_copy_updated, mean_f_copy)
npt.assert_allclose(variance_f_copy_updated, variance_f_copy)
npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
    # check that we can also update the copy
dataset2 = Dataset(x, fnc_2sin_x_over_3(x))
model_copy.update(dataset2)
model_copy.optimize(dataset2)
mean_f_updated_2, variance_f_updated_2 = model.predict(test_x)
mean_f_copy_updated_2, variance_f_copy_updated_2 = model_copy.predict(test_x)
npt.assert_allclose(mean_f_updated_2, mean_f_updated)
npt.assert_allclose(variance_f_updated_2, variance_f_updated)
npt.assert_array_compare(operator.__ne__, mean_f_copy_updated_2, mean_f_copy_updated)
npt.assert_array_compare(operator.__ne__, variance_f_copy_updated_2, variance_f_copy_updated)
def test_deepgp_tf_saved_model() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
with tempfile.TemporaryDirectory() as path:
# create a trajectory sampler (used for sample method)
assert isinstance(model, HasTrajectorySampler)
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
# generate client model with predict and sample methods
module = model.get_module_with_variables(trajectory_sampler, trajectory)
module.predict = tf.function(
model.predict, input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)]
)
def _sample(query_points: TensorType, num_samples: int) -> TensorType:
trajectory_updated = trajectory_sampler.resample_trajectory(trajectory)
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory_updated(tiled_query_points), [1, 0, 2])[
:, :, :1
] # [S, N, L]
module.sample = tf.function(
_sample,
input_signature=[
tf.TensorSpec(shape=[None, 1], dtype=tf.float64), # query_points
tf.TensorSpec(shape=(), dtype=tf.int32), # num_samples
],
)
tf.saved_model.save(module, str(path))
client_model = tf.saved_model.load(str(path))
# test exported methods
test_x = tf.constant([[2.5]], dtype=gpflow.default_float())
mean_f, variance_f = model.predict(test_x)
mean_f_copy, variance_f_copy = client_model.predict(test_x)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
client_model.sample(x, 10)
def test_deepgp_deep_copies_optimizer_state() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
assert not keras_optimizer_weights(model.optimizer.optimizer)
model.optimize(dataset)
assert keras_optimizer_weights(model.optimizer.optimizer)
npt.assert_allclose(model.optimizer.optimizer.iterations, 400)
assert model.optimizer.fit_args["callbacks"][0].model is model.model_keras
model_copy = copy.deepcopy(model)
assert model.optimizer.optimizer is not model_copy.optimizer.optimizer
npt.assert_allclose(model_copy.optimizer.optimizer.iterations, 400)
npt.assert_equal(
keras_optimizer_weights(model.optimizer.optimizer),
keras_optimizer_weights(model_copy.optimizer.optimizer),
)
assert model_copy.optimizer.fit_args["callbacks"][0].model is model_copy.model_keras
@pytest.mark.parametrize(
"callbacks",
[
[
tf.keras.callbacks.CSVLogger("csv"),
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100),
tf.keras.callbacks.History(),
tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr),
tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr),
tf.keras.callbacks.ProgbarLogger(),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.RemoteMonitor(),
tf.keras.callbacks.TensorBoard(),
tf.keras.callbacks.TerminateOnNaN(),
],
pytest.param(
[
tf.keras.callbacks.experimental.BackupAndRestore("backup"),
tf.keras.callbacks.BaseLogger(),
tf.keras.callbacks.ModelCheckpoint("weights"),
],
marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"),
),
],
)
def test_deepgp_deep_copies_different_callback_types(callbacks: list[Callback]) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
model.optimizer.fit_args["callbacks"] = callbacks
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
model_copy = copy.deepcopy(model)
assert model.optimizer is not model_copy.optimizer
assert tuple(type(callback) for callback in model.optimizer.fit_args["callbacks"]) == tuple(
type(callback) for callback in model_copy.optimizer.fit_args["callbacks"]
)
def test_deepgp_deep_copies_optimization_history() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = DeepGaussianProcess(partial(single_layer_dgp_model, x))
dataset = Dataset(x, fnc_3x_plus_10(x))
model.update(dataset)
model.optimize(dataset)
assert model.model_keras.history.history
expected_history = model.model_keras.history.history
model_copy = copy.deepcopy(model)
assert model_copy.model_keras.history.history
history = model_copy.model_keras.history.history
assert history.keys() == expected_history.keys()
for k, v in expected_history.items():
assert history[k] == v
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("use_dataset", [False, True])
def test_deepgp_log(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
use_dataset: bool,
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = fnc_2sin_x_over_3(x_observed)
dataset = Dataset(x_observed, y_observed)
model = DeepGaussianProcess(
single_layer_dgp_model(x_observed),
KerasOptimizer(tf.optimizers.Adam(), {"batch_size": 200, "epochs": 3, "verbose": 0}),
)
model.optimize(dataset)
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
if use_dataset:
model.log(dataset)
else:
model.log(None)
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
    # 3 scalars from write_summary_kernel_parameters and write_summary_likelihood_parameters,
    # plus 7 other model summaries
    num_scalars = 10
    num_histogram = 3
    if use_dataset:  # write_summary_data_based_metrics adds these
        num_scalars += 8
        num_histogram += 6
assert mocked_summary_scalar.call_count == num_scalars
assert mocked_summary_histogram.call_count == num_histogram
| 20,131 | 35.67031 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/test_sampler.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of Trieste models against reference GPflux models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflux models are used as the underlying model in a Trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflux models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
Trieste model).
"""
from __future__ import annotations
from typing import Callable, Tuple
import gpflow.kernels
import gpflux.layers
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflux.helpers import construct_basic_inducing_variables, construct_basic_kernel
from gpflux.models import DeepGP
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, mk_dataset, quadratic, random_seed
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel
from tests.util.models.gpflux.models import simple_two_layer_dgp_model, two_layer_trieste_dgp
from trieste.data import Dataset
from trieste.models.gpflux import DeepGaussianProcess
from trieste.models.gpflux.sampler import (
DeepGaussianProcessDecoupledLayer,
DeepGaussianProcessDecoupledTrajectorySampler,
DeepGaussianProcessReparamSampler,
dgp_feature_decomposition_trajectory,
)
from trieste.space import Box
from trieste.types import TensorType
@pytest.mark.parametrize("sample_size", [0, -2])
def test_dgp_reparam_sampler_raises_for_invalid_sample_size(sample_size: int) -> None:
search_space = Box([0.0], [1.0]) ** 4
x = search_space.sample(10)
data = mk_dataset(x, quadratic(x))
dgp = two_layer_trieste_dgp(data, search_space)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
DeepGaussianProcessReparamSampler(sample_size, dgp)
def test_dgp_reparam_sampler_raises_for_invalid_model() -> None:
with pytest.raises(ValueError, match="Model must be .*"):
DeepGaussianProcessReparamSampler(10, QuadraticMeanAndRBFKernel()) # type: ignore
@pytest.mark.parametrize("shape", [[], [1], [2], [2, 3, 4]])
def test_dgp_reparam_sampler_sample_raises_for_invalid_at_shape(shape: ShapeLike) -> None:
search_space = Box([0.0], [1.0])
x = search_space.sample(10)
data = mk_dataset(x, quadratic(x))
dgp = two_layer_trieste_dgp(data, search_space)
sampler = DeepGaussianProcessReparamSampler(1, dgp)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(tf.zeros(shape))
def _build_dataset_and_train_deep_gp(
two_layer_model: Callable[[TensorType], DeepGP]
) -> Tuple[Dataset, DeepGaussianProcess]:
x = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
y = tf.random.normal([100, 1], dtype=tf.float64)
dataset = Dataset(x, y)
dgp = two_layer_model(x)
model = DeepGaussianProcess(dgp)
model.optimizer.fit_args = {
"verbose": 0,
"epochs": 200,
"batch_size": 1000,
}
model.optimize(dataset)
return dataset, model
@random_seed
def test_dgp_reparam_sampler_samples_approximate_expected_distribution(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
sample_size = 250
dataset, model = _build_dataset_and_train_deep_gp(two_layer_model)
samples = DeepGaussianProcessReparamSampler(sample_size, model).sample(
dataset.query_points[:, None, :]
) # [N, S, 1, L]
assert samples.shape == [len(dataset.query_points), sample_size, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=1, keepdims=True)
sample_variance = tf.squeeze(tf.reduce_mean((samples - sample_mean) ** 2, axis=1), -2)
sample_mean = tf.squeeze(sample_mean, [1, 2])
    # Monte Carlo estimate of the reference moments, moment-matched via the law of total
    # variance: Var[f] = E[var + mean**2] - E[mean]**2
    num_samples = 50
    means = []
    variances = []
    for _ in range(num_samples):
        Fmean_sample, Fvar_sample = model.predict(dataset.query_points)
        means.append(Fmean_sample)
        variances.append(Fvar_sample)
    ref_mean = tf.reduce_mean(tf.stack(means), axis=0)
    ref_variance = tf.reduce_mean(tf.stack(variances) + tf.stack(means) ** 2, axis=0) - ref_mean**2
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2 * error)
npt.assert_allclose(sample_variance, ref_variance, atol=4 * error)
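# A minimal sketch (illustrative helper, not used by the tests; the name is ours) of the
# moment-matching step above: given stacked conditional means and variances from repeated
# stochastic predictions, the marginal moments follow from the law of total variance.
def _moment_match(means: TensorType, variances: TensorType) -> Tuple[TensorType, TensorType]:
    mean = tf.reduce_mean(means, axis=0)
    variance = tf.reduce_mean(variances + means**2, axis=0) - mean**2
    return mean, variance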
@random_seed
def test_dgp_reparam_sampler_sample_is_continuous(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessReparamSampler(100, model)
xs = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)[:, None, :]
npt.assert_array_less(tf.abs(sampler.sample(xs + 1e-20) - sampler.sample(xs)), 1e-20)
def test_dgp_reparam_sampler_sample_is_repeatable(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessReparamSampler(100, model)
xs = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)[:, None, :]
npt.assert_allclose(sampler.sample(xs), sampler.sample(xs))
@random_seed
def test_dgp_reparam_sampler_samples_are_distinct_for_new_instances(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler1 = DeepGaussianProcessReparamSampler(100, model)
sampler2 = DeepGaussianProcessReparamSampler(100, model)
xs = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)[:, None, :]
npt.assert_array_less(1e-9, tf.abs(sampler2.sample(xs) - sampler1.sample(xs)))
@pytest.mark.parametrize("num_features", [0, -2])
def test_dgp_decoupled_trajectory_sampler_raises_for_invalid_number_of_features(
num_features: int, two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
DeepGaussianProcessDecoupledTrajectorySampler(model, num_features=num_features)
def test_dgp_decoupled_trajectory_sampler_raises_for_invalid_model() -> None:
with pytest.raises(ValueError, match="Model must be .*"):
DeepGaussianProcessDecoupledTrajectorySampler(
QuadraticMeanAndRBFKernel(), 10 # type: ignore
)
def _generate_xs_for_decoupled_trajectory(num_evals: int, batch_size: int) -> TensorType:
    xs = tf.random.uniform([num_evals, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)  # [N, D]
    xs = tf.expand_dims(xs, -2)  # [N, 1, D]
    return tf.tile(xs, [1, batch_size, 1])  # [N, B, D]
@pytest.mark.parametrize("num_evals", [10, 100])
def test_dgp_decoupled_trajectory_sampler_returns_trajectory_function_with_correct_shapes(
num_evals: int, two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
batch_size = 5
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory = sampler.get_trajectory()
xs = _generate_xs_for_decoupled_trajectory(num_evals, batch_size)
tf.debugging.assert_shapes([(trajectory(xs), [num_evals, batch_size, 1])])
assert isinstance(trajectory, dgp_feature_decomposition_trajectory)
@random_seed
def test_dgp_decoupled_trajectory_sampler_returns_deterministic_trajectory(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
trajectory = sampler.get_trajectory()
trajectory_eval_1 = trajectory(xs)
trajectory_eval_2 = trajectory(xs)
npt.assert_allclose(trajectory_eval_1, trajectory_eval_2)
@random_seed
def test_dgp_decoupled_trajectory_sampler_sample_is_continuous(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
trajectory = sampler.get_trajectory()
npt.assert_array_less(tf.abs(trajectory(xs + 1e-20) - trajectory(xs)), 1e-20)
@random_seed
def test_dgp_decoupled_trajectory_sampler_samples_approximate_expected_distribution(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
sample_size = 100
dataset, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory = sampler.get_trajectory()
xs = tf.expand_dims(dataset.query_points, -2) # [N, 1, D]
xs = tf.tile(xs, [1, sample_size, 1]) # [N, B, D]
samples = trajectory(xs)
assert samples.shape == [len(dataset.query_points), sample_size, 1]
sample_mean = tf.reduce_mean(samples, axis=1)
sample_variance = tf.math.reduce_variance(samples, axis=1)
    # Monte Carlo estimate of the reference moments, moment-matched via the law of total
    # variance: Var[f] = E[var + mean**2] - E[mean]**2
    num_samples = 50
    means = []
    variances = []
    for _ in range(num_samples):
        Fmean_sample, Fvar_sample = model.predict(dataset.query_points)
        means.append(Fmean_sample)
        variances.append(Fvar_sample)
    ref_mean = tf.reduce_mean(tf.stack(means), axis=0)
    ref_variance = tf.reduce_mean(tf.stack(variances) + tf.stack(means) ** 2, axis=0) - ref_mean**2
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2 * error)
npt.assert_allclose(sample_variance, ref_variance, atol=4 * error)
@random_seed
def test_dgp_decoupled_trajectory_sampler_samples_are_distinct_for_new_instances(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
sampler_1 = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory_1 = sampler_1.get_trajectory()
sampler_2 = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory_2 = sampler_2.get_trajectory()
xs = _generate_xs_for_decoupled_trajectory(10, 2)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory_1(xs) - trajectory_2(xs)))
) # distinct between sample draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory_1(xs)[:, 0] - trajectory_1(xs)[:, 1]))
) # distinct between samples within draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory_2(xs)[:, 0] - trajectory_2(xs)[:, 1]))
) # distinct between samples within draws
@random_seed
def test_dgp_decoupled_trajectory_resample_trajectory_provides_new_samples_without_retracing(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory = sampler.get_trajectory()
evals_1 = trajectory(xs)
for _ in range(5):
trajectory = sampler.resample_trajectory(trajectory)
evals_new = trajectory(xs)
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_1 - evals_new)))
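    # `trajectory.__call__` is a tf.function: a tracing count of one confirms that
    # resampling reused the original concrete function rather than triggering a retrace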
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
@random_seed
def test_dgp_decoupled_trajectory_update_trajectory_updates_and_doesnt_retrace(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
sampler = DeepGaussianProcessDecoupledTrajectorySampler(model)
trajectory = sampler.get_trajectory()
eval_before = trajectory(xs)
for _ in range(3): # do three updates on new data and see if samples are new
x_train = tf.random.uniform([20, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
y_train = tf.random.normal([20, 1], dtype=tf.float64)
new_dataset = Dataset(x_train, y_train)
model.update(new_dataset)
model.optimize(new_dataset)
trajectory_updated = sampler.update_trajectory(trajectory)
eval_after = trajectory(xs)
assert trajectory_updated is trajectory # check update was in place
npt.assert_array_less(
0.1, tf.reduce_max(tf.abs(eval_before - eval_after))
) # two samples should be different
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
def test_dgp_decoupled_layer_raises_for_invalid_layer() -> None:
w_dim = 1
prior_means = np.zeros(w_dim)
prior_std = np.ones(w_dim)
encoder = gpflux.encoders.DirectlyParameterizedNormalDiag(10, w_dim)
prior = tfp.distributions.MultivariateNormalDiag(prior_means, prior_std)
lv = gpflux.layers.LatentVariableLayer(prior, encoder)
x = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
model = DeepGaussianProcess(simple_two_layer_dgp_model(x))
model.model_gpflux.f_layers[0] = lv
with pytest.raises(ValueError, match="Layers other than .*"):
DeepGaussianProcessDecoupledLayer(model, 0)
@pytest.mark.parametrize("num_features", [0, -2])
def test_dgp_decoupled_layer_raises_for_invalid_number_of_features(num_features: int) -> None:
kernel = construct_basic_kernel(
gpflow.kernels.SquaredExponential(), output_dim=1, share_hyperparams=True
)
inducing_var = construct_basic_inducing_variables(
num_inducing=5,
input_dim=1,
share_variables=True,
z_init=tf.random.normal([5, 1], dtype=gpflow.default_float()),
)
layer = gpflux.layers.GPLayer(
kernel,
inducing_var,
num_data=10,
num_latent_gps=2,
)
x = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
model = DeepGaussianProcess(simple_two_layer_dgp_model(x))
model.model_gpflux.f_layers[0] = layer
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
DeepGaussianProcessDecoupledLayer(model, 0, num_features)
def test_dgp_decoupled_layer_raises_for_invalid_inducing_variables() -> None:
ip1 = gpflow.inducing_variables.InducingPoints(
tf.random.normal([5, 1], dtype=gpflow.default_float())
)
ip2 = gpflow.inducing_variables.InducingPoints(
tf.random.normal([5, 1], dtype=gpflow.default_float())
)
inducing_var = gpflow.inducing_variables.SeparateIndependentInducingVariables([ip1, ip2])
layer = gpflux.layers.GPLayer(
gpflow.kernels.SeparateIndependent(
[gpflow.kernels.SquaredExponential(), gpflow.kernels.SquaredExponential()]
),
inducing_var,
num_data=10,
num_latent_gps=2,
)
x = tf.random.uniform([100, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
model = DeepGaussianProcess(simple_two_layer_dgp_model(x))
model.model_gpflux.f_layers[0] = layer
with pytest.raises(ValueError, match="SeparateIndependentInducingVariables .*"):
DeepGaussianProcessDecoupledLayer(model, 0)
def test_dgp_decoupled_layer_returns_trajectory_with_correct_shapes(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
num_evals = 20
batch_size = 5
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
layer = model.model_gpflux.f_layers[0]
P = layer.num_latent_gps
decoupled_layer = DeepGaussianProcessDecoupledLayer(model, 0)
xs = _generate_xs_for_decoupled_trajectory(num_evals, batch_size)
tf.debugging.assert_shapes([(decoupled_layer(xs), [num_evals, batch_size, P])])
@random_seed
def test_dgp_decoupled_layer_returns_deterministic_trajectory(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
decoupled_layer = DeepGaussianProcessDecoupledLayer(model, 0)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
eval_1 = decoupled_layer(xs)
eval_2 = decoupled_layer(xs)
npt.assert_allclose(eval_1, eval_2)
@random_seed
def test_dgp_decoupled_layer_samples_are_distinct_for_new_instances(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
decoupled_layer_1 = DeepGaussianProcessDecoupledLayer(model, 0)
decoupled_layer_2 = DeepGaussianProcessDecoupledLayer(model, 0)
xs = _generate_xs_for_decoupled_trajectory(100, 5)
npt.assert_array_less(
1e-2, tf.reduce_sum(tf.abs(decoupled_layer_1(xs) - decoupled_layer_2(xs)))
) # distinct between sample draws
npt.assert_array_less(
1e-2, tf.reduce_sum(tf.abs(decoupled_layer_1(xs)[:, 0] - decoupled_layer_1(xs)[:, 1]))
) # distinct between samples within draws
npt.assert_array_less(
1e-2, tf.reduce_sum(tf.abs(decoupled_layer_2(xs)[:, 0] - decoupled_layer_2(xs)[:, 1]))
) # distinct between samples within draws
@random_seed
def test_dgp_decoupled_layer_resample_provides_new_samples(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
decoupled_layer = DeepGaussianProcessDecoupledLayer(model, 0)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
evals_1 = decoupled_layer(xs)
for _ in range(5):
decoupled_layer.resample()
evals_new = decoupled_layer(xs)
npt.assert_array_less(1e-2, tf.reduce_sum(tf.abs(evals_1 - evals_new)))
@random_seed
def test_dgp_decoupled_layer_update_updates(
two_layer_model: Callable[[TensorType], DeepGP]
) -> None:
_, model = _build_dataset_and_train_deep_gp(two_layer_model)
decoupled_layer = DeepGaussianProcessDecoupledLayer(model, 0)
xs = _generate_xs_for_decoupled_trajectory(10, 5)
evals_1 = decoupled_layer(xs)
original_W = decoupled_layer._feature_functions.W.value().numpy()
original_b = decoupled_layer._feature_functions.b.value().numpy()
for _ in range(5):
x_train = tf.random.uniform([20, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
y_train = tf.random.normal([20, 1], dtype=tf.float64)
new_dataset = Dataset(x_train, y_train)
model.update(new_dataset)
model.optimize(new_dataset)
decoupled_layer.update()
evals_new = decoupled_layer(xs)
npt.assert_array_less(1e-2, tf.reduce_sum(tf.abs(evals_1 - evals_new)))
# Check that RFF weights change
npt.assert_array_less(
1e-2, tf.reduce_sum(tf.abs(original_b - decoupled_layer._feature_functions.b))
)
npt.assert_array_less(
1e-2, tf.reduce_sum(tf.abs(original_W - decoupled_layer._feature_functions.W))
)
| 19,200 | 35.925 | 99 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflux/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/models/keras/test_architectures.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, List, Tuple
import numpy as np
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import empty_dataset
from tests.util.models.keras.models import trieste_keras_ensemble_model
from trieste.models.keras import (
GaussianNetwork,
KerasEnsembleNetwork,
get_tensor_spec_from_data,
negative_log_likelihood,
)
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="ensemble_size", params=[2, 5])
def _ensemble_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="independent_normal", params=[False, True])
def _independent_normal_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="num_hidden_layers", params=[0, 1, 3])
def _num_hidden_layers_fixture(request: Any) -> int:
return request.param
def test_keras_ensemble_repr(
ensemble_size: int,
independent_normal: bool,
) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
expected_repr = f"KerasEnsemble({keras_ensemble._networks!r})"
assert type(keras_ensemble).__name__ in repr(keras_ensemble)
assert repr(keras_ensemble) == expected_repr
def test_keras_ensemble_model_attributes() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
assert isinstance(keras_ensemble.model, tf.keras.Model)
def test_keras_ensemble_ensemble_size_attributes(ensemble_size: int) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size)
assert keras_ensemble.ensemble_size == ensemble_size
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[
([1], [1]),
([5], [1]),
([5], [2]),
],
)
def test_keras_ensemble_build_ensemble_seems_correct(
ensemble_size: int,
independent_normal: bool,
query_point_shape: List[int],
observation_shape: List[int],
) -> None:
n_obs = 10
example_data = empty_dataset(query_point_shape, observation_shape)
query_points = tf.random.uniform([n_obs] + query_point_shape)
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
# basics
assert isinstance(keras_ensemble.model, tf.keras.Model)
assert keras_ensemble.model.built
# check ensemble size
assert len(keras_ensemble.model.inputs) == ensemble_size
assert len(keras_ensemble.model.input_names) == ensemble_size
assert len(keras_ensemble.model.output_names) == ensemble_size
# check input shape
for shape in keras_ensemble.model.input_shape:
assert shape[1:] == tf.TensorShape(query_point_shape)
# testing output shape is more complex as probabilistic layers don't have some properties
# we make some predictions instead and then check the output is correct
predictions = keras_ensemble.model.predict([query_points] * ensemble_size)
assert len(predictions) == ensemble_size
for pred in predictions:
assert pred.shape == tf.TensorShape([n_obs] + observation_shape)
# check input/output names
for ens in range(ensemble_size):
ins = ["model_" + str(ens) in i_name for i_name in keras_ensemble.model.input_names]
assert np.any(ins)
outs = ["model_" + str(ens) in o_name for o_name in keras_ensemble.model.output_names]
assert np.any(outs)
# check the model has not been compiled
assert keras_ensemble.model.compiled_loss is None
assert keras_ensemble.model.compiled_metrics is None
assert keras_ensemble.model.optimizer is None
    # check correct number of layers: each network contributes two hidden layers plus an
    # input layer, an output parameter layer and a distribution layer
    assert len(keras_ensemble.model.layers) == 2 * ensemble_size + 3 * ensemble_size
def test_keras_ensemble_can_be_compiled() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
keras_ensemble.model.compile(tf.optimizers.Adam(), negative_log_likelihood)
assert keras_ensemble.model.compiled_loss is not None
assert keras_ensemble.model.compiled_metrics is not None
assert keras_ensemble.model.optimizer is not None
class _DummyKerasEnsembleNetwork(KerasEnsembleNetwork):
def connect_layers(self) -> Tuple[tf.Tensor, tf.Tensor]:
raise NotImplementedError
def test_keras_ensemble_network_raises_on_incorrect_tensor_spec() -> None:
with pytest.raises(ValueError):
_DummyKerasEnsembleNetwork(
[1],
tf.TensorSpec(shape=(1,), dtype=tf.float32),
tf.keras.losses.MeanSquaredError(),
)
with pytest.raises(ValueError):
_DummyKerasEnsembleNetwork(
tf.TensorSpec(shape=(1,), dtype=tf.float32),
[1],
tf.keras.losses.MeanSquaredError(),
)
def test_keras_ensemble_network_network_and_layer_name() -> None:
model = _DummyKerasEnsembleNetwork(
tf.TensorSpec(shape=(1,), dtype=tf.float32),
tf.TensorSpec(shape=(1,), dtype=tf.float32),
)
# check defaults
assert model.network_name == ""
assert model.input_layer_name == "input"
assert model.output_layer_name == "output"
# check that network name is changed
model.network_name = "model_"
assert model.network_name == "model_"
assert model.input_layer_name == "model_" + "input"
assert model.output_layer_name == "model_" + "output"
@pytest.mark.parametrize("n_dims", list(range(10)))
def test_keras_ensemble_network_flattened_output_shape(n_dims: int) -> None:
shape = np.random.randint(1, 10, (n_dims,))
tensor = np.random.randint(0, 1, shape)
tensor_spec = tf.TensorSpec(shape)
model = _DummyKerasEnsembleNetwork(
tensor_spec,
tensor_spec,
)
flattened_shape = model.flattened_output_shape
assert flattened_shape == np.size(tensor)
def test_gaussian_network_check_default_hidden_layer_args() -> None:
example_data = empty_dataset([1], [1])
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
network = GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
)
default_args = ({"units": 50, "activation": "relu"}, {"units": 50, "activation": "relu"})
assert network._hidden_layer_args == default_args
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[
([1], [1]),
([5], [1]),
([5], [2]),
],
)
def test_gaussian_network_is_correctly_constructed(
query_point_shape: List[int], observation_shape: List[int], num_hidden_layers: int
) -> None:
n_obs = 10
example_data = empty_dataset(query_point_shape, observation_shape)
query_points = tf.random.uniform([n_obs] + query_point_shape)
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
hidden_layer_args = []
for i in range(num_hidden_layers):
hidden_layer_args.append({"units": 10, "activation": "relu"})
network = GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args,
)
network_input, network_output = network.connect_layers()
network_built = tf.keras.Model(inputs=network_input, outputs=network_output)
# check input shape
assert network_input.shape[1:] == tf.TensorShape(query_point_shape)
# testing output shape is more complex as probabilistic layers don't have some properties
# we make some predictions instead and then check the output is correct
predictions = network_built.predict(query_points)
assert predictions.shape == tf.TensorShape([n_obs] + observation_shape)
# check layers
assert isinstance(network_built.layers[0], tf.keras.layers.InputLayer)
assert len(network_built.layers[1:-2]) == num_hidden_layers
assert isinstance(network_built.layers[-1], tfp.layers.DistributionLambda)
def test_multivariatenormaltril_layer_fails_to_serialize() -> None:
# tfp.layers.MultivariateNormalTriL currently fails to serialize out of the box
# (with different errors in TF2.4 and TF2.5). When that's fixed we can remove our workaround.
layer = tfp.layers.MultivariateNormalTriL(1)
with pytest.raises(Exception):
serialized = tf.keras.utils.serialize_keras_object(layer)
tf.keras.utils.deserialize_keras_object(
serialized, custom_objects={"MultivariateNormalTriL": tfp.layers.MultivariateNormalTriL}
)
| 9,121 | 34.084615 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, raise_exc
from trieste.models.keras import KerasPredictor
from trieste.models.optimizer import KerasOptimizer
class _DummyKerasPredictor(KerasPredictor):
@property
def model(self) -> tf.keras.Model:
return raise_exc
def test_keras_predictor_repr_includes_class_name() -> None:
model = _DummyKerasPredictor()
assert type(model).__name__ in repr(model)
def test_keras_predictor_default_optimizer_is_correct() -> None:
model = _DummyKerasPredictor()
assert isinstance(model._optimizer, KerasOptimizer)
assert isinstance(model._optimizer.optimizer, tf.optimizers.Adam)
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Adam)
def test_keras_predictor_check_optimizer_property() -> None:
optimizer = KerasOptimizer(tf.optimizers.RMSprop())
model = _DummyKerasPredictor(optimizer)
assert model.optimizer == optimizer
def test_keras_predictor_raises_on_sample_call() -> None:
model = _DummyKerasPredictor()
with pytest.raises(NotImplementedError):
model.sample(empty_dataset([1], [1]).query_points, 1)
def test_keras_predictor_raises_for_non_tf_optimizer() -> None:
with pytest.raises(ValueError):
_DummyKerasPredictor(optimizer=KerasOptimizer(gpflow.optimizers.Scipy()))
| 2,045 | 30.96875 | 81 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import empty_dataset
from trieste.models.keras import build_keras_ensemble
@pytest.mark.parametrize("units, activation", [(10, "relu"), (50, tf.keras.activations.tanh)])
@pytest.mark.parametrize("ensemble_size", [2, 5])
@pytest.mark.parametrize("independent_normal", [False, True])
@pytest.mark.parametrize("num_hidden_layers", [0, 1, 3])
@pytest.mark.parametrize("num_outputs", [1, 3])
def test_build_keras_ensemble(
num_outputs: int,
ensemble_size: int,
num_hidden_layers: int,
units: int,
activation: Union[str, tf.keras.layers.Activation],
independent_normal: bool,
) -> None:
example_data = empty_dataset([num_outputs], [num_outputs])
keras_ensemble = build_keras_ensemble(
example_data,
ensemble_size,
num_hidden_layers,
units,
activation,
independent_normal,
)
assert keras_ensemble.ensemble_size == ensemble_size
assert len(keras_ensemble.model.layers) == num_hidden_layers * ensemble_size + 3 * ensemble_size
if num_outputs > 1:
if independent_normal:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.IndependentNormal)
else:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.MultivariateNormalTriL)
else:
assert isinstance(keras_ensemble.model.layers[-1], tfp.layers.DistributionLambda)
if num_hidden_layers > 0:
for layer in keras_ensemble.model.layers[ensemble_size : -ensemble_size * 2]:
assert layer.units == units
assert layer.activation == activation or layer.activation.__name__ == activation
| 2,334 | 37.278689 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import operator
import tempfile
import unittest.mock
from typing import Any, Optional
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.keras.callbacks import Callback
from tests.util.misc import ShapeLike, empty_dataset, random_seed
from tests.util.models.keras.models import (
keras_optimizer_weights,
trieste_deep_ensemble_model,
trieste_keras_ensemble_model,
)
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models.interfaces import HasTrajectorySampler
from trieste.models.keras import (
DeepEnsemble,
KerasEnsemble,
negative_log_likelihood,
sample_with_replacement,
)
from trieste.models.optimizer import KerasOptimizer, TrainingData
from trieste.types import TensorType
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="ensemble_size", params=[2, 5])
def _ensemble_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="num_outputs", params=[1, 3])
def _num_outputs_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="dataset_size", params=[10, 100])
def _dataset_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="independent_normal", params=[False, True])
def _independent_normal_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="bootstrap_data", params=[False, True])
def _bootstrap_data_fixture(request: Any) -> bool:
return request.param
def _get_example_data(
query_point_shape: ShapeLike, observation_shape: Optional[ShapeLike] = None
) -> Dataset:
qp = tf.random.uniform(tf.TensorShape(query_point_shape), dtype=tf.float64)
if observation_shape is None:
observation_shape = query_point_shape[:-1] + [1] # type: ignore
obs = tf.random.uniform(tf.TensorShape(observation_shape), dtype=tf.float64)
return Dataset(qp, obs)
def _ensemblise_data(
model: KerasEnsemble, data: Dataset, ensemble_size: int, bootstrap: bool = False
) -> TrainingData:
inputs = {}
outputs = {}
for index in range(ensemble_size):
if bootstrap:
resampled_data = sample_with_replacement(data)
else:
resampled_data = data
input_name = model.model.input_names[index]
output_name = model.model.output_names[index]
inputs[input_name], outputs[output_name] = resampled_data.astuple()
return inputs, outputs
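# Illustrative usage (a sketch, not exercised by the tests below; the helper name is ours):
# each ensemble member is keyed by its network's input/output name, and with bootstrap=True
# each member receives an independent resample (with replacement) of the data.
def _ensemblise_data_example() -> None:
    example_data = _get_example_data([10, 1])
    keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
    inputs, outputs = _ensemblise_data(keras_ensemble, example_data, _ENSEMBLE_SIZE, bootstrap=True)
    assert set(inputs) == set(keras_ensemble.model.input_names)
    assert set(outputs) == set(keras_ensemble.model.output_names)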
@pytest.mark.parametrize("optimizer", [tf.optimizers.Adam(), tf.optimizers.RMSprop()])
@pytest.mark.parametrize("diversify", [False, True])
def test_deep_ensemble_repr(
optimizer: tf.optimizers.Optimizer, bootstrap_data: bool, diversify: bool
) -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
keras_ensemble.model.compile(optimizer, loss=negative_log_likelihood)
optimizer_wrapper = KerasOptimizer(optimizer, loss=negative_log_likelihood)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data, diversify)
expected_repr = (
f"DeepEnsemble({keras_ensemble.model!r}, {optimizer_wrapper!r}, "
f"{bootstrap_data!r}, {diversify!r})"
)
assert type(model).__name__ in repr(model)
assert repr(model) == expected_repr
def test_deep_ensemble_model_attributes() -> None:
example_data = empty_dataset([1], [1])
model, keras_ensemble, optimizer = trieste_deep_ensemble_model(
example_data, _ENSEMBLE_SIZE, False, False
)
keras_ensemble.model.compile(optimizer=optimizer.optimizer, loss=optimizer.loss)
assert model.model is keras_ensemble.model
def test_deep_ensemble_ensemble_size_attributes(ensemble_size: int) -> None:
example_data = empty_dataset([1], [1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
assert model.ensemble_size == ensemble_size
def test_deep_ensemble_raises_for_incorrect_ensemble_size() -> None:
with pytest.raises(ValueError):
trieste_deep_ensemble_model(empty_dataset([1], [1]), 1)
def test_deep_ensemble_default_optimizer_is_correct() -> None:
example_data = empty_dataset([1], [1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
default_loss = negative_log_likelihood
default_fit_args = {
"verbose": 0,
"epochs": 3000,
"batch_size": 16,
}
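    # the default optimizer also sets an early stopping callback in fit_args; remove it
    # so the remaining arguments can be compared directly against the expected defaults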
del model.optimizer.fit_args["callbacks"]
assert isinstance(model.optimizer, KerasOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
assert model.optimizer.fit_args == default_fit_args
assert model.optimizer.loss == default_loss
def test_deep_ensemble_optimizer_changed_correctly() -> None:
example_data = empty_dataset([1], [1])
custom_fit_args = {
"verbose": 1,
"epochs": 10,
"batch_size": 10,
}
custom_optimizer = tf.optimizers.RMSprop()
custom_loss = tf.keras.losses.MeanSquaredError()
optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss)
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper)
assert model.optimizer == optimizer_wrapper
assert model.optimizer.optimizer == custom_optimizer
assert model.optimizer.fit_args == custom_fit_args
def test_deep_ensemble_is_compiled() -> None:
example_data = empty_dataset([1], [1])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
assert model.model.compiled_loss is not None
assert model.model.compiled_metrics is not None
assert model.model.optimizer is not None
def test_deep_ensemble_resets_lr_with_lr_schedule() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
epochs = 2
init_lr = 1.0
def scheduler(epoch: int, lr: float) -> float:
return lr * 0.5
fit_args = {
"epochs": epochs,
"batch_size": 100,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(init_lr), fit_args)
model = DeepEnsemble(keras_ensemble, optimizer)
npt.assert_allclose(model.model.optimizer.lr.numpy(), init_lr, rtol=1e-6)
model.optimize(example_data)
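    # the scheduler halves the rate at the start of each of the two epochs (so the
    # history records 0.5 then 0.25), and optimize() should restore the initial rate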
npt.assert_allclose(model.model.history.history["lr"], [0.5, 0.25])
npt.assert_allclose(model.model.optimizer.lr.numpy(), init_lr, rtol=1e-6)
def test_deep_ensemble_with_lr_scheduler() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE)
epochs = 2
init_lr = 1.0
fit_args = {
"epochs": epochs,
"batch_size": 20,
"verbose": 0,
}
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=init_lr, decay_steps=1, decay_rate=0.5
)
optimizer = KerasOptimizer(tf.optimizers.Adam(lr_schedule), fit_args)
model = DeepEnsemble(keras_ensemble, optimizer)
model.optimize(example_data)
assert len(model.model.history.history["loss"]) == epochs
def test_deep_ensemble_ensemble_distributions(ensemble_size: int, dataset_size: int) -> None:
example_data = _get_example_data([dataset_size, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
distributions = model.ensemble_distributions(example_data.query_points)
assert len(distributions) == ensemble_size
for dist in distributions:
assert isinstance(dist, tfp.distributions.Distribution)
        try:
            predicted_means = dist.mean()
        except Exception as exc:
            pytest.fail(f"calling 'mean' raised an exception {exc}")
        try:
            predicted_vars = dist.variance()
        except Exception as exc:
            pytest.fail(f"calling 'variance' raised an exception {exc}")
assert tf.is_tensor(predicted_means)
assert tf.is_tensor(predicted_vars)
assert predicted_means.shape[-2:] == example_data.observations.shape
assert predicted_vars.shape[-2:] == example_data.observations.shape
def test_deep_ensemble_predict_broadcasts(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
# create a model that expects [dataset_size, num_outputs] spec
dummy_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(dummy_data, ensemble_size, False, False)
# check that it handles predictions with leading batch dimensions
query_data = _get_example_data(
[1, 2, dataset_size, num_outputs], [1, 2, dataset_size, num_outputs]
)
predicted_means, predicted_vars = model.predict(query_data.query_points)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == query_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == query_data.observations.shape
def test_deep_ensemble_predict_omit_trailing_dim_one(ensemble_size: int, dataset_size: int) -> None:
dummy_data = _get_example_data([dataset_size, 1], [dataset_size, 1])
model, _, _ = trieste_deep_ensemble_model(dummy_data, ensemble_size, False, False)
# Functional has code to "allow (None,) and (None, 1) Tensors to be passed interchangeably"
qp = tf.random.uniform(tf.TensorShape([dataset_size]), dtype=tf.float64)
predicted_means, predicted_vars = model.predict(qp)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == dummy_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == dummy_data.observations.shape
def test_deep_ensemble_predict_call_shape(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
predicted_means, predicted_vars = model.predict(example_data.query_points)
assert tf.is_tensor(predicted_vars)
assert predicted_vars.shape == example_data.observations.shape
assert tf.is_tensor(predicted_means)
assert predicted_means.shape == example_data.observations.shape
def test_deep_ensemble_predict_ensemble_call_shape(
ensemble_size: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
predicted_means, predicted_vars = model.predict_ensemble(example_data.query_points)
assert predicted_means.shape[-3] == ensemble_size
assert predicted_vars.shape[-3] == ensemble_size
assert tf.is_tensor(predicted_means)
assert tf.is_tensor(predicted_vars)
assert predicted_means.shape[-2:] == example_data.observations.shape
assert predicted_vars.shape[-2:] == example_data.observations.shape
@pytest.mark.parametrize("num_samples", [6, 12])
@pytest.mark.parametrize("dataset_size", [4, 8])
def test_deep_ensemble_sample_call_shape(
num_samples: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
samples = model.sample(example_data.query_points, num_samples)
assert tf.is_tensor(samples)
assert samples.shape == [num_samples, dataset_size, num_outputs]
@pytest.mark.parametrize("num_samples", [6, 12])
@pytest.mark.parametrize("dataset_size", [4, 8])
def test_deep_ensemble_sample_ensemble_call_shape(
num_samples: int, dataset_size: int, num_outputs: int
) -> None:
example_data = _get_example_data([dataset_size, num_outputs], [dataset_size, num_outputs])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
samples = model.sample_ensemble(example_data.query_points, num_samples)
assert tf.is_tensor(samples)
assert samples.shape == [num_samples, dataset_size, num_outputs]
@random_seed
def test_deep_ensemble_optimize_with_defaults() -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
loss = model.model.history.history["loss"]
assert loss[-1] < loss[0]
@random_seed
@pytest.mark.parametrize("epochs", [5, 15])
def test_deep_ensemble_optimize(ensemble_size: int, bootstrap_data: bool, epochs: int) -> None:
example_data = _get_example_data([100, 1])
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, False)
custom_optimizer = tf.optimizers.RMSprop()
custom_fit_args = {
"verbose": 0,
"epochs": epochs,
"batch_size": 10,
}
custom_loss = tf.keras.losses.MeanSquaredError()
optimizer_wrapper = KerasOptimizer(custom_optimizer, custom_fit_args, custom_loss)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data)
model.optimize(example_data)
loss = model.model.history.history["loss"]
ensemble_losses = ["output_loss" in elt for elt in model.model.history.history.keys()]
assert loss[-1] < loss[0]
assert len(loss) == epochs
assert sum(ensemble_losses) == ensemble_size
@random_seed
def test_deep_ensemble_loss(bootstrap_data: bool) -> None:
example_data = _get_example_data([100, 1])
loss = negative_log_likelihood
optimizer = tf.optimizers.Adam()
model = DeepEnsemble(
trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False),
KerasOptimizer(optimizer, loss=loss),
bootstrap_data,
)
reference_model = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
reference_model.model.compile(optimizer=optimizer, loss=loss)
reference_model.model.set_weights(model.model.get_weights())
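    # with identical weights and identically prepared data, the wrapper's loss should
    # match that of a directly compiled reference ensemble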
    transformed_x, transformed_y = _ensemblise_data(
        reference_model, example_data, _ENSEMBLE_SIZE, bootstrap_data
    )
    loss = model.model.evaluate(transformed_x, transformed_y)[: _ENSEMBLE_SIZE + 1]
    reference_loss = reference_model.model.evaluate(transformed_x, transformed_y)
npt.assert_allclose(tf.constant(loss), reference_loss, rtol=1e-6)
@random_seed
def test_deep_ensemble_predict_ensemble() -> None:
example_data = _get_example_data([100, 1])
loss = negative_log_likelihood
optimizer = tf.optimizers.Adam()
model = DeepEnsemble(
trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False),
KerasOptimizer(optimizer, loss=loss),
)
reference_model = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
reference_model.model.compile(optimizer=optimizer, loss=loss)
reference_model.model.set_weights(model.model.get_weights())
predicted_means, predicted_vars = model.predict_ensemble(example_data.query_points)
    transformed_x, transformed_y = _ensemblise_data(
        reference_model, example_data, _ENSEMBLE_SIZE, False
    )
    ensemble_distributions = reference_model.model(transformed_x)
reference_means = tf.convert_to_tensor([dist.mean() for dist in ensemble_distributions])
reference_vars = tf.convert_to_tensor([dist.variance() for dist in ensemble_distributions])
npt.assert_allclose(predicted_means, reference_means)
npt.assert_allclose(predicted_vars, reference_vars)
@random_seed
def test_deep_ensemble_sample() -> None:
example_data = _get_example_data([100, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE, False, False)
num_samples = 100_000
samples = model.sample(example_data.query_points, num_samples)
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2, axis=0)
ref_mean, ref_variance = model.predict(example_data.query_points)
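    # the Monte Carlo standard error of the sample mean scales as 1 / sqrt(num_samples)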
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=4 * error)
npt.assert_allclose(sample_variance, ref_variance, atol=8 * error)
@random_seed
def test_deep_ensemble_sample_ensemble(ensemble_size: int) -> None:
example_data = _get_example_data([20, 1])
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, False, False)
num_samples = 2000
samples = model.sample_ensemble(example_data.query_points, num_samples)
sample_mean = tf.reduce_mean(samples, axis=0)
ref_mean, _ = model.predict(example_data.query_points)
error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, ref_mean, atol=2.5 * error)
@random_seed
def test_deep_ensemble_prepare_data_call(
ensemble_size: int,
bootstrap_data: bool,
) -> None:
n_rows = 100
x = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
y = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
example_data = Dataset(x, y)
model, _, _ = trieste_deep_ensemble_model(example_data, ensemble_size, bootstrap_data, False)
# call with whole dataset
data = model.prepare_dataset(example_data)
assert isinstance(data, tuple)
for ensemble_data in data:
assert isinstance(ensemble_data, dict)
assert len(ensemble_data.keys()) == ensemble_size
for member_data in ensemble_data:
if bootstrap_data:
assert tf.reduce_any(ensemble_data[member_data] != x)
else:
assert tf.reduce_all(ensemble_data[member_data] == x)
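    # matching input and output names share the same numeric (member index) suffix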
for inp, out in zip(data[0], data[1]):
assert "".join(filter(str.isdigit, inp)) == "".join(filter(str.isdigit, out))
# call with query points alone
inputs = model.prepare_query_points(example_data.query_points)
assert isinstance(inputs, dict)
assert len(inputs.keys()) == ensemble_size
for member_data in inputs:
assert tf.reduce_all(inputs[member_data] == x)
def test_deep_ensemble_deep_copyable() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
model_copy = copy.deepcopy(model)
mean_f, variance_f = model.predict(example_data.query_points)
mean_f_copy, variance_f_copy = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
# check that updating the original doesn't break or change the deepcopy
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
mean_f_updated, variance_f_updated = model.predict(example_data.query_points)
mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f_copy_updated, mean_f_copy)
npt.assert_allclose(variance_f_copy_updated, variance_f_copy)
npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
# check that we can also update the copy
newer_example_data = _get_example_data([30, 3], [30, 3])
model_copy.update(newer_example_data)
model_copy.optimize(newer_example_data)
mean_f_updated_2, variance_f_updated_2 = model.predict(example_data.query_points)
mean_f_copy_updated_2, variance_f_copy_updated_2 = model_copy.predict(example_data.query_points)
npt.assert_allclose(mean_f_updated_2, mean_f_updated)
npt.assert_allclose(variance_f_updated_2, variance_f_updated)
npt.assert_array_compare(operator.__ne__, mean_f_copy_updated_2, mean_f_copy_updated)
npt.assert_array_compare(operator.__ne__, variance_f_copy_updated_2, variance_f_copy_updated)
def test_deep_ensemble_tf_saved_model() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
with tempfile.TemporaryDirectory() as path:
# create a trajectory sampler (used for sample method)
assert isinstance(model, HasTrajectorySampler)
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
# generate client model with predict and sample methods
module = model.get_module_with_variables(trajectory_sampler, trajectory)
module.predict = tf.function(
model.predict, input_signature=[tf.TensorSpec(shape=[None, 3], dtype=tf.float64)]
)
def _sample(query_points: TensorType, num_samples: int) -> TensorType:
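            # resample so that repeated calls draw fresh trajectories; tile the query
            # points across the trajectory batch axis so one call yields num_samples samples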
trajectory_updated = trajectory_sampler.resample_trajectory(trajectory)
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory_updated(tiled_query_points), [1, 0, 2])[
:, :, :1
] # [S, N, L]
module.sample = tf.function(
_sample,
input_signature=[
tf.TensorSpec(shape=[None, 3], dtype=tf.float64), # query_points
tf.TensorSpec(shape=(), dtype=tf.int32), # num_samples
],
)
tf.saved_model.save(module, str(path))
client_model = tf.saved_model.load(str(path))
# test exported methods
mean_f, variance_f = model.predict(example_data.query_points)
mean_f_copy, variance_f_copy = client_model.predict(example_data.query_points)
npt.assert_allclose(mean_f, mean_f_copy)
npt.assert_allclose(variance_f, variance_f_copy)
client_model.sample(example_data.query_points, 10)
def test_deep_ensemble_deep_copies_optimizer_state() -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
assert not keras_optimizer_weights(model.model.optimizer)
model.optimize(new_example_data)
assert keras_optimizer_weights(model.model.optimizer)
model_copy = copy.deepcopy(model)
assert model.model.optimizer is not model_copy.model.optimizer
npt.assert_allclose(model_copy.model.optimizer.iterations, 1)
npt.assert_equal(
keras_optimizer_weights(model.model.optimizer),
keras_optimizer_weights(model_copy.model.optimizer),
)
@pytest.mark.parametrize(
"callbacks",
[
[
tf.keras.callbacks.CSVLogger("csv"),
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100),
tf.keras.callbacks.History(),
tf.keras.callbacks.LambdaCallback(lambda epoch, lr: lr),
tf.keras.callbacks.LearningRateScheduler(lambda epoch, lr: lr),
tf.keras.callbacks.ProgbarLogger(),
tf.keras.callbacks.ReduceLROnPlateau(),
tf.keras.callbacks.RemoteMonitor(),
tf.keras.callbacks.TensorBoard(),
tf.keras.callbacks.TerminateOnNaN(),
],
pytest.param(
[
tf.keras.callbacks.experimental.BackupAndRestore("backup"),
tf.keras.callbacks.BaseLogger(),
tf.keras.callbacks.ModelCheckpoint("weights"),
],
marks=pytest.mark.skip(reason="callbacks currently causing optimize to fail"),
),
],
)
def test_deep_ensemble_deep_copies_different_callback_types(callbacks: list[Callback]) -> None:
example_data = _get_example_data([10, 3], [10, 3])
model, _, _ = trieste_deep_ensemble_model(example_data, 2, False, False)
model.optimizer.fit_args["callbacks"] = callbacks
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
model_copy = copy.deepcopy(model)
assert model.model.optimizer is not model_copy.model.optimizer
assert tuple(type(callback) for callback in model.optimizer.fit_args["callbacks"]) == tuple(
type(callback) for callback in model_copy.optimizer.fit_args["callbacks"]
)
def test_deep_ensemble_deep_copies_optimizer_callback_models() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
new_example_data = _get_example_data([20, 3], [20, 3])
model.update(new_example_data)
model.optimize(new_example_data)
callback = model.optimizer.fit_args["callbacks"][0]
assert isinstance(callback, tf.keras.callbacks.EarlyStopping)
assert callback.model is model.model
model_copy = copy.deepcopy(model)
callback_copy = model_copy.optimizer.fit_args["callbacks"][0]
assert isinstance(callback_copy, tf.keras.callbacks.EarlyStopping)
assert callback_copy.model is model_copy.model is not callback.model
npt.assert_equal(callback_copy.model.get_weights(), callback.model.get_weights())
def test_deep_ensemble_deep_copies_optimizer_without_callbacks() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
del model.optimizer.fit_args["callbacks"]
model_copy = copy.deepcopy(model)
assert model_copy.optimizer is not model.optimizer
assert model_copy.optimizer.fit_args == model.optimizer.fit_args
def test_deep_ensemble_deep_copies_optimization_history() -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
assert model.model.history.history
expected_history = model.model.history.history
model_copy = copy.deepcopy(model)
assert model_copy.model.history.history
history = model_copy.model.history.history
assert history.keys() == expected_history.keys()
for k, v in expected_history.items():
assert history[k] == v
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("use_dataset", [True, False])
def test_deep_ensemble_log(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
use_dataset: bool,
) -> None:
example_data = _get_example_data([10, 3], [10, 3])
keras_ensemble = trieste_keras_ensemble_model(example_data, _ENSEMBLE_SIZE, False)
model = DeepEnsemble(keras_ensemble)
model.optimize(example_data)
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
if use_dataset:
model.log(example_data)
else:
model.log(None)
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
    num_scalars = 5  # scalars specific to loss and metrics
    num_histogram = 1  # histograms specific to loss and metrics
if use_dataset: # write_summary_data_based_metrics
num_scalars += 8
num_histogram += 6
assert mocked_summary_scalar.call_count == num_scalars
assert mocked_summary_histogram.call_count == num_histogram
| 28,485 | 37.390836 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/test_sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import random
from typing import Any, Callable, Optional, cast
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, quadratic, random_seed
from tests.util.models.keras.models import trieste_deep_ensemble_model
from trieste.data import Dataset
from trieste.models.keras import (
DeepEnsemble,
DeepEnsembleTrajectorySampler,
deep_ensemble_trajectory,
)
from trieste.types import TensorType
_ENSEMBLE_SIZE = 3
@pytest.fixture(name="diversify", params=[True, False])
def _diversify_fixture(request: Any) -> bool:
return request.param
@pytest.fixture(name="num_evals", params=[9, 19])
def _num_evals_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="batch_size", params=[1, 2])
def _batch_size_fixture(request: Any) -> int:
return request.param
@pytest.fixture(name="num_outputs", params=[1, 3])
def _num_outputs_fixture(request: Any) -> int:
return request.param
def test_ensemble_trajectory_sampler_returns_trajectory_function_with_correctly_shaped_output(
num_evals: int,
batch_size: int,
dim: int,
diversify: bool,
num_outputs: int,
) -> None:
"""
    Inputs should be [N, B, d], while outputs should be [N, B, M]. Note that the
    diversify option only supports single-output models.
"""
example_data = empty_dataset([dim], [num_outputs])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
assert trajectory(test_data).shape == (num_evals, batch_size, num_outputs)
def test_ensemble_trajectory_sampler_returns_deterministic_trajectory(
num_evals: int, batch_size: int, dim: int, diversify: bool, num_outputs: int
) -> None:
"""
Evaluating the same data with the same trajectory multiple times should yield
exactly the same output.
"""
example_data = empty_dataset([dim], [num_outputs])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
eval_1 = trajectory(test_data)
eval_2 = trajectory(test_data)
npt.assert_allclose(eval_1, eval_2)
@pytest.mark.parametrize("seed", [42, None])
def test_ensemble_trajectory_sampler_is_not_too_deterministic(
seed: Optional[int], diversify: bool
) -> None:
"""
Different trajectories should have different internal state, even if we set the global RNG seed.
"""
num_evals, batch_size, dim = 19, 5, 10
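    # diversify trajectories store per-batch random perturbations in `_eps`, while
    # standard trajectories store sampled network indices in `_indices`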
state = "_eps" if diversify else "_indices"
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model1, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 2)
model2, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 2)
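    # fix all global seeds: distinct samplers must still differ via their own state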
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
# check that the initialised states are different
trajectory1 = DeepEnsembleTrajectorySampler(model1, diversify=diversify).get_trajectory()
trajectory2 = DeepEnsembleTrajectorySampler(model2, diversify=diversify).get_trajectory()
eval1 = trajectory1(test_data)
eval2 = trajectory2(test_data)
npt.assert_raises(AssertionError, npt.assert_allclose, eval1, eval2)
npt.assert_raises(
AssertionError,
npt.assert_allclose,
getattr(trajectory1, state),
getattr(trajectory2, state),
)
# check that the state remains different after resampling
for _ in range(2):
cast(deep_ensemble_trajectory, trajectory1).resample()
cast(deep_ensemble_trajectory, trajectory2).resample()
eval1 = trajectory1(test_data)
eval2 = trajectory2(test_data)
npt.assert_raises(AssertionError, npt.assert_allclose, eval1, eval2)
npt.assert_raises(
AssertionError,
npt.assert_allclose,
getattr(trajectory1, state),
getattr(trajectory2, state),
)
def test_ensemble_trajectory_sampler_samples_are_distinct_for_new_instances(
diversify: bool,
) -> None:
"""
    If seeds are not fixed, instantiating a new sampler should give us different trajectories.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 10)
def _get_trajectory_evaluation(
model: DeepEnsemble, diversify: bool, seed: int
) -> Callable[[TensorType], TensorType]:
"""This allows us to set a different seed for each instance"""
@random_seed(seed=seed)
def foo(query_points: TensorType) -> TensorType:
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
return trajectory(query_points)
return foo
eval_1 = _get_trajectory_evaluation(model, diversify, 0)(test_data)
eval_2 = _get_trajectory_evaluation(model, diversify, 1)(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1 - eval_2))
    )  # distinct between separate draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1[:, 0] - eval_1[:, 1]))
) # distinct for two samples within same draw
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_2[:, 0] - eval_2[:, 1]))
) # distinct for two samples within same draw
@random_seed
def test_ensemble_trajectory_sampler_samples_are_distinct_within_batch(diversify: bool) -> None:
"""
    Samples for elements of the batch should be different. Note that when diversify is
    not used, small ensembles could randomly select the same network for two batch
    elements, which would yield identical results.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler1 = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory1 = sampler1.get_trajectory()
sampler2 = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory2 = sampler2.get_trajectory()
eval_1 = trajectory1(test_data)
eval_2 = trajectory2(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_1[:, 0] - eval_1[:, 1]))
) # distinct for two samples within same draw
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(eval_2[:, 0] - eval_2[:, 1]))
) # distinct for two samples within same draw
@random_seed
def test_ensemble_trajectory_sampler_eps_broadcasted_correctly() -> None:
"""
    We check that eps is broadcast correctly in diversify mode.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=True)
trajectory = trajectory_sampler.get_trajectory()
_ = trajectory(test_data) # first call needed to initialize the state
trajectory._eps.assign(tf.constant([[0], [1]], dtype=tf.float64)) # type: ignore
evals = trajectory(test_data)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(evals[:, 0] - evals[:, 1]))
) # distinct for two samples within same draw
npt.assert_allclose(
evals[:, 0], model.predict(test_data[:, 0])[0], rtol=5e-6
) # since we set first eps to 0, that trajectory should equal predicted means
@random_seed
def test_ensemble_trajectory_sampler_resample_with_new_sampler_does_not_change_old_sampler(
diversify: bool,
) -> None:
"""
Generating a new trajectory and resampling it will not affect a previous
    trajectory instance. Before resampling, evaluations from both trajectories
    are the same.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify)
trajectory1 = sampler.get_trajectory()
evals_11 = trajectory1(test_data)
trajectory2 = sampler.get_trajectory()
evals_21 = trajectory2(test_data)
trajectory2 = sampler.resample_trajectory(trajectory2)
evals_22 = trajectory2(test_data)
evals_12 = trajectory1(test_data)
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_22 - evals_21)))
npt.assert_allclose(evals_11, evals_21)
npt.assert_allclose(evals_11, evals_12)
@random_seed
def test_ensemble_trajectory_sampler_new_trajectories_diverge(diversify: bool) -> None:
"""
Generating two trajectories from the same sampler and resampling them will lead to different
trajectories, even though they were initially the same.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, D]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory11 = sampler.get_trajectory()
evals_11 = trajectory11(test_data)
trajectory12 = sampler.resample_trajectory(trajectory11)
evals_12 = trajectory12(test_data)
trajectory21 = sampler.get_trajectory()
evals_21 = trajectory21(test_data)
trajectory22 = sampler.resample_trajectory(trajectory21)
evals_22 = trajectory22(test_data)
npt.assert_allclose(evals_11, evals_21)
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_22 - evals_12)))
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_11 - evals_12)))
npt.assert_array_less(1e-1, tf.reduce_max(tf.abs(evals_21 - evals_22)))
@random_seed
def test_ensemble_trajectory_sampler_resample_provides_new_samples_without_retracing(
diversify: bool,
) -> None:
"""
    Resampling a trajectory should not trigger retracing; we also check that we
    get different samples.
"""
example_data = empty_dataset([1], [1])
test_data = tf.linspace([-10.0], [10.0], 100)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE * 3)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = sampler.get_trajectory()
evals_1 = trajectory(test_data)
trajectory = sampler.resample_trajectory(trajectory)
evals_2 = trajectory(test_data)
trajectory = sampler.resample_trajectory(trajectory)
evals_3 = trajectory(test_data)
# no retracing
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
# check all samples are different
npt.assert_array_less(1e-4, tf.abs(evals_1 - evals_2))
npt.assert_array_less(1e-4, tf.abs(evals_2 - evals_3))
npt.assert_array_less(1e-4, tf.abs(evals_1 - evals_3))
@random_seed
def test_ensemble_trajectory_sampler_update_trajectory_updates_and_doesnt_retrace(
diversify: bool,
) -> None:
"""
    We update the trajectory after updating the model, check that the model has
    indeed changed, and verify that the samples are new.
"""
dim = 3
batch_size = 2
num_data = 100
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_data, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = trajectory_sampler.get_trajectory()
eval_before = trajectory(test_data)
for _ in range(3):
x_train = tf.random.uniform([num_data, dim]) # [N, d]
new_dataset = Dataset(x_train, quadratic(x_train))
model = cast(DeepEnsemble, trajectory_sampler._model)
old_weights = model.model.get_weights()
model.optimize(new_dataset)
trajectory_updated = trajectory_sampler.update_trajectory(trajectory)
eval_after = trajectory(test_data)
assert trajectory_updated is trajectory # check update was in place
        npt.assert_array_less(1e-4, tf.abs(model.model.get_weights()[0] - old_weights[0]))
npt.assert_array_less(
0.01, tf.reduce_max(tf.abs(eval_before - eval_after))
) # two samples should be different
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
@random_seed
def test_ensemble_trajectory_sampler_trajectory_on_subsets_same_as_set(diversify: bool) -> None:
"""
    We check that calling the trajectory on a set of data gives the same result as
    calling it on subsets of that data.
"""
x_train = 10 * tf.random.uniform([10000, 1]) # [N, d]
train_data = Dataset(x_train, quadratic(x_train))
test_data = tf.linspace([-10.0], [10.0], 300)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, d]
model, _, _ = trieste_deep_ensemble_model(train_data, _ENSEMBLE_SIZE)
model.optimize(train_data)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify)
trajectory = trajectory_sampler.get_trajectory()
eval_all = trajectory(test_data)
eval_1 = trajectory(test_data[0:100, :])
eval_2 = trajectory(test_data[100:200, :])
eval_3 = trajectory(test_data[200:300, :])
npt.assert_allclose(eval_all, tf.concat([eval_1, eval_2, eval_3], axis=0), rtol=5e-6)
@random_seed
def test_ensemble_trajectory_sampler_trajectory_is_continuous(diversify: bool) -> None:
"""
    We check that the trajectory appears continuous: a small delta in x should give a
    small delta in y.
"""
x_train = 10 * tf.random.uniform([10000, 1]) # [N, d]
train_data = Dataset(x_train, quadratic(x_train))
test_data = tf.linspace([-10.0], [10.0], 300)
test_data = tf.expand_dims(test_data, -2) # [N, 1, d]
test_data = tf.tile(test_data, [1, 2, 1]) # [N, 2, d]
model, _, _ = trieste_deep_ensemble_model(train_data, _ENSEMBLE_SIZE)
trajectory_sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = trajectory_sampler.get_trajectory()
npt.assert_array_less(tf.abs(trajectory(test_data + 1e-20) - trajectory(test_data)), 1e-20)
def test_ensemble_trajectory_sampler_returns_state(batch_size: int, diversify: bool) -> None:
dim = 3
num_evals = 10
example_data = empty_dataset([dim], [1])
test_data = tf.random.uniform([num_evals, batch_size, dim]) # [N, B, d]
model, _, _ = trieste_deep_ensemble_model(example_data, _ENSEMBLE_SIZE)
sampler = DeepEnsembleTrajectorySampler(model, diversify=diversify)
trajectory = cast(deep_ensemble_trajectory, sampler.get_trajectory())
if diversify:
dtype = tf.float64
rnd_state_name = "eps"
else:
dtype = tf.int32
rnd_state_name = "indices"
# before calling the trajectory internal state should not be initialized
state_pre_call = trajectory.get_state()
assert not state_pre_call["initialized"]
assert state_pre_call["batch_size"] == 0
assert tf.equal(tf.size(state_pre_call[rnd_state_name]), 0)
assert state_pre_call[rnd_state_name].dtype == dtype
# after calling the trajectory internal state should be initialized
_ = trajectory(test_data)
state_post_call = trajectory.get_state()
assert state_post_call["initialized"]
assert state_post_call["batch_size"] == batch_size
assert tf.equal(tf.size(state_post_call[rnd_state_name]), batch_size)
assert state_post_call[rnd_state_name].dtype == dtype
| 17,015 | 35.515021 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/keras/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/models/keras/test_utils.py | # Copyright 2021 The Bellman Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import tensorflow as tf
from tests.util.misc import ShapeLike, empty_dataset, random_seed
from trieste.data import Dataset
from trieste.models.keras.utils import (
get_tensor_spec_from_data,
sample_model_index,
sample_with_replacement,
)
def test_get_tensor_spec_from_data_raises_for_incorrect_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(ValueError):
get_tensor_spec_from_data(dataset.query_points)
@pytest.mark.parametrize(
"query_point_shape, observation_shape",
[([1], [1]), ([2], [1]), ([5], [1]), ([5], [2]), ([3, 2], [3, 1])],
)
def test_get_tensor_spec_from_data(
query_point_shape: ShapeLike, observation_shape: ShapeLike
) -> None:
dataset = empty_dataset(query_point_shape, observation_shape)
input_spec, output_spec = get_tensor_spec_from_data(dataset)
assert input_spec.shape == query_point_shape
assert input_spec.dtype == dataset.query_points.dtype
assert input_spec.name == "query_points"
assert output_spec.shape == observation_shape
assert output_spec.dtype == dataset.observations.dtype
assert output_spec.name == "observations"
def test_sample_with_replacement_raises_for_invalid_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(ValueError):
sample_with_replacement(dataset.query_points)
def test_sample_with_replacement_raises_for_empty_dataset() -> None:
dataset = empty_dataset([1], [1])
with pytest.raises(tf.errors.InvalidArgumentError):
sample_with_replacement(dataset)
@random_seed
@pytest.mark.parametrize("rank", [2, 3])
def test_sample_with_replacement_seems_correct(rank: int) -> None:
n_rows = 100
if rank == 2:
x = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
y = tf.constant(np.arange(0, n_rows, 1), shape=[n_rows, 1])
elif rank == 3:
x = tf.constant(np.arange(0, n_rows, 1).repeat(2), shape=[n_rows, 2, 1])
y = tf.constant(np.arange(0, n_rows, 1).repeat(2), shape=[n_rows, 2, 1])
dataset = Dataset(x, y)
dataset_resampled = sample_with_replacement(dataset)
# basic check that original dataset has not been changed
assert tf.reduce_all(dataset.query_points == x)
assert tf.reduce_all(dataset.observations == y)
# x and y should be resampled the same, and should differ from the original
assert tf.reduce_all(dataset_resampled.query_points == dataset_resampled.observations)
assert tf.reduce_any(dataset_resampled.query_points != x)
assert tf.reduce_any(dataset_resampled.observations != y)
# values are likely to repeat due to replacement
_, _, count = tf.unique_with_counts(tf.squeeze(dataset_resampled.query_points[:, 0]))
assert tf.reduce_any(count > 1)
# mean of bootstrap samples should be close to true mean
mean = [
tf.reduce_mean(
tf.cast(sample_with_replacement(dataset).query_points[:, 0], dtype=tf.float32)
)
for _ in range(100)
]
x = tf.cast(x[:, 0], dtype=tf.float32)
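    # with 100 rows, the standard error of the bootstrap mean is roughly std(x) / 10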
    assert tf.math.abs(tf.reduce_mean(mean) - tf.reduce_mean(x)) < 1
assert tf.math.abs(tf.math.reduce_std(mean) - tf.math.reduce_std(x) / 10.0) < 0.1
@pytest.mark.parametrize("size", [2, 10])
@pytest.mark.parametrize("num_samples", [0, 1, 10])
def test_sample_model_index_call_shape(size: int, num_samples: int) -> None:
indices = sample_model_index(size, num_samples)
assert indices.shape == (num_samples,)
@random_seed
@pytest.mark.parametrize("size", [2, 5, 10, 20])
def test_sample_model_index_size(size: int) -> None:
indices = sample_model_index(size, 1000)
assert tf.math.reduce_variance(tf.cast(indices, tf.float32)) > 0
assert tf.reduce_min(indices) >= 0
assert tf.reduce_max(indices) < size
@pytest.mark.parametrize("size", [10, 20, 50, 100])
def test_sample_model_index_no_replacement(size: int) -> None:
indices = sample_model_index(size, size)
assert tf.reduce_sum(indices) == tf.reduce_sum(tf.range(size))
assert tf.reduce_all(tf.unique_with_counts(indices)[2] == 1)
| 4,687 | 34.78626 | 90 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_interface.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any
import gpflow
import numpy.testing as npt
import tensorflow as tf
from gpflow.models import GPModel
from tests.util.misc import random_seed
from trieste.data import Dataset
from trieste.models.gpflow import BatchReparametrizationSampler, GPflowPredictor
class _QuadraticPredictor(GPflowPredictor):
@property
def model(self) -> GPModel:
return _QuadraticGPModel()
def update(self, dataset: Dataset) -> None:
return
class _QuadraticGPModel(GPModel):
def __init__(self) -> None:
super().__init__(
gpflow.kernels.Polynomial(2), # not actually used
gpflow.likelihoods.Gaussian(),
num_latent_gps=1,
)
def predict_f(
self, Xnew: tf.Tensor, full_cov: bool = False, full_output_cov: bool = False
) -> tuple[tf.Tensor, tf.Tensor]:
assert not full_output_cov, "Test utility not implemented for full output covariance"
mean = tf.reduce_sum(Xnew**2, axis=1, keepdims=True)
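        # follow GPflow output conventions: the mean is [..., N, L]; the variance is
        # [..., L, N, N] when full_cov is requested, otherwise it matches the mean shape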
*leading, x_samples, y_dims = mean.shape
var_shape = [*leading, y_dims, x_samples, x_samples] if full_cov else mean.shape
return mean, tf.ones(var_shape, dtype=mean.dtype)
def maximum_log_likelihood_objective(self, *args: Any, **kwargs: Any) -> tf.Tensor:
raise NotImplementedError
def test_gpflow_predictor_predict() -> None:
model = _QuadraticPredictor()
mean, variance = model.predict(tf.constant([[2.5]], gpflow.default_float()))
assert mean.shape == [1, 1]
assert variance.shape == [1, 1]
npt.assert_allclose(mean, [[6.25]], rtol=0.01)
npt.assert_allclose(variance, [[1.0]], rtol=0.01)
@random_seed
def test_gpflow_predictor_sample() -> None:
model = _QuadraticPredictor()
num_samples = 20_000
samples = model.sample(tf.constant([[2.5]], gpflow.default_float()), num_samples)
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=2 * linear_error)
def test_gpflow_predictor_sample_0_samples() -> None:
samples = _QuadraticPredictor().sample(tf.constant([[50.0]], gpflow.default_float()), 0)
assert samples.shape == (0, 1, 1)
def test_gpflow_reparam_sampler_returns_a_param_sampler() -> None:
sampler = _QuadraticPredictor().reparam_sampler(10)
assert isinstance(sampler, BatchReparametrizationSampler)
assert sampler._sample_size == 10
def test_gpflow_reparam_sampler_returns_reparam_sampler_with_correct_samples() -> None:
num_samples = 20_000
sampler = _QuadraticPredictor().reparam_sampler(num_samples)
samples = sampler.sample(tf.constant([[2.5]], gpflow.default_float()))
assert samples.shape == [num_samples, 1, 1]
sample_mean = tf.reduce_mean(samples, axis=0)
sample_variance = tf.reduce_mean((samples - sample_mean) ** 2)
linear_error = 1 / tf.sqrt(tf.cast(num_samples, tf.float32))
npt.assert_allclose(sample_mean, [[6.25]], rtol=linear_error)
npt.assert_allclose(sample_variance, 1.0, rtol=2 * linear_error)
| 3,895 | 34.743119 | 93 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_builders.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test that we wrap GPflow architectures correctly, building models
with the expected configuration.
"""
from __future__ import annotations
import math
from typing import Any, List, Optional
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, mk_dataset, quadratic
from tests.util.models.gpflow.models import mock_data
from trieste.data import Dataset
from trieste.models.gpflow.builders import (
CLASSIFICATION_KERNEL_VARIANCE,
CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE,
KERNEL_LENGTHSCALE,
MAX_NUM_INDUCING_POINTS,
NUM_INDUCING_POINTS_PER_DIM,
SIGNAL_NOISE_RATIO_LIKELIHOOD,
_get_data_stats,
build_gpr,
build_multifidelity_autoregressive_models,
build_multifidelity_nonlinear_autoregressive_models,
build_sgpr,
build_svgp,
build_vgp_classifier,
)
from trieste.models.gpflow.models import GaussianProcessRegression
from trieste.space import Box, DiscreteSearchSpace, SearchSpace
from trieste.types import TensorType
@pytest.mark.parametrize("kernel_priors", [True, False])
@pytest.mark.parametrize("likelihood_variance", [None, 1e-10, 10.0])
@pytest.mark.parametrize("trainable_likelihood", [True, False])
def test_build_gpr_returns_correct_model(
kernel_priors: bool, likelihood_variance: Optional[float], trainable_likelihood: bool
) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
model = build_gpr(data, search_space, kernel_priors, likelihood_variance, trainable_likelihood)
empirical_mean, empirical_variance, _ = _get_data_stats(data)
# basics
assert isinstance(model, GPR)
assert model.data == (qp, obs)
# check the likelihood
_check_likelihood(model, False, likelihood_variance, empirical_variance, trainable_likelihood)
# check the mean function
_check_mean_function(model, False, empirical_mean)
# check the kernel
_check_kernel(model, False, None, empirical_variance, kernel_priors, False)
@pytest.mark.parametrize("likelihood_variance", [-1, 0.0])
def test_build_gpr_raises_for_invalid_likelihood_variance(likelihood_variance: float) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_gpr(data, search_space, likelihood_variance=likelihood_variance)
@pytest.mark.parametrize("kernel_priors", [True, False])
@pytest.mark.parametrize("likelihood_variance", [None, 1e-10, 10.0])
@pytest.mark.parametrize("trainable_likelihood", [True, False])
@pytest.mark.parametrize("num_inducing_points", [None, 3, 100])
@pytest.mark.parametrize("trainable_inducing_points", [True, False])
def test_build_sgpr_returns_correct_model(
kernel_priors: bool,
likelihood_variance: Optional[float],
trainable_likelihood: bool,
num_inducing_points: Optional[int],
trainable_inducing_points: bool,
) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
model = build_sgpr(
data,
search_space,
kernel_priors,
likelihood_variance,
trainable_likelihood,
num_inducing_points,
trainable_inducing_points,
)
empirical_mean, empirical_variance, _ = _get_data_stats(data)
# basics
assert isinstance(model, SGPR)
assert model.data == (qp, obs)
# check the likelihood
_check_likelihood(model, False, likelihood_variance, empirical_variance, trainable_likelihood)
# check the mean function
_check_mean_function(model, False, empirical_mean)
# check the kernel
_check_kernel(model, False, None, empirical_variance, kernel_priors, False)
# check the inducing points
_check_inducing_points(model, search_space, num_inducing_points, trainable_inducing_points)
@pytest.mark.parametrize("likelihood_variance", [-1, 0.0])
def test_build_sgpr_raises_for_invalid_likelihood_variance(likelihood_variance: float) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_sgpr(data, search_space, likelihood_variance=likelihood_variance)
@pytest.mark.parametrize("num_inducing_points", [-1, 0])
def test_build_sgpr_raises_for_invalid_num_inducing_points(num_inducing_points: int) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_sgpr(data, search_space, num_inducing_points=num_inducing_points)
@pytest.mark.parametrize("kernel_priors", [True, False])
@pytest.mark.parametrize("noise_free", [True, False])
@pytest.mark.parametrize("kernel_variance", [None, 0.1, 10.0])
def test_build_vgp_classifier_returns_correct_model(
kernel_priors: bool, noise_free: bool, kernel_variance: Optional[float]
) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
model = build_vgp_classifier(data, search_space, kernel_priors, noise_free, kernel_variance)
# basics
assert isinstance(model, VGP)
assert model.data == (qp, obs)
# check the likelihood
_check_likelihood(model, True, None, None, False)
# check the mean function
_check_mean_function(model, True, None)
# check the kernel
_check_kernel(model, True, kernel_variance, 0.0, kernel_priors, noise_free)
@pytest.mark.parametrize("kernel_variance", [-1, 0.0])
def test_build_vgp_classifier_raises_for_invalid_kernel_variance(kernel_variance: float) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_vgp_classifier(data, search_space, kernel_variance=kernel_variance)
@pytest.mark.parametrize("classification", [True, False])
@pytest.mark.parametrize("kernel_priors", [True, False])
@pytest.mark.parametrize("likelihood_variance", [None, 1e-10, 10.0])
@pytest.mark.parametrize("trainable_likelihood", [True, False])
@pytest.mark.parametrize("num_inducing_points", [None, 3, 100])
@pytest.mark.parametrize("trainable_inducing_points", [True, False])
def test_build_svgp_returns_correct_model(
classification: bool,
kernel_priors: bool,
likelihood_variance: Optional[float],
trainable_likelihood: bool,
num_inducing_points: Optional[int],
trainable_inducing_points: bool,
) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
model = build_svgp(
data,
search_space,
classification,
kernel_priors,
likelihood_variance,
trainable_likelihood,
num_inducing_points,
trainable_inducing_points,
)
empirical_mean, empirical_variance, _ = _get_data_stats(data)
# basics
assert isinstance(model, SVGP)
# check the likelihood
_check_likelihood(
model, classification, likelihood_variance, empirical_variance, trainable_likelihood
)
# check the mean function
_check_mean_function(model, classification, empirical_mean)
# check the kernel
_check_kernel(model, classification, None, empirical_variance, kernel_priors, False)
# check the inducing points
_check_inducing_points(model, search_space, num_inducing_points, trainable_inducing_points)
@pytest.mark.parametrize("likelihood_variance", [-1, 0.0])
def test_build_svgp_raises_for_invalid_likelihood_variance(likelihood_variance: float) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_svgp(data, search_space, likelihood_variance=likelihood_variance)
@pytest.mark.parametrize("num_inducing_points", [-1, 0])
def test_build_svgp_raises_for_invalid_num_inducing_points(num_inducing_points: int) -> None:
qp, obs = mock_data()
data = mk_dataset(qp, obs)
search_space = Box([0.0], [1.0]) ** qp.shape[-1]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
build_svgp(data, search_space, num_inducing_points=num_inducing_points)
@pytest.mark.parametrize(
"lower, upper",
[([0.0, 0.0], [1.0, 10.0]), ([0.0, -1.0], [4.0, 2.0]), ([-10.0, -2.0], [-1.0, -1.0])],
)
@pytest.mark.parametrize("builder", [build_gpr, build_sgpr, build_svgp, build_vgp_classifier])
def test_builder_returns_correct_lengthscales_for_unequal_box_bounds(
lower: List[float], upper: List[float], builder: Any
) -> None:
search_space = Box(lower, upper)
qp = search_space.sample(10)
data = mk_dataset(qp, quadratic(qp))
model = builder(data, search_space)
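    # the builders scale the base lengthscale by the search space extent per dimension
    # and by the square root of the dimensionality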
expected_lengthscales = (
KERNEL_LENGTHSCALE
* (search_space.upper - search_space.lower)
* math.sqrt(search_space.dimension)
)
npt.assert_allclose(model.kernel.lengthscales, expected_lengthscales, rtol=1e-6)
@pytest.mark.parametrize(
"points",
[
([[0.0, 0.0], [1.0, 10.0]]),
([[0.0, -1.0], [4.0, 2.0]]),
([[-10.0, -2.0], [-1.0, -1.0]]),
([[0.0, 1.0], [2.0, 1.0]]),
([[0.0, 1.0], [0.0, 10.0]]),
],
)
@pytest.mark.parametrize("builder", [build_gpr, build_sgpr, build_svgp, build_vgp_classifier])
def test_builder_returns_correct_lengthscales_for_unequal_discrete_bounds(
points: List[List[float]], builder: Any
) -> None:
search_space = DiscreteSearchSpace(tf.constant(points, dtype=tf.float64))
qp = search_space.sample(10)
data = mk_dataset(qp, quadratic(qp))
model = builder(data, search_space)
expected_lengthscales = (
KERNEL_LENGTHSCALE
* (search_space.upper - search_space.lower)
* math.sqrt(search_space.dimension)
)
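    # dimensions with zero extent (upper == lower) carry no scale information, so a
    # unit lengthscale is expected there instead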
search_space_collapsed = tf.equal(search_space.upper, search_space.lower)
expected_lengthscales = tf.where(
search_space_collapsed, tf.cast(1.0, dtype=gpflow.default_float()), expected_lengthscales
)
npt.assert_allclose(model.kernel.lengthscales, expected_lengthscales, rtol=1e-6)
@pytest.mark.parametrize("model_type", ("linear", "nonlinear"))
def test_build_multifidelity_builds_correct_n_gprs(model_type: str) -> None:
dataset = Dataset(
tf.Variable(
[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 1.0], [4.0, 2.0], [5.0, 0.0]],
dtype=tf.float64,
),
tf.Variable([[2.0], [4.0], [6.0], [8.0], [10.0], [12.0]], dtype=tf.float64),
)
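    # The last input column encodes the fidelity level of each observation.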
search_space = Box([0.0], [10.0])
if model_type == "linear":
gprs = build_multifidelity_autoregressive_models(dataset, 3, search_space)
else:
gprs = build_multifidelity_nonlinear_autoregressive_models(dataset, 3, search_space)
assert len(gprs) == 3
for gpr in gprs:
assert isinstance(gpr, GaussianProcessRegression)
@pytest.mark.parametrize("model_type", ("linear", "nonlinear"))
def test_build_multifidelity_raises_for_bad_fidelity(model_type: str) -> None:
dataset = Dataset(
tf.Variable(
[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 1.0], [4.0, 2.0], [5.0, 0.0]],
dtype=tf.float64,
),
tf.Variable([[2.0], [4.0], [6.0], [8.0], [10.0], [12.0]], dtype=tf.float64),
)
search_space = Box([0.0], [10.0])
with pytest.raises(ValueError):
if model_type == "linear":
build_multifidelity_autoregressive_models(dataset, -1, search_space)
else:
build_multifidelity_nonlinear_autoregressive_models(dataset, -1, search_space)
@pytest.mark.parametrize(
"query_points,observations",
(
(
[[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 1.0], [4.0, 2.0]], # Only 1 point for fid 0
[[2.0], [4.0], [6.0], [8.0], [10.0]],
),
(
[[0.0, 0.0], [2.0, 2.0], [4.0, 2.0], [5.0, 0.0]], # Missing middle fid entirely
[[2.0], [4.0], [6.0], [8.0]],
),
(
            [[0.0, 0.0], [2.0, 1.0], [4.0, 1.0], [5.0, 0.0]], # 2 fidelities in data, 3 requested
[[2.0], [4.0], [6.0], [8.0]],
),
),
)
@pytest.mark.parametrize("model_type", ("linear", "nonlinear"))
def test_build_multifidelity_raises_not_enough_datapoints(
query_points: TensorType, observations: TensorType, model_type: str
) -> None:
dataset = Dataset(
tf.Variable(
query_points,
dtype=tf.float64,
),
tf.Variable(observations, dtype=tf.float64),
)
search_space = Box([0.0], [10.0])
with pytest.raises(ValueError):
if model_type == "linear":
build_multifidelity_autoregressive_models(dataset, 3, search_space)
else:
build_multifidelity_nonlinear_autoregressive_models(dataset, 3, search_space)
@pytest.mark.parametrize("model_type", ("linear", "nonlinear"))
def test_build_multifidelity_raises_not_multifidelity_data(
model_type: str,
) -> None:
dataset = Dataset(
tf.Variable(
[[0.0], [1.0], [2.0], [3.0], [4.0]],
dtype=tf.float64,
),
tf.Variable([[2.0], [4.0], [6.0], [8.0], [10.0]], dtype=tf.float64),
)
search_space = Box([0.0], [10.0])
with pytest.raises(ValueError):
if model_type == "linear":
build_multifidelity_autoregressive_models(dataset, 3, search_space)
else:
build_multifidelity_nonlinear_autoregressive_models(dataset, 3, search_space)
def _check_likelihood(
model: GPModel,
classification: bool,
likelihood_variance: Optional[float],
empirical_variance: Optional[TensorType],
trainable_likelihood: bool,
) -> None:
if classification:
assert isinstance(model.likelihood, gpflow.likelihoods.Bernoulli)
else:
assert isinstance(model.likelihood, gpflow.likelihoods.Gaussian)
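        # When no variance is given, the builders are expected to derive it from the
        # empirical variance of the data and the assumed signal-to-noise ratio.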
if likelihood_variance is not None:
npt.assert_allclose(
tf.constant(model.likelihood.variance), likelihood_variance, rtol=1e-6
)
else:
npt.assert_allclose(
tf.constant(model.likelihood.variance),
empirical_variance / SIGNAL_NOISE_RATIO_LIKELIHOOD**2,
rtol=1e-6,
)
assert isinstance(model.likelihood.variance, gpflow.Parameter)
assert model.likelihood.variance.trainable == trainable_likelihood
def _check_mean_function(
model: GPModel, classification: bool, empirical_mean: Optional[TensorType]
) -> None:
assert isinstance(model.mean_function, gpflow.mean_functions.Constant)
if classification:
npt.assert_allclose(model.mean_function.parameters[0], 0.0, rtol=1e-6)
else:
assert empirical_mean is not None
npt.assert_allclose(model.mean_function.parameters[0], empirical_mean, rtol=1e-6)
def _check_kernel(
model: GPModel,
classification: bool,
kernel_variance: Optional[float],
empirical_variance: TensorType,
kernel_priors: bool,
noise_free: bool,
) -> None:
assert isinstance(model.kernel, gpflow.kernels.Matern52)
if classification:
if kernel_variance is not None:
variance = kernel_variance
else:
if noise_free:
variance = CLASSIFICATION_KERNEL_VARIANCE_NOISE_FREE
else:
variance = CLASSIFICATION_KERNEL_VARIANCE
else:
variance = float(empirical_variance)
npt.assert_allclose(model.kernel.variance, variance, rtol=1e-6)
if kernel_priors:
if noise_free:
assert model.kernel.variance.prior is None
else:
assert model.kernel.variance.prior is not None
assert model.kernel.lengthscales.prior is not None
else:
assert model.kernel.variance.prior is None
assert model.kernel.lengthscales.prior is None
def _check_inducing_points(
model: GPModel,
search_space: SearchSpace,
num_inducing_points: Optional[int],
trainable_inducing_points: bool,
) -> None:
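    # When not specified, the number of inducing points is expected to default to a
    # per-dimension heuristic capped at MAX_NUM_INDUCING_POINTS.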
if num_inducing_points is None:
num_inducing_points = min(
MAX_NUM_INDUCING_POINTS, NUM_INDUCING_POINTS_PER_DIM * search_space.dimension
)
assert model.inducing_variable.num_inducing == num_inducing_points
assert model.inducing_variable.parameters[0].trainable == trainable_inducing_points
| 17,212 | 33.914807 | 99 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of trieste models against reference GPflow models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflow models are used as the underlying model in a trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflow models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
trieste model).
"""
from __future__ import annotations
import unittest.mock
from time import time
from typing import Callable, Union, cast
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.config import Config, as_context
from gpflow.inducing_variables import (
SeparateIndependentInducingVariables,
SharedIndependentInducingVariables,
)
from gpflow.models import SGPR, SVGP, VGP
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, random_seed
from tests.util.models.gpflow.models import (
ModelFactoryType,
gpr_model,
mock_data,
sgpr_model,
svgp_model,
svgp_model_by_type,
svgp_model_with_mean,
vgp_matern_model,
vgp_model,
)
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset, add_fidelity_column
from trieste.logging import step_number, tensorboard_writer
from trieste.models import ProbabilisticModelType, TrainableProbabilisticModel
from trieste.models.gpflow import (
GaussianProcessRegression,
GPflowPredictor,
MultifidelityAutoregressive,
MultifidelityNonlinearAutoregressive,
SparseGaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
)
from trieste.models.gpflow.builders import (
build_multifidelity_autoregressive_models,
build_multifidelity_nonlinear_autoregressive_models,
)
from trieste.models.gpflow.inducing_point_selectors import (
ConditionalImprovementReduction,
ConditionalVarianceReduction,
InducingPointSelector,
KMeansInducingPointSelector,
RandomSubSampleInducingPointSelector,
UniformInducingPointSelector,
)
from trieste.models.gpflow.sampler import (
DecoupledTrajectorySampler,
RandomFourierFeatureTrajectorySampler,
)
from trieste.models.optimizer import BatchOptimizer, DatasetTransformer, Optimizer
from trieste.space import Box
from trieste.types import TensorType
from trieste.utils import DEFAULTS
def _3x_plus_gaussian_noise(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + np.random.normal(scale=0.01, size=x.shape)
def test_gpflow_wrappers_loss(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
model, _reference_model = gpflow_interface_factory(x, y)
internal_model = model.model
reference_model = _reference_model(x, y)
if isinstance(internal_model, SVGP):
args = {"data": (x, y)}
else:
args = {}
npt.assert_allclose(
internal_model.training_loss(**args), reference_model.training_loss(**args), rtol=1e-6
)
def test_gpflow_wrappers_update(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_3x_plus_10(x)
model, _reference_model = gpflow_interface_factory(x, y)
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
new_data = Dataset(x_new, fnc_3x_plus_10(x_new))
# Would be nice if ModelFactoryType could return an intersection type of
# GPflowPredictor and TrainableProbabilisticModel but this isn't possible
cast(TrainableProbabilisticModel, model).update(new_data)
reference_model = _reference_model(x_new, fnc_3x_plus_10(x_new))
internal_model = model.model
if isinstance(internal_model, SVGP):
args = {"data": (new_data.query_points, new_data.observations)}
else:
args = {}
npt.assert_allclose(
internal_model.training_loss(**args), reference_model.training_loss(**args), rtol=1e-6
)
@random_seed
def test_gpflow_wrappers_default_optimize(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
internal_model = model.model
if isinstance(internal_model, SVGP):
args = {"data": data}
else:
args = {}
loss = internal_model.training_loss(**args)
model.optimize(Dataset(*data))
assert internal_model.training_loss(**args) < loss
def test_gpflow_wrappers_ref_optimize(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = fnc_2sin_x_over_3(x)
data = Dataset(x, y)
model, _reference_model = gpflow_interface_factory(x, y)
reference_model = _reference_model(x, y)
model.optimize(data)
internal_model = model.model
if isinstance(internal_model, SVGP):
data_iter = iter(
tf.data.Dataset.from_tensor_slices(data.astuple())
.shuffle(len(data))
.batch(100)
.prefetch(tf.data.experimental.AUTOTUNE)
.repeat()
)
tf.optimizers.Adam().minimize(
reference_model.training_loss_closure(data=data_iter, compile=False),
reference_model.trainable_variables,
)
        # the two losses differ slightly even though the code paths are essentially
        # identical; the source of the discrepancy is unclear, hence the loose tolerance
npt.assert_allclose(
internal_model.training_loss(next(data_iter)),
reference_model.training_loss(next(data_iter)),
rtol=1e-1,
)
else:
reference_model.data = (
tf.Variable(
reference_model.data[0],
trainable=False,
shape=[None, *reference_model.data[0].shape[1:]],
),
tf.Variable(
reference_model.data[1],
trainable=False,
shape=[None, *reference_model.data[1].shape[1:]],
),
)
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(compile=True),
reference_model.trainable_variables,
)
npt.assert_allclose(
internal_model.training_loss(), reference_model.training_loss(), rtol=1e-6
)
@random_seed
def test_gpflow_predictor_get_observation_noise_raises_for_likelihood_without_variance(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
model.model.likelihood = gpflow.likelihoods.Gaussian() # has variance attribute
model.get_observation_noise()
model.model.likelihood = gpflow.likelihoods.Bernoulli() # does not have variance attribute
with pytest.raises(NotImplementedError):
model.get_observation_noise()
def test_gpflow_wrappers_predict_y(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model, _ = gpflow_interface_factory(x, _3x_plus_gaussian_noise(x))
x_predict = tf.constant([[50.5]], gpflow.default_float())
mean_f, variance_f = model.predict(x_predict)
mean_y, variance_y = model.predict_y(x_predict)
npt.assert_allclose(mean_f, mean_y)
npt.assert_array_less(variance_f, variance_y)
@unittest.mock.patch("trieste.logging.tf.summary.histogram")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("use_dataset", [False, True])
def test_gpflow_wrappers_log(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_histogram: unittest.mock.MagicMock,
use_dataset: bool,
gpflow_interface_factory: ModelFactoryType,
) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
dataset = Dataset(x, y)
model, _ = gpflow_interface_factory(x, y)
model.optimize(dataset)
mocked_summary_writer = unittest.mock.MagicMock()
with tensorboard_writer(mocked_summary_writer):
with step_number(42):
if use_dataset:
model.log(dataset)
else:
model.log(None)
assert len(mocked_summary_writer.method_calls) == 1
assert mocked_summary_writer.method_calls[0][0] == "as_default"
assert mocked_summary_writer.method_calls[0][-1]["step"] == 42
    num_scalars = 3  # write_summary_kernel_parameters and write_summary_likelihood_parameters
    num_histogram = 0
    if use_dataset:  # write_summary_data_based_metrics adds further summaries
num_scalars += 8
num_histogram += 6
assert mocked_summary_scalar.call_count == num_scalars
assert mocked_summary_histogram.call_count == num_histogram
@random_seed
def test_gpflow_models_pairwise_covariance(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
model, _ = gpflow_interface_factory(x, y)
if isinstance(model.model, (VGP, SVGP)): # for speed just update q_sqrt rather than optimize
num_inducing_points = tf.shape(model.model.q_sqrt)[1]
sampled_q_sqrt = tfp.distributions.WishartTriL(5, tf.eye(num_inducing_points)).sample(1)
model.model.q_sqrt.assign(sampled_q_sqrt)
model.update_posterior_cache()
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [8, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [12, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
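    # the cross-covariance block of the joint prediction over the concatenated points
    # serves as the reference for covariance_between_points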
_, predictive_covariance = model.predict_joint(all_query_points)
expected_covariance = predictive_covariance[0, :8, 8:]
actual_covariance = model.covariance_between_points( # type: ignore
query_points_1, query_points_2
)
np.testing.assert_allclose(expected_covariance, actual_covariance[0], atol=1e-4)
@random_seed
def test_gpflow_models_raise_for_pairwise_covariance_for_invalid_query_points(
gpflow_interface_factory: ModelFactoryType,
) -> None:
data = mock_data()
model, _ = gpflow_interface_factory(*data)
with pytest.raises(ValueError):
model.covariance_between_points(data[0], tf.expand_dims(data[0], axis=0)) # type: ignore
@random_seed
@pytest.mark.parametrize(
"after_model_optimize",
[pytest.param(True, id="optimize"), pytest.param(False, id="no-optimize")],
)
@pytest.mark.parametrize(
"after_model_update", [pytest.param(True, id="update"), pytest.param(False, id="no-update")]
)
def test_gpflow_models_cached_predictions_correct(
after_model_optimize: bool,
after_model_update: bool,
gpflow_interface_factory: ModelFactoryType,
) -> None:
x = np.linspace(0, 5, 10).reshape((-1, 1))
y = fnc_2sin_x_over_3(x)
data = x, y
dataset = Dataset(*data)
model, _ = gpflow_interface_factory(x, y)
if after_model_optimize:
model._optimizer = BatchOptimizer(gpflow.optimizers.Scipy(), max_iter=1)
model.optimize(dataset)
if after_model_update:
new_x = np.linspace(0, 5, 3).reshape((-1, 1))
new_y = fnc_2sin_x_over_3(new_x)
new_dataset = Dataset(new_x, new_y)
cast(TrainableProbabilisticModel, model).update(new_dataset)
x_predict = np.linspace(0, 5, 2).reshape((-1, 1))
# get cached predictions
cached_fmean, cached_fvar = model.predict(x_predict)
cached_joint_mean, cached_joint_var = model.predict_joint(x_predict)
cached_ymean, cached_yvar = model.predict_y(x_predict)
# get reference (slow) predictions from underlying model
reference_fmean, reference_fvar = model.model.predict_f(x_predict)
reference_joint_mean, reference_joint_var = model.model.predict_f(x_predict, full_cov=True)
reference_ymean, reference_yvar = model.model.predict_y(x_predict)
npt.assert_allclose(cached_fmean, reference_fmean)
npt.assert_allclose(cached_ymean, reference_ymean)
npt.assert_allclose(cached_joint_mean, reference_joint_mean)
npt.assert_allclose(cached_fvar, reference_fvar, atol=1e-5)
npt.assert_allclose(cached_yvar, reference_yvar, atol=1e-5)
npt.assert_allclose(cached_joint_var, reference_joint_var, atol=1e-5)
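    # for a Gaussian likelihood, the variance of y should be the variance of f
    # plus the observation noise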
npt.assert_allclose(cached_yvar - model.get_observation_noise(), cached_fvar, atol=5e-5)
@random_seed
def test_gpflow_models_cached_predictions_faster(
gpflow_interface_factory: ModelFactoryType,
) -> None:
x = np.linspace(0, 10, 10).reshape((-1, 1))
y = fnc_2sin_x_over_3(x)
model, _ = gpflow_interface_factory(x, y)
n_calls = 100
x_predict = np.linspace(0, 5, 2).reshape((-1, 1))
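    # the wrapper caches a posterior object, so repeated predictions should be cheaper
    # than calling predict_f on the underlying GPflow model each time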
t_0 = time()
[model.predict(x_predict) for _ in range(n_calls)]
time_with_cache = time() - t_0
t_0 = time()
[model.model.predict_f(x_predict) for _ in range(n_calls)]
time_without_cache = time() - t_0
npt.assert_array_less(time_with_cache, time_without_cache)
def test_gaussian_process_regression_raises_for_invalid_init() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
with pytest.raises(ValueError):
GaussianProcessRegression(gpr_model(x, y), num_kernel_samples=-1)
with pytest.raises(ValueError):
GaussianProcessRegression(gpr_model(x, y), num_rff_features=-1)
with pytest.raises(ValueError):
GaussianProcessRegression(gpr_model(x, y), num_rff_features=0)
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
GaussianProcessRegression(gpr_model(x, y), optimizer=optimizer2)
def test_gaussian_process_regression_correctly_inits_mean_function() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = gpflow.models.GPR((x, y), gpflow.kernels.RBF())
model = GaussianProcessRegression(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Zero)
m = gpflow.models.GPR(
(x, y), gpflow.kernels.RBF(), mean_function=gpflow.mean_functions.Linear()
)
model = GaussianProcessRegression(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Linear)
def test_gaussian_process_regression_optimize_with_defaults() -> None:
data = mock_data()
model = GaussianProcessRegression(gpr_model(*data))
loss = model.model.training_loss()
model.optimize(Dataset(*data))
assert model.model.training_loss() < loss
def test_gaussian_process_regression_optimize(compile: bool) -> None:
data = mock_data()
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=compile)
model = GaussianProcessRegression(gpr_model(*data), optimizer)
loss = model.model.training_loss()
model.optimize(Dataset(*data))
assert model.model.training_loss() < loss
def test_gaussian_process_regression_correctly_returns_internal_data() -> None:
data = mock_data()
model = GaussianProcessRegression(gpr_model(*data))
returned_data = model.get_internal_data()
npt.assert_array_equal(returned_data.query_points, data[0])
npt.assert_array_equal(returned_data.observations, data[1])
@random_seed
@unittest.mock.patch(
"trieste.models.gpflow.models.GaussianProcessRegression.find_best_model_initialization"
)
@pytest.mark.parametrize("prior_for_lengthscale", [True, False])
@pytest.mark.parametrize("num_kernel_samples", [10, 0])
def test_gaussian_process_regression_correctly_counts_params_that_can_be_sampled(
mocked_model_initializer: unittest.mock.MagicMock,
dim: int,
prior_for_lengthscale: bool,
num_kernel_samples: int,
) -> None:
x = tf.constant(np.arange(1, 5 * dim + 1).reshape(-1, dim), dtype=tf.float64) # shape: [5, d]
optimizer = Optimizer(
optimizer=gpflow.optimizers.Scipy(),
minimize_args={"options": dict(maxiter=10)},
)
model = GaussianProcessRegression(
gpr_model(x, fnc_3x_plus_10(x)), optimizer=optimizer, num_kernel_samples=num_kernel_samples
)
model.model.kernel = gpflow.kernels.RBF(lengthscales=tf.ones([dim], dtype=tf.float64))
model.model.likelihood.variance.assign(1.0)
gpflow.set_trainable(model.model.likelihood, True)
if prior_for_lengthscale:
model.model.kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(model.model.kernel.lengthscales), scale=1.0
)
else:
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
model.model.likelihood.variance.prior = tfp.distributions.LogNormal(
loc=tf.cast(-2.0, dtype=tf.float64), scale=tf.cast(5.0, dtype=tf.float64)
)
dataset = Dataset(x, tf.cast(fnc_3x_plus_10(x), dtype=tf.float64))
model.optimize(dataset)
if num_kernel_samples == 0:
mocked_model_initializer.assert_not_called()
else:
mocked_model_initializer.assert_called_once()
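        # find_best_model_initialization is expected to draw num_kernel_samples
        # candidates per samplable parameter: dim lengthscales plus the likelihood variance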
num_samples = mocked_model_initializer.call_args[0][0]
npt.assert_array_equal(num_samples, num_kernel_samples * (dim + 1))
def test_gaussian_process_regression_best_initialization_changes_params_with_priors(
dim: int,
) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)
model.model.kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(model.model.kernel.lengthscales), scale=1.0
)
model.find_best_model_initialization(2)
npt.assert_allclose(1.0, model.model.kernel.variance)
npt.assert_array_equal(dim, model.model.kernel.lengthscales.shape)
npt.assert_raises(
AssertionError, npt.assert_allclose, [0.2, 0.2], model.model.kernel.lengthscales
)
def test_gaussian_process_regression_best_initialization_changes_params_with_sigmoid_bijectors(
dim: int,
) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(lengthscales=[0.2] * dim)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
model.find_best_model_initialization(2)
npt.assert_allclose(1.0, model.model.kernel.variance)
npt.assert_array_equal(dim, model.model.kernel.lengthscales.shape)
npt.assert_raises(
AssertionError, npt.assert_allclose, [0.2, 0.2], model.model.kernel.lengthscales
)
@random_seed
def test_gaussian_process_regression_best_initialization_improves_training_loss(dim: int) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(variance=0.01, lengthscales=[0.011] * dim)
upper = tf.cast([100.0] * dim, dtype=tf.float64)
lower = upper / 10000
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
    pre_init_loss = model.model.training_loss()
    model.find_best_model_initialization(10)
    post_init_loss = model.model.training_loss()
    npt.assert_array_less(post_init_loss, pre_init_loss)
@random_seed
def test_gaussian_process_regression_best_initialization_improves_likelihood(dim: int) -> None:
x = tf.constant(
np.arange(1, 1 + 10 * dim).reshape(-1, dim), dtype=gpflow.default_float()
) # shape: [10, dim]
model = GaussianProcessRegression(gpr_model(x, fnc_3x_plus_10(x)[:, 0:1]))
model.model.kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
model.model.kernel.variance.prior = tfp.distributions.LogNormal(
loc=np.float64(-2.0), scale=np.float64(1.0)
)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
model.model.kernel.lengthscales = gpflow.Parameter(
model.model.kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
    pre_init_likelihood = -model.model.training_loss()
    model.find_best_model_initialization(100)
    post_init_likelihood = -model.model.training_loss()
    npt.assert_array_less(pre_init_likelihood, post_init_likelihood)
def test_gaussian_process_regression_default_optimizer_is_correct() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = GaussianProcessRegression(gpr_model(x_observed[:10], y_observed[:10]))
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
@pytest.mark.parametrize("num_outputs", [1, 2])
def test_gaussian_process_regression_pairwise_covariance(num_outputs: int) -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
model = GaussianProcessRegression(gpr_model(x, tf.repeat(y, num_outputs, axis=1)))
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [8, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [12, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
_, predictive_covariance = model.predict_joint(all_query_points)
expected_covariance = predictive_covariance[:, :8, 8:]
actual_covariance = model.covariance_between_points(query_points_1, query_points_2)
np.testing.assert_allclose(expected_covariance, actual_covariance, atol=1e-5)
def test_gaussian_process_regression_trajectory_sampler_raises_multi_latent_gp() -> None:
x = tf.constant(np.arange(1, 5).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [4, 1]
y = fnc_3x_plus_10(x)
model = GaussianProcessRegression(gpr_model(x, tf.repeat(y, 2, axis=1)))
with pytest.raises(NotImplementedError):
model.trajectory_sampler()
@random_seed
@pytest.mark.parametrize("use_decoupled_sampler", [True, False])
@pytest.mark.parametrize("use_mean_function", [True, False])
@pytest.mark.parametrize("noise_var", [1e-5, 1e-1])
def test_gaussian_process_regression_trajectory_sampler_has_correct_samples(
use_decoupled_sampler: bool,
use_mean_function: bool,
noise_var: float,
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model = GaussianProcessRegression(
gpr_model(x, _3x_plus_gaussian_noise(x)), use_decoupled_sampler=use_decoupled_sampler
)
model.model.likelihood.variance.assign(noise_var)
if use_mean_function:
model.model.mean_function = gpflow.mean_functions.Linear()
model.update_posterior_cache()
num_samples = 100
trajectory_sampler = model.trajectory_sampler()
if use_decoupled_sampler:
assert isinstance(trajectory_sampler, DecoupledTrajectorySampler)
else:
assert isinstance(trajectory_sampler, RandomFourierFeatureTrajectorySampler)
trajectory = trajectory_sampler.get_trajectory()
x_predict = tf.constant([[1.0], [2.0], [3.0], [1.5], [2.5], [3.5]], gpflow.default_float())
x_predict_parallel = tf.expand_dims(x_predict, -2) # [N, 1, D]
x_predict_parallel = tf.tile(x_predict_parallel, [1, num_samples, 1]) # [N, B, D]
samples = trajectory(x_predict_parallel) # [N, B, 1]
sample_mean = tf.reduce_mean(samples, axis=1) # [N, 1]
sample_variance = tf.math.reduce_variance(samples, axis=1) # [N, 1]
true_mean, true_variance = model.predict(x_predict)
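    # the sample means are shifted by 1 so a relative tolerance stays meaningful near zero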
# test predictions approx correct away from data
npt.assert_allclose(sample_mean[3:] + 1.0, true_mean[3:] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[3:], true_variance[3:], rtol=0.5)
    # test predictions approx correct at data
npt.assert_allclose(sample_mean[:3] + 1.0, true_mean[:3] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[:3], true_variance[:3], rtol=0.5)
def test_gaussian_process_regression_conditional_predict_equations() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 8).reshape(-1, 1) / 8.0)
) # shape: [7, 1]
y = fnc_2sin_x_over_3(x)
gpflow_model_7 = gpr_model(x, y)
gpflow_model_7.mean_function = gpflow.mean_functions.Linear()
model7 = GaussianProcessRegression(gpflow_model_7)
gpflow_model_5 = gpr_model(x[:5, :], y[:5, :])
gpflow_model_5.mean_function = gpflow.mean_functions.Linear()
model5 = GaussianProcessRegression(gpflow_model_5)
additional_data = Dataset(x[5:, :], y[5:, :])
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [14, 1]
predj_mean7, predj_cov7 = model7.predict_joint(query_points)
predj_mean5, predj_cov5 = model5.conditional_predict_joint(query_points, additional_data)
pred_mean7, pred_var7 = model7.predict(query_points)
pred_mean5, pred_var5 = model5.conditional_predict_f(query_points, additional_data)
predy_mean7, predy_var7 = model7.predict_y(query_points)
predy_mean5, predy_var5 = model5.conditional_predict_y(query_points, additional_data)
np.testing.assert_allclose(tf.transpose(tf.linalg.diag_part(predj_cov5)), pred_var5, atol=1e-5)
np.testing.assert_allclose(predj_mean5, pred_mean5, atol=1e-5)
np.testing.assert_allclose(predj_mean5, predj_mean7, atol=1e-5)
np.testing.assert_allclose(pred_mean7, pred_mean5, atol=1e-5)
np.testing.assert_allclose(pred_var7, pred_var5, atol=1e-5)
np.testing.assert_allclose(predj_cov7, predj_cov5, atol=1e-5)
np.testing.assert_allclose(predy_mean7, predy_mean5, atol=1e-5)
np.testing.assert_allclose(predy_var7, predy_var5, atol=1e-5)
def test_gaussian_process_regression_conditional_predict_equations_broadcast() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)
) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [46, 1]
predj_mean5, predj_cov5 = model5.conditional_predict_joint(query_points, additional_data)
pred_mean5, pred_var5 = model5.conditional_predict_f(query_points, additional_data)
predy_mean5, predy_var5 = model5.conditional_predict_y(query_points, additional_data)
for i in range(3):
xi = tf.concat([x[:5, :], additional_data.query_points[i, ...]], axis=0)
yi = tf.concat([y[:5, :], additional_data.observations[i, ...]], axis=0)
modeli = GaussianProcessRegression(gpr_model(xi, yi))
predj_meani, predj_covi = modeli.predict_joint(query_points)
pred_meani, pred_vari = modeli.predict(query_points)
predy_meani, predy_vari = modeli.predict_y(query_points)
np.testing.assert_allclose(predj_mean5[i, ...], predj_meani, atol=1e-5)
np.testing.assert_allclose(pred_meani, pred_mean5[i, ...], atol=1e-5)
np.testing.assert_allclose(pred_vari, pred_var5[i, ...], atol=1e-5)
np.testing.assert_allclose(predj_covi, predj_cov5[i, ...], atol=1e-5)
        np.testing.assert_allclose(predy_meani, predy_mean5[i, ...], atol=1e-5)
        np.testing.assert_allclose(predy_vari, predy_var5[i, ...], atol=1e-5)
def test_gaussian_process_regression_conditional_predict_f_sample() -> None:
x = gpflow.utilities.to_default_float(
tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)
) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = tf.concat([0.5 * x, 2.0 * x], 0) # shape: [46, 1]
samples = model5.conditional_predict_f_sample(query_points, additional_data, num_samples=100000)
npt.assert_array_equal([3, 100000, 46, 1], samples.shape)
for i in range(3):
xi = tf.concat([x[:5, :], additional_data.query_points[i, ...]], axis=0)
yi = tf.concat([y[:5, :], additional_data.observations[i, ...]], axis=0)
modeli = GaussianProcessRegression(gpr_model(xi, yi))
predj_meani, predj_covi = modeli.predict_joint(query_points)
sample_mean = tf.reduce_mean(samples[i], axis=0)
sample_cov = tfp.stats.covariance(samples[i, :, :, 0], sample_axis=0)
np.testing.assert_allclose(sample_mean, predj_meani, atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(sample_cov, predj_covi[0], atol=1e-2, rtol=1e-2)
def test_sparse_gaussian_process_regression_raises_for_invalid_init() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
with pytest.raises(ValueError):
SparseGaussianProcessRegression(sgpr_model(x, y), num_rff_features=-1)
with pytest.raises(ValueError):
SparseGaussianProcessRegression(sgpr_model(x, y), num_rff_features=0)
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
SparseGaussianProcessRegression(sgpr_model(x, y), optimizer=optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
SparseGaussianProcessRegression(sgpr_model(x, y), optimizer=optimizer2)
def test_sparse_gaussian_process_regression_correctly_inits_mean_function() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = gpflow.models.SGPR((x, y), gpflow.kernels.RBF(), x)
model = SparseGaussianProcessRegression(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Zero)
m = gpflow.models.SGPR(
(x, y), gpflow.kernels.RBF(), x, mean_function=gpflow.mean_functions.Linear()
)
model = SparseGaussianProcessRegression(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Linear)
def test_sparse_gaussian_process_regression_default_optimizer_is_correct() -> None:
data = mock_data()
model = SparseGaussianProcessRegression(sgpr_model(*data))
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
def test_sparse_gaussian_process_regression_model_attribute() -> None:
sgpr = sgpr_model(*mock_data())
model = SparseGaussianProcessRegression(sgpr)
assert model.model is sgpr
assert isinstance(model.model, SGPR)
assert model.inducing_point_selector is None
def test_sparse_gaussian_process_regression_correctly_returns_internal_data() -> None:
data = mock_data()
model = SparseGaussianProcessRegression(sgpr_model(*data))
returned_data = model.get_internal_data()
npt.assert_array_equal(returned_data.query_points, data[0])
npt.assert_array_equal(returned_data.observations, data[1])
def test_sparse_gaussian_process_regression_update_updates_num_data() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = SparseGaussianProcessRegression(sgpr_model(x, y))
num_data = m.model.num_data.numpy()
x_new = tf.concat([x, [[10.0], [11.0]]], 0)
y_new = fnc_3x_plus_10(x_new)
m.update(Dataset(x_new, y_new))
new_num_data = m.model.num_data.numpy()
assert new_num_data - num_data == 2
def test_sparse_gaussian_process_regression_optimize_with_defaults() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
model = SparseGaussianProcessRegression(sgpr_model(x_observed, y_observed))
loss = model.model.training_loss()
model.optimize(dataset)
assert model.model.training_loss() < loss
def test_sparse_gaussian_process_regression_optimize(compile: bool) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=compile)
model = SparseGaussianProcessRegression(sgpr_model(x_observed, y_observed), optimizer=optimizer)
loss = model.model.training_loss()
model.optimize(dataset)
assert model.model.training_loss() < loss
def test_sparse_gaussian_process_regression_trajectory_sampler_raises_multi_latent_gp() -> None:
data = mock_data()
model = SparseGaussianProcessRegression(sgpr_model(*data, num_latent_gps=2))
with pytest.raises(NotImplementedError):
model.trajectory_sampler()
@random_seed
@pytest.mark.parametrize("noise_var", [1e-5, 1e-1])
@pytest.mark.parametrize("use_mean_function", [True, False])
def test_sparse_gaussian_process_regression_trajectory_sampler_has_correct_samples(
use_mean_function: bool,
noise_var: float,
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
sgpr = SGPR(
(x, _3x_plus_gaussian_noise(x)), gpflow.kernels.Matern32(), x, noise_variance=noise_var
)
model = SparseGaussianProcessRegression(sgpr)
if use_mean_function:
model.model.mean_function = gpflow.mean_functions.Linear()
num_samples = 100
trajectory_sampler = model.trajectory_sampler()
assert isinstance(trajectory_sampler, DecoupledTrajectorySampler)
trajectory = trajectory_sampler.get_trajectory()
x_predict = tf.constant([[1.0], [2.0], [3.0], [1.5], [2.5], [3.5]], gpflow.default_float())
x_predict_parallel = tf.expand_dims(x_predict, -2) # [N, 1, D]
x_predict_parallel = tf.tile(x_predict_parallel, [1, num_samples, 1]) # [N, B, D]
samples = trajectory(x_predict_parallel) # [N, B, 1]
sample_mean = tf.reduce_mean(samples, axis=1) # [N, 1]
sample_variance = tf.math.reduce_variance(samples, axis=1) # [N, 1]
true_mean, true_variance = model.predict(x_predict)
# test predictions approx correct away from data
npt.assert_allclose(sample_mean[3:] + 1.0, true_mean[3:] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[3:], true_variance[3:], rtol=0.25)
# test predictions almost correct at data
npt.assert_allclose(sample_mean[:3] + 1.0, true_mean[:3] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[:3], true_variance[:3], rtol=0.25)
def test_sparse_gaussian_process_regression_get_inducing_raises_multi_latent_gp() -> None:
data = mock_data()
model = SparseGaussianProcessRegression(sgpr_model(*data, num_latent_gps=2))
with pytest.raises(NotImplementedError):
model.get_inducing_variables()
def test_sparse_gaussian_process_regression_correctly_returns_inducing_points() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
model = SparseGaussianProcessRegression(sgpr_model(data.query_points, data.observations))
model.update(data)
inducing_points, q_mu, q_sqrt, w = model.get_inducing_variables()
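    # SGPR keeps no explicit q(u); compute_qu reconstructs its mean and covariance for
    # comparison with the values reported by get_inducing_variables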
ref_q_mu, ref_q_var = model.model.compute_qu()
ref_q_sqrt = tf.linalg.cholesky(ref_q_var)
ref_q_sqrt = tf.expand_dims(ref_q_sqrt, 0)
npt.assert_allclose(inducing_points, model.model.inducing_variable.Z, atol=1e-5)
npt.assert_allclose(q_mu, ref_q_mu, atol=1e-5)
npt.assert_allclose(q_sqrt, ref_q_sqrt, atol=1e-5)
assert not w
@pytest.mark.parametrize(
"selector",
[
UniformInducingPointSelector(Box([0.0], [1.0])),
RandomSubSampleInducingPointSelector(),
KMeansInducingPointSelector(),
ConditionalVarianceReduction(),
ConditionalImprovementReduction(),
],
)
def test_sparse_gaussian_process_regression_assigns_correct_inducing_point_selector(
selector: InducingPointSelector[SparseGaussianProcessRegression],
) -> None:
model = sgpr_model(*mock_data())
sv = SparseGaussianProcessRegression(model, inducing_point_selector=selector)
assert isinstance(sv.inducing_point_selector, type(selector))
@pytest.mark.parametrize("recalc_every_model_update", [True, False])
def test_sparse_gaussian_process_regression_chooses_new_inducing_points_correct_number_of_times(
recalc_every_model_update: bool,
) -> None:
model = sgpr_model(*mock_data())
selector = UniformInducingPointSelector(
Box([0.0], [1.0]), recalc_every_model_update=recalc_every_model_update
)
sv = SparseGaussianProcessRegression(model, inducing_point_selector=selector)
old_inducing_points = sv.model.inducing_variable.Z.numpy()
sv.update(Dataset(*mock_data()))
first_inducing_points = sv.model.inducing_variable.Z.numpy()
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, first_inducing_points
)
sv.update(Dataset(*mock_data()))
second_inducing_points = sv.model.inducing_variable.Z.numpy()
if recalc_every_model_update:
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, second_inducing_points
)
npt.assert_raises(
AssertionError, npt.assert_array_equal, first_inducing_points, second_inducing_points
)
else:
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, second_inducing_points
)
npt.assert_array_equal(first_inducing_points, second_inducing_points)
@random_seed
def test_sparse_gaussian_process_regression_update_inducing_points_raises_changed_shape() -> None:
model = SparseGaussianProcessRegression(
sgpr_model(
tf.zeros([5, 2], gpflow.default_float()), tf.zeros([5, 1], gpflow.default_float())
),
)
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES): # number of inducing points (2) cannot change
model._update_inducing_variables(tf.zeros([3, 2], gpflow.default_float()))
@pytest.mark.parametrize(
"new_data",
[Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))],
)
def test_sparse_gaussian_process_regression_update_raises_for_invalid_shapes(
new_data: Dataset,
) -> None:
model = SparseGaussianProcessRegression(
sgpr_model(
tf.zeros([1, 4], gpflow.default_float()), tf.zeros([1, 1], gpflow.default_float())
),
)
with pytest.raises(ValueError):
model.update(new_data)
def test_variational_gaussian_process_raises_for_invalid_init() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
with pytest.raises(ValueError):
VariationalGaussianProcess(vgp_model(x, y), natgrad_gamma=1)
with pytest.raises(ValueError):
VariationalGaussianProcess(vgp_model(x, y), num_rff_features=-1)
with pytest.raises(ValueError):
VariationalGaussianProcess(vgp_model(x, y), num_rff_features=0)
with pytest.raises(ValueError):
optimizer = Optimizer(gpflow.optimizers.Scipy())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=True)
with pytest.raises(ValueError):
optimizer = BatchOptimizer(gpflow.optimizers.Scipy())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=True)
with pytest.raises(ValueError):
optimizer = Optimizer(tf.optimizers.Adam())
VariationalGaussianProcess(vgp_model(x, y), optimizer=optimizer, use_natgrads=False)
def test_variational_gaussian_process_regression_correctly_inits_mean_function() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = gpflow.models.VGP((x, y), gpflow.kernels.RBF(), x)
model = VariationalGaussianProcess(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Zero)
m = gpflow.models.VGP(
(x, y), gpflow.kernels.RBF(), x, mean_function=gpflow.mean_functions.Linear()
)
model = VariationalGaussianProcess(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Linear)
def test_variational_gaussian_process_update_updates_num_data() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
y = fnc_3x_plus_10(x)
m = VariationalGaussianProcess(vgp_model(x, y))
num_data = m.model.num_data.numpy()
x_new = tf.concat([x, [[10.0], [11.0]]], 0)
y_new = fnc_3x_plus_10(x_new)
m.update(Dataset(x_new, y_new))
new_num_data = m.model.num_data.numpy()
assert new_num_data - num_data == 2
def test_variational_gaussian_process_correctly_returns_inducing_points() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
model = VariationalGaussianProcess(vgp_model(data.query_points, data.observations))
model.update(data)
inducing_points, q_mu, q_sqrt, whiten = model.get_inducing_variables()
npt.assert_allclose(inducing_points, x, atol=1e-5)
npt.assert_allclose(q_mu, model.model.q_mu, atol=1e-5)
npt.assert_allclose(q_sqrt, model.model.q_sqrt, atol=1e-5)
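    # the VGP wrapper reports a whitened parameterisation with inducing points at the data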
assert whiten
def test_variational_gaussian_process_update() -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
m = VariationalGaussianProcess(vgp_model(data.query_points, data.observations))
reference_model = vgp_model(data.query_points, data.observations)
npt.assert_allclose(m.model.q_mu, reference_model.q_mu, atol=1e-5)
npt.assert_allclose(m.model.q_sqrt, reference_model.q_sqrt, atol=1e-5)
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
new_data = Dataset(x_new, fnc_3x_plus_10(x_new))
m.update(new_data)
reference_model_new = vgp_model(new_data.query_points, new_data.observations)
npt.assert_allclose(m.model.q_mu, reference_model_new.q_mu, atol=1e-5)
npt.assert_allclose(m.model.q_sqrt, reference_model_new.q_sqrt, atol=1e-5)
@random_seed
def test_variational_gaussian_process_update_q_mu_sqrt_unchanged() -> None:
x_observed = tf.constant(np.arange(10).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = fnc_2sin_x_over_3(x_observed)
model = VariationalGaussianProcess(vgp_matern_model(x_observed, y_observed))
old_q_mu = model.model.q_mu.numpy()
old_q_sqrt = model.model.q_sqrt.numpy()
data = Dataset(x_observed, y_observed)
model.update(data)
new_q_mu = model.model.q_mu.numpy()
new_q_sqrt = model.model.q_sqrt.numpy()
npt.assert_allclose(old_q_mu, new_q_mu, atol=1e-5)
npt.assert_allclose(old_q_sqrt, new_q_sqrt, atol=1e-5)
def test_variational_gaussian_process_trajectory_sampler_raises_multi_latent_gp() -> None:
data = mock_data()
model = VariationalGaussianProcess(vgp_model(*data, num_latent_gps=2))
with pytest.raises(NotImplementedError):
model.trajectory_sampler()
@random_seed
@pytest.mark.parametrize("use_mean_function", [True, False])
@pytest.mark.parametrize("noise_var", [1e-5, 1e-1])
def test_variational_gaussian_process_trajectory_sampler_has_correct_samples(
use_mean_function: bool,
noise_var: float,
) -> None:
x_observed = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
optimizer = BatchOptimizer(tf.optimizers.Adam(), max_iter=20)
likelihood = gpflow.likelihoods.Gaussian(noise_var)
kernel = gpflow.kernels.Matern32(lengthscales=0.2)
if use_mean_function:
mean = gpflow.mean_functions.Linear()
else:
mean = gpflow.mean_functions.Zero()
vgp = VGP((x_observed, y_observed), kernel, likelihood, mean_function=mean)
model = VariationalGaussianProcess(vgp, optimizer=optimizer, use_natgrads=True)
model.update(Dataset(x_observed, y_observed))
model.optimize(Dataset(x_observed, y_observed))
num_samples = 100
trajectory_sampler = model.trajectory_sampler()
assert isinstance(trajectory_sampler, DecoupledTrajectorySampler)
trajectory = trajectory_sampler.get_trajectory()
x_predict = tf.constant([[1.0], [2.0], [3.0], [1.5], [2.5], [3.5]], gpflow.default_float())
x_predict_parallel = tf.expand_dims(x_predict, -2) # [N, 1, D]
x_predict_parallel = tf.tile(x_predict_parallel, [1, num_samples, 1]) # [N, B, D]
samples = trajectory(x_predict_parallel) # [N, B, 1]
sample_mean = tf.reduce_mean(samples, axis=1) # [N, 1]
sample_variance = tf.math.reduce_variance(samples, axis=1) # [N, 1]
true_mean, true_variance = model.predict(x_predict)
# test predictions approx correct away from data
npt.assert_allclose(sample_mean[3:] + 1.0, true_mean[3:] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[3:], true_variance[3:], rtol=0.25)
# test predictions correct at data
npt.assert_allclose(sample_mean[:3] + 1.0, true_mean[:3] + 1.0, rtol=0.1)
npt.assert_allclose(sample_variance[:3], true_variance[:3], rtol=0.25)
@random_seed
def test_variational_gaussian_process_predict() -> None:
x_observed = tf.constant(np.arange(3).reshape((-1, 1)), dtype=gpflow.default_float())
y_observed = _3x_plus_gaussian_noise(x_observed)
model = VariationalGaussianProcess(vgp_model(x_observed, y_observed))
internal_model = model.model
gpflow.optimizers.Scipy().minimize(
internal_model.training_loss_closure(),
internal_model.trainable_variables,
)
model.update_posterior_cache()
x_predict = tf.constant([[1.5]], gpflow.default_float())
mean, variance = model.predict(x_predict)
mean_y, variance_y = model.predict_y(x_predict)
reference_model = vgp_model(x_observed, y_observed)
reference_model.data = (
tf.Variable(
reference_model.data[0],
trainable=False,
shape=[None, *reference_model.data[0].shape[1:]],
),
tf.Variable(
reference_model.data[1],
trainable=False,
shape=[None, *reference_model.data[1].shape[1:]],
),
)
gpflow.optimizers.Scipy().minimize(
reference_model.training_loss_closure(),
reference_model.trainable_variables,
)
reference_mean, reference_variance = reference_model.predict_f(x_predict)
npt.assert_allclose(mean, reference_mean)
npt.assert_allclose(variance, reference_variance, atol=1e-3)
npt.assert_allclose(variance_y - model.get_observation_noise(), variance, atol=5e-5)
@pytest.mark.parametrize("use_natgrads", [True, False])
def test_variational_gaussian_process_optimize_with_and_without_natgrads(
batcher: DatasetTransformer, compile: bool, use_natgrads: bool
) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
if use_natgrads:
optimizer = BatchOptimizer(
tf.optimizers.Adam(),
max_iter=10,
batch_size=10,
dataset_builder=batcher,
compile=compile,
)
else:
optimizer = Optimizer(gpflow.optimizers.Scipy(), compile=compile) # type:ignore
model = VariationalGaussianProcess(
vgp_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=use_natgrads
)
loss = model.model.training_loss()
model.optimize(dataset)
assert model.model.training_loss() < loss
def test_variational_gaussian_process_optimize_natgrads_only_updates_variational_params(
compile: bool,
) -> None:
x_observed = np.linspace(0, 100, 10).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
class DummyBatchOptimizer(BatchOptimizer):
def optimize(self, model: tf.Module, dataset: Dataset) -> None:
pass
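    # the dummy optimizer skips the gradient-based step entirely, so any parameter
    # changes below must come from the natural-gradient updates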
optimizer = DummyBatchOptimizer(tf.optimizers.Adam(), compile=compile, max_iter=10)
model = VariationalGaussianProcess(
vgp_matern_model(x_observed[:10], y_observed[:10]), optimizer=optimizer, use_natgrads=True
)
old_num_trainable_params = len(model.model.trainable_variables)
old_kernel_params = model.get_kernel().parameters[0].numpy()
old_q_mu = model.model.q_mu.numpy()
old_q_sqrt = model.model.q_sqrt.numpy()
model.optimize(dataset)
new_num_trainable_params = len(model.model.trainable_variables)
new_kernel_params = model.get_kernel().parameters[0].numpy()
new_q_mu = model.model.q_mu.numpy()
new_q_sqrt = model.model.q_sqrt.numpy()
npt.assert_allclose(old_kernel_params, new_kernel_params, atol=1e-3)
npt.assert_equal(old_num_trainable_params, new_num_trainable_params)
npt.assert_raises(AssertionError, npt.assert_allclose, old_q_mu, new_q_mu)
npt.assert_raises(AssertionError, npt.assert_allclose, old_q_sqrt, new_q_sqrt)
@pytest.mark.parametrize("use_natgrads", [True, False])
def test_variational_gaussian_process_default_optimizer_is_correct(use_natgrads: bool) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = VariationalGaussianProcess(
vgp_model(x_observed[:10], y_observed[:10]), use_natgrads=use_natgrads
)
if use_natgrads:
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
else:
assert isinstance(model.optimizer, Optimizer)
assert isinstance(model.optimizer.optimizer, gpflow.optimizers.Scipy)
def test_sparse_variational_raises_for_model_with_q_diag_true() -> None:
x = mock_data()[0]
model = SVGP(
gpflow.kernels.Matern32(),
gpflow.likelihoods.Gaussian(),
x[:2],
num_data=len(x),
q_diag=True,
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
SparseVariational(model)
def test_sparse_variational_model_attribute() -> None:
model = svgp_model(*mock_data())
sv = SparseVariational(model)
assert sv.model is model
assert isinstance(sv.model, SVGP)
assert sv.inducing_point_selector is None
@pytest.mark.parametrize(
"selector",
[
UniformInducingPointSelector(Box([0.0], [1.0])),
RandomSubSampleInducingPointSelector(),
KMeansInducingPointSelector(),
ConditionalVarianceReduction(),
ConditionalImprovementReduction(),
],
)
def test_sparse_variational_assigns_correct_inducing_point_selector(
selector: InducingPointSelector[SparseVariational],
) -> None:
model = svgp_model(*mock_data())
sv = SparseVariational(model, inducing_point_selector=selector)
assert isinstance(sv.inducing_point_selector, type(selector))
@pytest.mark.parametrize("recalc_every_model_update", [True, False])
def test_sparse_variational_chooses_new_inducing_points_correct_number_of_times(
recalc_every_model_update: bool,
) -> None:
model = svgp_model(*mock_data())
selector = UniformInducingPointSelector(
Box([0.0], [1.0]), recalc_every_model_update=recalc_every_model_update
)
sv = SparseVariational(model, inducing_point_selector=selector)
old_inducing_points = sv.model.inducing_variable.Z.numpy()
sv.update(Dataset(*mock_data()))
first_inducing_points = sv.model.inducing_variable.Z.numpy()
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, first_inducing_points
)
sv.update(Dataset(*mock_data()))
second_inducing_points = sv.model.inducing_variable.Z.numpy()
if recalc_every_model_update:
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, second_inducing_points
)
npt.assert_raises(
AssertionError, npt.assert_array_equal, first_inducing_points, second_inducing_points
)
else:
npt.assert_raises(
AssertionError, npt.assert_array_equal, old_inducing_points, second_inducing_points
)
npt.assert_array_equal(first_inducing_points, second_inducing_points)
@random_seed
@pytest.mark.parametrize(
"mo_type", ["shared+shared", "separate+shared", "separate+separate", "auto"]
)
def test_sparse_variational_update_updates_num_data(mo_type: str) -> None:
x = tf.constant(np.arange(1, 7).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [6, 1]
svgp = svgp_model_by_type(x, mo_type, True)
model = SparseVariational(svgp)
model.update(Dataset(tf.zeros([5, 1]), tf.zeros([5, 2])))
assert model.model.num_data == 5
@pytest.mark.parametrize("whiten", [True, False])
def test_sparse_variational_correctly_returns_inducing_points(whiten: bool) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
data = Dataset(x, fnc_3x_plus_10(x))
model = SparseVariational(svgp_model(data.query_points, data.observations))
model.model.whiten = whiten
model.update(data)
inducing_points, q_mu, q_sqrt, w = model.get_inducing_variables()
npt.assert_allclose(inducing_points, model.model.inducing_variable.Z, atol=1e-5)
npt.assert_allclose(q_mu, model.model.q_mu, atol=1e-5)
npt.assert_allclose(q_sqrt, model.model.q_sqrt, atol=1e-5)
assert whiten == w
@random_seed
@pytest.mark.parametrize(
"mo_type", ["shared+shared", "separate+shared", "separate+separate", "auto"]
)
@pytest.mark.parametrize("whiten", [True, False])
def test_sparse_variational_correctly_returns_inducing_points_for_multi_output(
whiten: bool, mo_type: str
) -> None:
x = tf.constant(np.arange(6).reshape(-1, 1), dtype=gpflow.default_float())
svgp = svgp_model_by_type(x, mo_type, whiten)
model = SparseVariational(svgp)
model.model.whiten = whiten
model.update(Dataset(tf.zeros([5, 1]), tf.zeros([5, 2])))
inducing_points, q_mu, q_sqrt, w = model.get_inducing_variables()
if isinstance(model.model.inducing_variable, SharedIndependentInducingVariables):
npt.assert_allclose(
inducing_points,
cast(TensorType, model.model.inducing_variable.inducing_variable).Z,
atol=1e-5,
)
elif isinstance(model.model.inducing_variable, SeparateIndependentInducingVariables):
for i, points in enumerate(model.model.inducing_variable.inducing_variables):
npt.assert_allclose(inducing_points[i], cast(TensorType, points).Z, atol=1e-5)
else:
npt.assert_allclose(inducing_points, model.model.inducing_variable.Z, atol=1e-5)
npt.assert_allclose(q_mu, model.model.q_mu, atol=1e-5)
npt.assert_allclose(q_sqrt, model.model.q_sqrt, atol=1e-5)
assert whiten == w
@random_seed
def test_sparse_variational_updates_inducing_points_raises_if_you_change_shape() -> None:
model = SparseVariational(
svgp_model(tf.zeros([5, 2]), tf.zeros([5, 1])),
)
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES): # number of inducing points (2) cannot change
model._update_inducing_variables(tf.zeros([3, 2]))
@pytest.mark.parametrize(
"new_data",
[Dataset(tf.zeros([3, 5]), tf.zeros([3, 1])), Dataset(tf.zeros([3, 4]), tf.zeros([3, 2]))],
)
def test_sparse_variational_update_raises_for_invalid_shapes(new_data: Dataset) -> None:
model = SparseVariational(
svgp_model(tf.zeros([1, 4]), tf.zeros([1, 1])),
)
with pytest.raises(ValueError):
model.update(new_data)
def test_sparse_variational_correctly_inits_mean_function() -> None:
x_np = np.arange(5, dtype=np.float64).reshape(-1, 1)
x = tf.convert_to_tensor(x_np, x_np.dtype)
m = gpflow.models.SVGP(gpflow.kernels.RBF(), gpflow.likelihoods.Gaussian(), x, num_data=len(x))
model = SparseVariational(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Zero)
m = gpflow.models.SVGP(
gpflow.kernels.RBF(),
gpflow.likelihoods.Gaussian(),
x,
mean_function=gpflow.mean_functions.Linear(),
num_data=len(x),
)
model = SparseVariational(m)
assert isinstance(model.get_mean_function(), gpflow.mean_functions.Linear)
def test_sparse_variational_optimize_with_defaults() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
optimizer = BatchOptimizer(tf.optimizers.Adam(), max_iter=20)
model = SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer)
loss = model.model.training_loss(data)
model.optimize(dataset)
assert model.model.training_loss(data) < loss
def test_sparse_variational_optimize(batcher: DatasetTransformer, compile: bool) -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
data = x_observed, y_observed
dataset = Dataset(*data)
optimizer = BatchOptimizer(
tf.optimizers.Adam(),
max_iter=10,
batch_size=10,
dataset_builder=batcher,
compile=compile,
)
model = SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer)
loss = model.model.training_loss(data)
model.optimize(dataset)
assert model.model.training_loss(data) < loss
@random_seed
@pytest.mark.parametrize("use_mean_function", [True, False])
@pytest.mark.parametrize("noise_var", [1e-5, 1e-2])
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize("kernel_type", ["single", "shared", "separate"])
def test_sparse_variational_trajectory_sampler_has_correct_samples(
use_mean_function: bool,
noise_var: float,
whiten: bool,
kernel_type: str,
) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
y = _3x_plus_gaussian_noise(x)
if kernel_type != "single":
y = tf.tile(y, [1, 2])
if use_mean_function:
mean = gpflow.mean_functions.Linear()
else:
mean = gpflow.mean_functions.Zero()
svgp = svgp_model_by_type(x, kernel_type + "+shared", whiten, len(x), noise_var, mean)
optimizer = BatchOptimizer(tf.optimizers.Adam(1.0), max_iter=10)
model = SparseVariational(svgp, optimizer=optimizer)
model.update(Dataset(x, y))
model.optimize(Dataset(x, y))
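    # use a large number of trajectories so the empirical moments are close to the model predictions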
num_samples = 6000
trajectory_sampler = model.trajectory_sampler()
assert isinstance(trajectory_sampler, DecoupledTrajectorySampler)
trajectory = trajectory_sampler.get_trajectory()
x_predict = tf.constant([[1.0], [2.0], [3.0], [1.5], [2.5], [3.5]], gpflow.default_float())
x_predict_parallel = tf.expand_dims(x_predict, -2) # [N, 1, D]
x_predict_parallel = tf.tile(x_predict_parallel, [1, num_samples, 1]) # [N, B, D]
samples = trajectory(x_predict_parallel) # [N, B, L]
sample_mean = tf.reduce_mean(samples, axis=1) # [N, L]
sample_variance = tf.math.reduce_variance(samples, axis=1) # [N, L]
true_mean, true_variance = model.predict(x_predict)
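    # shifting the means by 1.0 avoids blow-ups in the relative error where they are near zero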
# test predictions approx correct away from data
npt.assert_allclose(sample_mean[3:] + 1.0, true_mean[3:] + 1.0, rtol=0.04)
npt.assert_allclose(sample_variance[3:], true_variance[3:], rtol=0.1)
# test predictions almost correct at data
npt.assert_allclose(sample_mean[:3] + 1.0, true_mean[:3] + 1.0, rtol=0.04)
npt.assert_allclose(sample_variance[:3], true_variance[:3], rtol=0.1)
def test_sparse_variational_default_optimizer_is_correct() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
model = SparseVariational(svgp_model(x_observed, y_observed))
assert isinstance(model.optimizer, BatchOptimizer)
assert isinstance(model.optimizer.optimizer, tf.optimizers.Optimizer)
def test_sparse_variational_raises_for_invalid_init() -> None:
x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
y_observed = _3x_plus_gaussian_noise(x_observed)
with pytest.raises(ValueError):
SparseVariational(svgp_model(x_observed, y_observed), num_rff_features=0)
with pytest.raises(ValueError):
SparseVariational(svgp_model(x_observed, y_observed), num_rff_features=-1)
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
SparseVariational(svgp_model(x_observed, y_observed), optimizer=optimizer2)
@random_seed
@pytest.mark.parametrize("whiten", [True, False])
@pytest.mark.parametrize(
"mo_type", ["shared+shared", "separate+shared", "separate+separate", "auto"]
)
def test_sparse_variational_pairwise_covariance(
whiten: bool, mo_type: str
) -> None:
x = tf.constant(np.arange(1, 7).reshape(-1, 1), dtype=gpflow.default_float()) # shape: [6, 1]
y1 = fnc_3x_plus_10(x)
y2 = y1 * 0.5
svgp = svgp_model_by_type(x, mo_type, whiten)
model = SparseVariational(svgp, BatchOptimizer(tf.optimizers.Adam(), max_iter=3, batch_size=10))
model.model.whiten = whiten
model.optimize(Dataset(x, tf.concat([y1, y2], axis=-1)))
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0) # shape: [12, 1]
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0) # shape: [18, 1]
all_query_points = tf.concat([query_points_1, query_points_2], 0)
_, predictive_covariance = model.predict_joint(all_query_points)
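    # the cross-covariance between the two query sets is the off-diagonal block [:12, 12:]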
expected_covariance = predictive_covariance[:, :12, 12:]
actual_covariance = model.covariance_between_points(query_points_1, query_points_2)
np.testing.assert_allclose(expected_covariance, actual_covariance, atol=1e-4)
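# Stub selector that ignores the model and data and always proposes the same fixed inducing
# points, so tests below can move the inducing points to a known location.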
class DummyInducingPointSelector(InducingPointSelector[GPflowPredictor]):
def __init__(self, new_inducing_points: TensorType, recalc_every_model_update: bool = True):
super().__init__(recalc_every_model_update)
self._new_inducing_points = new_inducing_points
def _recalculate_inducing_points(
self, M: int, model: ProbabilisticModelType, dataset: Dataset
) -> TensorType:
return self._new_inducing_points
@random_seed
@pytest.mark.parametrize("whiten", [False, True])
def test_sparse_variational_inducing_updates_preserves_posterior(
whiten: bool,
) -> None:
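    # disable all jitter so the posteriors before and after the inducing point update
    # can be compared to tight tolerances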
default_jitter = 0.0
with as_context(Config(jitter=default_jitter)), unittest.mock.patch.object(
DEFAULTS, "JITTER", default_jitter
):
x = tf.constant(np.linspace(0.0, 1.0, 8).reshape(-1, 1), dtype=gpflow.default_float())
y1 = fnc_3x_plus_10(x)
num_inducing_points = 4
xnew = tf.constant(
np.linspace(0.31, 0.77, num_inducing_points).reshape(-1, 1),
dtype=gpflow.default_float(),
)
svgp = svgp_model_with_mean(x, y1, whiten, num_inducing_points)
inducing_point_selector = DummyInducingPointSelector(xnew)
model = SparseVariational(
svgp,
BatchOptimizer(tf.optimizers.Adam(), max_iter=3, batch_size=10),
inducing_point_selector=inducing_point_selector,
)
np.testing.assert_array_equal(model.model.inducing_variable.Z, x[:num_inducing_points])
        old_mu, old_cov = model.predict_joint(xnew)  # posterior before the inducing points move
        model.update(Dataset(x, y1))  # this changes the inducing points to xnew
        np.testing.assert_array_equal(model.model.inducing_variable.Z, xnew)
        new_mu, new_cov = model.predict_joint(xnew)  # posterior after the inducing points move
        np.testing.assert_allclose(old_mu, new_mu, atol=1e-9)
        np.testing.assert_allclose(old_cov, new_cov, atol=1e-9)
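# Helpers below build small multifidelity datasets and models; the final input column
# encodes the fidelity level (0, 1 or 2).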
def multifidelity_autoregressive_nd_dataset(n_dims: int = 1) -> Dataset:
dataset = Dataset(
tf.Variable(
[
[0.0] * n_dims + [0.0],
[1.0] * n_dims + [1.0],
[2.0] * n_dims + [2.0],
[3.0] * n_dims + [1.0],
[4.0] * n_dims + [2.0],
[5.0] * n_dims + [0.0],
],
dtype=tf.float64,
),
tf.Variable([[2.0], [4.0], [6.0], [8.0], [10.0], [12.0]], dtype=tf.float64),
)
return dataset
def multifidelity_autoregressive_model(n_dims: int) -> MultifidelityAutoregressive:
search_space = Box([0.0] * n_dims, [10.0] * n_dims)
gprs = build_multifidelity_autoregressive_models(
multifidelity_autoregressive_nd_dataset(n_dims=n_dims),
num_fidelities=3,
input_search_space=search_space,
)
return MultifidelityAutoregressive(gprs)
def multifidelity_nonlinear_autoregressive_model(
n_dims: int,
) -> MultifidelityNonlinearAutoregressive:
search_space = Box([0.0] * n_dims, [10.0] * n_dims)
gprs = build_multifidelity_nonlinear_autoregressive_models(
multifidelity_autoregressive_nd_dataset(n_dims=n_dims),
num_fidelities=3,
input_search_space=search_space,
)
return MultifidelityNonlinearAutoregressive(gprs)
MULTIFIDELITY_MODEL_BUILDER_TYPE = Callable[
[int], Union[MultifidelityAutoregressive, MultifidelityNonlinearAutoregressive]
]
@pytest.mark.parametrize(
"input_data,output_shape",
(
([[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0], [1.1, 1.0, 1.0], [2.1, 2.0, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0, 0.0], [1.1, 1.0, 1.0, 1.0], [2.1, 2.0, 2.0, 2.0]], [3, 1]),
([[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5, [5, 3, 1]),
([[[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5] * 7, [7, 5, 3, 1]),
),
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_predict_returns_expected_shape(
input_data: list[list[Union[float, list[float]]]],
output_shape: list[int],
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
query_points = tf.Variable(input_data, dtype=tf.float64)
D = query_points.shape[-1] - 1
model = multifidelity_model(D)
pred_mean, pred_var = model.predict(query_points)
assert pred_mean.shape == output_shape
assert pred_var.shape == output_shape
@pytest.mark.parametrize(
"input_data,output_shape",
(
([[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0], [1.1, 1.0, 1.0], [2.1, 2.0, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0, 0.0], [1.1, 1.0, 1.0, 1.0], [2.1, 2.0, 2.0, 2.0]], [3, 1]),
([[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5, [5, 3, 1]),
([[[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5] * 7, [7, 5, 3, 1]),
),
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_predict_y_returns_expected_shape(
input_data: list[list[Union[float, list[float]]]],
output_shape: list[int],
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
query_points = tf.Variable(input_data, dtype=tf.float64)
D = query_points.shape[-1] - 1
model = multifidelity_model(D)
pred_mean, pred_var = model.predict_y(query_points)
assert pred_mean.shape == output_shape
assert pred_var.shape == output_shape
@pytest.mark.parametrize(
"input_data,output_shape",
(
([[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0], [1.1, 1.0, 1.0], [2.1, 2.0, 2.0]], [3, 1]),
([[0.1, 0.0, 0.0, 0.0], [1.1, 1.0, 1.0, 1.0], [2.1, 2.0, 2.0, 2.0]], [3, 1]),
([[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5, [5, 3, 1]),
([[[[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]]] * 5] * 7, [7, 5, 3, 1]),
),
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_sample_returns_expected_shape(
input_data: list[list[Union[float, list[float]]]],
output_shape: list[int],
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
query_points = tf.Variable(input_data, dtype=tf.float64)
D = query_points.shape[-1] - 1
model = multifidelity_model(D)
samples = model.sample(query_points, 13)
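    # the sample dimension is inserted just before the trailing [N, L] dimensions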
assert samples.shape == output_shape[:-2] + [13] + output_shape[-2:]
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_covariance_with_top_fidelity_returns_expected_shape(
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
model = multifidelity_model(1)
input_data = tf.Variable([[0.1, 0.0], [1.1, 1.0], [2.1, 2.0]], dtype=tf.float64)
covs = model.covariance_with_top_fidelity(input_data)
assert covs.shape == [3, 1]
@pytest.mark.parametrize(
"input_data", (([[0.1, 0.0], [1.1, -1.0], [2.1, 2.0]]), [[0.1, 0.0], [1.1, 3.0], [2.1, 2.0]])
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_raises_bad_fidelity(
input_data: list[list[float]],
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
input_data = tf.Variable(input_data, dtype=tf.float64)
model = multifidelity_model(1)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.predict(input_data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.predict_y(input_data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.sample(input_data, 13)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.covariance_with_top_fidelity(input_data)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_update_increases_internal_data_count(
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
model = multifidelity_model(1)
if isinstance(model, MultifidelityAutoregressive):
initial_fid_0_data_length = tf.shape(
model.lowest_fidelity_signal_model.get_internal_data().query_points
)[0]
initial_fid_1_data_length = tf.shape(
model.fidelity_residual_models[1].get_internal_data().query_points
)[0]
initial_fid_2_data_length = tf.shape(
model.fidelity_residual_models[2].get_internal_data().query_points
)[0]
else:
initial_fid_0_data_length = tf.shape(
model.fidelity_models[0].get_internal_data().query_points
)[0]
initial_fid_1_data_length = tf.shape(
model.fidelity_models[1].get_internal_data().query_points
)[0]
initial_fid_2_data_length = tf.shape(
model.fidelity_models[2].get_internal_data().query_points
)[0]
new_data = Dataset(
tf.Variable([[0.2, 0.0], [0.3, 0.0], [0.5, 1.0]], dtype=tf.float64),
tf.Variable([[1.0], [2.0], [3.0]], dtype=tf.float64),
)
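    # new_data contains two fidelity-0 points and one fidelity-1 point, and no fidelity-2 data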
model.update(multifidelity_autoregressive_nd_dataset(n_dims=1) + new_data)
if isinstance(model, MultifidelityAutoregressive):
assert (
tf.shape(model.lowest_fidelity_signal_model.get_internal_data().query_points)[0]
== initial_fid_0_data_length + 2
)
assert (
tf.shape(model.fidelity_residual_models[1].get_internal_data().query_points)[0]
== initial_fid_1_data_length + 1
)
assert (
tf.shape(model.fidelity_residual_models[2].get_internal_data().query_points)[0]
== initial_fid_2_data_length
)
else:
assert (
tf.shape(model.fidelity_models[0].get_internal_data().query_points)[0]
== initial_fid_0_data_length + 2
)
assert (
tf.shape(model.fidelity_models[1].get_internal_data().query_points)[0]
== initial_fid_1_data_length + 1
)
assert (
tf.shape(model.fidelity_models[2].get_internal_data().query_points)[0]
== initial_fid_2_data_length
)
@pytest.mark.parametrize(
"new_data,problem",
(
([[0.0, 8.0]], "too_high_fid"),
([[0.0, -1.0]], "negative_fid"),
([[0.0, 1.3]], "non_int_fid"),
),
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_update_raises_for_bad_new_data(
new_data: list[list[float]],
problem: str,
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
new_dataset = Dataset(
tf.Variable(new_data, dtype=tf.float64), tf.Variable([[0.1]], dtype=tf.float64)
)
model = multifidelity_model(1)
dataset = multifidelity_autoregressive_nd_dataset()
if problem == "non_int_fid":
with pytest.raises(tf.errors.InvalidArgumentError):
model.update(dataset + new_dataset)
else:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.update(dataset + new_dataset)
@pytest.mark.parametrize(
"model_type",
("linear", "nonlinear"),
)
def test_multifidelity_autoregressive_optimize_reduces_losses(model_type: str) -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low) + tf.random.normal(xs_low.shape, mean=0, stddev=1e-1, dtype=tf.float64)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model: Union[MultifidelityAutoregressive, MultifidelityNonlinearAutoregressive]
if model_type == "linear":
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
else:
model = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
if isinstance(model, MultifidelityAutoregressive):
starting_f0_model_loss = model.lowest_fidelity_signal_model.model.training_loss()
starting_f1_model_loss = model.fidelity_residual_models[1].model.training_loss()
model.update(dataset)
model.optimize(dataset)
assert model.lowest_fidelity_signal_model.model.training_loss() < starting_f0_model_loss
assert model.fidelity_residual_models[1].model.training_loss() < starting_f1_model_loss
else:
starting_f0_model_loss = model.fidelity_models[0].model.training_loss()
starting_f1_model_loss = model.fidelity_models[1].model.training_loss()
model.update(dataset)
model.optimize(dataset)
assert model.fidelity_models[0].model.training_loss() < starting_f0_model_loss
assert model.fidelity_models[1].model.training_loss() < starting_f1_model_loss
@pytest.mark.parametrize(
"new_data,problem",
(
([[0.0, 8.0]], "too_high_fid"),
([[0.0, -1.0]], "negative_fid"),
([[0.0, 1.3]], "non_int_fid"),
),
)
@pytest.mark.parametrize(
"multifidelity_model",
(multifidelity_autoregressive_model, multifidelity_nonlinear_autoregressive_model),
)
def test_multifidelity_autoregressive_optimize_raises_for_bad_new_data(
new_data: list[list[float]],
problem: str,
multifidelity_model: MULTIFIDELITY_MODEL_BUILDER_TYPE,
) -> None:
new_dataset = Dataset(
tf.Variable(new_data, dtype=tf.float64), tf.Variable([[0.1]], dtype=tf.float64)
)
model = multifidelity_model(1)
dataset = multifidelity_autoregressive_nd_dataset()
if problem == "non_int_fid":
with pytest.raises(tf.errors.InvalidArgumentError):
model.optimize(dataset + new_dataset)
else:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
model.optimize(dataset + new_dataset)
@pytest.mark.parametrize(
"model_type",
("linear", "nonlinear"),
)
def test_multifidelity_autoregressive_sample_aligns_with_predict(model_type: str) -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model: Union[MultifidelityAutoregressive, MultifidelityNonlinearAutoregressive]
if model_type == "linear":
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.lowest_fidelity_signal_model.model.likelihood.variance.assign(1.1e-6)
gpflow.set_trainable(model.lowest_fidelity_signal_model.model.likelihood, False)
else:
model = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
model.optimize(dataset)
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations, 0)
hf_test_locations = add_fidelity_column(test_locations, 1)
concat_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
true_means, true_vars = model.predict(concat_test_locations)
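    # the linear model is checked to tighter tolerances below, so draw more samples for it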
if isinstance(model, MultifidelityAutoregressive):
samples = model.sample(concat_test_locations, 100000)
else:
samples = model.sample(concat_test_locations, 10000)
sample_means = tf.reduce_mean(samples, axis=0)
sample_vars = tf.math.reduce_variance(samples, axis=0)
if isinstance(model, MultifidelityAutoregressive):
npt.assert_allclose(true_means, sample_means, atol=1e-3, rtol=1e-3)
npt.assert_allclose(true_vars, sample_vars, atol=1e-3, rtol=1e-3)
else:
npt.assert_allclose(true_means, sample_means, atol=1e-2, rtol=1e-2)
npt.assert_allclose(true_vars, sample_vars, atol=1e-2, rtol=1e-2)
@pytest.mark.parametrize(
"model_type",
("linear", "nonlinear"),
)
def test_multifidelity_autoregressive_samples_are_varied(model_type: str) -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model: Union[MultifidelityAutoregressive, MultifidelityNonlinearAutoregressive]
if model_type == "linear":
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
else:
model = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
test_locations = tf.Variable([[5.1]], dtype=tf.float64)
lf_test_locations = add_fidelity_column(test_locations, 0)
hf_test_locations = add_fidelity_column(test_locations, 1)
lf_samples = model.sample(lf_test_locations, 2)
assert lf_samples[0] != lf_samples[1]
hf_samples = model.sample(hf_test_locations, 2)
assert hf_samples[0] != hf_samples[1]
| 82,395 | 37.884379 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import unittest
from typing import Any, Callable, List, Tuple, Type
from unittest.mock import MagicMock
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from scipy import stats
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, quadratic, random_seed
from tests.util.models.gpflow.models import (
GaussianProcess,
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
quadratic_mean_rbf_kernel_model,
rbf,
svgp_model,
svgp_model_by_type,
)
from trieste.data import Dataset
from trieste.models.gpflow import (
BatchReparametrizationSampler,
DecoupledTrajectorySampler,
IndependentReparametrizationSampler,
RandomFourierFeatureTrajectorySampler,
SparseVariational,
feature_decomposition_trajectory,
)
from trieste.models.gpflow.sampler import (
FeatureDecompositionTrajectorySamplerModel,
qmc_normal_samples,
)
from trieste.models.interfaces import (
ReparametrizationSampler,
SupportsGetInducingVariables,
SupportsPredictJoint,
)
from trieste.objectives import Branin
REPARAMETRIZATION_SAMPLERS: List[Type[ReparametrizationSampler[SupportsPredictJoint]]] = [
BatchReparametrizationSampler,
IndependentReparametrizationSampler,
]
DecoupledSamplingModel = Callable[[Dataset], Tuple[int, FeatureDecompositionTrajectorySamplerModel]]
@pytest.fixture(name="sampling_dataset")
def _sampling_dataset() -> Dataset:
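    # a 5x5 grid on [0, 1]^2 with quadratic observations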
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
return dataset
@pytest.fixture(
name="decoupled_sampling_model",
params=[
pytest.param(
lambda dataset: (1, quadratic_mean_rbf_kernel_model(dataset)), id="one_op_custom"
),
# whiten testing is covered in tests/unit/models/gpflow/test_models.py
pytest.param(
lambda dataset: (
1,
SparseVariational(svgp_model(dataset.query_points, dataset.observations)),
),
id="one_op_svgp",
),
pytest.param(
lambda dataset: (
2,
SparseVariational(
svgp_model_by_type(dataset.query_points, "separate+shared", whiten=False)
),
),
id="two_op_svgp",
),
],
)
def _decoupled_sampling_model_fixture(request: Any) -> DecoupledSamplingModel:
return request.param
@pytest.mark.parametrize(
"sampler",
REPARAMETRIZATION_SAMPLERS,
)
def test_reparametrization_sampler_reprs(
sampler: type[BatchReparametrizationSampler | IndependentReparametrizationSampler],
) -> None:
assert (
repr(sampler(20, QuadraticMeanAndRBFKernel()))
== f"{sampler.__name__}(20, QuadraticMeanAndRBFKernel())"
)
@pytest.mark.parametrize("qmc", [True, False])
def test_independent_reparametrization_sampler_sample_raises_for_negative_jitter(qmc: bool) -> None:
sampler = IndependentReparametrizationSampler(100, QuadraticMeanAndRBFKernel(), qmc=qmc)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(tf.constant([[0.0]]), jitter=-1e-6)
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("sample_size", [0, -2])
def test_independent_reparametrization_sampler_raises_for_invalid_sample_size(
sample_size: int,
qmc: bool,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
IndependentReparametrizationSampler(sample_size, QuadraticMeanAndRBFKernel(), qmc=qmc)
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("shape", [[], [1], [2], [2, 3]])
def test_independent_reparametrization_sampler_sample_raises_for_invalid_at_shape(
shape: ShapeLike,
qmc: bool,
) -> None:
sampler = IndependentReparametrizationSampler(1, QuadraticMeanAndRBFKernel(), qmc=qmc)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(tf.zeros(shape))
def _assert_kolmogorov_smirnov_95(
# fmt: off
samples: tf.Tensor, # [..., S]
distribution: tfp.distributions.Distribution
# fmt: on
) -> None:
assert distribution.event_shape == ()
tf.debugging.assert_shapes([(samples, [..., "S"])])
sample_size = samples.shape[-1]
samples_sorted = tf.sort(samples, axis=-1) # [..., S]
edf = tf.range(1.0, sample_size + 1, dtype=samples.dtype) / sample_size # [S]
expected_cdf = distribution.cdf(samples_sorted) # [..., S]
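    # 1.36 / sqrt(S) is the approximate 95% critical value of the one-sample KS statistic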
_95_percent_bound = 1.36 / math.sqrt(sample_size)
assert tf.reduce_max(tf.abs(edf - expected_cdf)) < _95_percent_bound
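# Two-output GP: a (shifted) Branin mean with a Matern-5/2 kernel, and a quadratic mean
# with an RBF kernel.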
def _dim_two_gp(mean_shift: tuple[float, float] = (0.0, 0.0)) -> GaussianProcess:
matern52 = tfp.math.psd_kernels.MaternFiveHalves(
amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
)
return GaussianProcess(
[lambda x: mean_shift[0] + Branin.objective(x), lambda x: mean_shift[1] + quadratic(x)],
[matern52, rbf()],
)
@random_seed
@unittest.mock.patch(
"trieste.models.gpflow.sampler.qmc_normal_samples", side_effect=qmc_normal_samples
)
@pytest.mark.parametrize("qmc", [True, False])
def test_independent_reparametrization_sampler_samples_approximate_expected_distribution(
mocked_qmc: MagicMock, qmc: bool
) -> None:
sample_size = 1000
x = tf.random.uniform([100, 1, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
model = _dim_two_gp()
samples = IndependentReparametrizationSampler(sample_size, model, qmc=qmc).sample(
x
) # [N, S, 1, L]
assert samples.shape == [len(x), sample_size, 1, 2]
mean, var = model.predict(tf.squeeze(x, -2)) # [N, L], [N, L]
_assert_kolmogorov_smirnov_95(
tf.linalg.matrix_transpose(tf.squeeze(samples, -2)),
tfp.distributions.Normal(mean[..., None], tf.sqrt(var[..., None])),
)
assert mocked_qmc.call_count == qmc
@random_seed
@pytest.mark.parametrize(
"compiler",
[
pytest.param(lambda x: x, id="uncompiled"),
pytest.param(tf.function, id="tf_function"),
pytest.param(tf.function(jit_compile=True), id="jit_compile"),
],
)
@pytest.mark.parametrize("qmc", [True, False])
def test_independent_reparametrization_sampler_sample_is_continuous(
compiler: Callable[..., Any], qmc: bool
) -> None:
sampler = IndependentReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=False)
sample = compiler(sampler.sample)
xs = tf.random.uniform([100, 1, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
npt.assert_array_less(tf.abs(sample(xs + 1e-20) - sample(xs)), 1e-20)
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_independent_reparametrization_sampler_sample_is_repeatable(
qmc: bool, qmc_skip: bool
) -> None:
sampler = IndependentReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
xs = tf.random.uniform([100, 1, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
npt.assert_allclose(sampler.sample(xs), sampler.sample(xs))
@random_seed
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_independent_reparametrization_sampler_samples_are_distinct_for_new_instances(
qmc: bool,
qmc_skip: bool,
) -> None:
sampler1 = IndependentReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
sampler2 = IndependentReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
xs = tf.random.uniform([100, 1, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
if qmc and not qmc_skip:
npt.assert_raises(
AssertionError,
npt.assert_array_less,
1e-9,
tf.abs(sampler2.sample(xs) - sampler1.sample(xs)),
)
else:
npt.assert_array_less(1e-9, tf.abs(sampler2.sample(xs) - sampler1.sample(xs)))
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_independent_reparametrization_sampler_reset_sampler(qmc: bool, qmc_skip: bool) -> None:
sampler = IndependentReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
assert not sampler._initialized
xs = tf.random.uniform([100, 1, 2], minval=-10.0, maxval=10.0, dtype=tf.float64)
samples1 = sampler.sample(xs)
assert sampler._initialized
sampler.reset_sampler()
assert not sampler._initialized
samples2 = sampler.sample(xs)
assert sampler._initialized
if qmc and not qmc_skip:
npt.assert_raises(AssertionError, npt.assert_array_less, 1e-9, tf.abs(samples2 - samples1))
else:
npt.assert_array_less(1e-9, tf.abs(samples2 - samples1))
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("sample_size", [0, -2])
def test_batch_reparametrization_sampler_raises_for_invalid_sample_size(
sample_size: int, qmc: bool
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BatchReparametrizationSampler(sample_size, _dim_two_gp(), qmc=qmc)
@random_seed
@unittest.mock.patch(
"trieste.models.gpflow.sampler.qmc_normal_samples", side_effect=qmc_normal_samples
)
@pytest.mark.parametrize("qmc", [True, False])
def test_batch_reparametrization_sampler_samples_approximate_mean_and_covariance(
mocked_qmc: MagicMock, qmc: bool
) -> None:
model = _dim_two_gp()
sample_size = 10_000
leading_dims = [3]
batch_size = 4
xs = tf.random.uniform(leading_dims + [batch_size, 2], maxval=1.0, dtype=tf.float64)
samples = BatchReparametrizationSampler(sample_size, model, qmc=qmc).sample(xs)
assert mocked_qmc.call_count == qmc
if qmc:
assert mocked_qmc.call_args[0][0] == 2 * sample_size # num_results
assert mocked_qmc.call_args[0][1] == batch_size # dim
assert samples.shape == leading_dims + [sample_size, batch_size, 2]
samples_mean = tf.reduce_mean(samples, axis=-3)
samples_covariance = tf.transpose(
tfp.stats.covariance(samples, sample_axis=-3, event_axis=-2), [0, 3, 1, 2]
)
model_mean, model_cov = model.predict_joint(xs)
npt.assert_allclose(samples_mean, model_mean, rtol=0.02)
npt.assert_allclose(samples_covariance, model_cov, rtol=0.04)
@pytest.mark.parametrize(
"compiler",
[
pytest.param(lambda x: x, id="uncompiled"),
pytest.param(tf.function, id="tf_function"),
pytest.param(tf.function(jit_compile=True), id="jit_compile"),
],
)
@pytest.mark.parametrize("qmc", [True, False])
def test_batch_reparametrization_sampler_samples_are_continuous(
compiler: Callable[..., Any], qmc: bool
) -> None:
sampler = BatchReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=False)
sample = compiler(sampler.sample)
xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
npt.assert_array_less(tf.abs(sample(xs + 1e-20) - sample(xs)), 1e-20)
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_batch_reparametrization_sampler_samples_are_repeatable(qmc: bool, qmc_skip: bool) -> None:
sampler = BatchReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
npt.assert_allclose(sampler.sample(xs), sampler.sample(xs))
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_batch_reparametrization_sampler_different_batch_sizes(qmc: bool, qmc_skip: bool) -> None:
sampler = BatchReparametrizationSampler(100, _dim_two_gp(), qmc=qmc, qmc_skip=qmc_skip)
xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
npt.assert_allclose(sampler.sample(xs), sampler.sample(xs))
sampler.reset_sampler()
xs = tf.random.uniform([3, 5, 10, 2], dtype=tf.float64)
npt.assert_allclose(sampler.sample(xs), sampler.sample(xs))
@random_seed
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_batch_reparametrization_sampler_samples_are_distinct_for_new_instances(
qmc: bool, qmc_skip: bool
) -> None:
model = _dim_two_gp()
sampler1 = BatchReparametrizationSampler(100, model, qmc=qmc, qmc_skip=qmc_skip)
sampler2 = BatchReparametrizationSampler(100, model, qmc=qmc, qmc_skip=qmc_skip)
xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
if qmc and not qmc_skip:
npt.assert_raises(
AssertionError,
npt.assert_array_less,
1e-9,
tf.abs(sampler2.sample(xs) - sampler1.sample(xs)),
)
else:
npt.assert_array_less(1e-9, tf.abs(sampler2.sample(xs) - sampler1.sample(xs)))
@pytest.mark.parametrize("at", [tf.constant([0.0]), tf.constant(0.0), tf.ones([0, 1])])
@pytest.mark.parametrize("qmc", [True, False])
def test_batch_reparametrization_sampler_sample_raises_for_invalid_at_shape(
at: tf.Tensor, qmc: bool
) -> None:
sampler = BatchReparametrizationSampler(100, QuadraticMeanAndRBFKernel(), qmc=qmc)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(at)
@pytest.mark.parametrize("qmc", [True, False])
def test_batch_reparametrization_sampler_sample_raises_for_negative_jitter(qmc: bool) -> None:
sampler = BatchReparametrizationSampler(100, QuadraticMeanAndRBFKernel(), qmc=qmc)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(tf.constant([[0.0]]), jitter=-1e-6)
@pytest.mark.parametrize("qmc", [True, False])
def test_batch_reparametrization_sampler_sample_raises_for_inconsistent_batch_size(
qmc: bool,
) -> None:
sampler = BatchReparametrizationSampler(100, QuadraticMeanAndRBFKernel(), qmc=qmc)
sampler.sample(tf.constant([[0.0], [1.0], [2.0]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(tf.constant([[0.0], [1.0]]))
@pytest.mark.parametrize("qmc", [True, False])
@pytest.mark.parametrize("qmc_skip", [True, False])
def test_batch_reparametrization_sampler_reset_sampler(qmc: bool, qmc_skip: bool) -> None:
sampler = BatchReparametrizationSampler(
100, QuadraticMeanAndRBFKernel(), qmc=qmc, qmc_skip=qmc_skip
)
assert not sampler._initialized
xs = tf.constant([[0.0], [1.0], [2.0]])
samples1 = sampler.sample(xs)
assert sampler._initialized
sampler.reset_sampler()
assert not sampler._initialized
samples2 = sampler.sample(xs)
assert sampler._initialized
if qmc and not qmc_skip:
npt.assert_raises(AssertionError, npt.assert_array_less, 1e-9, tf.abs(samples2 - samples1))
else:
npt.assert_array_less(1e-9, tf.abs(samples2 - samples1))
@pytest.mark.parametrize("num_features", [0, -2])
def test_rff_trajectory_sampler_raises_for_invalid_number_of_features(
num_features: int,
) -> None:
dataset = Dataset(
tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
)
model = quadratic_mean_rbf_kernel_model(dataset)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
RandomFourierFeatureTrajectorySampler(model, num_features=num_features)
def test_rff_trajectory_sampler_raises_for_a_non_gpflow_kernel() -> None:
dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
model = QuadraticMeanAndRBFKernelWithSamplers(dataset=dataset)
with pytest.raises(AssertionError):
RandomFourierFeatureTrajectorySampler(model, num_features=100)
@pytest.mark.parametrize("num_evals", [1, 5])
@pytest.mark.parametrize("num_features", [5, 10])
@pytest.mark.parametrize("batch_size", [1])
def test_rff_trajectory_sampler_returns_trajectory_function_with_correct_shapes(
num_evals: int,
num_features: int,
batch_size: int,
) -> None:
dataset = Dataset(
tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
)
model = quadratic_mean_rbf_kernel_model(dataset)
sampler = RandomFourierFeatureTrajectorySampler(model, num_features=num_features)
trajectory = sampler.get_trajectory()
xs = tf.linspace(
[-10.0],
[10.0],
num_evals,
) # [N, D]
xs = tf.cast(xs, tf.float64)
xs_with_dummy_batch_dim = tf.expand_dims(xs, -2) # [N, 1, D]
xs_with_full_batch_dim = tf.tile(xs_with_dummy_batch_dim, [1, batch_size, 1]) # [N, B, D]
tf.debugging.assert_shapes([(trajectory(xs_with_full_batch_dim), [num_evals, batch_size, 1])])
tf.debugging.assert_shapes(
[(trajectory._feature_functions(xs), [num_evals, num_features])] # type: ignore
)
assert isinstance(trajectory, feature_decomposition_trajectory)
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_rff_trajectory_sampler_returns_deterministic_trajectory(
batch_size: int,
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
sampler = RandomFourierFeatureTrajectorySampler(model, num_features=100)
trajectory = sampler.get_trajectory()
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, D]
xs = tf.tile(xs, [1, batch_size, 1]) # [N, B, D]
trajectory_eval_1 = trajectory(xs)
trajectory_eval_2 = trajectory(xs)
npt.assert_allclose(trajectory_eval_1, trajectory_eval_2)
def test_rff_trajectory_sampler_returns_same_posterior_from_each_calculation_method(
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
sampler = RandomFourierFeatureTrajectorySampler(model, num_features=100)
sampler.get_trajectory()
posterior_1 = sampler._prepare_theta_posterior_in_design_space()
posterior_2 = sampler._prepare_theta_posterior_in_gram_space()
npt.assert_allclose(posterior_1.loc, posterior_2.loc, rtol=0.02)
npt.assert_allclose(posterior_1.scale_tril, posterior_2.scale_tril, rtol=0.02)
@random_seed
def test_rff_trajectory_sampler_samples_are_distinct_for_new_instances(
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
sampler1 = RandomFourierFeatureTrajectorySampler(model, num_features=100)
trajectory1 = sampler1.get_trajectory()
sampler2 = RandomFourierFeatureTrajectorySampler(model, num_features=100)
trajectory2 = sampler2.get_trajectory()
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, d]
xs = tf.tile(xs, [1, 2, 1]) # [N, 2, D]
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory1(xs) - trajectory2(xs)))
    )  # distinct between separate draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory1(xs)[:, 0] - trajectory1(xs)[:, 1]))
) # distinct for two samples within same draw
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory2(xs)[:, 0] - trajectory2(xs)[:, 1]))
) # distinct for two samples within same draw
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_rff_trajectory_resample_trajectory_provides_new_samples_without_retracing(
batch_size: int,
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, d]
xs = tf.tile(xs, [1, batch_size, 1]) # [N, B, D]
sampler = RandomFourierFeatureTrajectorySampler(model, num_features=100)
trajectory = sampler.get_trajectory()
evals_1 = trajectory(xs)
for _ in range(5):
trajectory = sampler.resample_trajectory(trajectory)
evals_new = trajectory(xs)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(evals_1 - evals_new))
) # check all samples are different
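    # resampling in place must reuse the compiled trajectory rather than retracing it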
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_rff_trajectory_update_trajectory_updates_and_doesnt_retrace(
batch_size: int,
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
x_range = tf.random.uniform([5], 1.0, 2.0) # sample test locations
x_range = tf.cast(x_range, dtype=tf.float64)
xs_predict = tf.reshape(
tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2)
)
xs_predict_with_batching = tf.expand_dims(xs_predict, -2)
xs_predict_with_batching = tf.tile(xs_predict_with_batching, [1, batch_size, 1]) # [N, B, D]
trajectory_sampler = RandomFourierFeatureTrajectorySampler(model)
trajectory = trajectory_sampler.get_trajectory()
eval_before = trajectory(xs_predict_with_batching)
for _ in range(3): # do three updates on new data and see if samples are new
x_range = tf.random.uniform([5], 1.0, 2.0)
x_range = tf.cast(x_range, dtype=tf.float64)
x_train = tf.reshape(
tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2)
)
new_dataset = Dataset(x_train, quadratic(x_train))
new_lengthscales = 0.5 * model.kernel.lengthscales
model.update(new_dataset)
model.kernel.lengthscales.assign(new_lengthscales) # change params to mimic optimization
trajectory_updated = trajectory_sampler.update_trajectory(trajectory)
eval_after = trajectory(xs_predict_with_batching)
assert trajectory_updated is trajectory # check update was in place
npt.assert_allclose(
trajectory_sampler._feature_functions.kernel.lengthscales, new_lengthscales
)
npt.assert_allclose(
trajectory._feature_functions.kernel.lengthscales, new_lengthscales # type: ignore
)
npt.assert_array_less(
0.09, tf.reduce_max(tf.abs(eval_before - eval_after))
) # two samples should be different
assert trajectory.__call__._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("num_features", [0, -2])
def test_decoupled_trajectory_sampler_raises_for_invalid_number_of_features(
num_features: int,
) -> None:
dataset = Dataset(
tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
)
model = quadratic_mean_rbf_kernel_model(dataset)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
DecoupledTrajectorySampler(model, num_features=num_features)
def test_decoupled_trajectory_sampler_raises_for_a_non_gpflow_kernel() -> None:
dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
model = QuadraticMeanAndRBFKernelWithSamplers(dataset=dataset)
with pytest.raises(AssertionError):
DecoupledTrajectorySampler(model, num_features=100)
@pytest.mark.parametrize("num_evals", [10, 100])
@pytest.mark.parametrize("num_features", [5, 50])
@pytest.mark.parametrize("batch_size", [1, 5])
def test_decoupled_trajectory_sampler_returns_trajectory_function_with_correct_shapes(
num_evals: int,
num_features: int,
batch_size: int,
decoupled_sampling_model: DecoupledSamplingModel,
) -> None:
dataset = Dataset(
tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
)
N = len(dataset.query_points)
L, model = decoupled_sampling_model(dataset)
sampler = DecoupledTrajectorySampler(model, num_features=num_features)
trajectory = sampler.get_trajectory()
xs = tf.linspace([-10.0], [10.0], num_evals) # [N, D]
xs = tf.cast(xs, dtype=tf.float64)
xs_with_dummy_batch_dim = tf.expand_dims(xs, -2) # [N, 1, D]
xs_with_full_batch_dim = tf.tile(xs_with_dummy_batch_dim, [1, batch_size, 1]) # [N, B, D]
tf.debugging.assert_shapes([(trajectory(xs_with_full_batch_dim), [num_evals, batch_size, L])])
if L > 1:
tf.debugging.assert_shapes(
[(trajectory._feature_functions(xs), [L, num_evals, num_features + N])] # type: ignore
)
else:
tf.debugging.assert_shapes(
[(trajectory._feature_functions(xs), [num_evals, num_features + N])] # type: ignore
)
assert isinstance(trajectory, feature_decomposition_trajectory)
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_decoupled_trajectory_sampler_returns_deterministic_trajectory(
batch_size: int,
sampling_dataset: Dataset,
decoupled_sampling_model: DecoupledSamplingModel,
) -> None:
_, model = decoupled_sampling_model(sampling_dataset)
sampler = DecoupledTrajectorySampler(model, num_features=100)
trajectory = sampler.get_trajectory()
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, D]
xs = tf.tile(xs, [1, batch_size, 1]) # [N, B, D]
trajectory_eval_1 = trajectory(xs)
trajectory_eval_2 = trajectory(xs)
npt.assert_allclose(trajectory_eval_1, trajectory_eval_2)
@random_seed
def test_decoupled_trajectory_sampler_samples_are_distinct_for_new_instances(
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
sampler1 = DecoupledTrajectorySampler(model, num_features=100)
trajectory1 = sampler1.get_trajectory()
sampler2 = DecoupledTrajectorySampler(model, num_features=100)
trajectory2 = sampler2.get_trajectory()
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, d]
xs = tf.tile(xs, [1, 2, 1]) # [N, 2, D]
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory1(xs) - trajectory2(xs)))
) # distinct between sample draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory1(xs)[:, 0] - trajectory1(xs)[:, 1]))
) # distinct between samples within draws
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(trajectory2(xs)[:, 0] - trajectory2(xs)[:, 1]))
) # distinct between samples within draws
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_decoupled_trajectory_resample_trajectory_provides_new_samples_without_retracing(
batch_size: int,
sampling_dataset: Dataset,
decoupled_sampling_model: DecoupledSamplingModel,
) -> None:
_, model = decoupled_sampling_model(sampling_dataset)
xs = sampling_dataset.query_points
xs = tf.expand_dims(xs, -2) # [N, 1, d]
xs = tf.tile(xs, [1, batch_size, 1]) # [N, B, D]
sampler = DecoupledTrajectorySampler(model, num_features=100)
trajectory = sampler.get_trajectory()
evals_1 = trajectory(xs)
trace_count_before = trajectory.__call__._get_tracing_count() # type: ignore
for _ in range(5):
trajectory = sampler.resample_trajectory(trajectory)
evals_new = trajectory(xs)
npt.assert_array_less(
1e-1, tf.reduce_max(tf.abs(evals_1 - evals_new))
) # check all samples are different
assert trajectory.__call__._get_tracing_count() == trace_count_before # type: ignore
@random_seed
@pytest.mark.parametrize("batch_size", [1, 5])
def test_decoupled_trajectory_update_trajectory_updates_and_doesnt_retrace(
batch_size: int,
sampling_dataset: Dataset,
decoupled_sampling_model: DecoupledSamplingModel,
) -> None:
L, model = decoupled_sampling_model(sampling_dataset)
x_range = tf.random.uniform([5], 1.0, 2.0) # sample test locations
x_range = tf.cast(x_range, dtype=tf.float64)
xs_predict = tf.reshape(
tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2)
)
xs_predict_with_batching = tf.expand_dims(xs_predict, -2)
xs_predict_with_batching = tf.tile(xs_predict_with_batching, [1, batch_size, 1]) # [N, B, D]
trajectory_sampler = DecoupledTrajectorySampler(model)
trajectory = trajectory_sampler.get_trajectory()
eval_before = trajectory(xs_predict_with_batching)
trace_count_before = trajectory.__call__._get_tracing_count() # type: ignore
if L > 1:
# pick the first kernel to check
_model_lengthscales = model.get_kernel().kernels[0].lengthscales
_trajectory_sampler_lengthscales = trajectory_sampler._feature_functions.kernel.kernels[
0
].lengthscales
_trajectory_lengthscales = trajectory._feature_functions.kernel.kernels[ # type: ignore
0
].lengthscales
else:
_model_lengthscales = model.get_kernel().lengthscales
_trajectory_sampler_lengthscales = trajectory_sampler._feature_functions.kernel.lengthscales
_trajectory_lengthscales = trajectory._feature_functions.kernel.lengthscales # type: ignore
for _ in range(3): # do three updates on new data and see if samples are new
x_range = tf.random.uniform([5], 1.0, 2.0)
x_range = tf.cast(x_range, dtype=tf.float64)
x_train = tf.reshape(
tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2)
)
new_dataset = Dataset(x_train, tf.tile(quadratic(x_train), [1, L]))
new_lengthscales = 0.5 * _model_lengthscales
model.update(new_dataset) # type: ignore
_model_lengthscales.assign(new_lengthscales) # change params to mimic optimization
trajectory_updated = trajectory_sampler.update_trajectory(trajectory)
eval_after = trajectory(xs_predict_with_batching)
assert trajectory_updated is trajectory # check update was in place
npt.assert_allclose(_trajectory_sampler_lengthscales, new_lengthscales)
npt.assert_allclose(_trajectory_lengthscales, new_lengthscales)
npt.assert_array_less(
0.1, tf.reduce_max(tf.abs(eval_before - eval_after))
) # two samples should be different
# check that inducing points in canonical features closure were updated in place
if isinstance(model, SupportsGetInducingVariables):
iv = model.get_inducing_variables()[0]
else:
iv = x_train
npt.assert_array_equal(trajectory_sampler._feature_functions._inducing_points, iv)
npt.assert_array_equal(trajectory._feature_functions._inducing_points, iv) # type: ignore
assert trajectory.__call__._get_tracing_count() == trace_count_before # type: ignore
@random_seed
@pytest.mark.parametrize("noise_var", [1e-5, 1e-1])
def test_rff_and_decoupled_trajectory_give_similar_results(
noise_var: float,
sampling_dataset: Dataset,
) -> None:
model = quadratic_mean_rbf_kernel_model(sampling_dataset)
model._noise_variance = tf.constant(noise_var, dtype=tf.float64)
x_range = tf.linspace(1.4, 1.8, 3)
x_range = tf.cast(x_range, dtype=tf.float64)
xs_predict = tf.reshape(
tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2)
)
batch_size = 50
xs_predict_with_batching = tf.expand_dims(xs_predict, -2)
xs_predict_with_batching = tf.tile(xs_predict_with_batching, [1, batch_size, 1]) # [N, B, D]
trajectory_sampler_1 = RandomFourierFeatureTrajectorySampler(model)
trajectory_1 = trajectory_sampler_1.get_trajectory()
eval_1 = trajectory_1(xs_predict_with_batching)
trajectory_sampler_2 = DecoupledTrajectorySampler(model)
trajectory_2 = trajectory_sampler_2.get_trajectory()
eval_2 = trajectory_2(xs_predict_with_batching)
npt.assert_allclose(
tf.reduce_mean(eval_1, 1), tf.reduce_mean(eval_2, 1), rtol=0.01
) # means across samples should roughly agree for different samplers
npt.assert_allclose(
tf.math.reduce_variance(eval_1, 1), tf.math.reduce_variance(eval_2, 1), rtol=1.0
) # variance across samples should (very) roughly agree for different samplers
@pytest.mark.parametrize("n_sample_dim", [2, 5])
@pytest.mark.parametrize("skip", [0, 10_000])
def test_qmc_samples_return_standard_normal_samples(n_sample_dim: int, skip: int) -> None:
n_samples = 10_000
qmc_samples = qmc_normal_samples(num_samples=n_samples, n_sample_dim=n_sample_dim, skip=skip)
# should be multivariate normal with zero correlation
for i in range(n_sample_dim):
assert stats.kstest(qmc_samples[:, i], stats.norm.cdf).pvalue > 0.99
for j in range(n_sample_dim):
if i != j:
                assert abs(stats.pearsonr(qmc_samples[:, i], qmc_samples[:, j])[0]) < 0.005
def test_qmc_samples_skip() -> None:
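    # the same skip value reproduces the same draws; different skips give different draws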
samples_1a = qmc_normal_samples(25, 100)
samples_1b = qmc_normal_samples(25, 100)
npt.assert_allclose(samples_1a, samples_1b)
samples_2a = qmc_normal_samples(25, 100, skip=100)
samples_2b = qmc_normal_samples(25, 100, skip=100)
npt.assert_allclose(samples_2a, samples_2b)
npt.assert_raises(AssertionError, npt.assert_allclose, samples_1a, samples_2a)
def test_qmc_samples__num_samples_is_a_tensor() -> None:
num_samples = 5
n_sample_dim = 100
expected_samples = qmc_normal_samples(num_samples, n_sample_dim)
npt.assert_allclose(
qmc_normal_samples(tf.constant(num_samples), n_sample_dim), expected_samples
)
npt.assert_allclose(
qmc_normal_samples(tf.constant(num_samples), tf.constant(n_sample_dim)), expected_samples
)
    npt.assert_allclose(
        qmc_normal_samples(num_samples, tf.constant(n_sample_dim)), expected_samples
    )
@pytest.mark.parametrize(
("num_samples", "n_sample_dim"),
(
[1, 1],
[0, 1],
[1, 0],
[3, 5],
),
)
def test_qmc_samples_shapes(num_samples: int, n_sample_dim: int) -> None:
samples = qmc_normal_samples(num_samples=num_samples, n_sample_dim=n_sample_dim)
expected_samples_shape = (num_samples, n_sample_dim)
assert samples.shape == expected_samples_shape
@pytest.mark.parametrize(
("num_samples", "n_sample_dim", "skip", "expected_error_type"),
(
[-1, 1, 1, tf.errors.InvalidArgumentError],
[1, -1, 1, tf.errors.InvalidArgumentError],
[1, 1, -1, tf.errors.InvalidArgumentError],
[1.5, 1, 1, TypeError],
[1, 1.5, 1, TypeError],
[1, 1, 1.5, TypeError],
),
)
def test_qmc_samples_shapes__invalid_values(
num_samples: int, n_sample_dim: int, skip: int, expected_error_type: Any
) -> None:
with pytest.raises(expected_error_type):
qmc_normal_samples(num_samples=num_samples, n_sample_dim=n_sample_dim, skip=skip)
| 34,843 | 37.501657 | 100 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_inducing_point_selectors.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of trieste's inducing point selectors.
"""
from __future__ import annotations
from typing import Callable
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, random_seed
from tests.util.models.gpflow.models import mock_data, svgp_model
from tests.util.models.models import fnc_3x_plus_10
from trieste.data import Dataset
from trieste.models.gpflow import SparseVariational
from trieste.models.gpflow.inducing_point_selectors import (
ConditionalImprovementReduction,
ConditionalVarianceReduction,
InducingPointSelector,
KMeansInducingPointSelector,
ModelBasedImprovementQualityFunction,
QualityFunction,
RandomSubSampleInducingPointSelector,
UniformInducingPointSelector,
UnitQualityFunction,
greedy_inference_dpp,
)
from trieste.space import Box, SearchSpace
@pytest.mark.parametrize(
"selector",
[
UniformInducingPointSelector(Box([0.0], [1.0])),
RandomSubSampleInducingPointSelector(),
KMeansInducingPointSelector(),
ConditionalVarianceReduction(),
ConditionalImprovementReduction(),
],
)
def test_inducing_point_selectors_raise_if_more_than_one_set_of_inducing_points(
selector: InducingPointSelector[SparseVariational],
) -> None:
dataset = Dataset(*mock_data())
svgp = svgp_model(*mock_data())
model = SparseVariational(svgp)
inducing_points = [mock_data()[0], mock_data()[0]]
with pytest.raises(NotImplementedError):
selector.calculate_inducing_points(inducing_points, model, dataset)
@pytest.mark.parametrize("more_inducing_points_than_data", [True, False])
@pytest.mark.parametrize(
"selector",
[
UniformInducingPointSelector(Box([0.0], [1.0])),
RandomSubSampleInducingPointSelector(),
KMeansInducingPointSelector(),
ConditionalVarianceReduction(),
ConditionalImprovementReduction(),
],
)
def test_inducing_point_selectors_returns_correctly_shaped_inducing_points(
selector: InducingPointSelector[SparseVariational],
more_inducing_points_than_data: bool,
) -> None:
dataset = Dataset(*mock_data())
svgp = svgp_model(*mock_data())
model = SparseVariational(svgp)
if more_inducing_points_than_data:
inducing_points = tf.concat([mock_data()[0], mock_data()[0]], 0)
else:
inducing_points = mock_data()[0]
new_inducing_points = selector.calculate_inducing_points(inducing_points, model, dataset)
npt.assert_array_equal(inducing_points.shape, new_inducing_points.shape)
@random_seed
@pytest.mark.parametrize(
"selector",
[
UniformInducingPointSelector(Box([0.0, -1.0], [1.0, 0.0])),
RandomSubSampleInducingPointSelector(),
KMeansInducingPointSelector(),
ConditionalVarianceReduction(),
ConditionalImprovementReduction(),
],
)
def test_inducing_point_selectors_choose_points_still_in_space(
selector: InducingPointSelector[SparseVariational],
) -> None:
search_space = Box([0.0, -1.0], [1.0, 0.0])
X = tf.constant([[0.01, -0.99], [0.99, -0.01]], dtype=tf.float64)
Y = fnc_3x_plus_10(X)
dataset = Dataset(X, Y)
svgp = svgp_model(X, Y)
model = SparseVariational(svgp)
inducing_points = search_space.sample(10)
new_inducing_points = selector.calculate_inducing_points(inducing_points, model, dataset)
assert tf.reduce_all([point in search_space for point in new_inducing_points])
@random_seed
@pytest.mark.parametrize(
"selector_name",
[
RandomSubSampleInducingPointSelector,
KMeansInducingPointSelector,
ConditionalVarianceReduction,
ConditionalImprovementReduction,
],
)
@pytest.mark.parametrize("recalc_every_model_update", [True, False])
def test_inducing_point_selectors_update_correct_number_of_times(
selector_name: Callable[[SearchSpace, bool], InducingPointSelector[SparseVariational]],
recalc_every_model_update: bool,
) -> None:
selector = selector_name(recalc_every_model_update) # type: ignore
dataset = Dataset(*mock_data())
svgp = svgp_model(*mock_data())
model = SparseVariational(svgp)
inducing_points = mock_data()[0]
new_inducing_points_1 = selector.calculate_inducing_points(inducing_points, model, dataset)
new_inducing_points_2 = selector.calculate_inducing_points(
new_inducing_points_1, model, dataset
)
npt.assert_raises(AssertionError, npt.assert_allclose, inducing_points, new_inducing_points_1)
npt.assert_raises(AssertionError, npt.assert_allclose, inducing_points, new_inducing_points_2)
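    # If recalc_every_model_update is False, the selector should recalculate only once and then
    # keep returning the same inducing points on subsequent calls.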
if recalc_every_model_update:
npt.assert_raises(
AssertionError, npt.assert_allclose, inducing_points, new_inducing_points_2
)
else:
npt.assert_array_equal(new_inducing_points_1, new_inducing_points_2)
def test_unit_quality_function_returns_correct_scores() -> None:
search_space = Box([0.0, -1.0], [1.0, 0.0])
X = search_space.sample(100)
Y = fnc_3x_plus_10(X)
dataset = Dataset(X, Y)
svgp = svgp_model(X, Y)
model = SparseVariational(svgp)
quality_scores = UnitQualityFunction()(model, dataset)
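    # UnitQualityFunction weights every data point equally, so each score should be exactly one.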
npt.assert_array_equal(
quality_scores, tf.ones(tf.shape(dataset.query_points)[0], dtype=tf.float64)
)
@random_seed
def test_improvement_quality_function_returns_approximately_correct_scores() -> None:
search_space = Box([0.0, -1.0], [1.0, 0.0])
X = search_space.sample(10)
Y = fnc_3x_plus_10(X)
dataset = Dataset(X, Y)
svgp = svgp_model(X, Y)
model = SparseVariational(svgp)
quality_scores = ModelBasedImprovementQualityFunction()(model, dataset)
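    # Monte Carlo reference: each point's improvement-based quality should approximate
    # E[max(baseline - f(x), 0)], where the baseline is the best sample-averaged prediction.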
samples = model.sample(dataset.query_points, 10000)[:, :, 0] # [S, N]
baseline = tf.reduce_max(tf.reduce_mean(samples, 0))
empirical_scores = tf.maximum(baseline - samples, 0.0) # [S, N]
empirical_scores = tf.reduce_mean(empirical_scores, 0) # [N]
npt.assert_allclose(quality_scores, empirical_scores, atol=0.01)
def test_greedy_inference_dpp_raises_errors() -> None:
search_space = Box([0.0, -1.0], [1.0, 0.0])
X = search_space.sample(100)
Y = fnc_3x_plus_10(X)
dataset = Dataset(X, Y)
svgp = svgp_model(X, Y)
model = SparseVariational(svgp)
quality_scores = UnitQualityFunction()(model, dataset)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES): # dataset must be populated
greedy_inference_dpp(10, svgp.kernel, quality_scores, dataset=None) # type: ignore
with pytest.raises(TF_DEBUGGING_ERROR_TYPES): # dataset size must match quality score
greedy_inference_dpp(10, svgp.kernel, quality_scores[:-1], dataset)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES): # sample must be smaller than dataset size
greedy_inference_dpp(101, svgp.kernel, quality_scores, dataset)
@pytest.mark.parametrize(
"quality_function",
[UnitQualityFunction(), ModelBasedImprovementQualityFunction()],
)
@pytest.mark.parametrize("num_points", [1, 10, 50])
def test_greedy_inference_dpp_returns_correct_number_of_points(
quality_function: QualityFunction, num_points: int
) -> None:
search_space = Box([0.0, -1.0], [1.0, 0.0])
X = search_space.sample(100)
Y = fnc_3x_plus_10(X)
dataset = Dataset(X, Y)
svgp = svgp_model(X, Y)
model = SparseVariational(svgp)
quality_scores = quality_function(model, dataset)
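    # greedy_inference_dpp greedily picks num_points rows of the dataset; per its name, this
    # approximates sampling from a quality-weighted determinantal point process (DPP).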
sample = greedy_inference_dpp(num_points, svgp.kernel, quality_scores, dataset)
assert sample.shape[0] == num_points
assert sample.shape[1] == search_space.dimension
| 8,211 | 36.158371 | 98 | py |
trieste-develop | trieste-develop/tests/unit/models/gpflow/test_utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import operator
import tempfile
from typing import cast
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import random_seed
from tests.util.models.gpflow.models import ModelFactoryType
from tests.util.models.models import fnc_2sin_x_over_3
from trieste.data import Dataset
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflow import (
SparseVariational,
check_optimizer,
randomize_hyperparameters,
squeeze_hyperparameters,
)
from trieste.models.interfaces import HasTrajectorySampler
from trieste.models.optimizer import BatchOptimizer, Optimizer
from trieste.types import TensorType
def test_gaussian_process_deep_copyable(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model, _ = gpflow_interface_factory(x, fnc_2sin_x_over_3(x))
model_copy = copy.deepcopy(model)
x_predict = tf.constant([[50.5]], gpflow.default_float())
# check deepcopy predicts same values as original
mean_f, variance_f = model.predict(x_predict)
mean_f_copy, variance_f_copy = model_copy.predict(x_predict)
npt.assert_equal(mean_f, mean_f_copy)
npt.assert_equal(variance_f, variance_f_copy)
# check that updating the original doesn't break or change the deepcopy
x_new = tf.concat([x, tf.constant([[10.0], [11.0]], dtype=gpflow.default_float())], 0)
new_data = Dataset(x_new, fnc_2sin_x_over_3(x_new))
cast(TrainableProbabilisticModel, model).update(new_data)
model.optimize(new_data)
mean_f_updated, variance_f_updated = model.predict(x_predict)
mean_f_copy_updated, variance_f_copy_updated = model_copy.predict(x_predict)
npt.assert_equal(mean_f_copy_updated, mean_f_copy)
npt.assert_equal(variance_f_copy_updated, variance_f_copy)
npt.assert_array_compare(operator.__ne__, mean_f_updated, mean_f)
npt.assert_array_compare(operator.__ne__, variance_f_updated, variance_f)
# check that updating the copy works too
x_new2 = tf.constant([[20.0], [30.0]], dtype=gpflow.default_float())
new_data2 = Dataset(x_new2, fnc_2sin_x_over_3(x_new2))
cast(TrainableProbabilisticModel, model_copy).update(new_data2)
model_copy.optimize(new_data2)
if not isinstance(model, SparseVariational):
assert model_copy._posterior is not None
npt.assert_array_equal(model_copy._posterior.X_data, x_new2)
mean_f_copy_updated2, variance_f_copy_updated2 = model_copy.predict(x_predict)
npt.assert_array_compare(operator.__ne__, mean_f_copy_updated, mean_f_copy_updated2)
npt.assert_array_compare(operator.__ne__, variance_f_copy_updated, variance_f_copy_updated2)
def test_gaussian_process_tf_saved_model(gpflow_interface_factory: ModelFactoryType) -> None:
x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
model, _ = gpflow_interface_factory(x, fnc_2sin_x_over_3(x))
with tempfile.TemporaryDirectory() as path:
# create a trajectory sampler (used for sample method)
assert isinstance(model, HasTrajectorySampler)
trajectory_sampler = model.trajectory_sampler()
trajectory = trajectory_sampler.get_trajectory()
# generate client model with predict and sample methods
module = model.get_module_with_variables(trajectory_sampler, trajectory)
module.predict = tf.function(
model.predict, input_signature=[tf.TensorSpec(shape=[None, 1], dtype=tf.float64)]
)
def _sample(query_points: TensorType, num_samples: int) -> TensorType:
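            # Resample the trajectory, then tile the query points across a sample axis so a
            # single trajectory call yields num_samples draws per query point.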
trajectory_updated = trajectory_sampler.resample_trajectory(trajectory)
expanded_query_points = tf.expand_dims(query_points, -2) # [N, 1, D]
tiled_query_points = tf.tile(expanded_query_points, [1, num_samples, 1]) # [N, S, D]
return tf.transpose(trajectory_updated(tiled_query_points), [1, 0, 2])[
:, :, :1
] # [S, N, L]
module.sample = tf.function(
_sample,
input_signature=[
tf.TensorSpec(shape=[None, 1], dtype=tf.float64), # query_points
tf.TensorSpec(shape=(), dtype=tf.int32), # num_samples
],
)
tf.saved_model.save(module, str(path))
client_model = tf.saved_model.load(str(path))
# test exported methods
x_predict = tf.constant([[50.5]], gpflow.default_float())
mean_f, variance_f = model.predict(x_predict)
mean_f_copy, variance_f_copy = client_model.predict(x_predict)
npt.assert_equal(mean_f, mean_f_copy)
npt.assert_equal(variance_f, variance_f_copy)
client_model.sample(x, 10)
@random_seed
@pytest.mark.parametrize("compile", [False, True])
def test_randomize_hyperparameters_randomizes_kernel_parameters_with_priors(
dim: int, compile: bool
) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(kernel.lengthscales), scale=1.0
)
compiler = tf.function if compile else lambda x: x
compiler(randomize_hyperparameters)(kernel)
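    # The variance (which has no prior) should be untouched, while every lengthscale is
    # independently resampled to a distinct value.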
npt.assert_allclose(1.0, kernel.variance)
npt.assert_array_equal(dim, kernel.lengthscales.shape)
npt.assert_raises(AssertionError, npt.assert_allclose, [0.2] * dim, kernel.lengthscales)
assert len(np.unique(kernel.lengthscales)) == dim
@random_seed
@pytest.mark.parametrize("compile", [False, True])
def test_randomize_hyperparameters_randomizes_kernel_parameters_with_const_priors(
dim: int, compile: bool
) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
loc=tf.math.log(0.2), scale=1.0 # constant loc should be applied to every dimension
)
compiler = tf.function if compile else lambda x: x
compiler(randomize_hyperparameters)(kernel)
npt.assert_allclose(1.0, kernel.variance)
npt.assert_array_equal(dim, kernel.lengthscales.shape)
npt.assert_raises(AssertionError, npt.assert_allclose, [0.2] * dim, kernel.lengthscales)
assert len(np.unique(kernel.lengthscales)) == dim
@random_seed
def test_randomize_hyperparameters_randomizes_constrained_kernel_parameters(dim: int) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
randomize_hyperparameters(kernel)
npt.assert_allclose(1.0, kernel.variance)
npt.assert_array_equal(dim, kernel.lengthscales.shape)
npt.assert_raises(AssertionError, npt.assert_allclose, [0.2] * dim, kernel.lengthscales)
@random_seed
def test_randomize_hyperparameters_randomizes_kernel_parameters_with_constraints_or_priors(
dim: int,
) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
upper = tf.cast([10.0] * dim, dtype=tf.float64)
lower = upper / 100
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
kernel.variance.prior = tfp.distributions.LogNormal(loc=np.float64(-2.0), scale=np.float64(1.0))
randomize_hyperparameters(kernel)
npt.assert_raises(AssertionError, npt.assert_allclose, 1.0, kernel.variance)
npt.assert_array_equal(dim, kernel.lengthscales.shape)
npt.assert_raises(AssertionError, npt.assert_allclose, [0.2] * dim, kernel.lengthscales)
@random_seed
def test_randomize_hyperparameters_samples_from_constraints_when_given_prior_and_constraint(
dim: int,
) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2] * dim)
upper = tf.cast([0.5] * dim, dtype=tf.float64)
lower = upper / 100
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
kernel.lengthscales.prior = tfp.distributions.Uniform(low=10.0, high=100.0)
kernel.variance.prior = tfp.distributions.LogNormal(loc=np.float64(-2.0), scale=np.float64(1.0))
randomize_hyperparameters(kernel)
npt.assert_array_less(kernel.lengthscales, [0.5] * dim)
npt.assert_raises(AssertionError, npt.assert_allclose, [0.2] * dim, kernel.lengthscales)
@random_seed
def test_randomize_hyperparameters_samples_different_values_for_multi_dimensional_params() -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2, 0.2])
upper = tf.cast([10.0] * 2, dtype=tf.float64)
lower = upper / 100
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
randomize_hyperparameters(kernel)
npt.assert_raises(
AssertionError, npt.assert_allclose, kernel.lengthscales[0], kernel.lengthscales[1]
)
@random_seed
def test_squeeze_sigmoid_hyperparameters() -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.1 + 1e-3, 0.5 - 1e-3])
upper = tf.cast([0.5, 0.5], dtype=tf.float64)
lower = upper / 5.0
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
squeeze_hyperparameters(kernel, alpha=0.1)
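    # With lower = upper / 5 = 0.1, alpha = 0.1 of the [0.1, 0.5] sigmoid range is 4e-2, so
    # values hugging a bound should be pushed that far inside it.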
npt.assert_array_almost_equal(kernel.lengthscales, [0.1 + 4e-2, 0.5 - 4e-2])
@random_seed
def test_squeeze_softplus_hyperparameters() -> None:
lik = gpflow.likelihoods.Gaussian(variance=1.01e-6)
squeeze_hyperparameters(lik, epsilon=0.2)
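    # gpflow's Gaussian likelihood variance has a 1e-6 lower bound, hence the expected 0.2 + 1e-6.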
npt.assert_array_almost_equal(tf.constant(lik.variance), 0.2 + 1e-6)
@random_seed
def test_squeeze_raises_for_invalid_epsilon() -> None:
lik = gpflow.likelihoods.Gaussian(variance=1.01e-6)
with pytest.raises(ValueError):
squeeze_hyperparameters(lik, epsilon=-1.0)
@pytest.mark.parametrize("alpha", [-0.1, 0.0, 1.1])
def test_squeeze_raises_for_invalid_alpha(alpha: float) -> None:
kernel = gpflow.kernels.RBF(variance=1.0, lengthscales=[0.2, 0.2])
upper = tf.cast([0.5, 0.5], dtype=tf.float64)
lower = upper / 5.0
kernel.lengthscales = gpflow.Parameter(
kernel.lengthscales, transform=tfp.bijectors.Sigmoid(low=lower, high=upper)
)
with pytest.raises(ValueError):
squeeze_hyperparameters(kernel, alpha)
def test_check_optimizer_raises_for_invalid_optimizer_wrapper_combination() -> None:
with pytest.raises(ValueError):
optimizer1 = BatchOptimizer(gpflow.optimizers.Scipy())
check_optimizer(optimizer1)
with pytest.raises(ValueError):
optimizer2 = Optimizer(tf.optimizers.Adam())
check_optimizer(optimizer2)
| 11,456 | 39.917857 | 100 | py |
trieste-develop | trieste-develop/tests/unit/utils/test_misc.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from time import sleep
from typing import Any
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, various_shapes
from trieste.types import TensorType
from trieste.utils.misc import Err, Ok, Timer, flatten_leading_dims, jit, shapes_equal, to_numpy
@pytest.mark.parametrize("apply", [True, False])
@pytest.mark.parametrize(
"kwargs",
[
{},
{"autograph": False},
{"input_signature": [tf.TensorSpec(()), tf.TensorSpec(())]},
],
)
def test_jit_function_behaviour_unchanged(apply: bool, kwargs: Any) -> None:
@jit(apply, **kwargs)
def add(t: tf.Tensor, u: tf.Tensor) -> tf.Tensor:
return t + u
assert add(tf.constant(1.0), tf.constant(2.0)) == tf.constant(3.0)
@pytest.mark.parametrize("apply", [True, False])
@pytest.mark.parametrize("kwargs", [{}, {"autograph": False}])
def test_jit_compiles_function(apply: bool, kwargs: Any) -> None:
@jit(apply, **kwargs)
def one() -> tf.Tensor:
return tf.constant(0)
tf_function_type = type(tf.function(lambda x: x))
assert isinstance(one, tf_function_type) == apply
@pytest.mark.parametrize("this_shape", various_shapes())
@pytest.mark.parametrize("that_shape", various_shapes())
def test_shapes_equal(this_shape: ShapeLike, that_shape: ShapeLike) -> None:
assert shapes_equal(tf.ones(this_shape), tf.ones(that_shape)) == (this_shape == that_shape)
@pytest.mark.parametrize(
"t, expected",
[
(tf.constant(0), np.array(0)),
(np.arange(12).reshape(3, -1), np.arange(12).reshape(3, -1)),
(tf.reshape(tf.range(12), [3, -1]), np.arange(12).reshape(3, -1)),
],
)
def test_to_numpy(t: TensorType, expected: "np.ndarray[Any, Any]") -> None:
npt.assert_array_equal(to_numpy(t), expected)
def test_ok() -> None:
assert Ok(1).unwrap() == 1
assert Ok(1).is_ok is True
assert Ok(1).is_err is False
def test_err() -> None:
with pytest.raises(ValueError):
Err(ValueError()).unwrap()
assert Err(ValueError()).is_ok is False
assert Err(ValueError()).is_err is True
def test_Timer() -> None:
sleep_time = 0.1
with Timer() as timer:
sleep(sleep_time)
npt.assert_allclose(timer.time, sleep_time, rtol=0.01)
def test_Timer_with_nesting() -> None:
sleep_time = 0.1
with Timer() as timer_1:
sleep(sleep_time)
with Timer() as timer_2:
sleep(sleep_time)
npt.assert_allclose(timer_1.time, 2.0 * sleep_time, rtol=0.01)
npt.assert_allclose(timer_2.time, 1.0 * sleep_time, rtol=0.01)
def test_flatten_leading_dims() -> None:
x_old = tf.random.uniform([2, 3, 4, 5]) # [2, 3, 4, 5]
flat_x_old, unflatten = flatten_leading_dims(x_old) # [24, 5]
npt.assert_array_equal(tf.shape(flat_x_old), [24, 5])
x_new = unflatten(flat_x_old) # [2, 3, 4, 5]
npt.assert_array_equal(x_old, x_new)
def test_unflatten_raises_for_invalid_shape() -> None:
x_old = tf.random.uniform([2, 3, 4, 5]) # [2, 3, 4, 5]
flat_x_old, unflatten = flatten_leading_dims(x_old) # [24, 5]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
unflatten(x_old)
def test_unflatten_returns_correct_shape() -> None:
x = tf.random.uniform([2, 3, 4, 5])
flat_x, unflatten = flatten_leading_dims(x) # [24, 5]
y1 = tf.random.uniform([24, 7])
y2 = tf.random.uniform([24, 7, 11])
unflat_y1 = unflatten(y1)
unflat_y2 = unflatten(y2)
npt.assert_array_equal(tf.shape(unflat_y1), [2, 3, 4, 7])
npt.assert_array_equal(tf.shape(unflat_y2), [2, 3, 4, 7, 11])
@pytest.mark.parametrize(
"output_dims,expected_shape",
[
(1, [120]),
(2, [24, 5]),
(3, [6, 4, 5]),
(4, [2, 3, 4, 5]),
],
)
def test_flatten_leading_dims_output_dims(output_dims: int, expected_shape: list[int]) -> None:
x_old = tf.random.uniform([2, 3, 4, 5])
flat_x_old, unflatten = flatten_leading_dims(x_old, output_dims=output_dims)
npt.assert_array_equal(tf.shape(flat_x_old), expected_shape)
x_new = unflatten(flat_x_old)
npt.assert_array_equal(x_old, x_new)
@pytest.mark.parametrize("output_dims", [-1, 0, 5, 100])
def test_flatten_leading_dims_invalid_output_dims(output_dims: int) -> None:
x_old = tf.random.uniform([2, 3, 4, 5]) # [2, 3, 4, 5]
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
flatten_leading_dims(x_old, output_dims=output_dims)
| 5,093 | 30.639752 | 96 | py |
trieste-develop | trieste-develop/tests/unit/utils/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/acquisition/test_optimizer.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import unittest.mock
from math import ceil
from typing import Any, Callable, Optional, Tuple, TypeVar, Union
from unittest.mock import MagicMock
import numpy.testing as npt
import pytest
import scipy.optimize as spo
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, quadratic, random_seed
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.optimizer import (
AcquisitionOptimizer,
FailedOptimizationError,
automatic_optimizer_selector,
batchify_joint,
batchify_vectorize,
generate_continuous_optimizer,
generate_random_search_optimizer,
get_bounds_of_box_relaxation_around_point,
optimize_discrete,
)
from trieste.acquisition.utils import split_acquisition_function_calls
from trieste.logging import tensorboard_writer
from trieste.objectives import Ackley5, Branin, Hartmann3, Hartmann6, ScaledBranin, SimpleQuadratic
from trieste.space import (
Box,
DiscreteSearchSpace,
LinearConstraint,
SearchSpace,
TaggedProductSearchSpace,
)
from trieste.types import TensorType
def _quadratic_sum(shift: list[float]) -> AcquisitionFunction:
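    # Concave quadratic acquisition with a maximum of 0.5 at x == shift, reduced over the batch axis.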
return lambda x: tf.reduce_sum(0.5 - quadratic(x - shift), axis=-2)
def _delta_function(power: float) -> AcquisitionFunction:
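    # 1 / x**power diverges near zero; used below to simulate acquisition functions that
    # optimizers cannot handle.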
return lambda x: tf.reduce_sum((1 / (x**power)), -1)
def test_generate_random_search_optimizer_raises_with_invalid_sample_size() -> None:
with pytest.raises(ValueError):
generate_random_search_optimizer(num_samples=-5)
@pytest.mark.parametrize("batch_size", [0, -2])
def test_optimize_discrete_raises_with_invalid_vectorized_batch_size(batch_size: int) -> None:
search_space = DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]]))
acq_fn = _quadratic_sum([1.0])
with pytest.raises(ValueError):
optimize_discrete(search_space, (acq_fn, batch_size))
@pytest.mark.parametrize("batch_size", [0, -2])
def test_random_optimizer_raises_with_invalid_vectorized_batch_size(batch_size: int) -> None:
search_space = Box([-1], [2])
acq_fn = _quadratic_sum([1.0])
with pytest.raises(ValueError):
generate_random_search_optimizer()(search_space, (acq_fn, batch_size))
SP = TypeVar("SP", bound=SearchSpace)
@random_seed
@pytest.mark.parametrize(
"search_space, shift, expected_maximizer, optimizers",
[
(
DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])),
[1.0],
[[1.2]],
[optimize_discrete, generate_random_search_optimizer()],
), # 1D
(
DiscreteSearchSpace(tf.constant([[-0.5, -0.3], [-0.2, 0.3], [0.2, -0.3], [1.2, 0.4]])),
[0.3, -0.4],
[[0.2, -0.3]],
[optimize_discrete, generate_random_search_optimizer()],
), # 2D
(
Box([-1], [2]),
[1.0],
[[1.0]],
[generate_random_search_optimizer(10_000)],
), # 1D
(
Box(tf.constant([-1], dtype=tf.float64), tf.constant([2], dtype=tf.float64)),
[1.0],
[[1.0]],
[generate_random_search_optimizer(10_000)],
), # 1D with tf bounds
(
Box([-1, -2], [1.5, 2.5]),
[0.3, -0.4],
[[0.3, -0.4]],
[generate_random_search_optimizer(10_000)],
), # 2D
(
Box([-1, -2], [1.5, 2.5]),
[1.0, 4],
[[1.0, 2.5]],
[generate_random_search_optimizer(10_000)],
), # 2D with maximum outside search space
],
)
@pytest.mark.parametrize("split_acquisition_function", [False, True])
def test_discrete_and_random_optimizer_on_quadratic(
search_space: SP,
shift: list[float],
expected_maximizer: list[list[float]],
optimizers: list[AcquisitionOptimizer[SP]],
split_acquisition_function: bool,
) -> None:
for optimizer in optimizers:
if split_acquisition_function:
optimizer = split_acquisition_function_calls(optimizer, 97)
maximizer = optimizer(search_space, _quadratic_sum(shift))
if optimizer is optimize_discrete:
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-4)
else:
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-1)
@random_seed
@pytest.mark.parametrize(
"neg_function, expected_maximizer, search_space",
[
(Ackley5.objective, Ackley5.minimizers, Ackley5.search_space),
(Hartmann3.objective, Hartmann3.minimizers, Hartmann3.search_space),
(Hartmann6.objective, Hartmann6.minimizers, Hartmann6.search_space),
],
)
def test_random_search_optimizer_on_toy_problems(
neg_function: Callable[[TensorType], TensorType],
expected_maximizer: TensorType,
search_space: SearchSpace,
) -> None:
def target_function(x: TensorType) -> TensorType:
return -1 * neg_function(tf.squeeze(x, 1))
optimizer: AcquisitionOptimizer[SearchSpace] = generate_random_search_optimizer(1_000_000)
maximizer = optimizer(search_space, target_function)
npt.assert_allclose(maximizer, expected_maximizer, rtol=2e-1)
def test_generate_continuous_optimizer_raises_with_invalid_init_params() -> None:
with pytest.raises(ValueError):
generate_continuous_optimizer(num_initial_samples=-5)
with pytest.raises(ValueError):
generate_continuous_optimizer(num_optimization_runs=-5)
with pytest.raises(ValueError):
generate_continuous_optimizer(num_optimization_runs=5, num_initial_samples=4)
with pytest.raises(ValueError):
generate_continuous_optimizer(num_recovery_runs=-5)
@pytest.mark.parametrize("num_optimization_runs", [1, 10])
@pytest.mark.parametrize("num_recovery_runs", [1, 10])
def test_optimize_continuous_raises_for_impossible_optimization(
num_optimization_runs: int, num_recovery_runs: int
) -> None:
search_space = Box([-1, -1], [1, 2])
optimizer = generate_continuous_optimizer(
num_optimization_runs=num_optimization_runs, num_recovery_runs=num_recovery_runs
)
with pytest.raises(FailedOptimizationError) as e:
optimizer(search_space, _delta_function(10))
assert (
str(e.value)
== f"""
Acquisition function optimization failed,
even after {num_recovery_runs + num_optimization_runs} restarts.
"""
)
@pytest.mark.parametrize("batch_size", [0, -2])
def test_optimize_continuous_raises_with_invalid_vectorized_batch_size(batch_size: int) -> None:
search_space = Box([-1], [2])
acq_fn = _quadratic_sum([1.0])
with pytest.raises(ValueError):
generate_continuous_optimizer()(search_space, (acq_fn, batch_size))
@pytest.mark.parametrize("num_optimization_runs", [1, 10])
@pytest.mark.parametrize("num_initial_samples", [1000, 5000])
def test_optimize_continuous_correctly_uses_init_params(
num_optimization_runs: int, num_initial_samples: int
) -> None:
querying_initial_sample = True
def _target_fn(x: TensorType) -> TensorType:
nonlocal querying_initial_sample
if querying_initial_sample: # check size of initial sample
assert tf.shape(x)[0] == num_initial_samples
        else:  # check that evaluations are batched in parallel with the correct batch size
assert tf.shape(x)[0] == num_optimization_runs
querying_initial_sample = False
return _quadratic_sum([0.5, 0.5])(x)
optimizer = generate_continuous_optimizer(num_initial_samples, num_optimization_runs)
optimizer(Box([-1], [1]), _target_fn)
@unittest.mock.patch("trieste.logging.tf.summary.text")
@unittest.mock.patch("trieste.logging.tf.summary.scalar")
@pytest.mark.parametrize("failed_first_optimization", [True, False])
@pytest.mark.parametrize("num_recovery_runs", [0, 2, 10])
def test_optimize_continuous_recovery_runs(
mocked_summary_scalar: unittest.mock.MagicMock,
mocked_summary_text: unittest.mock.MagicMock,
failed_first_optimization: bool,
num_recovery_runs: int,
) -> None:
currently_failing = failed_first_optimization
num_batch_evals = 0
num_evals = 0
def _target_fn(x: TensorType) -> TensorType:
nonlocal currently_failing
nonlocal num_batch_evals
nonlocal num_evals
num_evals += 1
if (
tf.shape(x)[0] > 1
        ):  # count batch evaluations (i.e. the random init or recovery runs)
num_batch_evals += 1
if (
num_batch_evals > 1
        ):  # after the random init, the next batch eval will be the start of a recovery run
assert tf.shape(x)[0] in (
num_recovery_runs,
1, # when generating improvement_on_initial_samples log
            )  # check that we do the correct number of recovery runs
currently_failing = False
if currently_failing: # use function that is impossible to optimize
return _delta_function(10)(x)
else:
return _quadratic_sum([0.5, 0.5])(x) # use function that is easy to optimize
with tensorboard_writer(unittest.mock.MagicMock()):
optimizer = generate_continuous_optimizer(
num_optimization_runs=1, num_recovery_runs=num_recovery_runs
)
if failed_first_optimization and (num_recovery_runs == 0):
with pytest.raises(FailedOptimizationError):
optimizer(Box([-1], [1]), _target_fn)
else:
optimizer(Box([-1], [1]), _target_fn)
# check we also generated the expected tensorboard logs
scalar_logs = {call[0][0]: call[0][1:] for call in mocked_summary_scalar.call_args_list}
if failed_first_optimization and (num_recovery_runs == 0):
assert not scalar_logs
else:
assert set(scalar_logs) == {
"spo_af_evaluations",
"spo_improvement_on_initial_samples",
}
# also evaluated once for the initial points, and again when generating the log
assert scalar_logs["spo_af_evaluations"][0] == num_evals - 2
text_logs = {call[0][0]: call[0][1:] for call in mocked_summary_text.call_args_list}
if failed_first_optimization and (num_recovery_runs > 0):
assert set(text_logs) == {"spo_recovery_run"}
else:
assert not text_logs
def test_optimize_continuous_when_target_raises_exception() -> None:
num_queries = 0
def _target_fn(x: TensorType) -> TensorType:
nonlocal num_queries
        if num_queries > 1:  # after the initial sample, return inf
return -1 * Hartmann3.objective(tf.squeeze(x, 1)) / 0.0
num_queries += 1
return -1 * Hartmann3.objective(tf.squeeze(x, 1))
optimizer = generate_continuous_optimizer(optimizer_args={"options": {"maxiter": 10}})
with pytest.raises(FailedOptimizationError):
optimizer(Hartmann3.search_space, _target_fn)
def test_continuous_optimizer_returns_raise_on_infeasible_points() -> None:
def target_function(x: TensorType) -> TensorType:
return -1 * ScaledBranin.objective(tf.squeeze(x, 1))
search_space = Box([0.0, 0.0], [1.0, 1.0], [LinearConstraint(A=tf.eye(2), lb=0.5, ub=0.5)])
optimizer = generate_continuous_optimizer(
num_initial_samples=1_000, num_optimization_runs=10, optimizer_args=dict(method="l-bfgs-b")
)
with pytest.raises(FailedOptimizationError):
optimizer(search_space, target_function)
@random_seed
@pytest.mark.parametrize(
"search_space, shift, expected_maximizer",
[
(
Box([-1], [2]),
[1.0],
[[1.0]],
), # 1D
(
Box([-1, -2], [1.5, 2.5]),
[0.3, -0.4],
[[0.3, -0.4]],
), # 2D
(
Box([-1, -2], [1.5, 2.5]),
[1.0, 4],
[[1.0, 2.5]],
), # 2D with maximum outside search space
(
Box([-1, -2, 1], [1.5, 2.5, 1.5]),
[0.3, -0.4, 0.5],
[[0.3, -0.4, 1.0]],
), # 3D
(
TaggedProductSearchSpace([Box([-1, -2], [1.5, 2.5])]),
[0.3, -0.4],
[[0.3, -0.4]],
), # Tagged space of just 2D Box
(
TaggedProductSearchSpace(
[
DiscreteSearchSpace(
tf.constant([[0.4, -2.0], [0.3, -0.4], [0.0, 2.5]], dtype=tf.float64)
)
]
),
[0.3, -0.4],
[[0.3, -0.4]],
), # Tagged space of just 2D discrete
(
TaggedProductSearchSpace(
[
Box([-1], [1.5]),
DiscreteSearchSpace(tf.constant([[-2.0], [-0.4], [2.5]], dtype=tf.float64)),
]
),
[0.3, -0.4],
[[0.3, -0.4]],
), # Tagged space of 1D Box, 1D discrete
(
TaggedProductSearchSpace(
[
Box([-1, -2], [1.5, 2.5]),
DiscreteSearchSpace(tf.constant([[1.0], [1.25], [1.5]], dtype=tf.float64)),
]
),
[0.3, -0.4, 0.5],
[[0.3, -0.4, 1.0]],
), # Tagged space of 2D Box, 1D discrete
(
TaggedProductSearchSpace(
[
Box([-1], [1.5]),
DiscreteSearchSpace(
tf.constant([[-0.4, 1.0], [0.0, 1.25], [1.0, 1.5]], dtype=tf.float64)
),
]
),
[0.3, -0.4, 0.5],
[[0.3, -0.4, 1.0]],
), # Tagged space of 1D Box, 2D discrete
(
TaggedProductSearchSpace(
[
Box([-1], [1.5]),
DiscreteSearchSpace(tf.constant([[-0.4], [0.0], [1.0]], dtype=tf.float64)),
DiscreteSearchSpace(tf.constant([[1.0], [1.25], [1.5]], dtype=tf.float64)),
]
),
[0.3, -0.4, 0.5],
[[0.3, -0.4, 1.0]],
), # Tagged space of 1D Box, 1D discrete, 1D discrete
],
)
@pytest.mark.parametrize(
"optimizer",
[
generate_continuous_optimizer(num_optimization_runs=3),
generate_continuous_optimizer(num_optimization_runs=3, num_recovery_runs=0),
generate_continuous_optimizer(num_optimization_runs=1, num_initial_samples=5),
],
)
def test_continuous_optimizer_on_quadratic(
search_space: Box,
shift: list[float],
expected_maximizer: list[list[float]],
optimizer: AcquisitionOptimizer[Box],
) -> None:
maximizer = optimizer(search_space, _quadratic_sum(shift))
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-3)
@random_seed
@pytest.mark.parametrize(
"neg_function, expected_maximizer, search_space",
[
(Ackley5.objective, Ackley5.minimizers, Ackley5.search_space),
(Hartmann3.objective, Hartmann3.minimizers, Hartmann3.search_space),
(Hartmann6.objective, Hartmann6.minimizers, Hartmann6.search_space),
],
)
def test_continuous_optimizer_on_toy_problems(
neg_function: Callable[[TensorType], TensorType],
expected_maximizer: TensorType,
search_space: Box,
) -> None:
def target_function(x: TensorType) -> TensorType:
return -1 * neg_function(tf.squeeze(x, 1))
optimizer = generate_continuous_optimizer(num_initial_samples=1_000, num_optimization_runs=10)
maximizer = optimizer(search_space, target_function)
npt.assert_allclose(maximizer, expected_maximizer, rtol=1e-1)
@pytest.mark.parametrize(
"search_space, point",
[
(Box([-1], [2]), tf.constant([[0.0]], dtype=tf.float64)),
(Box([-1, -2], [1.5, 2.5]), tf.constant([[0.0, 0.0]])),
(DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])), tf.constant([[0.2]])),
],
)
def test_get_bounds_of_box_relaxation_around_point_raises_for_not_product_spaces(
search_space: DiscreteSearchSpace | Box,
point: TensorType,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
get_bounds_of_box_relaxation_around_point(search_space, point) # type: ignore
@pytest.mark.parametrize(
"search_space, point, lower, upper",
[
(
TaggedProductSearchSpace(
[
Box([-1.0], [1.5]),
DiscreteSearchSpace(tf.constant([[-2.0], [-0.4], [2.5]], dtype=tf.float64)),
]
),
tf.constant([[0.0, -0.4]], dtype=tf.float64),
[-1, -0.4],
[1.5, -0.4],
), # Tagged space of 1D Box and 1D discrete
(
TaggedProductSearchSpace(
[
Box([-1.0, -2.0], [1.5, 2.5]),
DiscreteSearchSpace(tf.constant([[1.0], [1.25], [1.5]], dtype=tf.float64)),
]
),
tf.constant([[0.0, 1.0, 1.25]], dtype=tf.float64),
[-1.0, -2.0, 1.25],
[1.5, 2.5, 1.25],
), # Tagged space of 2D Box and 1D discrete
(
TaggedProductSearchSpace(
[
Box([-1.0], [1.5]),
DiscreteSearchSpace(
tf.constant([[-0.4, 1.0], [0.0, 1.25], [1.0, 1.5]], dtype=tf.float64)
),
]
),
tf.constant([[-1.0, 1.0, 1.5]], dtype=tf.float64),
[-1.0, 1.0, 1.5],
[1.5, 1.0, 1.5],
), # Tagged space of 1D Box and 2D discrete
(
TaggedProductSearchSpace(
[
Box([-1.0], [1.5]),
DiscreteSearchSpace(tf.constant([[-0.4], [0.0], [1.0]], dtype=tf.float64)),
DiscreteSearchSpace(tf.constant([[1.0], [1.25], [1.5]], dtype=tf.float64)),
]
),
tf.constant([[-1.0, 1.0, 1.5]], dtype=tf.float64),
[-1.0, 1.0, 1.5],
[1.5, 1.0, 1.5],
), # Tagged space of 1D Box, 1D discrete and 1D discrete
],
)
def test_get_bounds_of_box_relaxation_around_point(
search_space: TaggedProductSearchSpace,
point: TensorType,
lower: TensorType,
upper: TensorType,
) -> None:
bounds = get_bounds_of_box_relaxation_around_point(search_space, point)
npt.assert_array_equal(bounds.lb, lower)
npt.assert_array_equal(bounds.ub, upper)
def test_batchify_joint_raises_with_invalid_batch_size() -> None:
batch_size_one_optimizer = generate_continuous_optimizer()
with pytest.raises(ValueError):
batchify_joint(batch_size_one_optimizer, -5)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 5])
def test_batchify_joint_raises_with_vectorized_acquisition_function(batch_size: int) -> None:
batch_size_one_optimizer = generate_continuous_optimizer()
optimizer = batchify_joint(batch_size_one_optimizer, 5)
search_space = Box([-1], [1])
acq_fn = _quadratic_sum([0.5])
with pytest.raises(ValueError):
optimizer(search_space, (acq_fn, batch_size))
@random_seed
@pytest.mark.parametrize("batch_size", [1, 2, 3, 5])
@pytest.mark.parametrize(
"search_space, acquisition, maximizer",
[
(Box([-1], [1]), _quadratic_sum([0.5]), ([[0.5]])),
(Box([-1, -1, -1], [1, 1, 1]), _quadratic_sum([0.5, -0.5, 0.2]), ([[0.5, -0.5, 0.2]])),
],
)
def test_batchify_joint(
search_space: Box, acquisition: AcquisitionFunction, maximizer: TensorType, batch_size: int
) -> None:
batch_size_one_optimizer = generate_continuous_optimizer(num_optimization_runs=5)
batch_optimizer = batchify_joint(batch_size_one_optimizer, batch_size)
points = batch_optimizer(search_space, acquisition)
assert points.shape == [batch_size] + search_space.lower.shape
for point in points:
npt.assert_allclose(tf.expand_dims(point, 0), maximizer, rtol=2e-4)
def test_batchify_vectorized_raises_with_invalid_batch_size() -> None:
batch_size_one_optimizer = generate_continuous_optimizer()
with pytest.raises(ValueError):
batchify_vectorize(batch_size_one_optimizer, -5)
@pytest.mark.parametrize("batch_size", [1, 2, 3, 5])
def test_batchify_vectorize_raises_with_vectorized_acquisition_function(batch_size: int) -> None:
batch_size_one_optimizer = generate_continuous_optimizer()
optimizer = batchify_vectorize(batch_size_one_optimizer, 5)
search_space = Box([-1], [1])
acq_fn = _quadratic_sum([0.5])
with pytest.raises(ValueError):
optimizer(search_space, (acq_fn, batch_size))
@random_seed
@pytest.mark.parametrize(
"optimizer", [generate_random_search_optimizer(10_000), generate_continuous_optimizer()]
)
@pytest.mark.parametrize("split_acquisition_function", [False, True])
def test_batchify_vectorized_for_random_and_continuous_optimizers_on_vectorized_quadratic(
optimizer: AcquisitionOptimizer[Box],
split_acquisition_function: bool,
) -> None:
search_space = Box([-1, -2], [1.5, 2.5])
shifts = [[0.3, -0.4], [1.0, 4]]
expected_maximizers = [[0.3, -0.4], [1.0, 2.5]]
vectorized_batch_size = 2
def vectorized_target(x: TensorType) -> TensorType: # [N, V, D] -> [N,V]
individual_func = [
_quadratic_sum(shifts[i])(x[:, i : i + 1, :]) for i in range(vectorized_batch_size)
]
return tf.concat(individual_func, axis=-1)
batched_optimizer = batchify_vectorize(optimizer, batch_size=vectorized_batch_size)
if split_acquisition_function:
batched_optimizer = split_acquisition_function_calls(batched_optimizer, 1000)
maximizers = batched_optimizer(search_space, vectorized_target)
npt.assert_allclose(maximizers, expected_maximizers, rtol=1e-1)
def test_batchify_vectorized_for_discrete_optimizer_on_vectorized_quadratic() -> None:
search_space = DiscreteSearchSpace(
tf.constant([[0.3, -0.4], [1.0, 2.5], [0.2, 0.5], [0.5, 2.0], [2.0, 0.1]])
)
shifts = [[0.3, -0.4], [1.0, 4]]
expected_maximizers = [[0.3, -0.4], [1.0, 2.5]]
vectorized_batch_size = 2
def vectorized_target(x: TensorType) -> TensorType: # [N, V, D] -> [N,V]
individual_func = [
_quadratic_sum(shifts[i])(x[:, i : i + 1, :]) for i in range(vectorized_batch_size)
]
return tf.concat(individual_func, axis=-1)
batched_optimizer = batchify_vectorize(optimize_discrete, batch_size=vectorized_batch_size)
maximizers = batched_optimizer(search_space, vectorized_target)
npt.assert_allclose(maximizers, expected_maximizers, rtol=1e-1)
@random_seed
@pytest.mark.parametrize("vectorization", [1, 5])
@pytest.mark.parametrize(
"neg_function, expected_maximizer, search_space",
[
(Ackley5.objective, Ackley5.minimizers, Ackley5.search_space),
(Hartmann3.objective, Hartmann3.minimizers, Hartmann3.search_space),
(Hartmann6.objective, Hartmann6.minimizers, Hartmann6.search_space),
],
)
def test_batchify_vectorized_for_continuous_optimizer_on_duplicated_toy_problems(
vectorization: int,
neg_function: Callable[[TensorType], TensorType],
expected_maximizer: TensorType,
search_space: Box,
) -> None:
def target_function(x: TensorType) -> TensorType: # [N,V,D] -> [N, V]
individual_func = [-1 * neg_function(x[:, i, :]) for i in range(vectorization)]
return tf.concat(individual_func, axis=-1) # vectorize by repeating same function
optimizer = batchify_vectorize(
generate_continuous_optimizer(num_initial_samples=1_000, num_optimization_runs=10),
batch_size=vectorization,
)
maximizer = optimizer(search_space, target_function)
npt.assert_allclose(maximizer, tf.tile(expected_maximizer, [vectorization, 1]), rtol=1e-1)
@random_seed
def test_batchify_vectorized_for_continuous_optimizer_on_vectorized_toy_problems() -> None:
search_space = Branin.search_space
functions = [Branin.objective, ScaledBranin.objective, SimpleQuadratic.objective]
    expected_maximums = [-Branin.minimum, -ScaledBranin.minimum, -SimpleQuadratic.minimum]
vectorized_batch_size = 3
def target_function(x: TensorType) -> TensorType: # [N,V,D] -> [N, V]
individual_func = [-1 * functions[i](x[:, i, :]) for i in range(vectorized_batch_size)]
return tf.concat(individual_func, axis=-1) # vectorize by concatenating three functions
optimizer = batchify_vectorize(
generate_continuous_optimizer(num_initial_samples=1_000, num_optimization_runs=10),
batch_size=vectorized_batch_size,
)
maximizer = optimizer(search_space, target_function)
npt.assert_allclose(
        target_function(maximizer[None, :, :]), tf.transpose(expected_maximums), rtol=1e-5
)
@random_seed
@pytest.mark.parametrize(
"search_space, acquisition, maximizer",
[
(
DiscreteSearchSpace(tf.constant([[-0.5], [0.2], [1.2], [1.7]])),
_quadratic_sum([1.0]),
[[1.2]],
),
(Box([0], [1]), _quadratic_sum([0.5]), ([[0.5]])),
(Box([-1, -1, -1], [1, 1, 1]), _quadratic_sum([0.5, -0.5, 0.2]), ([[0.5, -0.5, 0.2]])),
(
TaggedProductSearchSpace(
[
Box([-1, -1], [1, 1]),
DiscreteSearchSpace(tf.constant([[-0.2], [0.0], [0.2]], dtype=tf.float64)),
]
),
_quadratic_sum([0.5, -0.5, 0.2]),
([[0.5, -0.5, 0.2]]),
),
],
)
def test_automatic_optimizer_selector(
search_space: Box,
acquisition: AcquisitionFunction,
maximizer: TensorType,
) -> None:
optimizer = automatic_optimizer_selector
point = optimizer(search_space, acquisition)
npt.assert_allclose(point, maximizer, rtol=2e-4)
def test_split_acquisition_function_calls_raises_with_invalid_batch_size() -> None:
optimizer = generate_continuous_optimizer()
with pytest.raises(ValueError):
split_acquisition_function_calls(optimizer, -5)
@pytest.mark.parametrize("batch_size", [1, 2, 9, 10, 11, 19, 20, 21, 100])
def test_split_acquisition_function(batch_size: int) -> None:
acquisition_function = MagicMock()
acquisition_function.side_effect = lambda x: x
def dummy_optimizer(
search_space: SearchSpace,
f: Union[AcquisitionFunction, Tuple[AcquisitionFunction, int]],
) -> TensorType:
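        # Stand-in optimizer that simply evaluates the acquisition on n evenly spaced points.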
af, n = f if isinstance(f, tuple) else (f, 1)
return af(tf.linspace([0, 0], [1, 1], n))
batched_optimizer = split_acquisition_function_calls(dummy_optimizer, batch_size)
value = batched_optimizer(Box([0, 0], [1, 1]), (acquisition_function, 10))
npt.assert_array_equal(value, tf.linspace([0, 0], [1, 1], 10))
# since each row has two elements, actual batch size will always be even
expected_batch_size = 2 * ceil(batch_size / 2)
assert all(
tf.size(call[0][0]) <= expected_batch_size for call in acquisition_function.call_args_list
)
assert acquisition_function.call_count == ceil(20 / expected_batch_size)
@unittest.mock.patch("scipy.optimize.minimize")
@pytest.mark.parametrize(
"search_space, optimizer_args, expected_method, expected_constraints",
[
(Branin.search_space, None, "l-bfgs-b", []),
(Branin.search_space, dict(method="trust-constr"), "trust-constr", []),
(Branin.search_space, dict(constraints="dummy"), "l-bfgs-b", "dummy"),
(
Branin.search_space,
dict(method="trust-constr", constraints="dummy"),
"trust-constr",
"dummy",
),
(
Box([0, 0], [1, 1], [LinearConstraint(A=tf.eye(2), lb=0, ub=1)]),
None,
"trust-constr",
[LinearConstraint(A=tf.eye(2), lb=0, ub=1)],
),
],
)
def test_optimizer_scipy_method_select(
mocked_minimize: MagicMock,
search_space: Box,
optimizer_args: Optional[dict[str, Any]],
expected_method: str,
expected_constraints: Optional[str],
) -> None:
def target_function(x: TensorType) -> TensorType:
return -1 * Branin.objective(tf.squeeze(x, 1))
def side_effect(*args: Any, **kwargs: Any) -> spo.OptimizeResult:
return spo.OptimizeResult(fun=0.0, nfev=0, x=Branin.minimizers[0].numpy(), success=True)
mocked_minimize.side_effect = side_effect
optimizer = generate_continuous_optimizer(
num_initial_samples=2, num_optimization_runs=2, optimizer_args=optimizer_args
)
optimizer(search_space, target_function)
received_method = mocked_minimize.call_args[1]["method"]
assert received_method == expected_method
if "constraints" in mocked_minimize.call_args[1]:
received_constraints = mocked_minimize.call_args[1]["constraints"]
elif search_space.has_constraints:
received_constraints = search_space.constraints
else:
received_constraints = None
assert received_constraints == expected_constraints
| 29,533 | 36.432193 | 99 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/test_rule.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
from collections.abc import Mapping
from typing import Callable, Optional
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, quadratic, random_seed
from tests.util.models.gpflow.models import (
GaussianProcess,
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
)
from trieste.acquisition import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
NegativeLowerConfidenceBound,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
VectorizedAcquisitionFunctionBuilder,
)
from trieste.acquisition.optimizer import AcquisitionOptimizer
from trieste.acquisition.rule import (
TURBO,
AcquisitionRule,
AsynchronousGreedy,
AsynchronousOptimization,
AsynchronousRuleState,
BatchHypervolumeSharpeRatioIndicator,
DiscreteThompsonSampling,
EfficientGlobalOptimization,
RandomSampling,
TrustRegion,
)
from trieste.acquisition.sampler import (
ExactThompsonSampler,
GumbelSampler,
ThompsonSampler,
ThompsonSamplerFromTrajectory,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.models.interfaces import TrainableSupportsGetKernel
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import State, Tag, TensorType
def _line_search_maximize(
search_space: Box, f: AcquisitionFunction, num_query_points: int = 1
) -> TensorType:
if num_query_points != 1:
raise ValueError("_line_search_maximizer only defined for batches of size 1")
if len(search_space.lower) != 1:
raise ValueError("_line_search_maximizer only defined for search spaces of dimension 1")
xs = tf.linspace(search_space.lower, search_space.upper, 10**6)
return xs[tf.squeeze(tf.argmax(f(tf.expand_dims(xs, 1)))), None]
@pytest.mark.parametrize(
"num_search_space_samples, num_query_points",
[
(0, 50),
(-2, 50),
(10, 0),
(10, -2),
],
)
def test_discrete_thompson_sampling_raises_for_invalid_init_params(
num_search_space_samples: int, num_query_points: int
) -> None:
with pytest.raises(ValueError):
DiscreteThompsonSampling(num_search_space_samples, num_query_points)
@pytest.mark.parametrize(
"models",
[
{},
{"foo": QuadraticMeanAndRBFKernel()},
{"foo": QuadraticMeanAndRBFKernel(), OBJECTIVE: QuadraticMeanAndRBFKernel()},
],
)
@pytest.mark.parametrize("datasets", [{}, {OBJECTIVE: empty_dataset([1], [1])}])
def test_discrete_thompson_sampling_raises_for_invalid_models_keys(
datasets: dict[Tag, Dataset], models: dict[Tag, ProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
rule = DiscreteThompsonSampling(100, 10)
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
@pytest.mark.parametrize("models", [{}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}])
@pytest.mark.parametrize(
"datasets",
[
{},
{"foo": empty_dataset([1], [1])},
{"foo": empty_dataset([1], [1]), OBJECTIVE: empty_dataset([1], [1])},
],
)
def test_discrete_thompson_sampling_raises_for_invalid_dataset_keys(
datasets: dict[Tag, Dataset], models: dict[Tag, ProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
rule = DiscreteThompsonSampling(10, 100)
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=True),
ThompsonSamplerFromTrajectory(sample_min_value=True),
],
)
def test_discrete_thompson_sampling_raises_if_passed_sampler_with_sample_min_value_True(
sampler: ThompsonSampler[GaussianProcess],
) -> None:
with pytest.raises(ValueError):
DiscreteThompsonSampling(100, 10, thompson_sampler=sampler)
@pytest.mark.parametrize(
"thompson_sampler",
[
ExactThompsonSampler(sample_min_value=False),
ThompsonSamplerFromTrajectory(sample_min_value=False),
],
)
def test_discrete_thompson_sampling_initialized_with_correct_sampler(
thompson_sampler: ThompsonSampler[GaussianProcess],
) -> None:
ts = DiscreteThompsonSampling(100, 10, thompson_sampler=thompson_sampler)
assert ts._thompson_sampler == thompson_sampler
def test_discrete_thompson_sampling_raises_if_use_fourier_features_with_incorrect_model() -> None:
search_space = Box([-2.2, -1.0], [1.3, 3.3])
ts = DiscreteThompsonSampling(
100, 10, thompson_sampler=ThompsonSamplerFromTrajectory(sample_min_value=False)
)
dataset = Dataset(tf.zeros([1, 2], dtype=tf.float64), tf.zeros([1, 1], dtype=tf.float64))
model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1.0, dtype=tf.float64))
with pytest.raises(ValueError):
ts.acquire_single(search_space, model, dataset=dataset) # type: ignore
def test_discrete_thompson_sampling_raises_for_gumbel_sampler() -> None:
with pytest.raises(ValueError):
DiscreteThompsonSampling(100, 10, thompson_sampler=GumbelSampler(sample_min_value=False))
@pytest.mark.parametrize(
"thompson_sampler",
[
ExactThompsonSampler(sample_min_value=False),
ThompsonSamplerFromTrajectory(sample_min_value=False),
],
)
@pytest.mark.parametrize("num_query_points", [1, 10])
def test_discrete_thompson_sampling_acquire_returns_correct_shape(
thompson_sampler: ThompsonSampler[GaussianProcess], num_query_points: int
) -> None:
search_space = Box([-2.2, -1.0], [1.3, 3.3])
ts = DiscreteThompsonSampling(100, num_query_points, thompson_sampler=thompson_sampler)
dataset = Dataset(tf.zeros([1, 2], dtype=tf.float64), tf.zeros([1, 1], dtype=tf.float64))
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
query_points = ts.acquire_single(search_space, model, dataset=dataset)
npt.assert_array_equal(query_points.shape, tf.constant([num_query_points, 2]))
@pytest.mark.parametrize("num_query_points", [-1, 0])
def test_random_sampling_raises_for_invalid_init_params(num_query_points: int) -> None:
with pytest.raises(ValueError):
RandomSampling(num_query_points)
@pytest.mark.parametrize("num_query_points", [1, 10, 50])
def test_random_sampling_acquire_returns_correct_shape(num_query_points: int) -> None:
search_space = Box([-2.2, -1.0], [1.3, 3.3])
rule = RandomSampling(num_query_points)
dataset = Dataset(tf.zeros([1, 2], dtype=tf.float64), tf.zeros([1, 1], dtype=tf.float64))
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
query_points = rule.acquire_single(search_space, model)
npt.assert_array_equal(query_points.shape, tf.constant([num_query_points, 2]))
def test_efficient_global_optimization_raises_for_no_query_points() -> None:
with pytest.raises(ValueError):
EfficientGlobalOptimization(num_query_points=0)
def test_efficient_global_optimization_raises_for_no_batch_fn_with_many_query_points() -> None:
with pytest.raises(ValueError):
EfficientGlobalOptimization(num_query_points=2)
@pytest.mark.parametrize("optimizer", [_line_search_maximize, None])
def test_efficient_global_optimization(optimizer: AcquisitionOptimizer[Box]) -> None:
class NegQuadratic(SingleModelAcquisitionBuilder[ProbabilisticModel]):
def __init__(self) -> None:
self._updated = False
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
return lambda x: -quadratic(tf.squeeze(x, -2) - 1)
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
self._updated = True
return function
function = NegQuadratic()
search_space = Box([-10], [10])
ego = EfficientGlobalOptimization(function, optimizer)
data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)
query_point = ego.acquire_single(search_space, model, dataset=data)
npt.assert_allclose(query_point, [[1]], rtol=1e-4)
assert not function._updated
query_point = ego.acquire(search_space, {OBJECTIVE: model})
npt.assert_allclose(query_point, [[1]], rtol=1e-4)
assert function._updated
def test_efficient_global_optimization_initial_acquisition_function() -> None:
class NoisyNegQuadratic(SingleModelAcquisitionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
noise = tf.random.uniform([], -0.05, 0.05, dtype=tf.float64)
return lambda x: -quadratic(tf.squeeze(x, -2) - 1) + noise
def update_acquisition_function(
self,
function: AcquisitionFunction,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
return function
builder = NoisyNegQuadratic()
search_space = Box([-10], [10])
ego = EfficientGlobalOptimization[Box, ProbabilisticModel](builder)
data, model = empty_dataset([1], [1]), QuadraticMeanAndRBFKernel(x_shift=1)
ego.acquire_single(search_space, model, dataset=data)
assert ego.acquisition_function is not None
# check that we can create a new EGO with the exact same AF state
acq_func = copy.deepcopy(ego.acquisition_function)
ego_copy = EfficientGlobalOptimization[Box, ProbabilisticModel](
builder, initial_acquisition_function=acq_func
)
ego_copy.acquire_single(search_space, model, dataset=data)
assert ego_copy.acquisition_function is not None
x = search_space.sample(1)
npt.assert_allclose(ego.acquisition_function(x), ego_copy.acquisition_function(x))
# check that if we don't do this, the AF state might vary
ego_non_copy = EfficientGlobalOptimization[Box, ProbabilisticModel](builder)
ego_non_copy.acquire_single(search_space, model, dataset=data)
assert ego_non_copy.acquisition_function is not None
npt.assert_raises(
AssertionError,
npt.assert_allclose,
ego.acquisition_function(x),
ego_non_copy.acquisition_function(x),
)
class _JointBatchModelMinusMeanMaximumSingleBuilder(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
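        # Scores a joint batch by the negated largest posterior mean across its points.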
return lambda at: -tf.reduce_max(models[OBJECTIVE].predict(at)[0], axis=-2)
@random_seed
@pytest.mark.parametrize(
"rule_fn",
[
lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),
lambda acq, batch_size: AsynchronousOptimization(acq, num_query_points=batch_size),
],
)
# As a side effect, this test ensures that EGO and AsynchronousOptimization
# behave similarly in sync mode
def test_joint_batch_acquisition_rule_acquire(
rule_fn: Callable[
# callable input type(s)
[_JointBatchModelMinusMeanMaximumSingleBuilder, int],
# callable output type
AcquisitionRule[TensorType, Box, ProbabilisticModel]
| AcquisitionRule[State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel],
]
) -> None:
search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
num_query_points = 4
acq = _JointBatchModelMinusMeanMaximumSingleBuilder()
acq_rule: AcquisitionRule[TensorType, Box, ProbabilisticModel] | AcquisitionRule[
State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel
] = rule_fn(acq, num_query_points)
dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
points_or_stateful = acq_rule.acquire_single(
search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
)
if callable(points_or_stateful):
_, query_point = points_or_stateful(None)
else:
query_point = points_or_stateful
npt.assert_allclose(tf.constant(query_point), [[0.0, 0.0]] * num_query_points, atol=1e-3)
class _GreedyBatchModelMinusMeanMaximumSingleBuilder(
SingleModelGreedyAcquisitionBuilder[ProbabilisticModel]
):
def __init__(self) -> None:
self._update_count = 0
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
pending_points: TensorType = None,
) -> AcquisitionFunction:
if pending_points is None:
return lambda at: -tf.reduce_max(model.predict(at)[0], axis=-2)
else:
best_pending_score = tf.reduce_max(model.predict(pending_points)[0])
return lambda at: -tf.math.maximum(
tf.reduce_max(model.predict(at)[0], axis=-2), best_pending_score
)
def update_acquisition_function(
self,
function: Optional[AcquisitionFunction],
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
new_optimization_step: bool = True,
) -> AcquisitionFunction:
self._update_count += 1
return self.prepare_acquisition_function(
model, dataset=dataset, pending_points=pending_points
)
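# Illustrative sketch (not exercised by the test suite): greedy batch rules call
# prepare_acquisition_function once, then update_acquisition_function once per
# additional batch point, passing the points chosen so far as `pending_points`.
# The model and zero-valued points here are placeholders.
def _sketch_greedy_batch_loop(num_query_points: int = 4) -> int:
    builder = _GreedyBatchModelMinusMeanMaximumSingleBuilder()
    model = QuadraticMeanAndRBFKernel()
    function = builder.prepare_acquisition_function(model)
    pending = tf.zeros([0, 2])
    for _ in range(num_query_points - 1):
        pending = tf.concat([pending, tf.zeros([1, 2])], axis=0)
        function = builder.update_acquisition_function(function, model, pending_points=pending)
    return builder._update_count  # expected to equal num_query_points - 1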
@random_seed
@pytest.mark.parametrize(
"rule_fn",
[
lambda acq, batch_size: EfficientGlobalOptimization(acq, num_query_points=batch_size),
lambda acq, batch_size: AsynchronousGreedy(acq, num_query_points=batch_size),
],
)
# As a side effect, this test ensures that EGO and AsynchronousGreedy
# behave similarly in sync mode
def test_greedy_batch_acquisition_rule_acquire(
rule_fn: Callable[
# callable input type(s)
[_GreedyBatchModelMinusMeanMaximumSingleBuilder, int],
# callable output type
AcquisitionRule[TensorType, Box, ProbabilisticModel]
| AcquisitionRule[State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel],
]
) -> None:
search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
num_query_points = 4
acq = _GreedyBatchModelMinusMeanMaximumSingleBuilder()
assert acq._update_count == 0
acq_rule: AcquisitionRule[TensorType, Box, ProbabilisticModel] | AcquisitionRule[
State[TensorType, AsynchronousRuleState], Box, ProbabilisticModel
] = rule_fn(acq, num_query_points)
dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
points_or_stateful = acq_rule.acquire_single(
search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
)
if callable(points_or_stateful):
_, query_points = points_or_stateful(None)
else:
query_points = points_or_stateful
assert acq._update_count == num_query_points - 1
npt.assert_allclose(tf.constant(query_points), [[0.0, 0.0]] * num_query_points, atol=1e-3)
points_or_stateful = acq_rule.acquire_single(
search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
)
if callable(points_or_stateful):
_, query_points = points_or_stateful(None)
else:
query_points = points_or_stateful
npt.assert_allclose(tf.constant(query_points), [[0.0, 0.0]] * num_query_points, atol=1e-3)
assert acq._update_count == 2 * num_query_points - 1
class _VectorizedBatchModelMinusMeanMaximumSingleBuilder(
VectorizedAcquisitionFunctionBuilder[ProbabilisticModel]
):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda at: tf.squeeze(-models[OBJECTIVE].predict(at)[0], -1)
@random_seed
def test_vectorized_batch_acquisition_rule_acquire() -> None:
search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
num_query_points = 4
acq = _VectorizedBatchModelMinusMeanMaximumSingleBuilder()
acq_rule: AcquisitionRule[TensorType, Box, ProbabilisticModel] = EfficientGlobalOptimization(
acq, num_query_points=num_query_points
)
dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
points_or_stateful = acq_rule.acquire_single(
search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
)
if callable(points_or_stateful):
_, query_point = points_or_stateful(None)
else:
query_point = points_or_stateful
npt.assert_allclose(query_point, [[0.0, 0.0]] * num_query_points, atol=1e-3)
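# Shape-contract sketch for vectorized acquisition functions (illustrative,
# reusing classes from this module): the function maps an [N, B, D] tensor of N
# candidate batches of B points to an [N, B] tensor of per-point values.
def _sketch_vectorized_shapes() -> None:
    acq_fn = _VectorizedBatchModelMinusMeanMaximumSingleBuilder().prepare_acquisition_function(
        {OBJECTIVE: QuadraticMeanAndRBFKernel()}
    )
    at = tf.zeros([7, 4, 2])  # N=7 candidate batches of B=4 two-dimensional points
    tf.debugging.assert_shapes([(acq_fn(at), [7, 4])])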
def test_async_greedy_raises_for_non_greedy_function() -> None:
non_greedy_function_builder = NegativeLowerConfidenceBound()
with pytest.raises(NotImplementedError):
# we are deliberately passing in wrong object
# hence type ignore
AsynchronousGreedy(non_greedy_function_builder) # type: ignore
def test_async_optimization_raises_for_incorrect_query_points() -> None:
with pytest.raises(ValueError):
AsynchronousOptimization(num_query_points=0)
with pytest.raises(ValueError):
AsynchronousOptimization(num_query_points=-5)
def test_async_greedy_raises_for_incorrect_query_points() -> None:
with pytest.raises(ValueError):
AsynchronousGreedy(
builder=_GreedyBatchModelMinusMeanMaximumSingleBuilder(), num_query_points=0
)
with pytest.raises(ValueError):
AsynchronousGreedy(
builder=_GreedyBatchModelMinusMeanMaximumSingleBuilder(), num_query_points=-5
)
@random_seed
@pytest.mark.parametrize(
"async_rule",
[
AsynchronousOptimization(_JointBatchModelMinusMeanMaximumSingleBuilder()),
AsynchronousGreedy(_GreedyBatchModelMinusMeanMaximumSingleBuilder()),
],
)
def test_async_keeps_track_of_pending_points(
async_rule: AcquisitionRule[
State[Optional[AsynchronousRuleState], TensorType], Box, ProbabilisticModel
]
) -> None:
search_space = Box(tf.constant([-2.2, -1.0]), tf.constant([1.3, 3.3]))
dataset = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
state_fn = async_rule.acquire_single(search_space, QuadraticMeanAndRBFKernel(), dataset=dataset)
state, point1 = state_fn(None)
state, point2 = state_fn(state)
assert state is not None
assert state.pending_points is not None
assert len(state.pending_points) == 2
    # pretend we saw an observation for the first point
new_observations = Dataset(
query_points=point1,
observations=tf.constant([[1]], dtype=tf.float32),
)
state_fn = async_rule.acquire_single(
search_space,
QuadraticMeanAndRBFKernel(),
dataset=dataset + new_observations,
)
state, point3 = state_fn(state)
assert state is not None
assert state.pending_points is not None
assert len(state.pending_points) == 2
    # we saw the first point, so pending points are
# second point and new third point
npt.assert_allclose(state.pending_points, tf.concat([point2, point3], axis=0))
@pytest.mark.parametrize("datasets", [{}, {"foo": empty_dataset([1], [1])}])
@pytest.mark.parametrize(
"models", [{}, {"foo": QuadraticMeanAndRBFKernel()}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}]
)
def test_trust_region_raises_for_missing_datasets_key(
datasets: dict[Tag, Dataset], models: dict[Tag, ProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
rule = TrustRegion()
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
class _Midpoint(AcquisitionRule[TensorType, Box, ProbabilisticModel]):
def acquire(
self,
search_space: Box,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
return (search_space.upper[None] + search_space.lower[None]) / 2
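# The TrustRegion transitions exercised by the tests below (as encoded in their
# assertions; `eps` is the trust-region half-width):
#   global step + success -> stay global, eps unchanged
#   global step + failure -> switch to a local region around the best point, eps unchanged
#   local step + success -> back to global, eps increased
#   local step + failure -> back to global, eps decreased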
@pytest.mark.parametrize(
"rule, expected_query_point",
[
(EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)), [[0.0, 0.0]]),
(_Midpoint(), [[-0.45, 1.15]]),
],
)
def test_trust_region_for_default_state(
rule: AcquisitionRule[TensorType, Box, ProbabilisticModel], expected_query_point: TensorType
) -> None:
tr = TrustRegion(rule)
dataset = Dataset(tf.constant([[0.1, 0.2]]), tf.constant([[0.012]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
state, query_point = tr.acquire_single(
search_space, QuadraticMeanAndRBFKernel(), dataset=dataset
)(None)
assert state is not None
npt.assert_array_almost_equal(query_point, expected_query_point, 5)
npt.assert_array_almost_equal(state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(state.acquisition_space.upper, upper_bound)
npt.assert_array_almost_equal(state.y_min, [0.012])
assert state.is_global
@pytest.mark.parametrize(
"rule, expected_query_point",
[
(EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)), [[0.0, 0.0]]),
(_Midpoint(), [[-0.45, 1.15]]),
],
)
def test_trust_region_successful_global_to_global_trust_region_unchanged(
rule: AcquisitionRule[TensorType, Box, ProbabilisticModel], expected_query_point: TensorType
) -> None:
tr = TrustRegion(rule)
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = True
previous_state = TrustRegion.State(search_space, eps, previous_y_min, is_global)
current_state, query_point = tr.acquire(
search_space,
{OBJECTIVE: QuadraticMeanAndRBFKernel()},
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
assert current_state.is_global
npt.assert_array_almost_equal(query_point, expected_query_point, 5)
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
@pytest.mark.parametrize(
"rule",
[
EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),
_Midpoint(),
],
)
def test_trust_region_for_unsuccessful_global_to_local_trust_region_unchanged(
rule: AcquisitionRule[TensorType, Box, ProbabilisticModel]
) -> None:
tr = TrustRegion(rule)
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = True
acquisition_space = search_space
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
current_state, query_point = tr.acquire(
search_space,
{OBJECTIVE: QuadraticMeanAndRBFKernel()},
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.eps, previous_state.eps)
assert not current_state.is_global
npt.assert_array_less(lower_bound, current_state.acquisition_space.lower)
npt.assert_array_less(current_state.acquisition_space.upper, upper_bound)
assert query_point[0] in current_state.acquisition_space
@pytest.mark.parametrize(
"rule",
[
EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),
_Midpoint(),
],
)
def test_trust_region_for_successful_local_to_global_trust_region_increased(
rule: AcquisitionRule[TensorType, Box, ProbabilisticModel]
) -> None:
tr = TrustRegion(rule)
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.3]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = False
acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
current_state, _ = tr.acquire(
search_space,
{OBJECTIVE: QuadraticMeanAndRBFKernel()},
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_less(previous_state.eps, current_state.eps) # current TR larger than previous
assert current_state.is_global
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(current_state.acquisition_space.upper, upper_bound)
@pytest.mark.parametrize(
"rule",
[
EfficientGlobalOptimization(NegativeLowerConfidenceBound(0)),
_Midpoint(),
],
)
def test_trust_region_for_unsuccessful_local_to_global_trust_region_reduced(
rule: AcquisitionRule[TensorType, Box, ProbabilisticModel]
) -> None:
tr = TrustRegion(rule)
dataset = Dataset(tf.constant([[0.1, 0.2], [-0.1, -0.2]]), tf.constant([[0.4], [0.5]]))
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
eps = 0.5 * (search_space.upper - search_space.lower) / 10
previous_y_min = dataset.observations[0]
is_global = False
acquisition_space = Box(dataset.query_points[0] - eps, dataset.query_points[0] + eps)
previous_state = TrustRegion.State(acquisition_space, eps, previous_y_min, is_global)
current_state, _ = tr.acquire(
search_space,
{OBJECTIVE: QuadraticMeanAndRBFKernel()},
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_less(current_state.eps, previous_state.eps) # current TR smaller than previous
assert current_state.is_global
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
def test_trust_region_state_deepcopy() -> None:
tr_state = TrustRegion.State(
Box(tf.constant([1.2]), tf.constant([3.4])), tf.constant(5.6), tf.constant(7.8), False
)
tr_state_copy = copy.deepcopy(tr_state)
npt.assert_allclose(tr_state_copy.acquisition_space.lower, tr_state.acquisition_space.lower)
npt.assert_allclose(tr_state_copy.acquisition_space.upper, tr_state.acquisition_space.upper)
npt.assert_allclose(tr_state_copy.eps, tr_state.eps)
npt.assert_allclose(tr_state_copy.y_min, tr_state.y_min)
assert tr_state_copy.is_global == tr_state.is_global
@pytest.mark.parametrize("datasets", [{}, {"foo": empty_dataset([1], [1])}])
@pytest.mark.parametrize(
"models",
[
{},
{"foo": QuadraticMeanAndRBFKernelWithSamplers(empty_dataset([1], [1]))},
{OBJECTIVE: QuadraticMeanAndRBFKernelWithSamplers(empty_dataset([1], [1]))},
],
)
def test_turbo_raises_for_missing_datasets_key(
datasets: Mapping[Tag, Dataset], models: Mapping[Tag, TrainableSupportsGetKernel]
) -> None:
search_space = Box([-1], [1])
rule = TURBO(search_space)
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
@pytest.mark.parametrize("num_trust_regions", [-1, 0, 10])
def test_turbo_raises_for_invalid_num_trust_regions(num_trust_regions: int) -> None:
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
if num_trust_regions == 10: # to be removed once we enable multiple trust regions
with pytest.raises(NotImplementedError):
TURBO(search_space, num_trust_regions=num_trust_regions)
else:
with pytest.raises(ValueError):
TURBO(search_space, num_trust_regions=num_trust_regions)
@pytest.mark.parametrize(
"L_init, L_max, L_min, failure_tolerance, success_tolerance",
[
(-1.0, 0.1, 1.0, 1, 1),
(10.0, -1.0, 1.0, 1, 1),
(10.0, 1.0, -4.0, 1, 1),
(10.0, 1.0, 4.0, -1, 2),
(10.0, 1.0, 4.0, 1, -1),
],
)
def test_turbo_raises_for_invalid_trust_region_params(
L_init: float,
L_max: float,
L_min: float,
failure_tolerance: int,
success_tolerance: int,
) -> None:
lower_bound = tf.constant([-2.2, -1.0])
upper_bound = tf.constant([1.3, 3.3])
search_space = Box(lower_bound, upper_bound)
with pytest.raises(ValueError):
TURBO(
search_space,
L_init=L_init,
L_max=L_max,
L_min=L_min,
failure_tolerance=failure_tolerance,
success_tolerance=success_tolerance,
)
def test_turbo_heuristics_for_param_init_work() -> None:
lower_bound = tf.constant([-2.0] * 20)
upper_bound = tf.constant([1.0] * 20)
search_space = Box(lower_bound, upper_bound)
rule = TURBO(search_space)
assert rule._L_init == 0.8 * 3.0
assert rule._L_min == (0.5**7) * 3.0
assert rule._L_max == 1.6 * 3.0
assert rule._failure_tolerance == 20
assert isinstance(rule._rule, DiscreteThompsonSampling)
assert rule._rule._num_search_space_samples == 2_000
assert rule._local_models is None
rule = TURBO(search_space, rule=EfficientGlobalOptimization())
assert isinstance(rule._rule, EfficientGlobalOptimization)
def test_turbo_acquire_uses_and_updates_correct_local_model() -> None:
dataset_1 = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
dataset_2 = Dataset(
tf.constant([[10.0, 10.0]], dtype=tf.float64), tf.constant([[1.012]], dtype=tf.float64)
)
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([20.0, 20.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
global_model = QuadraticMeanAndRBFKernelWithSamplers(
dataset_1, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
global_model.kernel = gpflow.kernels.RBF(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
    # if the user doesn't give a local model, then we refit the global model
tr = TURBO(search_space)
assert tr._local_models is None
_, _ = tr.acquire_single(search_space, global_model, dataset=dataset_2)(None)
assert tr._local_models is not None
assert isinstance( # type: ignore[unreachable]
tr._local_models[OBJECTIVE].kernel, gpflow.kernels.RBF
)
npt.assert_array_equal(tr._local_models[OBJECTIVE]._dataset[0], dataset_2.query_points)
    # if the user gives a local model, then we use that one
local_model = QuadraticMeanAndRBFKernelWithSamplers(
dataset_1, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
local_model.kernel = gpflow.kernels.Matern52(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
tr = TURBO(search_space, local_models={OBJECTIVE: local_model})
assert isinstance(tr._local_models[OBJECTIVE].get_kernel(), gpflow.kernels.Matern52)
_, _ = tr.acquire_single(search_space, global_model, dataset=dataset_2)(None)
# check updated correct model
assert isinstance(tr._local_models[OBJECTIVE].get_kernel(), gpflow.kernels.Matern52)
npt.assert_array_equal(tr._local_models[OBJECTIVE]._dataset[0], dataset_2.query_points)
@pytest.mark.parametrize("num_query_points", [1, 2])
def test_turbo_acquire_returns_correct_shape(num_query_points: int) -> None:
dataset = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([1.0, 1.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
rule = DiscreteThompsonSampling(1_000, num_query_points)
tr = TURBO(search_space, rule=rule)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
model.kernel = gpflow.kernels.RBF(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
_, query_points = tr.acquire_single(search_space, model, dataset=dataset)(None)
npt.assert_array_equal(tf.shape(query_points), [num_query_points, 2])
@random_seed
def test_turbo_for_default_state() -> None:
dataset = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([1.0, 1.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
tr = TURBO(search_space, rule=DiscreteThompsonSampling(100, 1))
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
model.kernel = gpflow.kernels.RBF(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
state, query_point = tr.acquire_single(search_space, model, dataset=dataset)(None)
assert state is not None
npt.assert_array_almost_equal(state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
state.acquisition_space.upper, tf.constant([0.8, 0.2], dtype=tf.float64)
)
npt.assert_array_almost_equal(state.y_min, [0.012])
npt.assert_array_almost_equal(state.L, tf.cast(0.8, dtype=tf.float64))
assert state.success_counter == 0
assert state.failure_counter == 0
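# TURBO trust-region size dynamics exercised below (as encoded in the test
# assertions): after `success_tolerance` consecutive successes L is doubled and
# the counters reset; after `failure_tolerance` consecutive failures L is
# halved; and if L falls below L_min the region restarts at L_init.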
def test_turbo_doesnt_change_size_unless_needed() -> None:
dataset = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
models = {
OBJECTIVE: QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
}
models[OBJECTIVE].kernel = gpflow.kernels.RBF(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([1.0, 1.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
tr = TURBO(search_space)
# success but not enough to trigger size change
previous_y_min = dataset.observations[0] + 2.0 # force success
for failure_counter in [0, 1]:
for success_counter in [0, 1]:
previous_state = TURBO.State(
search_space,
tf.constant(0.8, dtype=tf.float64),
failure_counter,
success_counter,
previous_y_min,
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(0.8, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([0.8, 0.2], dtype=tf.float64)
)
assert current_state.success_counter == success_counter + 1
assert current_state.failure_counter == 0
# failure but not enough to trigger size change
previous_y_min = dataset.observations[0] # force failure
for success_counter in [0, 1, 2]:
previous_state = TURBO.State(
search_space, tf.constant(0.8, dtype=tf.float64), 0, success_counter, previous_y_min
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(0.8, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([0.8, 0.2], dtype=tf.float64)
)
assert current_state.success_counter == 0
assert current_state.failure_counter == 1
def test_turbo_does_change_size_correctly_when_needed() -> None:
dataset = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
models = {
OBJECTIVE: QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
}
models[OBJECTIVE].kernel = gpflow.kernels.RBF(
lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64), variance=1e-5
) # need a gpflow kernel for TURBO
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([1.0, 1.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
tr = TURBO(search_space, failure_tolerance=2)
# hits success limit
previous_y_min = dataset.observations[0] + 2.0 # force success
for failure_counter in [0, 1]:
previous_state = TURBO.State(
search_space, tf.constant(0.8, dtype=tf.float64), failure_counter, 2, previous_y_min
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(1.6, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([1.0, 0.4], dtype=tf.float64)
)
assert current_state.success_counter == 0
assert current_state.failure_counter == 0
# hits failure limit
previous_y_min = dataset.observations[0] # force failure
for success_counter in [0, 1, 2]:
previous_state = TURBO.State(
search_space, tf.constant(0.8, dtype=tf.float64), 1, success_counter, previous_y_min
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(0.4, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([0.4, 0.1], dtype=tf.float64)
)
assert current_state.success_counter == 0
assert current_state.failure_counter == 0
def test_turbo_restarts_tr_when_too_small() -> None:
dataset = Dataset(
tf.constant([[0.0, 0.0]], dtype=tf.float64), tf.constant([[0.012]], dtype=tf.float64)
)
models = {
OBJECTIVE: QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1e-5, dtype=tf.float64)
)
}
models[OBJECTIVE].kernel = gpflow.kernels.RBF(
variance=1e-5, lengthscales=tf.constant([4.0, 1.0], dtype=tf.float64)
) # need a gpflow kernel for TURBO
lower_bound = tf.constant([0.0, 0.0], dtype=tf.float64)
upper_bound = tf.constant([1.0, 1.0], dtype=tf.float64)
search_space = Box(lower_bound, upper_bound)
tr = TURBO(search_space)
# first check what happens if L is too small from the start
previous_y_min = dataset.observations[0]
failure_counter = 1
success_counter = 1
L = tf.constant(1e-10, dtype=tf.float64)
previous_search_space = Box(lower_bound / 2.0, upper_bound / 5.0)
previous_state = TURBO.State(
previous_search_space, L, failure_counter, success_counter, previous_y_min
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(0.8, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([0.8, 0.2], dtype=tf.float64)
)
assert current_state.success_counter == 0
assert current_state.failure_counter == 0
    # secondly, check what happens if L becomes too small after a region shrink is triggered
previous_state = TURBO.State(
previous_search_space, 0.5**6 - 0.1, 1, success_counter, previous_y_min
)
current_state, _ = tr.acquire(
search_space,
models,
datasets={OBJECTIVE: dataset},
)(previous_state)
assert current_state is not None
npt.assert_array_almost_equal(current_state.L, tf.cast(0.8, dtype=tf.float64))
npt.assert_array_almost_equal(current_state.acquisition_space.lower, lower_bound)
npt.assert_array_almost_equal(
current_state.acquisition_space.upper, tf.constant([0.8, 0.2], dtype=tf.float64)
)
assert current_state.success_counter == 0
assert current_state.failure_counter == 0
def test_turbo_state_deepcopy() -> None:
tr_state = TURBO.State(
acquisition_space=Box(tf.constant([1.2]), tf.constant([3.4])),
L=0.8,
failure_counter=0,
success_counter=0,
y_min=tf.constant(7.8),
)
tr_state_copy = copy.deepcopy(tr_state)
npt.assert_allclose(tr_state_copy.acquisition_space.lower, tr_state.acquisition_space.lower)
npt.assert_allclose(tr_state_copy.acquisition_space.upper, tr_state.acquisition_space.upper)
npt.assert_allclose(tr_state_copy.L, tr_state.L)
npt.assert_allclose(tr_state_copy.failure_counter, tr_state.failure_counter)
npt.assert_allclose(tr_state_copy.success_counter, tr_state.success_counter)
npt.assert_allclose(tr_state_copy.y_min, tr_state.y_min)
def test_asynchronous_rule_state_pending_points() -> None:
pending_points = tf.constant([[1], [2], [3]])
state = AsynchronousRuleState(pending_points)
assert state.pending_points is not None
npt.assert_array_equal(pending_points, state.pending_points)
def test_asynchronous_rule_state_raises_incorrect_shape() -> None:
with pytest.raises(ValueError):
AsynchronousRuleState(tf.constant([1, 2]))
with pytest.raises(ValueError):
AsynchronousRuleState(tf.constant([[[1], [2]]]))
def test_asynchronous_rule_state_has_pending_points() -> None:
state = AsynchronousRuleState(None)
assert not state.has_pending_points
state = AsynchronousRuleState(tf.zeros([0, 2]))
assert not state.has_pending_points
pending_points = tf.constant([[1], [2], [3]])
state = AsynchronousRuleState(pending_points)
assert state.has_pending_points
def test_asynchronous_rule_remove_points_raises_shape_mismatch() -> None:
state = AsynchronousRuleState(tf.constant([[1], [2], [3]]))
with pytest.raises(ValueError):
state.remove_points(tf.constant([[1, 1]]))
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))
with pytest.raises(ValueError):
state.remove_points(tf.constant([[1]]))
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))
with pytest.raises(ValueError):
state.remove_points(tf.constant([[[1, 1], [2, 2]]]))
def test_asynchronous_rule_state_remove_points() -> None:
# brace yourself, there are many test cases here
pending_points = tf.constant([[1], [2], [3]])
# first
state = AsynchronousRuleState(pending_points)
state = state.remove_points(tf.constant([[1]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[2], [3]])
# neither first nor last
state = AsynchronousRuleState(pending_points)
state = state.remove_points(tf.constant([[2]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [3]])
# last
state = AsynchronousRuleState(pending_points)
state = state.remove_points(tf.constant([[3]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [2]])
# unknown point, nothing to remove
state = AsynchronousRuleState(pending_points)
state = state.remove_points(tf.constant([[4]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [2], [3]])
    # duplicated pending points - only remove one occurrence
state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))
state = state.remove_points(tf.constant([[2]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [3], [2]])
# duplicated pending points - remove a dupe and not a dupe
state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))
state = state.remove_points(tf.constant([[2], [3]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [2]])
# duplicated pending points - remove both dupes
state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))
state = state.remove_points(tf.constant([[2], [2]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [3]])
# duplicated pending points - dupe, not a dupe, unknown point
state = AsynchronousRuleState(tf.constant([[1], [2], [3], [2]]))
state = state.remove_points(tf.constant([[2], [3], [4]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [2]])
# remove from empty
state = AsynchronousRuleState(None)
state = state.remove_points(tf.constant([[2]]))
assert not state.has_pending_points
# remove all
state = AsynchronousRuleState(pending_points)
state = state.remove_points(pending_points)
assert not state.has_pending_points
# bigger last dimension
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 3]]))
state = state.remove_points(tf.constant([[1, 1], [2, 2], [3, 3], [1, 2]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[2, 3]])
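# Illustrative pure-Python sketch of the one-occurrence removal semantics the
# tests above encode (this is not AsynchronousRuleState's implementation, which
# operates on tensors):
def _sketch_remove_one_occurrence(
    pending: list[list[int]], to_remove: list[list[int]]
) -> list[list[int]]:
    remaining = list(pending)
    for point in to_remove:
        if point in remaining:
            remaining.remove(point)  # drops only the first matching occurrence
    return remaining  # e.g. pending=[[1], [2], [3], [2]], to_remove=[[2]] -> [[1], [3], [2]]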
def test_asynchronous_rule_add_pending_points_raises_shape_mismatch() -> None:
state = AsynchronousRuleState(tf.constant([[1], [2], [3]]))
with pytest.raises(ValueError):
state.add_pending_points(tf.constant([[1, 1]]))
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))
with pytest.raises(ValueError):
state.add_pending_points(tf.constant([[1]]))
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))
with pytest.raises(ValueError):
state.add_pending_points(tf.constant([[[1, 1], [2, 2]]]))
def test_asynchronous_rule_add_pending_points() -> None:
state = AsynchronousRuleState(None)
state = state.add_pending_points(tf.constant([[1]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1]])
state = AsynchronousRuleState(tf.constant([[1], [2]]))
state = state.add_pending_points(tf.constant([[1]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1], [2], [1]])
state = AsynchronousRuleState(tf.constant([[1, 1], [2, 2]]))
state = state.add_pending_points(tf.constant([[3, 3], [4, 4]]))
assert state.pending_points is not None
npt.assert_array_equal(state.pending_points, [[1, 1], [2, 2], [3, 3], [4, 4]])
@pytest.mark.parametrize(
"batch_size,ga_population_size,ga_n_generations,filter_threshold",
[
(-2, 500, 200, 0.1),
(0, 500, 200, 0.1),
(10, -2, 200, 0.1),
(10, 0, 200, 0.1),
(10, 500, -2, 0.1),
(10, 500, 0, 0.1),
(10, 500, 200, -0.1),
(10, 500, 200, 1.1),
],
)
@pytest.mark.qhsri
def test_qhsri_raises_invalid_parameters(
batch_size: int, ga_population_size: int, ga_n_generations: int, filter_threshold: float
) -> None:
with pytest.raises(ValueError):
BatchHypervolumeSharpeRatioIndicator(
batch_size, ga_population_size, ga_n_generations, filter_threshold
)
@pytest.mark.parametrize(
"models",
[
{},
{"foo": QuadraticMeanAndRBFKernel()},
{"foo": QuadraticMeanAndRBFKernel(), OBJECTIVE: QuadraticMeanAndRBFKernel()},
],
)
@pytest.mark.parametrize("datasets", [{}, {OBJECTIVE: empty_dataset([1], [1])}])
@pytest.mark.qhsri
def test_qhsri_raises_for_invalid_models_keys(
datasets: dict[Tag, Dataset], models: dict[Tag, ProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
rule = BatchHypervolumeSharpeRatioIndicator()
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
@pytest.mark.parametrize("models", [{}, {OBJECTIVE: QuadraticMeanAndRBFKernel()}])
@pytest.mark.parametrize(
"datasets",
[
{},
{"foo": empty_dataset([1], [1])},
{"foo": empty_dataset([1], [1]), OBJECTIVE: empty_dataset([1], [1])},
],
)
@pytest.mark.qhsri
def test_qhsri_raises_for_invalid_dataset_keys(
datasets: dict[Tag, Dataset], models: dict[Tag, ProbabilisticModel]
) -> None:
search_space = Box([-1], [1])
rule = BatchHypervolumeSharpeRatioIndicator()
with pytest.raises(ValueError):
rule.acquire(search_space, models, datasets=datasets)
| 50,990 | 37.71754 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/test_interface.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Iterator, List, Mapping, Optional, Tuple, cast
import pytest
from tests.util.misc import empty_dataset, raise_exc
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel
from trieste.acquisition import (
AugmentedExpectedImprovement,
BatchMonteCarloExpectedImprovement,
ExpectedConstrainedHypervolumeImprovement,
ExpectedConstrainedImprovement,
ExpectedHypervolumeImprovement,
ExpectedImprovement,
NegativeLowerConfidenceBound,
NegativePredictiveMean,
PredictiveVariance,
ProbabilityOfFeasibility,
)
from trieste.acquisition.interface import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
SingleModelAcquisitionBuilder,
SingleModelGreedyAcquisitionBuilder,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.models.interfaces import SupportsPredictJoint
from trieste.observer import OBJECTIVE
from trieste.types import Tag, TensorType
from trieste.utils import DEFAULTS
class _ArbitrarySingleBuilder(SingleModelAcquisitionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
return raise_exc
class _ArbitraryGreedySingleBuilder(SingleModelGreedyAcquisitionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
pending_points: Optional[TensorType] = None,
) -> AcquisitionFunction:
return raise_exc
def test_single_model_acquisition_builder_raises_immediately_for_wrong_key() -> None:
builder = _ArbitrarySingleBuilder().using("foo")
with pytest.raises(KeyError):
builder.prepare_acquisition_function(
{"bar": QuadraticMeanAndRBFKernel()}, datasets={"bar": empty_dataset([1], [1])}
)
def test_single_model_acquisition_builder_repr_includes_class_name() -> None:
builder = _ArbitrarySingleBuilder()
assert type(builder).__name__ in repr(builder)
def test_single_model_acquisition_builder_using_passes_on_correct_dataset_and_model() -> None:
class Builder(SingleModelAcquisitionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
model: ProbabilisticModel,
dataset: Optional[Dataset] = None,
) -> AcquisitionFunction:
assert dataset is data["foo"]
assert model is models["foo"]
return raise_exc
FOO: Tag = "foo"
BAR: Tag = "bar"
data = {FOO: empty_dataset([1], [1]), BAR: empty_dataset([1], [1])}
models = {FOO: QuadraticMeanAndRBFKernel(), BAR: QuadraticMeanAndRBFKernel()}
Builder().using(FOO).prepare_acquisition_function(models, datasets=data)
def test_single_model_greedy_acquisition_builder_raises_immediately_for_wrong_key() -> None:
builder = _ArbitraryGreedySingleBuilder().using("foo")
with pytest.raises(KeyError):
builder.prepare_acquisition_function(
{"bar": QuadraticMeanAndRBFKernel()}, {"bar": empty_dataset([1], [1])}, None
)
def test_single_model_greedy_acquisition_builder_repr_includes_class_name() -> None:
builder = _ArbitraryGreedySingleBuilder()
assert type(builder).__name__ in repr(builder)
@pytest.mark.parametrize(
"function, function_repr",
cast(
        List[Tuple[SingleModelAcquisitionBuilder[SupportsPredictJoint], str]],
[
(ExpectedImprovement(), "ExpectedImprovement(None)"),
(AugmentedExpectedImprovement(), "AugmentedExpectedImprovement()"),
(NegativeLowerConfidenceBound(1.96), "NegativeLowerConfidenceBound(1.96)"),
(NegativePredictiveMean(), "NegativePredictiveMean()"),
(ProbabilityOfFeasibility(0.5), "ProbabilityOfFeasibility(0.5)"),
(
ExpectedHypervolumeImprovement(),
"ExpectedHypervolumeImprovement(get_reference_point)",
),
(
BatchMonteCarloExpectedImprovement(10_000),
f"BatchMonteCarloExpectedImprovement(10000, jitter={DEFAULTS.JITTER})",
),
(PredictiveVariance(), f"PredictiveVariance(jitter={DEFAULTS.JITTER})"),
],
),
)
def test_single_model_acquisition_function_builder_reprs(
function: SingleModelAcquisitionBuilder[SupportsPredictJoint], function_repr: str
) -> None:
assert repr(function) == function_repr
assert repr(function.using("TAG")) == f"{function_repr} using tag 'TAG'"
assert (
repr(ExpectedConstrainedImprovement("TAG", function.using("TAG"), 0.0))
== f"ExpectedConstrainedImprovement('TAG', {function_repr} using tag 'TAG', 0.0, None)"
)
assert (
repr(ExpectedConstrainedHypervolumeImprovement("TAG", function.using("TAG"), 0.0))
== f"ExpectedConstrainedHypervolumeImprovement('TAG', "
f"{function_repr} using tag 'TAG', 0.0, get_reference_point)"
)
class CustomDatasets(Mapping[Tag, Dataset]):
"""Custom dataset mapping to show that we can store metadata in the datasets argument."""
def __init__(self, datasets: Mapping[Tag, Dataset], iteration_id: int):
self.iteration_id = iteration_id
self._datasets = dict(datasets)
def __getitem__(self, key: Tag) -> Dataset:
return self._datasets[key]
def __setitem__(self, key: Tag, value: Dataset) -> None:
self._datasets[key] = value
def __delitem__(self, key: Tag) -> None:
del self._datasets[key]
def __iter__(self) -> Iterator[Tag]:
return iter(self._datasets)
def __len__(self) -> int:
return len(self._datasets)
def test_custom_dataset_mapping() -> None:
"""
Check that the datasets argument can be an arbitrary Mapping[Tag, Dataset], not just
a dict. In particular, check that we can store metadata there and retrieve it in the
acquisition function.
"""
class _CustomData(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
assert datasets is not None
assert len(datasets) == 1
assert set(datasets) == {OBJECTIVE}
assert len(datasets[OBJECTIVE]) == 0
assert "FOO" not in datasets
assert isinstance(datasets, CustomDatasets)
assert datasets.iteration_id == 2
return raise_exc
data = CustomDatasets({OBJECTIVE: empty_dataset([1], [1])}, 2)
models = {OBJECTIVE: QuadraticMeanAndRBFKernel()}
_CustomData().prepare_acquisition_function(models, data)
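# Usage sketch: because the `datasets` parameter is typed as Mapping[Tag, Dataset]
# rather than dict, a custom mapping such as CustomDatasets above can carry
# per-iteration metadata (here an iteration id) all the way into
# prepare_acquisition_function without any change to the builder API.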
| 7,441 | 36.396985 | 95 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/test_sampler.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, quadratic, random_seed
from tests.util.models.gpflow.models import (
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
)
from trieste.acquisition.sampler import (
ExactThompsonSampler,
GumbelSampler,
ThompsonSamplerFromTrajectory,
)
from trieste.data import Dataset
from trieste.space import Box
@pytest.mark.parametrize("sample_size", [0, -2])
def test_gumbel_sampler_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
GumbelSampler(sample_min_value=True).sample(
QuadraticMeanAndRBFKernel(), sample_size, tf.zeros((100, 1))
)
@pytest.mark.parametrize("shape", [[], [1], [2], [1, 2, 3]])
def test_gumbel_sampler_sample_raises_for_invalid_at_shape(
shape: ShapeLike,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
GumbelSampler(sample_min_value=True).sample(QuadraticMeanAndRBFKernel(), 1, tf.zeros(shape))
@pytest.mark.parametrize("sample_size", [10, 100])
def test_gumbel_sampler_returns_correctly_shaped_samples(sample_size: int) -> None:
search_space = Box([0, 0], [1, 1])
gumbel_sampler = GumbelSampler(sample_min_value=True)
query_points = search_space.sample(5)
gumbel_samples = gumbel_sampler.sample(QuadraticMeanAndRBFKernel(), sample_size, query_points)
tf.debugging.assert_shapes([(gumbel_samples, [sample_size, 1])])
def test_gumbel_samples_are_minima() -> None:
search_space = Box([0, 0], [1, 1])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernel()
gumbel_sampler = GumbelSampler(sample_min_value=True)
query_points = search_space.sample(100)
query_points = tf.concat([dataset.query_points, query_points], 0)
gumbel_samples = gumbel_sampler.sample(model, 5, query_points)
fmean, _ = model.predict(dataset.query_points)
assert max(gumbel_samples) < min(fmean)
@pytest.mark.parametrize("sample_size", [0, -2])
def test_exact_thompson_sampler_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExactThompsonSampler().sample(QuadraticMeanAndRBFKernel(), sample_size, tf.zeros([100, 1]))
@pytest.mark.parametrize("shape", [[], [1], [2], [1, 2, 3]])
def test_exact_thompson_sampler_sample_raises_for_invalid_at_shape(
shape: ShapeLike,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExactThompsonSampler().sample(QuadraticMeanAndRBFKernel(), 5, tf.zeros(shape))
@pytest.mark.parametrize("sample_min_value", [True, False])
@pytest.mark.parametrize("sample_size", [10, 100])
def test_exact_thompson_sampler_returns_correctly_shaped_samples(
sample_min_value: bool, sample_size: int
) -> None:
search_space = Box([0, 0], [1, 1])
thompson_sampler = ExactThompsonSampler(sample_min_value=sample_min_value)
query_points = search_space.sample(500)
thompson_samples = thompson_sampler.sample(
QuadraticMeanAndRBFKernel(), sample_size, query_points
)
if sample_min_value:
tf.debugging.assert_shapes([(thompson_samples, [sample_size, 1])])
else:
tf.debugging.assert_shapes([(thompson_samples, [sample_size, 2])])
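# Note the two output contracts checked above: with sample_min_value=True the
# sampler returns sampled minimum *values* of shape [S, 1]; otherwise it returns
# sampled minimiser *points* of shape [S, D] (here D = 2).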
def test_exact_thompson_samples_are_minima() -> None:
search_space = Box([0, 0], [1, 1])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernel()
thompson_sampler = ExactThompsonSampler(sample_min_value=True)
query_points = search_space.sample(100)
query_points = tf.concat([dataset.query_points, query_points], 0)
thompson_samples = thompson_sampler.sample(model, 5, query_points)
fmean, _ = model.predict(dataset.query_points)
assert max(thompson_samples) < min(fmean)
@pytest.mark.parametrize("sample_size", [0, -2])
def test_thompson_trajectory_sampler_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
dataset = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
model = QuadraticMeanAndRBFKernelWithSamplers(dataset=dataset)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ThompsonSamplerFromTrajectory().sample(model, sample_size, tf.zeros([100, 1]))
@pytest.mark.parametrize("shape", [[], [1], [2], [1, 2, 3]])
def test_thompson_trajectory_sampler_sample_raises_for_invalid_at_shape(
shape: ShapeLike,
) -> None:
dataset = Dataset(
tf.constant([[-2.0]], dtype=tf.float64), tf.constant([[4.1]], dtype=tf.float64)
)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
sampler = ThompsonSamplerFromTrajectory()
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
sampler.sample(model, 1, tf.zeros(shape))
@pytest.mark.parametrize("sample_min_value", [True, False])
@pytest.mark.parametrize("sample_size", [10, 100])
def test_thompson_trajectory_sampler_returns_correctly_shaped_samples(
sample_min_value: bool, sample_size: int
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
sampler = ThompsonSamplerFromTrajectory(sample_min_value=sample_min_value)
query_points = search_space.sample(100)
thompson_samples = sampler.sample(model, sample_size, query_points)
if sample_min_value:
tf.debugging.assert_shapes([(thompson_samples, [sample_size, 1])])
else:
tf.debugging.assert_shapes([(thompson_samples, [sample_size, 2])])
@random_seed
def test_thompson_trajectory_samples_are_minima() -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1e-10, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
sampler = ThompsonSamplerFromTrajectory(sample_min_value=True)
query_points = search_space.sample(1000)
query_points = tf.concat([dataset.query_points, query_points], 0)
thompson_samples = sampler.sample(model, 1, query_points)
fmean, _ = model.predict(dataset.query_points)
assert max(thompson_samples) < min(fmean)
| 8,096 | 36.486111 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/acquisition/test_combination.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Mapping, Sequence
from typing import Optional
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import empty_dataset, raise_exc
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.combination import Map, Product, Reducer, Sum
from trieste.acquisition.rule import AcquisitionFunctionBuilder
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.types import Tag
# tags
TAG: Tag = ""
def test_reducer_raises_for_no_builders() -> None:
class UseFirst(Reducer[ProbabilisticModel]):
def _reduce(self, inputs: Sequence[tf.Tensor]) -> tf.Tensor:
return inputs[0]
with pytest.raises(tf.errors.InvalidArgumentError):
UseFirst()
def test_reducer__repr_builders() -> None:
class Dummy(Reducer[ProbabilisticModel]):
_reduce = raise_exc
class Builder(AcquisitionFunctionBuilder[ProbabilisticModel]):
def __init__(self, name: str):
self._name = name
def __repr__(self) -> str:
return f"Builder({self._name!r})"
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return raise_exc
assert repr(Dummy(Builder("foo"))) == "Dummy(Builder('foo'))"
assert repr(Dummy(Builder("foo"), Builder("bar"))) == "Dummy(Builder('foo'), Builder('bar'))"
class _Static(AcquisitionFunctionBuilder[ProbabilisticModel]):
def __init__(self, f: AcquisitionFunction):
self._f = f
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return self._f
def update_acquisition_function(
self,
function: AcquisitionFunction,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: function(x) + 1
def test_reducer__reduce() -> None:
class Mean(Reducer[ProbabilisticModel]):
def _reduce(self, inputs: Sequence[tf.Tensor]) -> tf.Tensor:
return tf.reduce_mean(inputs, axis=0)
mean = Mean(_Static(lambda x: -2.0 * x), _Static(lambda x: 3.0 * x))
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = mean.prepare_acquisition_function(models, datasets=data)
xs = tf.random.uniform([3, 5, 1], minval=-1.0)
npt.assert_allclose(acq(xs), 0.5 * xs)
def test_sum() -> None:
sum_ = Sum(_Static(lambda x: x), _Static(lambda x: x**2), _Static(lambda x: x**3))
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = sum_.prepare_acquisition_function(models, datasets=data)
xs = tf.random.uniform([3, 5, 1], minval=-1.0)
npt.assert_allclose(acq(xs), xs + xs**2 + xs**3)
def test_product() -> None:
prod = Product(_Static(lambda x: x + 1), _Static(lambda x: x + 2))
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = prod.prepare_acquisition_function(models, datasets=data)
xs = tf.random.uniform([3, 5, 1], minval=-1.0, dtype=tf.float64)
npt.assert_allclose(acq(xs), (xs + 1) * (xs + 2))
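# Illustrative sketch: reducers are themselves acquisition-function builders, so
# they nest; the expected values below follow from the Sum and Product
# definitions exercised above.
def _sketch_nested_reducers() -> None:
    nested = Sum(
        Product(_Static(lambda x: x + 1), _Static(lambda x: x + 2)),
        _Static(lambda x: x),
    )
    data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
    acq = nested.prepare_acquisition_function(models, datasets=data)
    xs = tf.random.uniform([3, 5, 1], minval=-1.0, dtype=tf.float64)
    npt.assert_allclose(acq(xs), (xs + 1) * (xs + 2) + xs)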
def test_reducer_calls_update() -> None:
prod = Product(_Static(lambda x: x + 1), _Static(lambda x: x + 2))
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = prod.prepare_acquisition_function(models, datasets=data)
acq = prod.update_acquisition_function(acq, models, datasets=data)
xs = tf.random.uniform([3, 5, 1], minval=-1.0, dtype=tf.float64)
npt.assert_allclose(acq(xs), (xs + 2) * (xs + 3))
@pytest.mark.parametrize("reducer_class", [Sum, Product])
def test_sum_and_product_for_single_builder(
reducer_class: type[Sum[ProbabilisticModel] | Product[ProbabilisticModel]],
) -> None:
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = reducer_class(_Static(lambda x: x**2)).prepare_acquisition_function(
models, datasets=data
)
xs = tf.random.uniform([3, 5, 1], minval=-1.0)
npt.assert_allclose(acq(xs), xs**2)
def test_map() -> None:
prod = Map(lambda x: x + 1, _Static(lambda x: x + 2))
data, models = {TAG: empty_dataset([1], [1])}, {TAG: QuadraticMeanAndRBFKernel()}
acq = prod.prepare_acquisition_function(models, datasets=data)
xs = tf.random.uniform([3, 5, 1], minval=-1.0, dtype=tf.float64)
npt.assert_allclose(acq(xs), (xs + 3))
| 5,411 | 37.112676 | 97 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/test_utils.py | # Copyright 2022 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Any
from unittest.mock import MagicMock
import numpy as np
import pytest
import tensorflow as tf
from trieste.acquisition import AcquisitionFunction
from trieste.acquisition.utils import (
get_local_dataset,
select_nth_output,
split_acquisition_function,
)
from trieste.data import Dataset
from trieste.space import Box, SearchSpaceType
@pytest.mark.parametrize(
"f",
[
lambda x: x**2,
lambda x: tf.cast(x, tf.float64),
],
)
@pytest.mark.parametrize(
"x, split_size, expected_batches",
[
(np.zeros((0,)), 2, 1),
(np.array([1]), 2, 1),
(np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]), 2, 6),
(np.array([1, 2, 3, 4]), 3, 2),
(np.array([1, 2, 3, 4]), 4, 1),
(np.array([1, 2, 3, 4]), 1, 4),
(np.array([1, 2, 3, 4]), 10, 1),
],
)
def test_split_acquisition_function(
f: AcquisitionFunction, x: "np.ndarray[Any, Any]", split_size: int, expected_batches: int
) -> None:
mock_f = MagicMock()
mock_f.side_effect = f
batch_f = split_acquisition_function(mock_f, split_size=split_size)
np.testing.assert_allclose(f(x), batch_f(x))
assert expected_batches == mock_f.call_count
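# The parametrisation above encodes expected_batches = max(1, ceil(len(x) / split_size)):
# even an empty input results in exactly one call to the wrapped function.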
@pytest.mark.parametrize("split_size", [0, -1])
def test_split_acquisition_function__invalid_split_size(split_size: int) -> None:
with pytest.raises(ValueError):
split_acquisition_function(MagicMock(), split_size=split_size)
def test_select_nth_output() -> None:
a = tf.random.normal([5, 6])
assert np.all(select_nth_output(a) == a[..., 0])
assert np.all(select_nth_output(a, 3) == a[..., 3])
@pytest.mark.parametrize(
"space, dataset",
[
(Box([0], [1]), Dataset(tf.constant([[0, 1], [0, 1]]), tf.constant([[1], [1]]))),
(Box([0, 0], [1, 1]), Dataset(tf.constant([[1], [1]]), tf.constant([[1], [1]]))),
],
)
def test_get_local_dataset_raises_for_invalid_input(
space: SearchSpaceType, dataset: Dataset
) -> None:
with pytest.raises(ValueError):
get_local_dataset(space, dataset)
def test_get_local_dataset_works() -> None:
search_space_1 = Box([0, 0, 0], [1, 1, 1])
search_space_2 = Box([5, 5, 5], [10, 10, 10])
points_1 = search_space_1.sample(10)
points_2 = search_space_2.sample(20)
dataset_1 = Dataset(points_1, points_1[:, 0:1])
dataset_2 = Dataset(points_2, points_2[:, 0:1])
combined = dataset_1 + dataset_2
assert tf.shape(get_local_dataset(search_space_1, combined).query_points)[0] == 10
assert tf.shape(get_local_dataset(search_space_2, combined).query_points)[0] == 20
| 3,238 | 31.39 | 93 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_continuous_thompson_sampling.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import quadratic
from tests.util.models.gpflow.models import (
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
)
from trieste.acquisition.function.continuous_thompson_sampling import (
GreedyContinuousThompsonSampling,
ParallelContinuousThompsonSampling,
negate_trajectory_function,
)
from trieste.acquisition.function.function import lower_confidence_bound
from trieste.data import Dataset
from trieste.models import TrajectoryFunction, TrajectoryFunctionClass, TrajectorySampler
from trieste.models.gpflow import (
RandomFourierFeatureTrajectorySampler,
feature_decomposition_trajectory,
)
class DumbTrajectorySampler(RandomFourierFeatureTrajectorySampler):
"""A RandomFourierFeatureTrajectorySampler that doesn't update trajectories in place."""
def update_trajectory(self, trajectory: TrajectoryFunction) -> TrajectoryFunction:
tf.debugging.Assert(
isinstance(trajectory, feature_decomposition_trajectory), [tf.constant([])]
)
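        # return a fresh trajectory instead of mutating in place, to exercise the
        # builders' handling of samplers that do not support in-place updates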
return self.get_trajectory()
class ModelWithDumbSamplers(QuadraticMeanAndRBFKernelWithSamplers):
"""A model that uses DumbTrajectorySampler."""
def trajectory_sampler(self) -> TrajectorySampler[QuadraticMeanAndRBFKernelWithSamplers]:
return DumbTrajectorySampler(self, 100)
def test_greedy_thompson_sampling_raises_for_model_without_trajectory_sampler() -> None:
model = QuadraticMeanAndRBFKernel()
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
with pytest.raises(ValueError):
GreedyContinuousThompsonSampling().prepare_acquisition_function(model) # type: ignore
@pytest.mark.parametrize("dumb_samplers", [True, False])
def test_greedy_thompson_sampling_builder_builds_trajectory(dumb_samplers: bool) -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model_type = ModelWithDumbSamplers if dumb_samplers else QuadraticMeanAndRBFKernelWithSamplers
model = model_type(dataset, noise_variance=tf.constant(1.0, dtype=tf.float64))
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = GreedyContinuousThompsonSampling()
acq_fn = builder.prepare_acquisition_function(model)
assert isinstance(acq_fn, TrajectoryFunctionClass)
new_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert isinstance(new_acq_fn, TrajectoryFunctionClass)
@pytest.mark.parametrize("dumb_samplers", [True, False])
def test_greedy_thompson_sampling_builder_raises_when_update_with_wrong_function(
dumb_samplers: bool,
) -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model_type = ModelWithDumbSamplers if dumb_samplers else QuadraticMeanAndRBFKernelWithSamplers
model = model_type(dataset, noise_variance=tf.constant(1.0, dtype=tf.float64))
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = GreedyContinuousThompsonSampling()
builder.prepare_acquisition_function(model)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(lower_confidence_bound(model, 0.1), model)
def test_parallel_thompson_sampling_raises_for_model_without_trajectory_sampler() -> None:
model = QuadraticMeanAndRBFKernel()
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
with pytest.raises(ValueError):
ParallelContinuousThompsonSampling().prepare_acquisition_function(model) # type: ignore
@pytest.mark.parametrize("dumb_samplers", [True, False])
def test_parallel_thompson_sampling_builder_builds_trajectory(dumb_samplers: bool) -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model_type = ModelWithDumbSamplers if dumb_samplers else QuadraticMeanAndRBFKernelWithSamplers
model = model_type(dataset, noise_variance=tf.constant(1.0, dtype=tf.float64))
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = ParallelContinuousThompsonSampling()
acq_fn = builder.prepare_acquisition_function(model)
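    # parallel TS negates the sampled trajectory, since the acquisition optimizer
    # maximizes the acquisition while Thompson sampling seeks the trajectory's minimum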
assert isinstance(acq_fn, TrajectoryFunctionClass)
assert acq_fn.__class__.__name__ == "NegatedTrajectory"
new_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert isinstance(new_acq_fn, TrajectoryFunctionClass)
assert new_acq_fn.__class__.__name__ == "NegatedTrajectory"
@pytest.mark.parametrize("dumb_samplers", [True, False])
def test_parallel_thompson_sampling_builder_raises_when_update_with_wrong_function(
dumb_samplers: bool,
) -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model_type = ModelWithDumbSamplers if dumb_samplers else QuadraticMeanAndRBFKernelWithSamplers
model = model_type(dataset, noise_variance=tf.constant(1.0, dtype=tf.float64))
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = ParallelContinuousThompsonSampling()
builder.prepare_acquisition_function(model)
with pytest.raises(ValueError):
builder.update_acquisition_function(lower_confidence_bound(model, 0.1), model)
def test_parallel_thompson_sampling_raises_for_changing_batch_size() -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = ParallelContinuousThompsonSampling()
acq_fn = builder.prepare_acquisition_function(model)
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [10, 5, 2])
acq_fn(query_at)
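    # the trajectory has now been evaluated with batches of 5 points, so evaluating
    # it with a different batch size below should fail the trajectory's shape check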
with pytest.raises(tf.errors.InvalidArgumentError):
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [5, 10, 2])
acq_fn(query_at)
def test_negate_trajectory_function_negates_and_keeps_methods() -> None:
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset, noise_variance=tf.constant(1.0, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = ParallelContinuousThompsonSampling()
acq_fn = builder.prepare_acquisition_function(model)
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [10, 5, 2])
evals = acq_fn(query_at)
neg_acq_fn = negate_trajectory_function(acq_fn)
    neg_evals = neg_acq_fn(query_at)
npt.assert_array_equal(evals, -1.0 * neg_evals)
assert hasattr(neg_acq_fn, "update")
assert hasattr(neg_acq_fn, "resample")
| 8,695 | 42.263682 | 98 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_greedy_batch.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Callable, Mapping, Union
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.utilities import to_default_float
from gpflow.utilities.ops import leading_transpose
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, random_seed
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel, gpr_model
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.acquisition import (
ExpectedImprovement,
MinValueEntropySearch,
PenalizationFunction,
UpdatablePenalizationFunction,
)
from trieste.acquisition.function import NegativePredictiveMean, PredictiveVariance
from trieste.acquisition.function.greedy_batch import (
Fantasizer,
FantasizerModelOrStack,
FantasizerModelStack,
LocalPenalization,
_generate_fantasized_model,
hard_local_penalizer,
soft_local_penalizer,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.models.gpflow import GaussianProcessRegression
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import Tag, TensorType
def test_locally_penalized_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space=space).prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=data,
)
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space=space).prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
)
def test_locally_penalized_expected_improvement_builder_raises_for_invalid_num_samples() -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
LocalPenalization(search_space, num_samples=-5)
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_locally_penalized_expected_improvement_builder_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
space = Box([0, 0], [1, 1])
builder = LocalPenalization(search_space=space)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), data, pending_points)
@random_seed
@pytest.mark.parametrize(
"base_builder",
[
ExpectedImprovement(),
MinValueEntropySearch(Box([0, 0], [1, 1]), grid_size=10000, num_samples=10),
],
)
def test_locally_penalized_acquisitions_match_base_acquisition(
base_builder: ExpectedImprovement | MinValueEntropySearch[ProbabilisticModel],
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
model = QuadraticMeanAndRBFKernel()
lp_acq_builder = LocalPenalization(search_space, base_acquisition_function_builder=base_builder)
lp_acq = lp_acq_builder.prepare_acquisition_function(model, data, None)
base_acq = base_builder.prepare_acquisition_function(model, dataset=data)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
lp_acq_values = lp_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
if isinstance(base_builder, ExpectedImprovement):
npt.assert_array_equal(lp_acq_values, base_acq_values)
else: # check sampling-based acquisition functions are close
npt.assert_allclose(lp_acq_values, base_acq_values, atol=0.001)
@random_seed
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize(
"base_builder",
[ExpectedImprovement(), MinValueEntropySearch(Box([0, 0], [1, 1]), grid_size=5000)],
)
def test_locally_penalized_acquisitions_combine_base_and_penalization_correctly(
penalizer: Callable[..., Union[PenalizationFunction, UpdatablePenalizationFunction]],
base_builder: ExpectedImprovement | MinValueEntropySearch[ProbabilisticModel],
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
model = QuadraticMeanAndRBFKernel()
pending_points = tf.zeros([2, 2], dtype=tf.float64)
acq_builder = LocalPenalization(
search_space, penalizer=penalizer, base_acquisition_function_builder=base_builder
)
lp_acq = acq_builder.prepare_acquisition_function(model, data, None) # initialize
lp_acq = acq_builder.update_acquisition_function(lp_acq, model, data, pending_points[:1], False)
up_lp_acq = acq_builder.update_acquisition_function(lp_acq, model, data, pending_points, False)
assert up_lp_acq == lp_acq # in-place updates
base_acq = base_builder.prepare_acquisition_function(model, dataset=data)
best = acq_builder._eta
lipschitz_constant = acq_builder._lipschitz_constant
penalizer_value = penalizer(model, pending_points, lipschitz_constant, best)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
lp_acq_values = lp_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
penal_values = penalizer_value(xs[..., None, :])
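    # combine the base acquisition and the penalty in log space, i.e. base * penalty
    # computed as exp(log(base) + log(penalty)), mirroring the combination that the
    # LocalPenalization acquisition applies internally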
penalized_base_acq = tf.math.exp(tf.math.log(base_acq_values) + tf.math.log(penal_values))
if isinstance(base_builder, ExpectedImprovement):
npt.assert_array_equal(lp_acq_values, penalized_base_acq)
else: # check sampling-based acquisition functions are close
npt.assert_allclose(lp_acq_values, penalized_base_acq, atol=0.001)
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_lipschitz_penalizers_raises_for_invalid_batch_size(
at: TensorType,
penalizer: Callable[..., PenalizationFunction],
) -> None:
pending_points = tf.zeros([1, 2], dtype=tf.float64)
best = tf.constant([0], dtype=tf.float64)
lipschitz_constant = tf.constant([1], dtype=tf.float64)
lp = penalizer(QuadraticMeanAndRBFKernel(), pending_points, lipschitz_constant, best)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
lp(at)
@pytest.mark.parametrize("penalizer", [soft_local_penalizer, hard_local_penalizer])
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_lipschitz_penalizers_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
penalizer: Callable[..., PenalizationFunction],
) -> None:
best = tf.constant([0], dtype=tf.float64)
lipschitz_constant = tf.constant([1], dtype=tf.float64)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
penalizer(QuadraticMeanAndRBFKernel(), pending_points, lipschitz_constant, best)
def test_fantasized_expected_improvement_builder_raises_for_invalid_fantasize_method() -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
Fantasizer(ExpectedImprovement().using(OBJECTIVE), "notKB")
def test_fantasized_expected_improvement_builder_raises_for_invalid_model() -> None:
data = {
OBJECTIVE: Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 1], dtype=tf.float64))
}
models = {OBJECTIVE: QuadraticMeanAndRBFKernel()}
pending_points = tf.zeros([3, 2], dtype=tf.float64)
builder = Fantasizer()
with pytest.raises(NotImplementedError):
builder.prepare_acquisition_function(models, data, pending_points) # type: ignore
def test_fantasized_expected_improvement_builder_raises_for_invalid_observation_shape() -> None:
x = tf.zeros([3, 2], dtype=tf.float64)
y1 = tf.ones([3, 1], dtype=tf.float64)
y2 = tf.ones([3, 2], dtype=tf.float64)
data = {OBJECTIVE: Dataset(x, y1)}
models = {OBJECTIVE: GaussianProcessRegression(gpr_model(x, y2))}
pending_points = tf.zeros([3, 2], dtype=tf.float64)
builder = Fantasizer()
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(models, data, pending_points)
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_fantasized_expected_improvement_builder_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
) -> None:
x = tf.zeros([3, 2], dtype=tf.float64)
y = tf.ones([3, 1], dtype=tf.float64)
data = {OBJECTIVE: Dataset(x, y)}
models = {OBJECTIVE: GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer()
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(models, data, pending_points)
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
def test_fantasize_with_kriging_believer_does_not_change_negative_predictive_mean(
model_type: str,
) -> None:
x = to_default_float(tf.constant(np.arange(1, 6).reshape(-1, 1) / 5.0))
y = fnc_2sin_x_over_3(x)
x_test = to_default_float(tf.constant(np.arange(1, 13).reshape(-1, 1) / 12.0))[..., None]
pending_points = to_default_float(tf.constant([0.51, 0.81])[:, None])
data = {OBJECTIVE: Dataset(x, y)}
models: Mapping[Tag, FantasizerModelOrStack]
if model_type == "stack":
models = {OBJECTIVE: FantasizerModelStack((GaussianProcessRegression(gpr_model(x, y)), 1))}
else:
models = {OBJECTIVE: GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer(NegativePredictiveMean())
acq0 = builder.prepare_acquisition_function(models, data)
acq1 = builder.prepare_acquisition_function(models, data, pending_points)
acq_val0 = acq0(x_test)
acq_val1 = acq1(x_test)
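    # Kriging Believer fantasizes observations at the model's own predictive means,
    # so conditioning on them leaves the posterior mean (and hence the negative
    # predictive mean) unchanged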
tf.assert_equal(acq_val1, acq_val0)
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
@pytest.mark.parametrize("fantasize_method", ["KB", "sample"])
def test_fantasize_reduces_predictive_variance(model_type: str, fantasize_method: str) -> None:
x = to_default_float(tf.constant(np.arange(1, 6).reshape(-1, 1) / 5.0))
y = fnc_2sin_x_over_3(x)
x_test = to_default_float(tf.constant(np.arange(1, 13).reshape(-1, 1) / 12.0))[..., None]
pending_points = to_default_float(tf.constant([0.51, 0.81])[:, None])
data = {OBJECTIVE: Dataset(x, y)}
models: Mapping[Tag, FantasizerModelOrStack]
if model_type == "stack":
models = {OBJECTIVE: FantasizerModelStack((GaussianProcessRegression(gpr_model(x, y)), 1))}
else:
models = {OBJECTIVE: GaussianProcessRegression(gpr_model(x, y))}
builder = Fantasizer(PredictiveVariance(), fantasize_method=fantasize_method)
acq0 = builder.prepare_acquisition_function(models, data)
acq1 = builder.update_acquisition_function(acq0, models, data, pending_points[:1])
assert acq0._get_tracing_count() == 0 # type: ignore
assert acq1._get_tracing_count() == 0 # type: ignore
acq_val0 = acq0(x_test)
acq_val1 = acq1(x_test)
tf.assert_less(acq_val1, acq_val0)
# check we avoid retracing, both for the fantasized functions...
acq1_up = builder.update_acquisition_function(acq1, models, data, pending_points)
assert acq1_up == acq1 # in-place updates
acq1_up(x_test)
assert acq1_up._get_tracing_count() == 1 # type: ignore
# ...and the base functions
acq0_up = builder.update_acquisition_function(acq1, models, data)
assert acq0_up == acq0 # in-place updates
acq0_up(x_test)
assert acq0_up._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("model_type", ["gpr", "stack"])
def test_fantasize_allows_query_points_with_leading_dimensions(model_type: str) -> None:
x = to_default_float(tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)) # shape: [23, 1]
y = fnc_2sin_x_over_3(x)
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
additional_data = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y[5:, :], [3, 6, -1]))
query_points = to_default_float(tf.constant(np.arange(1, 21).reshape(-1, 1) / 20.0))[..., None]
query_points = tf.reshape(query_points, [4, 5, 1])
if model_type == "stack":
fanta_model5 = _generate_fantasized_model(
FantasizerModelStack((model5, 1)), additional_data
)
else:
fanta_model5 = _generate_fantasized_model(model5, additional_data)
num_samples = 100000
samples_fm5 = fanta_model5.sample(query_points, num_samples)
pred_f_mean_fm5, pred_f_var_fm5 = fanta_model5.predict(query_points)
pred_y_mean_fm5, pred_y_var_fm5 = fanta_model5.predict_y(query_points)
pred_j_mean_fm5, pred_j_cov_fm5 = fanta_model5.predict_joint(query_points)
tf.assert_equal(samples_fm5.shape, [4, 3, num_samples, 5, 1])
tf.assert_equal(pred_f_mean_fm5.shape, [4, 3, 5, 1])
tf.assert_equal(pred_f_var_fm5.shape, [4, 3, 5, 1])
tf.assert_equal(pred_j_cov_fm5.shape, [4, 3, 1, 5, 5])
np.testing.assert_allclose(pred_f_mean_fm5, pred_j_mean_fm5, atol=1e-5)
np.testing.assert_allclose(pred_f_mean_fm5, pred_y_mean_fm5, atol=1e-5)
samples_fm5_mean = tf.reduce_mean(samples_fm5, axis=-3)
samples_fm5_cov = tfp.stats.covariance(samples_fm5[..., 0], sample_axis=-2)
for j in range(3):
samples_m5 = model5.conditional_predict_f_sample(
query_points[j], additional_data, num_samples
)
pred_f_mean_m5, pred_f_var_m5 = model5.conditional_predict_f(
query_points[j], additional_data
)
pred_j_mean_m5, pred_j_cov_m5 = model5.conditional_predict_joint(
query_points[j], additional_data
)
pred_y_mean_m5, pred_y_var_m5 = model5.conditional_predict_y(
query_points[j], additional_data
)
sample_m5_mean = tf.reduce_mean(samples_m5, axis=1)
sample_m5_cov = tfp.stats.covariance(samples_m5[..., 0], sample_axis=1)
np.testing.assert_allclose(sample_m5_mean, samples_fm5_mean[j], atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(sample_m5_cov, samples_fm5_cov[j], atol=1e-2, rtol=1e-2)
np.testing.assert_allclose(pred_f_mean_m5, pred_f_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_y_mean_m5, pred_y_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_j_mean_m5, pred_j_mean_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_f_var_m5, pred_f_var_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_y_var_m5, pred_y_var_fm5[j], atol=1e-5)
np.testing.assert_allclose(pred_j_cov_m5, pred_j_cov_fm5[j], atol=1e-5)
def test_fantasized_stack_is_the_same_as_individually_fantasized() -> None:
x = to_default_float(tf.constant(np.arange(1, 24).reshape(-1, 1) / 8.0)) # shape: [23, 1]
y1 = fnc_2sin_x_over_3(x)
y2 = fnc_3x_plus_10(x)
model1 = GaussianProcessRegression(gpr_model(x[:5, :], y1[:5, :]))
model2 = GaussianProcessRegression(gpr_model(x[:5, :], y2[:5, :]))
stacked_models = FantasizerModelStack((model1, 1), (model2, 1))
additional_data1 = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y1[5:, :], [3, 6, -1]))
additional_data2 = Dataset(tf.reshape(x[5:, :], [3, 6, -1]), tf.reshape(y2[5:, :], [3, 6, -1]))
additional_data_stacked = Dataset(
tf.reshape(x[5:, :], [3, 6, -1]),
tf.reshape(tf.concat([y1[5:, :], y2[5:, :]], axis=-1), [3, 6, -1]),
)
query_points = to_default_float(tf.constant(np.arange(1, 21).reshape(-1, 1) / 20.0))[..., None]
query_points = tf.reshape(query_points, [4, 5, 1])
stack_fanta_model = _generate_fantasized_model(stacked_models, additional_data_stacked)
fanta_model1 = _generate_fantasized_model(model1, additional_data1)
fanta_model2 = _generate_fantasized_model(model2, additional_data2)
num_samples = 100000
samples_fm1 = fanta_model1.sample(query_points, num_samples)
pred_f_mean_fm1, pred_f_var_fm1 = fanta_model1.predict(query_points)
pred_y_mean_fm1, pred_y_var_fm1 = fanta_model1.predict_y(query_points)
pred_j_mean_fm1, pred_j_cov_fm1 = fanta_model1.predict_joint(query_points)
samples_fm2 = fanta_model2.sample(query_points, num_samples)
pred_f_mean_fm2, pred_f_var_fm2 = fanta_model2.predict(query_points)
pred_y_mean_fm2, pred_y_var_fm2 = fanta_model2.predict_y(query_points)
pred_j_mean_fm2, pred_j_cov_fm2 = fanta_model2.predict_joint(query_points)
samples_fms = stack_fanta_model.sample(query_points, num_samples)
pred_f_mean_fms, pred_f_var_fms = stack_fanta_model.predict(query_points)
pred_y_mean_fms, pred_y_var_fms = stack_fanta_model.predict_y(query_points)
pred_j_mean_fms, pred_j_cov_fms = stack_fanta_model.predict_joint(query_points)
np.testing.assert_equal(pred_f_mean_fms.shape, [4, 3, 5, 2])
np.testing.assert_equal(pred_f_var_fms.shape, [4, 3, 5, 2])
np.testing.assert_equal(pred_f_mean_fm1.shape, [4, 3, 5, 1])
np.testing.assert_equal(pred_f_var_fm1.shape, [4, 3, 5, 1])
np.testing.assert_equal(pred_j_cov_fms.shape, [4, 3, 2, 5, 5])
np.testing.assert_equal(pred_j_cov_fm1.shape, [4, 3, 1, 5, 5])
    np.testing.assert_equal(samples_fms.shape, [4, 3, num_samples, 5, 2])
    np.testing.assert_equal(samples_fm1.shape, [4, 3, num_samples, 5, 1])
np.testing.assert_allclose(
pred_f_mean_fms, tf.concat([pred_f_mean_fm1, pred_f_mean_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_y_mean_fms, tf.concat([pred_y_mean_fm1, pred_y_mean_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_j_mean_fms, tf.concat([pred_j_mean_fm1, pred_j_mean_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_f_var_fms, tf.concat([pred_f_var_fm1, pred_f_var_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_y_var_fms, tf.concat([pred_y_var_fm1, pred_y_var_fm2], axis=-1), atol=1e-5
)
np.testing.assert_allclose(
pred_j_cov_fms, tf.concat([pred_j_cov_fm1, pred_j_cov_fm2], axis=-3), atol=1e-5
)
sample_fms_mean = tf.reduce_mean(samples_fms, axis=2)
sample_fms_cov = tfp.stats.covariance(
leading_transpose(samples_fms, [..., -1, -2]), sample_axis=2
)
sample_fm1_mean = tf.reduce_mean(samples_fm1, axis=2)
sample_fm1_cov = tfp.stats.covariance(samples_fm1[..., 0], sample_axis=2, keepdims=True)
sample_fm2_mean = tf.reduce_mean(samples_fm2, axis=2)
sample_fm2_cov = tfp.stats.covariance(samples_fm2[..., 0], sample_axis=2, keepdims=True)
np.testing.assert_allclose(
sample_fms_mean,
tf.concat([sample_fm1_mean, sample_fm2_mean], axis=-1),
atol=1e-2,
rtol=1e-2,
)
np.testing.assert_allclose(
sample_fms_cov, tf.concat([sample_fm1_cov, sample_fm2_cov], axis=2), atol=1e-2, rtol=1e-2
)
| 19,735 | 42.471366 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_entropy.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest.mock
from unittest.mock import MagicMock
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, quadratic, random_seed
from tests.util.models.gpflow.models import (
GaussianProcess,
MultiFidelityQuadraticMeanAndRBFKernel,
MultiFidelityQuadraticMeanAndRBFKernelWithSamplers,
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithSamplers,
)
from trieste.acquisition.function.entropy import (
GIBBON,
MUMBO,
MinValueEntropySearch,
MUMBOModelType,
SupportsCovarianceObservationNoiseTrajectory,
gibbon_quality_term,
gibbon_repulsion_term,
min_value_entropy_search,
mumbo,
)
from trieste.acquisition.sampler import (
ExactThompsonSampler,
GumbelSampler,
ThompsonSampler,
ThompsonSamplerFromTrajectory,
)
from trieste.data import Dataset, add_fidelity_column
from trieste.models import SupportsCovarianceWithTopFidelity
from trieste.objectives import Branin
from trieste.space import Box
from trieste.types import TensorType
def test_min_value_entropy_search_builder_raises_for_empty_data() -> None:
empty_data = Dataset(tf.zeros([0, 2], dtype=tf.float64), tf.ones([0, 2], dtype=tf.float64))
non_empty_data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MinValueEntropySearch(search_space)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), dataset=empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel())
acq = builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), dataset=non_empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(acq, QuadraticMeanAndRBFKernel(), dataset=empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(acq, QuadraticMeanAndRBFKernel())
@pytest.mark.parametrize("param", [-2, 0])
def test_min_value_entropy_search_builder_raises_for_invalid_init_params(param: int) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
MinValueEntropySearch(search_space, num_samples=param)
with pytest.raises(tf.errors.InvalidArgumentError):
MinValueEntropySearch(search_space, grid_size=param)
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=False),
ThompsonSamplerFromTrajectory(sample_min_value=False),
],
)
def test_mes_raises_if_passed_sampler_with_sample_min_value_False(
sampler: ThompsonSampler[GaussianProcess],
) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(ValueError):
MinValueEntropySearch(search_space, min_value_sampler=sampler)
def test_mes_default_sampler_is_exact_thompson() -> None:
search_space = Box([0, 0], [1, 1])
builder = MinValueEntropySearch(search_space)
assert isinstance(builder._min_value_sampler, ExactThompsonSampler)
assert builder._min_value_sampler._sample_min_value
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=True),
GumbelSampler(sample_min_value=True),
ThompsonSamplerFromTrajectory(sample_min_value=True),
],
)
def test_mes_initialized_with_passed_sampler(sampler: ThompsonSampler[GaussianProcess]) -> None:
search_space = Box([0, 0], [1, 1])
builder = MinValueEntropySearch(search_space, min_value_sampler=sampler)
assert builder._min_value_sampler == sampler
def test_mes_raises_when_use_trajectory_sampler_and_model_without_trajectories() -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MinValueEntropySearch(
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
model = QuadraticMeanAndRBFKernel()
with pytest.raises(ValueError):
builder.prepare_acquisition_function(model, dataset=dataset) # type: ignore
@unittest.mock.patch("trieste.acquisition.function.entropy.min_value_entropy_search")
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_min_value_entropy_search_builder_builds_min_value_samples(
mocked_mves: MagicMock, min_value_sampler: ThompsonSampler[GaussianProcess]
) -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MinValueEntropySearch(search_space, min_value_sampler=min_value_sampler)
model = QuadraticMeanAndRBFKernelWithSamplers(dataset)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = mocked_mves.call_args[0][1]
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean)
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_min_value_entropy_search_builder_updates_acquisition_function(
min_value_sampler: ThompsonSampler[GaussianProcess],
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1e-10, dtype=tf.float64))
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
partial_dataset = Dataset(xs[:10], ys[:10])
full_dataset = Dataset(xs, ys)
builder = MinValueEntropySearch(search_space, min_value_sampler=min_value_sampler)
xs = tf.cast(tf.linspace([[0.0]], [[1.0]], 10), tf.float64)
old_acq_fn = builder.prepare_acquisition_function(model, dataset=partial_dataset)
tf.random.set_seed(0) # to ensure consistent sampling
updated_acq_fn = builder.update_acquisition_function(old_acq_fn, model, dataset=full_dataset)
assert updated_acq_fn == old_acq_fn
updated_values = updated_acq_fn(xs)
tf.random.set_seed(0) # to ensure consistent sampling
new_acq_fn = builder.prepare_acquisition_function(model, dataset=full_dataset)
new_values = new_acq_fn(xs)
npt.assert_allclose(updated_values, new_values)
@random_seed
@unittest.mock.patch("trieste.acquisition.function.entropy.min_value_entropy_search")
def test_min_value_entropy_search_builder_builds_min_value_samples_trajectory_sampler(
mocked_mves: MagicMock,
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1e-10, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = MinValueEntropySearch(
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = mocked_mves.call_args[0][1]
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean) + 1e-4
@pytest.mark.parametrize("samples", [tf.constant([]), tf.constant([[[]]])])
def test_min_value_entropy_search_raises_for_min_values_samples_with_invalid_shape(
samples: TensorType,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
min_value_entropy_search(QuadraticMeanAndRBFKernel(), samples)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_min_value_entropy_search_raises_for_invalid_batch_size(at: TensorType) -> None:
mes = min_value_entropy_search(QuadraticMeanAndRBFKernel(), tf.constant([[1.0], [2.0]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
mes(at)
def test_min_value_entropy_search_returns_correct_shape() -> None:
model = QuadraticMeanAndRBFKernel()
min_value_samples = tf.constant([[1.0], [2.0]])
query_at = tf.linspace([[-10.0]], [[10.0]], 5)
evals = min_value_entropy_search(model, min_value_samples)(query_at)
npt.assert_array_equal(evals.shape, tf.constant([5, 1]))
def test_min_value_entropy_search_chooses_same_as_probability_of_improvement() -> None:
"""
    When based on a single max-value sample, MES should choose the same point as probability of
    improvement calculated with the max-value as its threshold (see :cite:`wang2017max`).
"""
kernel = tfp.math.psd_kernels.MaternFiveHalves()
model = GaussianProcess([Branin.objective], [kernel])
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
min_value_sample = tf.constant([[1.0]], dtype=tf.float64)
mes_evals = min_value_entropy_search(model, min_value_sample)(xs[..., None, :])
mean, variance = model.predict(xs)
gamma = (tf.cast(min_value_sample, dtype=mean.dtype) - mean) / tf.sqrt(variance)
norm = tfp.distributions.Normal(tf.cast(0, dtype=mean.dtype), tf.cast(1, dtype=mean.dtype))
pi_evals = norm.cdf(gamma)
npt.assert_array_equal(tf.argmax(mes_evals), tf.argmax(pi_evals))
def test_gibbon_builder_raises_for_empty_data() -> None:
empty_data = Dataset(tf.zeros([0, 2], dtype=tf.float64), tf.ones([0, 2], dtype=tf.float64))
non_empty_data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = GIBBON(search_space)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel())
acq = builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), non_empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(acq, QuadraticMeanAndRBFKernel(), empty_data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(acq, QuadraticMeanAndRBFKernel())
@pytest.mark.parametrize("param", [-2, 0])
def test_gibbon_builder_raises_for_invalid_init_params(param: int) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
GIBBON(search_space, num_samples=param)
with pytest.raises(tf.errors.InvalidArgumentError):
GIBBON(search_space, grid_size=param)
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=False),
ThompsonSamplerFromTrajectory(sample_min_value=False),
],
)
def test_gibbon_raises_if_passed_sampler_with_sample_min_value_False(
sampler: ThompsonSampler[GaussianProcess],
) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(ValueError):
GIBBON(search_space, min_value_sampler=sampler)
def test_gibbon_default_sampler_is_exact_thompson() -> None:
search_space = Box([0, 0], [1, 1])
builder = GIBBON(search_space)
assert isinstance(builder._min_value_sampler, ExactThompsonSampler)
assert builder._min_value_sampler._sample_min_value
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=True),
GumbelSampler(sample_min_value=True),
ThompsonSamplerFromTrajectory(sample_min_value=True),
],
)
def test_gibbon_initialized_with_passed_sampler(sampler: ThompsonSampler[GaussianProcess]) -> None:
search_space = Box([0, 0], [1, 1])
builder = GIBBON(search_space, min_value_sampler=sampler)
assert builder._min_value_sampler == sampler
def test_gibbon_raises_when_use_trajectory_sampler_and_model_without_trajectories() -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = GIBBON[SupportsCovarianceObservationNoiseTrajectory](
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
model = QuadraticMeanAndRBFKernel()
with pytest.raises(ValueError):
builder.prepare_acquisition_function(model, dataset=dataset) # type: ignore
@pytest.mark.parametrize("samples", [tf.constant([]), tf.constant([[[]]])])
def test_gibbon_quality_term_raises_for_gumbel_samples_with_invalid_shape(
samples: TensorType,
) -> None:
    model = QuadraticMeanAndRBFKernel()
    with pytest.raises(ValueError):
        gibbon_quality_term(model, samples)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_gibbon_quality_term_raises_for_invalid_batch_size(at: TensorType) -> None:
model = QuadraticMeanAndRBFKernel()
gibbon_acq = gibbon_quality_term(model, tf.constant([[1.0], [2.0]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
gibbon_acq(at)
def test_gibbon_quality_term_returns_correct_shape() -> None:
model = QuadraticMeanAndRBFKernel()
gumbel_samples = tf.constant([[1.0], [2.0]])
query_at = tf.linspace([[-10.0]], [[10.0]], 5)
evals = gibbon_quality_term(model, gumbel_samples)(query_at)
npt.assert_array_equal(evals.shape, tf.constant([5, 1]))
@unittest.mock.patch("trieste.acquisition.function.entropy.gibbon_quality_term")
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_gibbon_builder_builds_min_value_samples(
mocked_mves: MagicMock,
min_value_sampler: ThompsonSampler[GaussianProcess],
) -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = GIBBON(search_space, min_value_sampler=min_value_sampler)
model = QuadraticMeanAndRBFKernel()
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = builder._min_value_samples
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean) # type: ignore
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_gibbon_builder_updates_acquisition_function(
min_value_sampler: ThompsonSampler[GaussianProcess],
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.cast(tf.linspace(0.0, 1.0, 5), dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
partial_dataset = Dataset(xs[:10], ys[:10])
full_dataset = Dataset(xs, ys)
builder = GIBBON(search_space, min_value_sampler=min_value_sampler)
xs = tf.cast(tf.linspace([[0.0]], [[1.0]], 10), tf.float64)
model = QuadraticMeanAndRBFKernel()
old_acq_fn = builder.prepare_acquisition_function(model, dataset=partial_dataset)
tf.random.set_seed(0) # to ensure consistent sampling
updated_acq_fn = builder.update_acquisition_function(old_acq_fn, model, dataset=full_dataset)
assert updated_acq_fn == old_acq_fn
updated_values = updated_acq_fn(xs)
tf.random.set_seed(0) # to ensure consistent sampling
new_acq_fn = builder.prepare_acquisition_function(model, dataset=full_dataset)
new_values = new_acq_fn(xs)
npt.assert_allclose(updated_values, new_values)
@pytest.mark.parametrize("pending_points", [tf.constant([0.0]), tf.constant([[[0.0], [1.0]]])])
def test_gibbon_builder_raises_for_invalid_pending_points_shape(
pending_points: TensorType,
) -> None:
data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
space = Box([0, 0], [1, 1])
builder = GIBBON(search_space=space)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel(), data, pending_points)
@random_seed
@unittest.mock.patch("trieste.acquisition.function.entropy.gibbon_quality_term")
def test_gibbon_builder_builds_min_value_samples_using_trajectories(mocked_mves: MagicMock) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = QuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1e-10, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = GIBBON[SupportsCovarianceObservationNoiseTrajectory](
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = mocked_mves.call_args[0][1]
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean) + 1e-4
def test_gibbon_chooses_same_as_min_value_entropy_search() -> None:
"""
When based on a single max-value sample, GIBBON should choose the same point as
MES (see :cite:`Moss:2021`).
"""
model = QuadraticMeanAndRBFKernel(noise_variance=tf.constant(1e-10, dtype=tf.float64))
x_range = tf.linspace(-1.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
min_value_sample = tf.constant([[1.0]], dtype=tf.float64)
mes_evals = min_value_entropy_search(model, min_value_sample)(xs[..., None, :])
gibbon_evals = gibbon_quality_term(model, min_value_sample)(xs[..., None, :])
npt.assert_array_equal(tf.argmax(mes_evals), tf.argmax(gibbon_evals))
@pytest.mark.parametrize("rescaled_repulsion", [True, False])
@pytest.mark.parametrize("noise_variance", [0.1, 1e-10])
def test_batch_gibbon_is_sum_of_individual_gibbons_and_repulsion_term(
rescaled_repulsion: bool, noise_variance: float
) -> None:
"""
Check that batch GIBBON can be decomposed into the sum of sequential GIBBONs and a repulsion
term (see :cite:`Moss:2021`).
"""
noise_variance = tf.constant(noise_variance, dtype=tf.float64)
model = QuadraticMeanAndRBFKernel(noise_variance=noise_variance)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decomposition
x_range = tf.linspace(0.0, 1.0, 4)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
pending_points = tf.constant([[0.11, 0.51], [0.21, 0.31], [0.41, 0.91]], dtype=tf.float64)
min_value_sample = tf.constant([[-0.1, 0.1]], dtype=tf.float64)
gibbon_of_new_points = gibbon_quality_term(model, min_value_sample)(xs[..., None, :])
mean, var = model.predict(xs)
_, pending_var = model.predict_joint(pending_points)
pending_var += noise_variance * tf.eye(len(pending_points), dtype=pending_var.dtype)
calculated_batch_gibbon = gibbon_of_new_points + gibbon_repulsion_term(
model, pending_points, rescaled_repulsion=rescaled_repulsion
)(xs[..., None, :])
for i in tf.range(len(xs)): # check across a set of candidate points
candidate_and_pending = tf.concat([xs[i : i + 1], pending_points], axis=0)
_, A = model.predict_joint(candidate_and_pending)
A += noise_variance * tf.eye(len(pending_points) + 1, dtype=A.dtype)
repulsion = tf.linalg.logdet(A) - tf.math.log(A[0, 0, 0]) - tf.linalg.logdet(pending_var)
if rescaled_repulsion: # down-weight repulsion term
batch_size, search_space_dim = tf.cast(tf.shape(pending_points), dtype=mean.dtype)
repulsion = repulsion * ((1 / batch_size) ** (2))
reconstructed_batch_gibbon = 0.5 * repulsion + gibbon_of_new_points[i : i + 1]
npt.assert_array_almost_equal(
calculated_batch_gibbon[i : i + 1], reconstructed_batch_gibbon
)
def test_mumbo_builder_raises_for_empty_data() -> None:
empty_data = Dataset(tf.zeros([0, 2], dtype=tf.float64), tf.ones([0, 2], dtype=tf.float64))
non_empty_data = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MUMBO(search_space)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(
MultiFidelityQuadraticMeanAndRBFKernel(), dataset=empty_data
)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(MultiFidelityQuadraticMeanAndRBFKernel())
acq = builder.prepare_acquisition_function(
MultiFidelityQuadraticMeanAndRBFKernel(), dataset=non_empty_data
)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(
acq, MultiFidelityQuadraticMeanAndRBFKernel(), dataset=empty_data
)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(acq, MultiFidelityQuadraticMeanAndRBFKernel())
@pytest.mark.parametrize("param", [-2, 0])
def test_mumbo_builder_raises_for_invalid_init_params(param: int) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(tf.errors.InvalidArgumentError):
MUMBO(search_space, num_samples=param)
with pytest.raises(tf.errors.InvalidArgumentError):
MUMBO(search_space, grid_size=param)
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=False),
ThompsonSamplerFromTrajectory(sample_min_value=False),
],
)
def test_mumbo_raises_if_passed_sampler_with_sample_min_value_False(
sampler: ThompsonSampler[MUMBOModelType],
) -> None:
search_space = Box([0, 0], [1, 1])
with pytest.raises(ValueError):
MUMBO(search_space, min_value_sampler=sampler)
def test_mumbo_default_sampler_is_exact_thompson() -> None:
search_space = Box([0, 0], [1, 1])
builder = MUMBO(search_space)
assert isinstance(builder._min_value_sampler, ExactThompsonSampler)
assert builder._min_value_sampler._sample_min_value
@pytest.mark.parametrize(
"sampler",
[
ExactThompsonSampler(sample_min_value=True),
GumbelSampler(sample_min_value=True),
ThompsonSamplerFromTrajectory(sample_min_value=True),
],
)
def test_mumbo_initialized_with_passed_sampler(sampler: ThompsonSampler[MUMBOModelType]) -> None:
search_space = Box([0, 0], [1, 1])
builder = MUMBO(search_space, min_value_sampler=sampler)
assert builder._min_value_sampler == sampler
def test_mumbo_raises_when_use_trajectory_sampler_and_model_without_trajectories() -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MUMBO( # type: ignore
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
model = MultiFidelityQuadraticMeanAndRBFKernel()
with pytest.raises(ValueError):
builder.prepare_acquisition_function(model, dataset=dataset) # type: ignore
@unittest.mock.patch("trieste.acquisition.function.entropy.mumbo")
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_mumbo_builder_builds_min_value_samples(
mocked_mves: MagicMock,
min_value_sampler: ThompsonSampler[SupportsCovarianceWithTopFidelity],
) -> None:
dataset = Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))
search_space = Box([0, 0], [1, 1])
builder = MUMBO(search_space, min_value_sampler=min_value_sampler)
model = MultiFidelityQuadraticMeanAndRBFKernelWithSamplers(dataset)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = mocked_mves.call_args[0][1]
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
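    # MUMBO's min-value samples refer to the highest fidelity, so pin the fidelity
    # column to the top fidelity before comparing against model predictions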
query_points = add_fidelity_column(query_points[:, :-1], model.num_fidelities - 1)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean)
@pytest.mark.parametrize(
"min_value_sampler",
[ExactThompsonSampler(sample_min_value=True), GumbelSampler(sample_min_value=True)],
)
def test_mumbo_builder_updates_acquisition_function(
min_value_sampler: ThompsonSampler[SupportsCovarianceWithTopFidelity],
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
model = MultiFidelityQuadraticMeanAndRBFKernel(
noise_variance=tf.constant(1e-10, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
partial_dataset = Dataset(xs[:10], ys[:10])
full_dataset = Dataset(xs, ys)
builder = MUMBO(search_space, min_value_sampler=min_value_sampler)
xs = tf.cast(tf.linspace([[0.0]], [[1.0]], 10), tf.float64)
old_acq_fn = builder.prepare_acquisition_function(model, dataset=partial_dataset)
tf.random.set_seed(0) # to ensure consistent sampling
updated_acq_fn = builder.update_acquisition_function(old_acq_fn, model, dataset=full_dataset)
assert updated_acq_fn == old_acq_fn
updated_values = updated_acq_fn(xs)
tf.random.set_seed(0) # to ensure consistent sampling
new_acq_fn = builder.prepare_acquisition_function(model, dataset=full_dataset)
new_values = new_acq_fn(xs)
npt.assert_allclose(updated_values, new_values)
@random_seed
@unittest.mock.patch("trieste.acquisition.function.entropy.min_value_entropy_search")
def test_mumbo_builder_builds_min_value_samples_trajectory_sampler(
mocked_mves: MagicMock,
) -> None:
search_space = Box([0.0, 0.0], [1.0, 1.0])
x_range = tf.linspace(0.0, 1.0, 5)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
ys = quadratic(xs)
dataset = Dataset(xs, ys)
model = MultiFidelityQuadraticMeanAndRBFKernelWithSamplers(
dataset=dataset, noise_variance=tf.constant(1e-10, dtype=tf.float64)
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
builder = MinValueEntropySearch(
search_space, min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True)
)
builder.prepare_acquisition_function(model, dataset=dataset)
mocked_mves.assert_called_once()
    # check that the min-value samples look sensible
min_value_samples = mocked_mves.call_args[0][1]
query_points = builder._search_space.sample(num_samples=builder._grid_size)
query_points = tf.concat([dataset.query_points, query_points], 0)
fmean, _ = model.predict(query_points)
assert max(min_value_samples) < min(fmean) + 1e-4
@pytest.mark.parametrize("samples", [tf.constant([]), tf.constant([[[]]])])
def test_mumbo_raises_for_min_values_samples_with_invalid_shape(
samples: TensorType,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
mumbo(MultiFidelityQuadraticMeanAndRBFKernel(), samples)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_mumbo_raises_for_invalid_batch_size(at: TensorType) -> None:
mes = mumbo(MultiFidelityQuadraticMeanAndRBFKernel(), tf.constant([[1.0], [2.0]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
mes(at)
def test_mumbo_returns_correct_shape() -> None:
model = MultiFidelityQuadraticMeanAndRBFKernel()
min_value_samples = tf.constant([[1.0], [2.0]])
query_at = tf.linspace([[-10.0]], [[10.0]], 5)
evals = mumbo(model, min_value_samples)(query_at)
npt.assert_array_equal(evals.shape, tf.constant([5, 1]))
| 30,802 | 41.311813 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_function.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Mapping
from typing import Callable, Optional, Sequence
from unittest.mock import MagicMock
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import (
TF_DEBUGGING_ERROR_TYPES,
ShapeLike,
mk_dataset,
quadratic,
raise_exc,
random_seed,
various_shapes,
)
from tests.util.models.gpflow.models import (
GaussianProcess,
GaussianProcessWithBatchSamplers,
GaussianProcessWithoutNoise,
GaussianProcessWithSamplers,
QuadraticMeanAndRBFKernel,
QuadraticMeanAndRBFKernelWithBatchSamplers,
QuadraticMeanAndRBFKernelWithSamplers,
rbf,
)
from trieste.acquisition.function.function import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
AugmentedExpectedImprovement,
BatchExpectedImprovement,
BatchMonteCarloExpectedImprovement,
ExpectedConstrainedImprovement,
ExpectedImprovement,
FastConstraintsFeasibility,
MakePositive,
MonteCarloAugmentedExpectedImprovement,
MonteCarloExpectedImprovement,
MultipleOptimismNegativeLowerConfidenceBound,
NegativeLowerConfidenceBound,
ProbabilityOfFeasibility,
ProbabilityOfImprovement,
augmented_expected_improvement,
expected_improvement,
fast_constraints_feasibility,
lower_confidence_bound,
multiple_optimism_lower_confidence_bound,
probability_below_threshold,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.objectives import Branin
from trieste.space import Box, LinearConstraint, SearchSpace
from trieste.types import Tag, TensorType
# tags
FOO: Tag = "foo"
NA: Tag = ""
def test_probability_of_improvement_builder_builds_pi_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernel()
acq_fn = ProbabilityOfImprovement().prepare_acquisition_function(model, dataset=dataset)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = probability_below_threshold(model, tf.constant(0.0))(xs)
npt.assert_allclose(acq_fn(xs), expected)
def test_probability_of_improvement_builder_updates_pi_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0]]),
tf.constant([[4.1], [0.9]]),
)
model = QuadraticMeanAndRBFKernel()
acq_fn = ProbabilityOfImprovement().prepare_acquisition_function(model, dataset=dataset)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = probability_below_threshold(model, tf.constant(1.0))(xs)
npt.assert_allclose(acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
new_dataset = Dataset(
tf.concat([dataset.query_points, tf.constant([[0.0], [1.0], [2.0]])], 0),
tf.concat([dataset.observations, tf.constant([[0.1], [1.1], [3.9]])], 0),
)
updated_acq_fn = ProbabilityOfImprovement().update_acquisition_function(
acq_fn, model, dataset=new_dataset
)
    assert updated_acq_fn == acq_fn
expected = probability_below_threshold(model, tf.constant(0.0))(xs)
npt.assert_allclose(updated_acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
def test_probability_of_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(tf.errors.InvalidArgumentError):
ProbabilityOfImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(), dataset=data
)
with pytest.raises(tf.errors.InvalidArgumentError):
ProbabilityOfImprovement().prepare_acquisition_function(QuadraticMeanAndRBFKernel())
@random_seed
@pytest.mark.parametrize("best", [tf.constant([50.0]), Branin.minimum, Branin.minimum * 1.01])
@pytest.mark.parametrize(
"variance_scale, num_samples_per_point, rtol, atol",
[
(0.1, 1000, 0.01, 1e-9),
(1.0, 50_000, 0.01, 1e-3),
(10.0, 100_000, 0.01, 1e-2),
(100.0, 150_000, 0.01, 1e-1),
],
)
def test_probability_below_threshold_as_probability_of_improvement(
variance_scale: float, num_samples_per_point: int, best: tf.Tensor, rtol: float, atol: float
) -> None:
variance_scale = tf.constant(variance_scale, tf.float64)
best = tf.cast(best, dtype=tf.float64)[0]
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
kernel = tfp.math.psd_kernels.MaternFiveHalves(variance_scale, length_scale=0.25)
model = GaussianProcess([Branin.objective], [kernel])
mean, variance = model.predict(xs)
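    # Monte Carlo estimate of P(f(x) < best) from the model's marginal predictions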
samples = tfp.distributions.Normal(mean, tf.sqrt(variance)).sample(num_samples_per_point)
samples_improvement = tf.where(samples < best, 1, 0)
pi_approx = tf.reduce_sum(samples_improvement, axis=0) / num_samples_per_point
pif = probability_below_threshold(model, best)
pi = pif(xs[..., None, :])
npt.assert_allclose(pi, pi_approx, rtol=rtol, atol=atol)
def test_expected_improvement_builder_builds_expected_improvement_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernel()
acq_fn = ExpectedImprovement().prepare_acquisition_function(model, dataset=dataset)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = expected_improvement(model, tf.constant([0.0]))(xs)
npt.assert_allclose(acq_fn(xs), expected)
def test_expected_improvement_builder_updates_expected_improvement_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0]]),
tf.constant([[4.1], [0.9]]),
)
model = QuadraticMeanAndRBFKernel()
acq_fn = ExpectedImprovement().prepare_acquisition_function(model, dataset=dataset)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = expected_improvement(model, tf.constant([1.0]))(xs)
npt.assert_allclose(acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
new_dataset = Dataset(
tf.concat([dataset.query_points, tf.constant([[0.0], [1.0], [2.0]])], 0),
tf.concat([dataset.observations, tf.constant([[0.1], [1.1], [3.9]])], 0),
)
updated_acq_fn = ExpectedImprovement().update_acquisition_function(
acq_fn, model, dataset=new_dataset
)
assert updated_acq_fn == acq_fn
expected = expected_improvement(model, tf.constant([0.0]))(xs)
npt.assert_allclose(acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
def test_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(), dataset=data
)
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedImprovement().prepare_acquisition_function(QuadraticMeanAndRBFKernel())
def test_expected_improvement_is_relative_to_feasible_point() -> None:
search_space = Box([-1.0], [1.0], [LinearConstraint(A=tf.constant([[1.0]]), lb=0.0, ub=1.0)])
full_data = Dataset(tf.constant([[-0.2], [0.3]]), tf.constant([[0.04], [0.09]]))
full_ei = ExpectedImprovement(search_space).prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=full_data,
)
filtered_data = Dataset(tf.constant([[0.3]]), tf.constant([[0.09]]))
filtered_ei = ExpectedImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(), dataset=filtered_data
)
npt.assert_allclose(full_ei(tf.constant([[0.1]])), filtered_ei(tf.constant([[0.1]])))
def test_expected_improvement_uses_max_when_no_feasible_points() -> None:
search_space = Box([-2.5], [2.5], [LinearConstraint(A=tf.constant([[1.0]]), lb=0.5, ub=0.9)])
data = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
builder = ExpectedImprovement(search_space)
ei = builder.prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=data,
)
filtered_data = Dataset(tf.constant([[-2.0]]), tf.constant([[4.1]]))
filtered_ei = ExpectedImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(), dataset=filtered_data
)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
npt.assert_allclose(ei(xs), filtered_ei(xs))
ei = builder.update_acquisition_function(
ei,
QuadraticMeanAndRBFKernel(),
dataset=data,
)
npt.assert_allclose(ei(xs), filtered_ei(xs))
def test_expected_improvement_switches_to_improvement_on_feasible_points() -> None:
search_space = Box([0.0], [1.0], [LinearConstraint(A=tf.constant([[1.0]]), lb=0.5, ub=0.9)])
data = Dataset(tf.constant([[0.2], [1.0]]), tf.constant([[4.0], [1.0]]))
builder = ExpectedImprovement(search_space)
ei = builder.prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=data,
)
data = Dataset(tf.constant([[0.2], [0.7]]), tf.constant([[4.0], [1.0]]))
ei = builder.update_acquisition_function(
ei,
QuadraticMeanAndRBFKernel(),
dataset=data,
)
filtered_data = Dataset(tf.constant([[0.7]]), tf.constant([[1.0]]))
filtered_ei = ExpectedImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(), dataset=filtered_data
)
npt.assert_allclose(ei(tf.constant([[0.1]])), filtered_ei(tf.constant([[0.1]])))
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_expected_improvement_raises_for_invalid_batch_size(at: TensorType) -> None:
ei = expected_improvement(QuadraticMeanAndRBFKernel(), tf.constant([1.0]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ei(at)
@random_seed
@pytest.mark.parametrize("best", [tf.constant([50.0]), Branin.minimum, Branin.minimum * 1.01])
@pytest.mark.parametrize("test_update", [False, True])
@pytest.mark.parametrize(
"variance_scale, num_samples_per_point, rtol, atol",
[
(0.1, 1000, 0.01, 1e-9),
(1.0, 50_000, 0.01, 1e-3),
(10.0, 100_000, 0.01, 1e-2),
(100.0, 150_000, 0.01, 1e-1),
],
)
def test_expected_improvement(
variance_scale: float,
num_samples_per_point: int,
best: tf.Tensor,
rtol: float,
atol: float,
test_update: bool,
) -> None:
variance_scale = tf.constant(variance_scale, tf.float64)
best = tf.cast(best, dtype=tf.float64)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
kernel = tfp.math.psd_kernels.MaternFiveHalves(variance_scale, length_scale=0.25)
model = GaussianProcess([Branin.objective], [kernel])
mean, variance = model.predict(xs)
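    # Monte Carlo estimate of EI: mean positive improvement over the best value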
samples = tfp.distributions.Normal(mean, tf.sqrt(variance)).sample(num_samples_per_point)
samples_improvement = tf.where(samples < best, best - samples, 0)
ei_approx = tf.reduce_mean(samples_improvement, axis=0)
if test_update:
eif = expected_improvement(model, tf.constant([100.0], dtype=tf.float64))
eif.update(best)
else:
eif = expected_improvement(model, best)
ei = eif(xs[..., None, :])
npt.assert_allclose(ei, ei_approx, rtol=rtol, atol=atol)
def test_augmented_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(tf.errors.InvalidArgumentError):
AugmentedExpectedImprovement().prepare_acquisition_function(
QuadraticMeanAndRBFKernel(),
dataset=data,
)
with pytest.raises(tf.errors.InvalidArgumentError):
AugmentedExpectedImprovement().prepare_acquisition_function(QuadraticMeanAndRBFKernel())
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_augmented_expected_improvement_raises_for_invalid_batch_size(at: TensorType) -> None:
aei = augmented_expected_improvement(QuadraticMeanAndRBFKernel(), tf.constant([1.0]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
aei(at)
@pytest.mark.parametrize("observation_noise", [1e-8, 1.0, 10.0])
def test_augmented_expected_improvement_builder_builds_expected_improvement_times_augmentation(
observation_noise: float,
) -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernel(noise_variance=observation_noise)
acq_fn = AugmentedExpectedImprovement().prepare_acquisition_function(model, dataset=dataset)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
ei = ExpectedImprovement().prepare_acquisition_function(model, dataset=dataset)(xs)
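    # AEI scales EI by 1 - sqrt(noise variance) / sqrt(noise variance + predictive variance)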
@tf.function
def augmentation() -> TensorType:
_, variance = model.predict(tf.squeeze(xs, -2))
return 1.0 - (tf.math.sqrt(observation_noise)) / (
tf.math.sqrt(observation_noise + variance)
)
npt.assert_allclose(acq_fn(xs), ei * augmentation(), rtol=1e-6)
@pytest.mark.parametrize("observation_noise", [1e-8, 1.0, 10.0])
def test_augmented_expected_improvement_builder_updates_acquisition_function(
observation_noise: float,
) -> None:
partial_dataset = Dataset(
tf.constant([[-2.0], [-1.0]]),
tf.constant([[4.1], [0.9]]),
)
full_dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernel(noise_variance=observation_noise)
partial_data_acq_fn = AugmentedExpectedImprovement().prepare_acquisition_function(
model,
dataset=partial_dataset,
)
updated_acq_fn = AugmentedExpectedImprovement().update_acquisition_function(
partial_data_acq_fn,
model,
dataset=full_dataset,
)
assert updated_acq_fn == partial_data_acq_fn
full_data_acq_fn = AugmentedExpectedImprovement().prepare_acquisition_function(
model, dataset=full_dataset
)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
npt.assert_allclose(updated_acq_fn(xs), full_data_acq_fn(xs))
@pytest.mark.parametrize("sample_size", [-2, 0])
def test_mc_expected_improvement_raises_for_invalid_sample_size(sample_size: int) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
MonteCarloExpectedImprovement(sample_size)
def test_mc_expected_improvement_raises_for_invalid_jitter() -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
MonteCarloExpectedImprovement(100, jitter=-1.0)
@random_seed
def test_mc_expected_improvement_builds_expected_improvement_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernelWithSamplers(dataset)
acq_fn = MonteCarloExpectedImprovement(int(1e6)).prepare_acquisition_function(model, dataset)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = expected_improvement(model, tf.constant([0.0]))(xs)
npt.assert_allclose(acq_fn(xs), expected, rtol=1e-4, atol=2e-3)
def test_mc_expected_improvement_builder_raises_for_model_without_reparam_sampler() -> None:
data = Dataset(tf.zeros([1, 1]), tf.ones([1, 1]))
kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(1.0)
noise_variance = 1.0
    with pytest.raises(ValueError, match="MonteCarloExpectedImprovement only supports models .*"):
(
MonteCarloExpectedImprovement(100).prepare_acquisition_function(
GaussianProcess([lambda x: quadratic(x)], [kernel], noise_variance), # type: ignore
data,
)
)
def test_mc_expected_improvement_builder_raises_for_model_with_wrong_event_shape() -> None:
data = mk_dataset([(0.0, 0.0)], [(0.0, 0.0)])
matern52 = tfp.math.psd_kernels.MaternFiveHalves(
amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
)
model = GaussianProcessWithSamplers(
[lambda x: Branin.objective(x), lambda x: quadratic(x)], [matern52, rbf()]
)
    with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="Expected model with output .*"):
MonteCarloExpectedImprovement(100).prepare_acquisition_function(model, dataset=data)
def test_mc_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="Dataset must be populated."):
(
MonteCarloExpectedImprovement(100).prepare_acquisition_function(
QuadraticMeanAndRBFKernelWithSamplers(data), dataset=data
)
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
(
MonteCarloExpectedImprovement(100).prepare_acquisition_function(
QuadraticMeanAndRBFKernelWithSamplers(data)
)
)
def test_mc_expected_improvement_updater_raises_for_empty_data() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernelWithSamplers(dataset)
builder = MonteCarloExpectedImprovement(10)
acq_fn = builder.prepare_acquisition_function(model, dataset)
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="Dataset must be populated."):
builder.update_acquisition_function(acq_fn, model, dataset=data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.update_acquisition_function(acq_fn, model)
@random_seed
@pytest.mark.parametrize("test_update", [False, True])
@pytest.mark.parametrize(
"variance_scale, num_samples_per_point, rtol, atol",
[
(0.1, 25_000, 0.01, 1e-3),
(1.0, 50_000, 0.01, 2e-3),
(10.0, 100_000, 0.01, 1e-2),
(100.0, 150_000, 0.01, 1e-1),
],
)
def test_mc_expected_improvement_close_to_expected_improvement(
variance_scale: float,
num_samples_per_point: int,
rtol: float,
atol: float,
test_update: bool,
) -> None:
variance_scale = tf.constant(variance_scale, tf.float64)
dataset = Dataset(
tf.constant(
[[-2.0, 0.0], [-1.0, 0.0], [0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], dtype=tf.float64
),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]], dtype=tf.float64),
)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
kernel = tfp.math.psd_kernels.MaternFiveHalves(variance_scale, length_scale=0.25)
model = GaussianProcessWithSamplers([Branin.objective], [kernel])
if test_update:
builder = MonteCarloExpectedImprovement(num_samples_per_point)
init_data = Dataset(
tf.constant([[0.1, 0.1]], dtype=tf.float64), tf.constant([[100.0]], dtype=tf.float64)
)
eif = builder.prepare_acquisition_function(model, init_data)
eif = builder.update_acquisition_function(eif, model, dataset)
else:
eif = MonteCarloExpectedImprovement(num_samples_per_point).prepare_acquisition_function(
model, dataset
)
ei_approx = eif(xs[..., None, :])
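    # compare the MC estimate with the closed-form EI at the best observed value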
best = tf.reduce_min(Branin.objective(dataset.query_points))
eif = expected_improvement(model, best)
ei = eif(xs[..., None, :]) # type: ignore
npt.assert_allclose(ei, ei_approx, rtol=rtol, atol=atol)
@random_seed
def test_mc_expected_improvement_updates_without_retracing() -> None:
known_query_points = tf.random.uniform([10, 2], dtype=tf.float64)
data = Dataset(known_query_points[8:], quadratic(known_query_points[8:]))
model = QuadraticMeanAndRBFKernelWithSamplers(dataset=data)
builder = MonteCarloExpectedImprovement(10_000)
ei = ExpectedImprovement().prepare_acquisition_function(model, dataset=data)
xs = tf.random.uniform([5, 1, 2], dtype=tf.float64)
mcei = builder.prepare_acquisition_function(model, dataset=data)
assert mcei.__call__._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(mcei(xs), ei(xs), rtol=0.06)
assert mcei.__call__._get_tracing_count() == 1 # type: ignore
data = Dataset(known_query_points, quadratic(known_query_points))
up_mcei = builder.update_acquisition_function(mcei, model, dataset=data)
ei = ExpectedImprovement().prepare_acquisition_function(model, dataset=data)
assert up_mcei == mcei
assert mcei.__call__._get_tracing_count() == 1 # type: ignore
npt.assert_allclose(mcei(xs), ei(xs), rtol=0.06)
assert mcei.__call__._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("sample_size", [-2, 0])
def test_mc_augmented_expected_improvement_raises_for_invalid_sample_size(sample_size: int) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
MonteCarloAugmentedExpectedImprovement(sample_size)
def test_mc_augmented_expected_improvement_raises_for_invalid_jitter() -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
MonteCarloAugmentedExpectedImprovement(100, jitter=-1.0)
@random_seed
def test_mc_augmented_expected_improvement_builds_aei_using_best_from_model() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernelWithSamplers(dataset)
acq_fn = MonteCarloAugmentedExpectedImprovement(int(1e6)).prepare_acquisition_function(
model, dataset
)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = augmented_expected_improvement(model, tf.constant([0.0]))(xs)
npt.assert_allclose(acq_fn(xs), expected, rtol=1e-4, atol=2e-3)
def test_mc_augmented_expected_improvement_raises_for_invalid_models() -> None:
data = Dataset(tf.zeros([1, 1]), tf.ones([1, 1]))
kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(1.0)
noise_variance = 1.0
with pytest.raises(
ValueError, match="MonteCarloAugmentedExpectedImprovement only supports models .*"
):
(
MonteCarloAugmentedExpectedImprovement(100).prepare_acquisition_function(
GaussianProcess([lambda x: quadratic(x)], [kernel], noise_variance), # type: ignore
data,
)
)
with pytest.raises(
ValueError, match="MonteCarloAugmentedExpectedImprovement only supports models .*"
):
(
MonteCarloAugmentedExpectedImprovement(100).prepare_acquisition_function(
GaussianProcessWithoutNoise([lambda x: quadratic(x)], [kernel]), # type: ignore
data,
)
)
def test_mc_augmented_expected_improvement_builder_raises_for_empty_data() -> None:
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="Dataset must be populated."):
(
MonteCarloAugmentedExpectedImprovement(100).prepare_acquisition_function(
QuadraticMeanAndRBFKernelWithSamplers(data), dataset=data
)
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
(
MonteCarloAugmentedExpectedImprovement(100).prepare_acquisition_function(
QuadraticMeanAndRBFKernelWithSamplers(data)
)
)
def test_mc_augmented_expected_improvement_updater_raises_for_empty_data() -> None:
dataset = Dataset(
tf.constant([[-2.0], [-1.0], [0.0], [1.0], [2.0]]),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]]),
)
model = QuadraticMeanAndRBFKernelWithSamplers(dataset)
builder = MonteCarloAugmentedExpectedImprovement(10)
acq_fn = builder.prepare_acquisition_function(model, dataset)
data = Dataset(tf.zeros([0, 1]), tf.ones([0, 1]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES, match="Dataset must be populated."):
builder.update_acquisition_function(acq_fn, model, dataset=data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.update_acquisition_function(acq_fn, model)
@random_seed
@pytest.mark.parametrize("test_update", [False, True])
@pytest.mark.parametrize(
"variance_scale, noise_variance, num_samples_per_point, rtol, atol",
[
(0.1, 1e-4, 150_000, 0.01, 1e-3),
(1.0, 1e-4, 150_000, 0.01, 1e-3),
(10.0, 1e-4, 150_000, 0.01, 2e-3),
(100.0, 1e-4, 150_000, 0.01, 2e-2),
(0.1, 1e-3, 150_000, 0.01, 1e-3),
(1.0, 1e-3, 150_000, 0.01, 1e-3),
(10.0, 1e-3, 150_000, 0.01, 2e-3),
(100.0, 1e-3, 150_000, 0.01, 2e-2),
],
)
def test_mc_augmented_expected_improvement_close_to_augmented_expected_improvement(
variance_scale: float,
noise_variance: float,
num_samples_per_point: int,
rtol: float,
atol: float,
test_update: bool,
) -> None:
variance_scale = tf.constant(variance_scale, tf.float64)
dataset = Dataset(
tf.constant(
[[-2.0, 0.0], [-1.0, 0.0], [0.0, 0.0], [1.0, 0.0], [2.0, 0.0]], dtype=tf.float64
),
tf.constant([[4.1], [0.9], [0.1], [1.1], [3.9]], dtype=tf.float64),
)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
kernel = tfp.math.psd_kernels.MaternFiveHalves(variance_scale, length_scale=0.25)
model = GaussianProcessWithSamplers(
[Branin.objective], [kernel], noise_variance=tf.constant(noise_variance, tf.float64)
)
if test_update:
init_data = Dataset(
tf.constant([[0.1, 0.1]], dtype=tf.float64), tf.constant([[100.0]], dtype=tf.float64)
)
builder = MonteCarloAugmentedExpectedImprovement(num_samples_per_point)
aeif = builder.prepare_acquisition_function(model, init_data)
model._noise_variance = tf.constant(noise_variance, tf.float64)
aeif = builder.update_acquisition_function(aeif, model, dataset)
else:
aeif = MonteCarloAugmentedExpectedImprovement(
num_samples_per_point
).prepare_acquisition_function(model, dataset)
aei_approx = aeif(xs[..., None, :])
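    # compare the MC estimate with the closed-form augmented EI at the best observed value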
best = tf.reduce_min(Branin.objective(dataset.query_points))
aeif = augmented_expected_improvement(model, best)
aei = aeif(xs[..., None, :]) # type: ignore
npt.assert_allclose(aei, aei_approx, rtol=rtol, atol=atol)
@random_seed
def test_mc_augmented_expected_improvement_updates_without_retracing() -> None:
known_query_points = tf.random.uniform([10, 2], dtype=tf.float64)
data = Dataset(known_query_points[8:], quadratic(known_query_points[8:]))
model = QuadraticMeanAndRBFKernelWithSamplers(dataset=data)
model._noise_variance = tf.cast(model.get_observation_noise(), tf.float64)
builder = MonteCarloAugmentedExpectedImprovement(10_000)
aei = AugmentedExpectedImprovement().prepare_acquisition_function(model, dataset=data)
xs = tf.random.uniform([5, 1, 2], dtype=tf.float64)
mcaei = builder.prepare_acquisition_function(model, dataset=data)
assert mcaei.__call__._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(mcaei(xs), aei(xs), rtol=0.06)
assert mcaei.__call__._get_tracing_count() == 1 # type: ignore
data = Dataset(known_query_points, quadratic(known_query_points))
up_mcaei = builder.update_acquisition_function(mcaei, model, dataset=data)
aei = AugmentedExpectedImprovement().prepare_acquisition_function(model, dataset=data)
assert up_mcaei == mcaei
assert mcaei.__call__._get_tracing_count() == 1 # type: ignore
npt.assert_allclose(mcaei(xs), aei(xs), rtol=0.06)
assert mcaei.__call__._get_tracing_count() == 1 # type: ignore
def test_negative_lower_confidence_bound_builder_builds_negative_lower_confidence_bound() -> None:
model = QuadraticMeanAndRBFKernel()
beta = 1.96
acq_fn = NegativeLowerConfidenceBound(beta).prepare_acquisition_function(model)
query_at = tf.linspace([[-10]], [[10]], 100)
expected = -lower_confidence_bound(model, beta)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
def test_negative_lower_confidence_bound_builder_updates_without_retracing() -> None:
model = QuadraticMeanAndRBFKernel()
beta = 1.96
builder = NegativeLowerConfidenceBound(beta)
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn._get_tracing_count() == 0 # type: ignore
query_at = tf.linspace([[-10]], [[10]], 100)
expected = -lower_confidence_bound(model, beta)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("beta", [-0.1, -2.0])
def test_lower_confidence_bound_raises_for_negative_beta(beta: float) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
lower_confidence_bound(QuadraticMeanAndRBFKernel(), beta)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_lower_confidence_bound_raises_for_invalid_batch_size(at: TensorType) -> None:
lcb = lower_confidence_bound(QuadraticMeanAndRBFKernel(), tf.constant(1.0))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
lcb(at)
@pytest.mark.parametrize("beta", [0.0, 0.1, 7.8])
def test_lower_confidence_bound(beta: float) -> None:
query_at = tf.linspace([[-3]], [[3]], 10)
actual = lower_confidence_bound(QuadraticMeanAndRBFKernel(), beta)(query_at)
npt.assert_array_almost_equal(actual, tf.squeeze(query_at, -2) ** 2 - beta)
@pytest.mark.parametrize(
"threshold, at, expected",
[
(0.0, tf.constant([[0.0]]), 0.5),
# values looked up on a standard normal table
(2.0, tf.constant([[1.0]]), 0.5 + 0.34134),
(-0.25, tf.constant([[-0.5]]), 0.5 - 0.19146),
],
)
def test_probability_below_threshold_as_probability_of_feasibility(
threshold: float, at: tf.Tensor, expected: float
) -> None:
actual = probability_below_threshold(QuadraticMeanAndRBFKernel(), threshold)(at)
npt.assert_allclose(actual, expected, rtol=1e-4)
@pytest.mark.parametrize(
"at",
[
tf.constant([[0.0]], tf.float64),
tf.constant([[-3.4]], tf.float64),
tf.constant([[0.2]], tf.float64),
],
)
@pytest.mark.parametrize("threshold", [-2.3, 0.2])
def test_probability_of_feasibility_builder_builds_pof(threshold: float, at: tf.Tensor) -> None:
builder = ProbabilityOfFeasibility(threshold)
acq = builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel())
expected = probability_below_threshold(QuadraticMeanAndRBFKernel(), threshold)(at)
npt.assert_allclose(acq(at), expected)
@pytest.mark.parametrize("shape", various_shapes() - {()})
def test_probability_below_threshold_raises_on_non_scalar_threshold(shape: ShapeLike) -> None:
threshold = tf.ones(shape)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
probability_below_threshold(QuadraticMeanAndRBFKernel(), threshold)
@pytest.mark.parametrize("shape", [[], [0], [2], [2, 1], [1, 2, 1]])
def test_probability_below_threshold_raises_on_invalid_at_shape(shape: ShapeLike) -> None:
at = tf.ones(shape)
pof = probability_below_threshold(QuadraticMeanAndRBFKernel(), 0.0)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
pof(at)
@pytest.mark.parametrize("shape", various_shapes() - {()})
def test_probability_of_feasibility_builder_raises_on_non_scalar_threshold(
shape: ShapeLike,
) -> None:
threshold = tf.ones(shape)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ProbabilityOfFeasibility(threshold)
@pytest.mark.parametrize("at", [tf.constant([[0.0]], tf.float64)])
@pytest.mark.parametrize("threshold", [-2.3, 0.2])
def test_probability_of_feasibility_builder_updates_without_retracing(
threshold: float, at: tf.Tensor
) -> None:
builder = ProbabilityOfFeasibility(threshold)
model = QuadraticMeanAndRBFKernel()
expected = probability_below_threshold(QuadraticMeanAndRBFKernel(), threshold)(at)
acq = builder.prepare_acquisition_function(model)
assert acq.__call__._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(acq(at), expected)
assert acq.__call__._get_tracing_count() == 1 # type: ignore
up_acq = builder.update_acquisition_function(acq, model)
assert up_acq == acq
npt.assert_allclose(acq(at), expected)
assert acq.__call__._get_tracing_count() == 1 # type: ignore
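# helper constraints restricting each coordinate of the unit box to [0.3, 0.7]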
def _box_feasibility_constraints() -> Sequence[LinearConstraint]:
return [LinearConstraint(A=tf.eye(3), lb=tf.zeros((3)) + 0.3, ub=tf.ones((3)) - 0.3)]
@pytest.mark.parametrize(
"smoother, expected",
[
(None, tf.constant([1.0, 0.0, 0.0, 1.0])),
(tfp.distributions.Normal(0.0, 0.1).cdf, tf.constant([0.871, 0.029, 0.029, 0.462])),
(tf.math.sigmoid, tf.constant([0.028, 0.026, 0.026, 0.027])),
],
)
def test_fast_constraints_feasibility_smoothing_values(
smoother: Optional[Callable[[TensorType], TensorType]],
expected: TensorType,
) -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_feasibility_constraints())
points = box.sample_sobol(4, skip=0)
acq = fast_constraints_feasibility(box, smoother)
got = tf.squeeze(acq(points))
npt.assert_allclose(got, expected, atol=0.005)
def test_fast_constraints_feasibility_builder_builds_same_func() -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_feasibility_constraints())
points = box.sample_sobol(4)
builder = FastConstraintsFeasibility(box)
acq = builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel())
expected = fast_constraints_feasibility(box)(points)
npt.assert_allclose(acq(points), expected)
def test_fast_constraints_feasibility_builder_updates_without_retracing() -> None:
box = Box(tf.zeros((3,)), tf.ones((3,)), _box_feasibility_constraints())
points = box.sample_sobol(4)
builder = FastConstraintsFeasibility(box)
expected = fast_constraints_feasibility(box)(points)
acq = builder.prepare_acquisition_function(QuadraticMeanAndRBFKernel())
assert acq._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(acq(points), expected)
assert acq._get_tracing_count() == 1 # type: ignore
up_acq = builder.update_acquisition_function(acq, QuadraticMeanAndRBFKernel())
assert up_acq == acq
points = box.sample_sobol(4)
expected = fast_constraints_feasibility(box)(points)
npt.assert_allclose(acq(points), expected)
assert acq._get_tracing_count() == 1 # type: ignore
def test_fast_constraints_feasibility_raises_without_constraints() -> None:
box = Box(tf.zeros((2)), tf.ones((2)))
with pytest.raises(NotImplementedError):
_ = FastConstraintsFeasibility(box)
with pytest.raises(NotImplementedError):
_ = fast_constraints_feasibility(box)
def test_expected_constrained_improvement_raises_for_non_scalar_min_pof() -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedConstrainedImprovement(NA, pof, tf.constant([0.0]))
def test_expected_constrained_improvement_raises_for_out_of_range_min_pof() -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedConstrainedImprovement(NA, pof, 1.5)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_expected_constrained_improvement_raises_for_invalid_batch_size(at: TensorType) -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
builder = ExpectedConstrainedImprovement(NA, pof, tf.constant(0.0))
initial_query_points = tf.constant([[-1.0]])
initial_objective_function_values = tf.constant([[1.0]])
data = {NA: Dataset(initial_query_points, initial_objective_function_values)}
eci = builder.prepare_acquisition_function({NA: QuadraticMeanAndRBFKernel()}, datasets=data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
eci(at)
def test_expected_constrained_improvement_can_reproduce_expected_improvement() -> None:
class _Certainty(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: tf.ones_like(tf.squeeze(x, -2))
data = {FOO: Dataset(tf.constant([[0.5]]), tf.constant([[0.25]]))}
models_ = {FOO: QuadraticMeanAndRBFKernel()}
builder = ExpectedConstrainedImprovement(FOO, _Certainty(), 0)
eci = builder.prepare_acquisition_function(models_, datasets=data)
ei = ExpectedImprovement().using(FOO).prepare_acquisition_function(models_, datasets=data)
at = tf.constant([[[-0.1]], [[1.23]], [[-6.78]]])
npt.assert_allclose(eci(at), ei(at))
new_data = {FOO: Dataset(tf.constant([[0.5], [1.0]]), tf.constant([[0.25], [0.5]]))}
up_eci = builder.update_acquisition_function(eci, models_, datasets=new_data)
assert up_eci == eci
up_ei = (
ExpectedImprovement().using(FOO).prepare_acquisition_function(models_, datasets=new_data)
)
npt.assert_allclose(eci(at), up_ei(at))
assert eci._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize(
"search_space, dataset",
[
(None, Dataset(tf.constant([[-0.2], [0.3]]), tf.constant([[0.04], [0.09]]))),
(
Box([-1.0], [1.0], [LinearConstraint(A=tf.constant([[1.0]]), lb=0.25, ub=1.0)]),
Dataset(tf.constant([[-0.2], [0.2], [0.3]]), tf.constant([[0.04], [0.04], [0.09]])),
),
],
)
def test_expected_constrained_improvement_is_relative_to_feasible_point(
search_space: SearchSpace, dataset: Dataset
) -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: tf.cast(tf.squeeze(x, -2) >= 0, x.dtype)
models_ = {FOO: QuadraticMeanAndRBFKernel()}
eci_data = {FOO: Dataset(tf.constant([[-0.2], [0.3]]), tf.constant([[0.04], [0.09]]))}
eci = ExpectedConstrainedImprovement(
FOO, _Constraint(), search_space=search_space
).prepare_acquisition_function(
models_,
datasets=eci_data,
)
ei_data = {FOO: Dataset(tf.constant([[0.3]]), tf.constant([[0.09]]))}
ei = ExpectedImprovement().using(FOO).prepare_acquisition_function(models_, datasets=ei_data)
npt.assert_allclose(eci(tf.constant([[0.1]])), ei(tf.constant([[0.1]])))
def test_expected_constrained_improvement_is_less_for_constrained_points() -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: tf.cast(tf.squeeze(x, -2) >= 0, x.dtype)
def two_global_minima(x: tf.Tensor) -> tf.Tensor:
return x**4 / 4 - x**2 / 2
initial_query_points = tf.constant([[-2.0], [0.0], [1.2]])
data = {FOO: Dataset(initial_query_points, two_global_minima(initial_query_points))}
models_ = {FOO: GaussianProcess([two_global_minima], [rbf()])}
eci = ExpectedConstrainedImprovement(FOO, _Constraint()).prepare_acquisition_function(
models_,
datasets=data,
)
npt.assert_array_less(eci(tf.constant([[-1.0]])), eci(tf.constant([[1.0]])))
def test_expected_constrained_improvement_raises_for_empty_data() -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return raise_exc
data = {FOO: Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))}
models_ = {FOO: QuadraticMeanAndRBFKernel()}
builder = ExpectedConstrainedImprovement(FOO, _Constraint())
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(models_, datasets=data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(models_)
@pytest.mark.parametrize(
"search_space, observations",
[
(None, tf.constant([[-2.0], [1.0]])),
(Box([-2.0], [1.0]), tf.constant([[-2.0], [1.0]])),
(
Box([-2.0], [1.0], [LinearConstraint(A=tf.constant([[1.0]]), lb=0.5, ub=1.0)]),
tf.constant([[0.2], [1.0]]),
),
],
)
def test_expected_constrained_improvement_is_constraint_when_no_feasible_points(
search_space: SearchSpace, observations: TensorType
) -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
def acquisition(x: TensorType) -> TensorType:
x_ = tf.squeeze(x, -2)
return tf.cast(tf.logical_and(0.0 <= x_, x_ < 1.0), x.dtype)
return acquisition
data = {FOO: Dataset(tf.constant([[-2.0], [1.0]]), tf.constant([[4.0], [1.0]]))}
models_ = {FOO: QuadraticMeanAndRBFKernel()}
eci = ExpectedConstrainedImprovement(
FOO, _Constraint(), search_space=search_space
).prepare_acquisition_function(
models_,
datasets=data,
)
constraint_fn = _Constraint().prepare_acquisition_function(models_, datasets=data)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
npt.assert_allclose(eci(xs), constraint_fn(xs))
def test_expected_constrained_improvement_min_feasibility_probability_bound_is_inclusive() -> None:
def pof(x_: TensorType) -> TensorType:
return tfp.bijectors.Sigmoid().forward(tf.squeeze(x_, -2))
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return pof
models_ = {FOO: QuadraticMeanAndRBFKernel()}
data = {FOO: Dataset(tf.constant([[1.1], [2.0]]), tf.constant([[1.21], [4.0]]))}
eci = ExpectedConstrainedImprovement(
FOO, _Constraint(), min_feasibility_probability=tfp.bijectors.Sigmoid().forward(1.0)
).prepare_acquisition_function(
models_,
datasets=data,
)
ei = ExpectedImprovement().using(FOO).prepare_acquisition_function(models_, datasets=data)
x = tf.constant([[1.5]])
npt.assert_allclose(eci(x), ei(x) * pof(x))
@pytest.mark.parametrize("sample_size", [-2, 0])
def test_batch_expected_improvement_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
BatchExpectedImprovement(sample_size=sample_size)
@pytest.mark.parametrize("sample_size", [2])
@pytest.mark.parametrize("jitter", [-1e0])
def test_batch_expected_improvement_raises_for_invalid_jitter(
sample_size: int,
jitter: float,
) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
BatchExpectedImprovement(sample_size=sample_size, jitter=jitter)
@pytest.mark.parametrize("sample_size", [100])
@pytest.mark.parametrize("jitter", [1e-6])
def test_batch_expected_improvement_raises_for_empty_data(
sample_size: int,
jitter: float,
) -> None:
builder = BatchExpectedImprovement(
sample_size=sample_size,
jitter=jitter,
)
data = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
matern52 = tfp.math.psd_kernels.MaternFiveHalves(
amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
)
model = GaussianProcessWithBatchSamplers(
[lambda x: Branin.objective(x), lambda x: quadratic(x)], [matern52, rbf()]
)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(model, dataset=data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(model)
@pytest.mark.parametrize("num_data", [4, 8])
@pytest.mark.parametrize("batch_size", [2, 3])
@pytest.mark.parametrize("dimension", [2, 4])
@random_seed
def test_batch_expected_improvement_can_reproduce_mc_expected_improvement_handcrafted_problem(
num_data: int,
batch_size: int,
dimension: int,
jitter: float = 1e-6,
sample_size: int = 200,
mc_sample_size: int = 100000,
) -> None:
xs = tf.random.uniform([num_data, dimension], dtype=tf.float64)
data = Dataset(xs, quadratic(xs))
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
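    # Monte Carlo reference: sample the joint predictive distribution at the query points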
mean, cov = model.predict_joint(xs)
mvn = tfp.distributions.MultivariateNormalFullCovariance(tf.linalg.matrix_transpose(mean), cov)
mvn_samples = mvn.sample(10000)
dummy_inputs = [dimension * [0.1]]
    dummy_outputs = [[dimension * 0.1**2.0]]
min_predictive_mean_at_known_points = dimension * 0.1**2.0
# fmt: off
expected = tf.reduce_mean(tf.reduce_max(tf.maximum(
min_predictive_mean_at_known_points - mvn_samples, 0.0
), axis=-1), axis=0)
# fmt: on
builder = BatchMonteCarloExpectedImprovement(10_000)
acq = builder.prepare_acquisition_function(
model, dataset=mk_dataset(dummy_inputs, dummy_outputs)
)
npt.assert_allclose(acq(xs), expected, rtol=0.05)
@pytest.mark.parametrize("num_data", [4, 8, 16])
@pytest.mark.parametrize("batch_size", [2, 3])
@pytest.mark.parametrize("dimension", [2, 4, 6])
@random_seed
def test_batch_expected_improvement_can_reproduce_mc_expected_improvement_random_problems(
num_data: int,
batch_size: int,
dimension: int,
jitter: float = 1e-6,
sample_size: int = 200,
mc_sample_size: int = 100000,
num_parallel: int = 4,
) -> None:
known_query_points = tf.random.uniform([num_data, dimension], dtype=tf.float64)
data = Dataset(known_query_points, quadratic(known_query_points))
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
batch_ei = BatchExpectedImprovement(
sample_size=sample_size,
jitter=jitter,
).prepare_acquisition_function(
model=model,
dataset=data,
)
batch_mcei = BatchMonteCarloExpectedImprovement(
sample_size=mc_sample_size,
jitter=jitter,
).prepare_acquisition_function(
model=model,
dataset=data,
)
xs = tf.random.uniform([num_parallel, batch_size, dimension], dtype=tf.float64)
npt.assert_allclose(batch_mcei(xs), batch_ei(xs), rtol=2e-2)
    # and again, since the sampler uses caching
npt.assert_allclose(batch_mcei(xs), batch_ei(xs), rtol=2e-2)
@pytest.mark.parametrize("num_data", [10])
@pytest.mark.parametrize("num_parallel", [3])
@pytest.mark.parametrize("batch_size", [5])
@pytest.mark.parametrize("sample_size", [100])
@pytest.mark.parametrize("dimension", [2])
@pytest.mark.parametrize("jitter", [1e-6])
@pytest.mark.parametrize("mc_sample_size", [int(4e5)])
@random_seed
def test_batch_expected_improvement_updates_without_retracing(
num_data: int,
num_parallel: int,
batch_size: int,
sample_size: int,
dimension: int,
jitter: float,
mc_sample_size: int,
) -> None:
known_query_points = tf.random.uniform([num_data, dimension], dtype=tf.float64)
data = Dataset(
known_query_points[num_data - 2 :], quadratic(known_query_points[num_data - 2 :])
)
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
batch_ei_builder = BatchExpectedImprovement(
sample_size=sample_size,
jitter=jitter,
)
batch_mcei_builder = BatchMonteCarloExpectedImprovement(
sample_size=mc_sample_size,
jitter=jitter,
)
xs = tf.random.uniform([num_parallel, batch_size, dimension], dtype=tf.float64)
batch_ei = batch_ei_builder.prepare_acquisition_function(model=model, dataset=data)
batch_mcei = batch_mcei_builder.prepare_acquisition_function(model=model, dataset=data)
assert batch_ei.__call__._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(batch_mcei(xs), batch_ei(xs), rtol=2e-2)
assert batch_ei.__call__._get_tracing_count() == 1 # type: ignore
data = Dataset(known_query_points, quadratic(known_query_points))
up_batch_ei = batch_ei_builder.update_acquisition_function(batch_ei, model, dataset=data)
batch_mcei = batch_mcei_builder.update_acquisition_function(batch_mcei, model, dataset=data)
assert up_batch_ei == batch_ei
assert batch_ei.__call__._get_tracing_count() == 1 # type: ignore
npt.assert_allclose(batch_mcei(xs), batch_ei(xs), rtol=2e-2)
assert batch_ei.__call__._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("sample_size", [-2, 0])
def test_batch_monte_carlo_expected_improvement_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
BatchMonteCarloExpectedImprovement(sample_size)
def test_batch_monte_carlo_expected_improvement_raises_for_invalid_jitter() -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
BatchMonteCarloExpectedImprovement(100, jitter=-1.0)
def test_batch_monte_carlo_expected_improvement_raises_for_empty_data() -> None:
builder = BatchMonteCarloExpectedImprovement(100)
data = Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))
matern52 = tfp.math.psd_kernels.MaternFiveHalves(
amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
)
model = GaussianProcessWithBatchSamplers(
[lambda x: Branin.objective(x), lambda x: quadratic(x)], [matern52, rbf()]
)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(model, dataset=data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(model)
def test_batch_monte_carlo_expected_improvement_raises_for_model_with_wrong_event_shape() -> None:
builder = BatchMonteCarloExpectedImprovement(100)
data = mk_dataset([(0.0, 0.0)], [(0.0, 0.0)])
matern52 = tfp.math.psd_kernels.MaternFiveHalves(
amplitude=tf.cast(2.3, tf.float64), length_scale=tf.cast(0.5, tf.float64)
)
model = GaussianProcessWithBatchSamplers(
[lambda x: Branin.objective(x), lambda x: quadratic(x)], [matern52, rbf()]
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
builder.prepare_acquisition_function(model, dataset=data)
@random_seed
def test_batch_monte_carlo_expected_improvement_raises_for_model_without_reparam_sampler() -> None:
known_query_points = tf.random.uniform([5, 2], dtype=tf.float64)
data = Dataset(known_query_points, quadratic(known_query_points))
model = QuadraticMeanAndRBFKernel()
with pytest.raises(ValueError):
(
BatchMonteCarloExpectedImprovement(10_000).prepare_acquisition_function(
model, dataset=data # type: ignore
)
)
@random_seed
def test_batch_monte_carlo_expected_improvement_can_reproduce_ei() -> None:
known_query_points = tf.random.uniform([5, 2], dtype=tf.float64)
data = Dataset(known_query_points, quadratic(known_query_points))
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
batch_ei = BatchMonteCarloExpectedImprovement(10_000).prepare_acquisition_function(
model, dataset=data
)
ei = ExpectedImprovement().prepare_acquisition_function(model, dataset=data)
xs = tf.random.uniform([3, 5, 1, 2], dtype=tf.float64)
npt.assert_allclose(batch_ei(xs), ei(xs), rtol=0.06)
    # and again, since the sampler uses caching
npt.assert_allclose(batch_ei(xs), ei(xs), rtol=0.06)
@random_seed
def test_batch_monte_carlo_expected_improvement() -> None:
xs = tf.random.uniform([3, 5, 7, 2], dtype=tf.float64)
data = Dataset(xs, quadratic(xs))
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
mean, cov = model.predict_joint(xs)
mvn = tfp.distributions.MultivariateNormalFullCovariance(tf.linalg.matrix_transpose(mean), cov)
mvn_samples = mvn.sample(10_000)
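    # the quadratic predictive mean attains its minimum 0.3 ** 2 = 0.09 over the known points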
min_predictive_mean_at_known_points = 0.09
# fmt: off
expected = tf.reduce_mean(tf.reduce_max(tf.maximum(
min_predictive_mean_at_known_points - mvn_samples, 0.0
), axis=-1), axis=0)
# fmt: on
builder = BatchMonteCarloExpectedImprovement(10_000)
acq = builder.prepare_acquisition_function(
model, dataset=mk_dataset([[0.3], [0.5]], [[0.09], [0.25]])
)
npt.assert_allclose(acq(xs), expected, rtol=0.05)
@random_seed
def test_batch_monte_carlo_expected_improvement_updates_without_retracing() -> None:
known_query_points = tf.random.uniform([10, 2], dtype=tf.float64)
data = Dataset(known_query_points[8:], quadratic(known_query_points[8:]))
model = QuadraticMeanAndRBFKernelWithBatchSamplers(dataset=data)
builder = BatchMonteCarloExpectedImprovement(10_000)
ei = ExpectedImprovement().prepare_acquisition_function(model, dataset=data)
xs = tf.random.uniform([3, 5, 1, 2], dtype=tf.float64)
batch_ei = builder.prepare_acquisition_function(model, dataset=data)
assert batch_ei.__call__._get_tracing_count() == 0 # type: ignore
npt.assert_allclose(batch_ei(xs), ei(xs), rtol=0.06)
assert batch_ei.__call__._get_tracing_count() == 2 # type: ignore
data = Dataset(known_query_points, quadratic(known_query_points))
up_batch_ei = builder.update_acquisition_function(batch_ei, model, dataset=data)
ei = ExpectedImprovement().update_acquisition_function(ei, model, dataset=data)
assert up_batch_ei == batch_ei
assert batch_ei.__call__._get_tracing_count() == 2 # type: ignore
npt.assert_allclose(batch_ei(xs), ei(xs), rtol=0.06)
assert batch_ei.__call__._get_tracing_count() == 2 # type: ignore
def test_multiple_optimism_builder_builds_negative_lower_confidence_bound() -> None:
model = QuadraticMeanAndRBFKernel()
search_space = Box([0, 0], [1, 1])
acq_fn = MultipleOptimismNegativeLowerConfidenceBound(
search_space
).prepare_acquisition_function(model)
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [10, 5, 2])
expected = multiple_optimism_lower_confidence_bound(model, search_space.dimension)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
def test_multiple_optimism_builder_updates_without_retracing() -> None:
model = QuadraticMeanAndRBFKernel()
search_space = Box([0, 0], [1, 1])
builder = MultipleOptimismNegativeLowerConfidenceBound(search_space)
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [10, 5, 2])
expected = multiple_optimism_lower_confidence_bound(model, search_space.dimension)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
def test_multiple_optimism_builder_raises_when_update_with_wrong_function() -> None:
model = QuadraticMeanAndRBFKernel()
search_space = Box([0, 0], [1, 1])
builder = MultipleOptimismNegativeLowerConfidenceBound(search_space)
builder.prepare_acquisition_function(model)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.update_acquisition_function(lower_confidence_bound(model, 0.1), model)
@pytest.mark.parametrize("d", [0, -5])
def test_multiple_optimism_negative_confidence_bound_raises_for_non_positive_search_space_dim(
d: int,
) -> None:
with pytest.raises(tf.errors.InvalidArgumentError):
multiple_optimism_lower_confidence_bound(QuadraticMeanAndRBFKernel(), d)
def test_multiple_optimism_negative_confidence_bound_raises_for_changing_batch_size() -> None:
model = QuadraticMeanAndRBFKernel()
search_space = Box([0, 0], [1, 1])
acq_fn = MultipleOptimismNegativeLowerConfidenceBound(
search_space
).prepare_acquisition_function(model)
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [10, 5, 2])
acq_fn(query_at)
with pytest.raises(tf.errors.InvalidArgumentError):
query_at = tf.reshape(tf.linspace([[-10]], [[10]], 100), [5, 10, 2])
acq_fn(query_at)
@pytest.mark.parametrize("in_place_update", [False, True])
def test_make_positive(in_place_update: bool) -> None:
base = MagicMock()
base.prepare_acquisition_function.side_effect = lambda *args: lambda x: x
if in_place_update:
base.update_acquisition_function.side_effect = lambda f, *args: f
else:
base.update_acquisition_function.side_effect = lambda *args: lambda x: 3.0
builder: MakePositive[ProbabilisticModel] = MakePositive(base)
model = QuadraticMeanAndRBFKernel()
acq_fn = builder.prepare_acquisition_function(model)
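    # MakePositive maps the base acquisition f(x) = x through the softplus log(1 + exp(.))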
xs = tf.linspace([-1], [1], 10)
npt.assert_allclose(acq_fn(xs), tf.math.log(1 + tf.math.exp(xs)))
assert base.prepare_acquisition_function.call_count == 1
assert base.update_acquisition_function.call_count == 0
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert base.prepare_acquisition_function.call_count == 1
assert base.update_acquisition_function.call_count == 1
if in_place_update:
assert up_acq_fn is acq_fn
else:
npt.assert_allclose(up_acq_fn(xs), tf.math.log(1 + tf.math.exp(3.0)))
| 59,777 | 38.82545 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_active_learning.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Sequence
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.utilities import to_default_float
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, ShapeLike, random_seed, various_shapes
from tests.util.models.gpflow.models import (
GaussianProcess,
QuadraticMeanAndRBFKernel,
gpr_model,
vgp_model_bernoulli,
)
from tests.util.models.models import binary_line, fnc_2sin_x_over_3
from trieste.acquisition.function.active_learning import (
BayesianActiveLearningByDisagreement,
ExpectedFeasibility,
IntegratedVarianceReduction,
PredictiveVariance,
bayesian_active_learning_by_disagreement,
bichon_ranjan_criterion,
integrated_variance_reduction,
predictive_variance,
)
from trieste.data import Dataset
from trieste.models.gpflow import (
GaussianProcessRegression,
VariationalGaussianProcess,
build_vgp_classifier,
)
from trieste.objectives import Branin
from trieste.space import Box
from trieste.types import TensorType
from trieste.utils import DEFAULTS
def test_predictive_variance_builder_builds_predictive_variance() -> None:
model = QuadraticMeanAndRBFKernel()
acq_fn = PredictiveVariance().prepare_acquisition_function(model)
query_at = tf.linspace([[-10]], [[10]], 100)
_, covariance = model.predict_joint(query_at)
expected = tf.linalg.det(covariance)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
@pytest.mark.parametrize(
"at, acquisition_shape",
[
(tf.constant([[[1.0]]]), tf.constant([1, 1])),
(tf.linspace([[-10.0]], [[10.0]], 5), tf.constant([5, 1])),
(tf.constant([[[1.0, 1.0]]]), tf.constant([1, 1])),
(tf.linspace([[-10.0, -10.0]], [[10.0, 10.0]], 5), tf.constant([5, 1])),
],
)
def test_predictive_variance_returns_correct_shape(
at: TensorType, acquisition_shape: TensorType
) -> None:
model = QuadraticMeanAndRBFKernel()
acq_fn = PredictiveVariance().prepare_acquisition_function(model)
npt.assert_array_equal(acq_fn(at).shape, acquisition_shape)
@random_seed
@pytest.mark.parametrize(
"variance_scale, num_samples_per_point, rtol, atol",
[
(0.1, 10_000, 0.05, 1e-6),
(1.0, 50_000, 0.05, 1e-3),
(10.0, 100_000, 0.05, 1e-2),
(100.0, 150_000, 0.05, 1e-1),
],
)
def test_predictive_variance(
variance_scale: float,
num_samples_per_point: int,
rtol: float,
atol: float,
) -> None:
variance_scale = tf.constant(variance_scale, tf.float64)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
kernel = tfp.math.psd_kernels.MaternFiveHalves(variance_scale, length_scale=0.25)
model = GaussianProcess([Branin.objective], [kernel])
mean, variance = model.predict(xs)
samples = tfp.distributions.Normal(mean, tf.sqrt(variance)).sample(num_samples_per_point)
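    # empirical estimate of the marginal predictive variance at each query point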
predvar_approx = tf.math.reduce_variance(samples, axis=0)
detcov = predictive_variance(model, DEFAULTS.JITTER)
predvar = detcov(xs[..., None, :])
npt.assert_allclose(predvar, predvar_approx, rtol=rtol, atol=atol)
def test_predictive_variance_builder_updates_without_retracing() -> None:
model = QuadraticMeanAndRBFKernel()
builder = PredictiveVariance()
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn._get_tracing_count() == 0 # type: ignore
query_at = tf.linspace([[-10]], [[10]], 100)
expected = predictive_variance(model, DEFAULTS.JITTER)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("delta", [1, 2])
def test_expected_feasibility_builder_builds_acquisition_function(delta: int) -> None:
threshold = 1
alpha = 1
query_at = tf.linspace([[-10]], [[10]], 100)
model = QuadraticMeanAndRBFKernel()
acq_fn = ExpectedFeasibility(threshold, alpha, delta).prepare_acquisition_function(model)
expected = bichon_ranjan_criterion(model, threshold, alpha, delta)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
@pytest.mark.parametrize(
"at, acquisition_shape",
[
(tf.constant([[[1.0]]]), tf.constant([1, 1])),
(tf.linspace([[-10.0]], [[10.0]], 5), tf.constant([5, 1])),
(tf.constant([[[1.0, 1.0]]]), tf.constant([1, 1])),
(tf.linspace([[-10.0, -10.0]], [[10.0, 10.0]], 5), tf.constant([5, 1])),
],
)
@pytest.mark.parametrize("delta", [1, 2])
def test_expected_feasibility_returns_correct_shape(
at: TensorType, acquisition_shape: TensorType, delta: int
) -> None:
threshold = 1
alpha = 1
model = QuadraticMeanAndRBFKernel()
acq_fn = ExpectedFeasibility(threshold, alpha, delta).prepare_acquisition_function(model)
npt.assert_array_equal(acq_fn(at).shape, acquisition_shape)
@pytest.mark.parametrize("delta", [1, 2])
def test_expected_feasibility_builder_updates_without_retracing(delta: int) -> None:
threshold = 1
alpha = 1
model = QuadraticMeanAndRBFKernel()
builder = ExpectedFeasibility(threshold, alpha, delta)
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn._get_tracing_count() == 0 # type: ignore
query_at = tf.linspace([[-10]], [[10]], 100)
expected = bichon_ranjan_criterion(model, threshold, alpha, delta)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize("shape", various_shapes() - {()})
def test_expected_feasibility_builder_raises_on_non_scalar_threshold(
shape: ShapeLike,
) -> None:
threshold, alpha, delta = tf.ones(shape), tf.ones(shape), tf.ones(shape)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedFeasibility(threshold, 1, 1)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedFeasibility(1, alpha, 1)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedFeasibility(1, 1, delta)
@pytest.mark.parametrize("alpha", [0.0, -1.0])
def test_expected_feasibility_builder_raises_on_non_positive_alpha(alpha: float) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedFeasibility(1, alpha, 1)
@pytest.mark.parametrize("delta", [-1, 0, 1.5, 3])
def test_expected_feasibility_raises_for_invalid_delta(delta: int) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedFeasibility(1, 1, delta)
@pytest.mark.parametrize("delta", [1, 2])
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_expected_feasibility_raises_for_invalid_batch_size(at: TensorType, delta: int) -> None:
ef = bichon_ranjan_criterion(QuadraticMeanAndRBFKernel(), 1, 1, delta)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ef(at)
@pytest.mark.parametrize(
"threshold, at",
[
(0.0, tf.constant([[0.0]])),
(2.0, tf.constant([[1.0]])),
(-0.25, tf.constant([[-0.5]])),
],
)
@pytest.mark.parametrize("delta", [1, 2])
@pytest.mark.parametrize("alpha", [0.1, 1, 2])
def test_bichon_ranjan_criterion(threshold: float, at: tf.Tensor, alpha: float, delta: int) -> None:
model = QuadraticMeanAndRBFKernel()
actual = bichon_ranjan_criterion(model, threshold, alpha, delta)(at)
    # the approach is to sample from the model posterior and compute the expectation directly
mean, variance = model.predict(tf.squeeze(at, -2))
stdev = tf.sqrt(variance)
normal = tfp.distributions.Normal(mean, stdev)
samples = normal.sample(1000000)
expected = tf.reduce_mean(
tf.maximum(0, (alpha * stdev) ** delta - tf.abs(threshold - samples) ** delta)
)
npt.assert_allclose(actual, expected, rtol=0.01)
def test_integrated_variance_reduction() -> None:
x = to_default_float(tf.constant(np.arange(1, 7).reshape(-1, 1) / 8.0)) # shape: [6, 1]
y = fnc_2sin_x_over_3(x)
model6 = GaussianProcessRegression(gpr_model(x, y))
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
reduced_data = Dataset(x[:5, :], y[:5, :])
query_points = x[5:, :]
integration_points = tf.concat([0.37 * x, 1.7 * x], 0) # shape: [14, 1]
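    # model6 already contains the candidate query point, so its posterior variance over the
    # integration points is the target that the acquisition built from model5 should recover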
_, pred_var6 = model6.predict(integration_points)
acq_noweight = IntegratedVarianceReduction(integration_points=integration_points)
acq = IntegratedVarianceReduction(threshold=[0.5, 0.8], integration_points=integration_points)
acq_function = acq.prepare_acquisition_function(model=model5, dataset=reduced_data)
acq_function_noweight = acq_noweight.prepare_acquisition_function(
model=model5, dataset=reduced_data
)
acq_values = -acq_function(tf.expand_dims(query_points, axis=-2))
acq_values_noweight = -acq_function_noweight(tf.expand_dims(query_points, axis=-2))
# Weighted criterion is always smaller than non-weighted
np.testing.assert_array_less(acq_values, acq_values_noweight)
# Non-weighted variance integral should match the one with fully updated model
np.testing.assert_allclose(tf.reduce_mean(pred_var6), acq_values_noweight[0], atol=1e-5)
def test_integrated_variance_reduction_works_with_batch() -> None:
x = to_default_float(tf.constant(np.arange(1, 8).reshape(-1, 1) / 8.0)) # shape: [7, 1]
y = fnc_2sin_x_over_3(x)
model7 = GaussianProcessRegression(gpr_model(x, y))
model5 = GaussianProcessRegression(gpr_model(x[:5, :], y[:5, :]))
reduced_data = Dataset(x[:5, :], y[:5, :])
query_points = tf.expand_dims(x[5:, :], axis=0) # one batch of 2
integration_points = tf.concat([0.37 * x, 1.7 * x], 0) # shape: [14, 1]
_, pred_var7 = model7.predict(integration_points)
acq = IntegratedVarianceReduction(integration_points=integration_points)
acq_function = acq.prepare_acquisition_function(model=model5, dataset=reduced_data)
acq_values = -acq_function(query_points)
# Variance integral should match the one with fully updated model
np.testing.assert_allclose(tf.reduce_mean(pred_var7), acq_values, atol=1e-5)
@pytest.mark.parametrize("integration_points", [tf.zeros([0, 2]), tf.zeros([1, 2, 3])])
def test_integrated_variance_reduction_raises_for_invalid_integration_points(
integration_points: tf.Tensor,
) -> None:
threshold = [1.0, 2.0]
query_at = tf.zeros([1, 1, 1])
x = to_default_float(tf.constant(np.arange(1, 8).reshape(-1, 1)))
y = fnc_2sin_x_over_3(x)
model = GaussianProcessRegression(gpr_model(x, y))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
integrated_variance_reduction(model, integration_points, threshold)(query_at)
@pytest.mark.parametrize("threshold", [[1.0, 2.0, 3.0], tf.zeros([2, 2]), [2.0, 1.0]])
def test_integrated_variance_reduction_raises_for_invalid_threshold(
threshold: tf.Tensor | Sequence[float],
) -> None:
integration_points = to_default_float(tf.zeros([5, 1]))
query_at = tf.zeros([1, 1, 1])
x = to_default_float(tf.constant(np.arange(1, 8).reshape(-1, 1)))
y = fnc_2sin_x_over_3(x)
model = GaussianProcessRegression(gpr_model(x, y))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
integrated_variance_reduction(model, integration_points, threshold)(query_at)
def test_integrated_variance_reduction_builds_acquisition_function() -> None:
threshold = [1.0, 2.0]
integration_points = to_default_float(tf.zeros([5, 1]))
query_at = to_default_float(tf.linspace([[-10]], [[10]], 100))
x = to_default_float(tf.constant(np.arange(1, 8).reshape(-1, 1) / 8.0)) # shape: [7, 1]
y = fnc_2sin_x_over_3(x)
model = GaussianProcessRegression(gpr_model(x, y))
acq_fn = IntegratedVarianceReduction(
integration_points, threshold
).prepare_acquisition_function(model)
expected = integrated_variance_reduction(model, integration_points, threshold)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
@pytest.mark.parametrize(
"at",
[
tf.zeros([3, 2]),
        tf.zeros([3]),
],
)
def test_integrated_variance_reduction_raises_for_invalid_batch_size(at: TensorType) -> None:
threshold = [1.0, 2.0]
integration_points = to_default_float(tf.zeros([3, 1]))
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = GaussianProcessRegression(gpr_model(x, y))
acq_fn = IntegratedVarianceReduction(
integration_points, threshold
).prepare_acquisition_function(model)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
acq_fn(to_default_float(at))
def test_integrated_variance_reduction_builder_updates_without_retracing() -> None:
threshold = [1.0, 2.0]
integration_points = to_default_float(tf.zeros([3, 1]))
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = GaussianProcessRegression(gpr_model(x, y))
builder = IntegratedVarianceReduction(integration_points, threshold)
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
query_at = tf.linspace([[-10]], [[10]], 100)
expected = integrated_variance_reduction(model, integration_points, threshold)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
@pytest.mark.parametrize(
"at",
[
(tf.constant([[[-1.0]]])),
(tf.constant([[-0.5]])),
(tf.constant([[0.0]])),
(tf.constant([[0.5]])),
(tf.constant([[1.0]])),
],
)
def test_bayesian_active_learning_by_disagreement_is_correct(at: tf.Tensor) -> None:
""" "
We perform an MC check as in Section 5 of Houlsby 2011 paper. We check only the
2nd, more complicated term.
"""
search_space = Box([-1], [1])
x = to_default_float(tf.constant(np.linspace(-1, 1, 8).reshape(-1, 1)))
y = to_default_float(tf.reshape(binary_line(x), [-1, 1]))
model = VariationalGaussianProcess(
build_vgp_classifier(Dataset(x, y), search_space, noise_free=True)
)
mean, var = model.predict(to_default_float(at))
def entropy(p: TensorType) -> TensorType:
return -p * tf.math.log(p + DEFAULTS.JITTER) - (1 - p) * tf.math.log(
1 - p + DEFAULTS.JITTER
)
    # take the actual value but subtract term 1, computed here the same way as in the method
normal = tfp.distributions.Normal(to_default_float(0), to_default_float(1))
actual_term1 = entropy(normal.cdf((mean / tf.sqrt(var + 1))))
actual_term2 = actual_term1 - bayesian_active_learning_by_disagreement(model, DEFAULTS.JITTER)(
[to_default_float(at)]
)
# MC based term 2, 1st and 2nd approximation
samples = tfp.distributions.Normal(
to_default_float(mean), to_default_float(tf.sqrt(var))
).sample(100000)
MC_term21 = tf.reduce_mean(entropy(normal.cdf(samples)))
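    # second form: Houlsby et al.'s squared-exponential approximation of the binary entropy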
MC_term22 = tf.reduce_mean(np.exp(-(samples**2) / np.pi * np.log(2)))
npt.assert_allclose(actual_term2, MC_term21, rtol=0.05, atol=0.05)
npt.assert_allclose(actual_term2, MC_term22, rtol=0.05, atol=0.05)
def test_bayesian_active_learning_by_disagreement_builder_builds_acquisition_function() -> None:
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = VariationalGaussianProcess(vgp_model_bernoulli(x, y))
acq_fn = BayesianActiveLearningByDisagreement().prepare_acquisition_function(model)
query_at = tf.linspace([[-10]], [[10]], 100)
expected = bayesian_active_learning_by_disagreement(model, DEFAULTS.JITTER)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
@pytest.mark.parametrize("jitter", [0.0, -1.0])
def test_bayesian_active_learning_by_disagreement_raise_on_non_positive_jitter(
jitter: float,
) -> None:
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = VariationalGaussianProcess(vgp_model_bernoulli(x, y))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BayesianActiveLearningByDisagreement(jitter).prepare_acquisition_function(model)
@pytest.mark.parametrize(
"x, at, acquisition_shape",
[
(tf.zeros([1, 1]), tf.constant([[[1.0]]]), tf.constant([1, 1])),
(tf.zeros([1, 1]), tf.linspace([[-10.0]], [[10.0]], 5), tf.constant([5, 1])),
(tf.zeros([1, 2]), tf.constant([[[1.0, 1.0]]]), tf.constant([1, 1])),
(tf.zeros([1, 2]), tf.linspace([[-10.0, -10.0]], [[10.0, 10.0]], 5), tf.constant([5, 1])),
],
)
def test_bayesian_active_learning_by_disagreement_returns_correct_shape(
x: TensorType, at: TensorType, acquisition_shape: TensorType
) -> None:
x = to_default_float(x)
y = to_default_float(tf.zeros([1, 1]))
model = VariationalGaussianProcess(vgp_model_bernoulli(x, y))
acq_fn = BayesianActiveLearningByDisagreement().prepare_acquisition_function(model)
npt.assert_array_equal(acq_fn(to_default_float(at)).shape, acquisition_shape)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_bayesian_active_learning_by_disagreement_raises_for_invalid_batch_size(
at: TensorType,
) -> None:
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = VariationalGaussianProcess(vgp_model_bernoulli(x, y))
acq_fn = BayesianActiveLearningByDisagreement().prepare_acquisition_function(model)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
acq_fn(to_default_float(at))
def test_bayesian_active_learning_by_disagreement_builder_updates_without_retracing() -> None:
x = to_default_float(tf.zeros([1, 1]))
y = to_default_float(tf.zeros([1, 1]))
model = VariationalGaussianProcess(vgp_model_bernoulli(x, y))
builder = BayesianActiveLearningByDisagreement()
acq_fn = builder.prepare_acquisition_function(model)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
query_at = tf.linspace([[-10]], [[10]], 100)
expected = bayesian_active_learning_by_disagreement(model, DEFAULTS.JITTER)(query_at)
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
up_acq_fn = builder.update_acquisition_function(acq_fn, model)
assert up_acq_fn == acq_fn
npt.assert_array_almost_equal(acq_fn(query_at), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
| 19,947 | 37.584139 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/function/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/unit/acquisition/function/test_utils.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from trieste.acquisition.function.utils import MultivariateNormalCDF
from trieste.types import TensorType
tfd = tfp.distributions
@pytest.mark.parametrize("num_sobol", [-10, -1, 0])
@pytest.mark.parametrize("dim", [2, 3, 5])
def test_make_mvn_cdf_raises_exception_for_incorrect_sample_size(
num_sobol: int,
dim: int,
) -> None:
    # Set data type
dtype = tf.float64
with pytest.raises(tf.errors.InvalidArgumentError):
MultivariateNormalCDF(sample_size=num_sobol, dim=dim, dtype=dtype)
@pytest.mark.parametrize("num_sobol", [1, 10, 100])
@pytest.mark.parametrize("dim", [-10, -1, 0])
def test_make_mvn_cdf_raises_exception_for_incorrect_dimension(
num_sobol: int,
dim: int,
) -> None:
    # Set data type
dtype = tf.float64
with pytest.raises(tf.errors.InvalidArgumentError):
MultivariateNormalCDF(sample_size=num_sobol, dim=dim, dtype=dtype)
def test_make_mvn_cdf_raises_exception_for_incorrect_batch_size(
num_sobol: int = 100,
dim: int = 5,
) -> None:
    # Set data type
dtype = tf.float64
# Set x, mean and covariance
x = tf.zeros((0, dim), dtype=dtype)
mean = tf.zeros((0, dim), dtype=dtype)
cov = tf.eye(dim, dtype=dtype)[None, :, :][:0, :, :]
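    # a leading batch dimension of size zero is invalid and should be rejected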
with pytest.raises(tf.errors.InvalidArgumentError):
MultivariateNormalCDF(sample_size=num_sobol, dim=dim, dtype=dtype)(x=x, mean=mean, cov=cov)
@pytest.mark.parametrize("num_sobol", [200])
@pytest.mark.parametrize("dim", [1, 2, 3, 5])
@pytest.mark.parametrize("batch_size", [1, 2, 3])
def test_make_genz_cdf_matches_naive_monte_carlo_on_random_tasks(
num_sobol: int,
dim: int,
batch_size: int,
) -> None:
def mc_mvn_cdf(
x: TensorType,
mean: TensorType,
cov: TensorType,
num_samples: int = int(1e6),
) -> TensorType:
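        # naive Monte Carlo estimate of P(X <= x) for X ~ N(mean, cov)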
# Define multivariate normal
normal = tfd.MultivariateNormalTriL(
loc=mean,
scale_tril=tf.linalg.cholesky(cov),
)
# Draw samples
samples = normal.sample(sample_shape=[num_samples])
# Check shapes of input tensors
tf.debugging.assert_shapes(
[
(x, ("B", "Q")),
(mean, ("B", "Q")),
(cov, ("B", "Q", "Q")),
(samples, ("S", "B", "Q")),
]
)
# Compute Monte Carlo estimate
indicator = tf.reduce_all(tf.math.less(samples, x[None, ...]), axis=-1)
        cdf_estimate = tf.reduce_mean(tf.cast(indicator, tf.float64), axis=0)
        return cdf_estimate
# Seed sampling for reproducible testing
tf.random.set_seed(0)
# Set data type and jitter
dtype = tf.float64
jitter = 1e-6
# Draw x randomly
x = tf.random.normal((batch_size, dim), dtype=dtype) / dim**0.5
# Draw mean randomly
mean = tf.random.normal((batch_size, dim), dtype=dtype) / dim**0.5
# Draw covariance randomly
cov = tf.random.normal((batch_size, dim, dim), dtype=dtype) / dim**0.5
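    # make it symmetric positive definite via A^T A, with jitter added to the diagonal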
cov = tf.matmul(cov, cov, transpose_a=True) + jitter * tf.eye(dim, dtype=dtype)[None, :, :]
# Set up Genz approximation and direct Monte Carlo estimate
genz_cdf = MultivariateNormalCDF(sample_size=num_sobol, dim=dim, dtype=dtype)(
x=x, mean=mean, cov=cov
)
mc_cdf = mc_mvn_cdf(x=x, mean=mean, cov=cov)
# Check that the Genz and direct Monte Carlo estimates agree
tf.debugging.assert_near(mc_cdf, genz_cdf, rtol=3e-1)
| 4,186 | 30.719697 | 99 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/multi_objective/test_function.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import itertools
import math
from typing import Callable, Mapping, Optional, Sequence, cast
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.acquisition.sampler import (
BatchReparametrizationSampler,
PseudoBatchReparametrizationSampler,
)
from tests.util.misc import (
TF_DEBUGGING_ERROR_TYPES,
empty_dataset,
quadratic,
raise_exc,
random_seed,
)
from tests.util.models.gpflow.models import (
GaussianProcess,
GaussianProcessWithBatchSamplers,
HasReparamSampler,
QuadraticMeanAndRBFKernel,
)
from trieste.acquisition import (
AcquisitionFunction,
AcquisitionFunctionBuilder,
ProbabilityOfFeasibility,
)
from trieste.acquisition.function.multi_objective import (
HIPPO,
BatchMonteCarloExpectedHypervolumeImprovement,
ExpectedConstrainedHypervolumeImprovement,
ExpectedHypervolumeImprovement,
batch_ehvi,
expected_hv_improvement,
hippo_penalizer,
)
from trieste.acquisition.interface import GreedyAcquisitionFunctionBuilder
from trieste.acquisition.multi_objective.pareto import Pareto, get_reference_point
from trieste.acquisition.multi_objective.partition import (
ExactPartition2dNonDominated,
prepare_default_non_dominated_partition_bounds,
)
from trieste.data import Dataset
from trieste.models import ProbabilisticModel, ProbabilisticModelType, ReparametrizationSampler
from trieste.types import Tag, TensorType
from trieste.utils import DEFAULTS
# tags
FOO: Tag = "foo"
NA: Tag = ""
def _mo_test_model(
num_obj: int, *kernel_amplitudes: float | TensorType | None, with_reparam_sampler: bool = True
) -> GaussianProcess:
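    # a simple multi-objective test model: one independent GP per objective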
means = [quadratic, lambda x: tf.reduce_sum(x, axis=-1, keepdims=True), quadratic]
kernels = [tfp.math.psd_kernels.ExponentiatedQuadratic(k_amp) for k_amp in kernel_amplitudes]
if with_reparam_sampler:
return GaussianProcessWithBatchSamplers(means[:num_obj], kernels[:num_obj])
else:
return GaussianProcess(means[:num_obj], kernels[:num_obj])
class _Certainty(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: tf.ones((tf.shape(x)[0], 1), dtype=tf.float64)
def test_ehvi_builder_raises_for_empty_data() -> None:
num_obj = 3
dataset = empty_dataset([2], [num_obj])
model = QuadraticMeanAndRBFKernel()
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedHypervolumeImprovement().prepare_acquisition_function(model, dataset=dataset)
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedHypervolumeImprovement().prepare_acquisition_function(model, dataset)
def test_ehvi_builder_builds_expected_hv_improvement_using_pareto_from_model() -> None:
num_obj = 2
train_x = tf.constant([[-2.0], [-1.5], [-1.0], [0.0], [0.5], [1.0], [1.5], [2.0]])
dataset = Dataset(
train_x,
tf.tile(
tf.constant([[4.1], [0.9], [1.2], [0.1], [-8.8], [1.1], [2.1], [3.9]]), [1, num_obj]
),
)
model = _mo_test_model(num_obj, *[10, 10] * num_obj)
acq_fn = ExpectedHypervolumeImprovement().prepare_acquisition_function(model, dataset=dataset)
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
_partition_bounds = ExactPartition2dNonDominated(_prt.front).partition_bounds(
tf.constant([-1e10] * 2), get_reference_point(_prt.front)
)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
expected = expected_hv_improvement(model, _partition_bounds)(xs)
npt.assert_allclose(acq_fn(xs), expected)
def test_ehvi_builder_updates_expected_hv_improvement_using_pareto_from_model() -> None:
num_obj = 2
train_x = tf.constant([[-2.0], [-1.5], [-1.0], [0.0], [0.5], [1.0], [1.5], [2.0]])
dataset = Dataset(
train_x,
tf.tile(
tf.constant([[4.1], [0.9], [1.2], [0.1], [-8.8], [1.1], [2.1], [3.9]]), [1, num_obj]
),
)
partial_dataset = Dataset(dataset.query_points[:4], dataset.observations[:4])
xs = tf.linspace([[-10.0]], [[10.0]], 100)
model = _mo_test_model(num_obj, *[10, 10] * num_obj)
acq_fn = ExpectedHypervolumeImprovement().prepare_acquisition_function(
model, dataset=partial_dataset
)
assert acq_fn.__call__._get_tracing_count() == 0 # type: ignore
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
_partition_bounds = ExactPartition2dNonDominated(_prt.front).partition_bounds(
tf.constant([-1e10] * 2), get_reference_point(_prt.front)
)
expected = expected_hv_improvement(model, _partition_bounds)(xs)
npt.assert_allclose(acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
# update the acquisition function, evaluate it, and check that it hasn't been retraced
updated_acq_fn = ExpectedHypervolumeImprovement().update_acquisition_function(
acq_fn,
model,
dataset=dataset,
)
assert updated_acq_fn == acq_fn
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
_partition_bounds = ExactPartition2dNonDominated(_prt.front).partition_bounds(
tf.constant([-1e10] * 2), get_reference_point(_prt.front)
)
expected = expected_hv_improvement(model, _partition_bounds)(xs)
npt.assert_allclose(acq_fn(xs), expected)
assert acq_fn.__call__._get_tracing_count() == 1 # type: ignore
class CustomGetReferencePoint:
def __call__(
self,
observations: TensorType,
) -> TensorType:
return tf.reduce_max(observations, -2)
def custom_get_ref_point(
observations: TensorType,
) -> TensorType:
return tf.reduce_min(observations, -2)
@pytest.mark.parametrize(
"specify_ref_points",
[
pytest.param(
get_reference_point,
id="func_input",
),
pytest.param(
[8, 2],
id="list_input",
),
pytest.param(
(8, 2),
id="tuple_input",
),
pytest.param(
tf.constant([8, 2]),
id="tensor_input",
),
pytest.param(
custom_get_ref_point,
id="callable_func_input",
),
pytest.param(
CustomGetReferencePoint(),
id="callable_instance_input",
),
],
)
def test_ehvi_builder_builds_expected_hv_improvement_based_on_specified_ref_points(
specify_ref_points: TensorType | Sequence[float] | Callable[..., TensorType]
) -> None:
num_obj = 2
train_x = tf.constant([[-2.0], [0.0]])
dataset = Dataset(
train_x,
tf.tile(tf.constant([[4.1], [2.2]]), [1, num_obj]),
)
model = _mo_test_model(num_obj, *[10, 10] * num_obj)
acq_fn = ExpectedHypervolumeImprovement(
reference_point_spec=specify_ref_points
).prepare_acquisition_function(model, dataset)
    # manually compute the reference point outside the builder
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
if callable(specify_ref_points):
_ref_point = specify_ref_points(_prt.front)
else:
_ref_point = tf.convert_to_tensor(specify_ref_points)
_ref_point = tf.cast(_ref_point, dtype=dataset.observations.dtype)
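    # keep only the front points that lie at or below the reference point in every objective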
screened_front = _prt.front[tf.reduce_all(_prt.front <= _ref_point, -1)]
_partition_bounds = prepare_default_non_dominated_partition_bounds(_ref_point, screened_front)
xs = tf.linspace([[-10.0]], [[10.0]], 10)
expected = expected_hv_improvement(model, _partition_bounds)(xs)
npt.assert_allclose(acq_fn(xs), expected)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_ehvi_raises_for_invalid_batch_size(at: TensorType) -> None:
num_obj = 2
train_x = tf.constant([[-2.0], [-1.5], [-1.0], [0.0], [0.5], [1.0], [1.5], [2.0]])
model = _mo_test_model(num_obj, *[None] * num_obj)
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
_partition_bounds = ExactPartition2dNonDominated(_prt.front).partition_bounds(
tf.constant([-math.inf] * 2), get_reference_point(_prt.front)
)
ehvi = expected_hv_improvement(model, _partition_bounds)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ehvi(at)
@random_seed
@pytest.mark.parametrize(
"input_dim, num_samples_per_point, existing_observations, obj_num, variance_scale",
[
pytest.param(
1,
100_000,
tf.constant([[0.3, 0.2], [0.2, 0.22], [0.1, 0.25], [0.0, 0.3]]),
2,
1.0,
id="1d_input_2obj_gp_var_1",
),
pytest.param(
1,
200_000,
tf.constant([[0.3, 0.2], [0.2, 0.22], [0.1, 0.25], [0.0, 0.3]]),
2,
2.0,
id="1d_input_2obj_gp_var_2",
),
pytest.param(2, 50_000, tf.constant([[0.0, 0.0]]), 2, 1.0, id="2d_input_2obj_gp_var_2"),
pytest.param(
3,
50_000,
tf.constant([[2.0, 1.0], [0.8, 3.0]]),
2,
1.0,
id="3d_input_2obj_gp_var_1",
),
pytest.param(
4,
100_000,
tf.constant([[3.0, 2.0, 1.0], [1.1, 2.0, 3.0]]),
3,
1.0,
id="4d_input_3obj_gp_var_1",
),
],
)
def test_expected_hypervolume_improvement_matches_monte_carlo(
input_dim: int,
num_samples_per_point: int,
existing_observations: tf.Tensor,
obj_num: int,
variance_scale: float,
) -> None:
    # note: the number of test points grows exponentially with the input dimension
    data_num_seg_per_dim = 2  # number of grid points per input dimension
N = data_num_seg_per_dim**input_dim
xs = tf.convert_to_tensor(
list(itertools.product(*[list(tf.linspace(-1, 1, data_num_seg_per_dim))] * input_dim))
)
xs = tf.cast(xs, dtype=existing_observations.dtype)
model = _mo_test_model(obj_num, *[variance_scale] * obj_num)
mean, variance = model.predict(xs)
predict_samples = tfp.distributions.Normal(mean, tf.sqrt(variance)).sample(
num_samples_per_point # [f_samples, batch_size, obj_num]
)
_pareto = Pareto(existing_observations)
ref_pt = get_reference_point(_pareto.front)
lb_points, ub_points = prepare_default_non_dominated_partition_bounds(ref_pt, _pareto.front)
    # compute the Monte Carlo approximation of EHVI
splus_valid = tf.reduce_all(
tf.tile(ub_points[tf.newaxis, :, tf.newaxis, :], [num_samples_per_point, 1, N, 1])
> tf.expand_dims(predict_samples, axis=1),
        axis=-1,  # whether each posterior sample can contribute to the HVI in each cell
) # [f_samples, num_cells, B]
splus_idx = tf.expand_dims(tf.cast(splus_valid, dtype=ub_points.dtype), -1)
splus_lb = tf.tile(lb_points[tf.newaxis, :, tf.newaxis, :], [num_samples_per_point, 1, N, 1])
splus_lb = tf.maximum( # max of lower bounds and predict_samples
splus_lb, tf.expand_dims(predict_samples, 1)
)
splus_ub = tf.tile(ub_points[tf.newaxis, :, tf.newaxis, :], [num_samples_per_point, 1, N, 1])
splus = tf.concat( # concatenate validity labels and possible improvements
[splus_idx, splus_ub - splus_lb], axis=-1
)
# calculate hyper-volume improvement over the non-dominated cells
ehvi_approx = tf.transpose(tf.reduce_sum(tf.reduce_prod(splus, axis=-1), axis=1, keepdims=True))
ehvi_approx = tf.reduce_mean(ehvi_approx, axis=-1) # average through mc sample
ehvi = expected_hv_improvement(model, (lb_points, ub_points))(tf.expand_dims(xs, -2))
npt.assert_allclose(ehvi, ehvi_approx, rtol=0.01, atol=0.01)
def test_batch_monte_carlo_expected_hypervolume_improvement_builder_raises_for_empty_data() -> None:
num_obj = 3
dataset = empty_dataset([2], [num_obj])
model = cast(GaussianProcessWithBatchSamplers, _mo_test_model(2, *[1.0] * 2))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BatchMonteCarloExpectedHypervolumeImprovement(sample_size=100).prepare_acquisition_function(
model,
dataset=dataset,
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BatchMonteCarloExpectedHypervolumeImprovement(sample_size=100).prepare_acquisition_function(
model,
)
@pytest.mark.parametrize("sample_size", [-2, 0])
def test_batch_monte_carlo_expected_hypervolume_improvement_raises_for_invalid_sample_size(
sample_size: int,
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BatchMonteCarloExpectedHypervolumeImprovement(sample_size)
@pytest.mark.parametrize(
"specify_ref_points, sample_size",
[
pytest.param(
get_reference_point,
100_000,
id="func_input",
),
pytest.param(
[8, 2],
100_000,
id="list_input",
),
pytest.param(
(8, 2),
100_000,
id="tuple_input",
),
pytest.param(
tf.constant([8, 2]),
100_000,
id="tensor_input",
),
pytest.param(
custom_get_ref_point,
100_000,
id="callable_func_input",
),
pytest.param(
CustomGetReferencePoint(),
100_000,
id="callable_instance_input",
),
],
)
def test_batch_monte_carlo_expected_hypervolume_improvement_based_on_specified_ref_points(
specify_ref_points: TensorType | Sequence[float] | Callable[..., TensorType],
sample_size: int,
) -> None:
num_obj = 2
train_x = tf.constant([[-2.0], [0.0]])
dataset = Dataset(
train_x,
tf.tile(tf.constant([[4.1], [2.2]]), [1, num_obj]),
)
model = _mo_test_model(num_obj, *[1, 1] * num_obj)
assert isinstance(model, HasReparamSampler)
acq_fn = BatchMonteCarloExpectedHypervolumeImprovement(
sample_size, reference_point_spec=specify_ref_points
).prepare_acquisition_function(model, dataset)
    # manually compute the reference point outside the builder
model_pred_observation = model.predict(train_x)[0]
_prt = Pareto(model_pred_observation)
if callable(specify_ref_points):
_ref_point = tf.cast(specify_ref_points(_prt.front), model_pred_observation.dtype)
else:
_ref_point = tf.convert_to_tensor(specify_ref_points)
_ref_point = tf.cast(_ref_point, dtype=dataset.observations.dtype)
screened_front = _prt.front[tf.reduce_all(_prt.front <= _ref_point, -1)]
_partition_bounds = prepare_default_non_dominated_partition_bounds(_ref_point, screened_front)
xs = tf.linspace([[-10.0]], [[10.0]], 10)
sampler = BatchReparametrizationSampler(sample_size, model)
expected = batch_ehvi(
sampler, # type: ignore[arg-type]
sampler_jitter=DEFAULTS.JITTER,
partition_bounds=_partition_bounds,
)(xs)
npt.assert_allclose(acq_fn(xs), expected, rtol=0.01, atol=0.02)
def test_batch_monte_carlo_expected_hypervolume_improvement_raises_for_invalid_jitter() -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
BatchMonteCarloExpectedHypervolumeImprovement(100, jitter=-1.0)
def test_batch_monte_carlo_ehvi_raises_for_model_without_reparam_sampler() -> None:
model = _mo_test_model(2, *[1.0] * 2, with_reparam_sampler=False)
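    # this model deliberately lacks a reparametrization sampler, which qEHVI requires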
training_input = tf.constant([[0.3], [0.22], [0.1], [0.35]])
    mean, _ = model.predict(training_input)  # generate mean predictions to build the Pareto front
_model_based_tr_dataset = Dataset(training_input, mean)
qehvi_builder = BatchMonteCarloExpectedHypervolumeImprovement(sample_size=10)
with pytest.raises(ValueError):
qehvi_builder.prepare_acquisition_function(
model, dataset=_model_based_tr_dataset # type: ignore
)
@random_seed
@pytest.mark.parametrize(
"input_dim, num_samples_per_point, training_input, obj_num, variance_scale",
[
pytest.param(
1,
50_000,
tf.constant([[0.3], [0.22], [0.1], [0.35]]),
2,
1.0,
id="1d_input_2obj_model_var_1_q_1",
),
pytest.param(
1,
50_000,
tf.constant([[0.3], [0.22], [0.1], [0.35]]),
2,
2.0,
id="1d_input_2obj_model_var_2_q_1",
),
pytest.param(
2,
50_000,
tf.constant([[0.0, 0.0], [0.2, 0.5]]),
2,
1.0,
id="2d_input_2obj_model_var_1_q_1",
),
pytest.param(
3,
25_000,
tf.constant([[0.0, 0.0, 0.2], [-0.2, 0.5, -0.1], [0.2, -0.5, 0.2]]),
3,
1.0,
id="3d_input_3obj_model_var_1_q_1",
),
],
)
def test_batch_monte_carlo_expected_hypervolume_improvement_can_reproduce_ehvi(
input_dim: int,
num_samples_per_point: int,
training_input: tf.Tensor,
obj_num: int,
variance_scale: float,
) -> None:
    data_num_seg_per_dim = 10  # number of grid points per input dimension
model = cast(
GaussianProcessWithBatchSamplers, _mo_test_model(obj_num, *[variance_scale] * obj_num)
)
    mean, _ = model.predict(training_input)  # generate mean predictions to build the Pareto front
_model_based_tr_dataset = Dataset(training_input, mean)
_model_based_pareto = Pareto(mean)
_reference_pt = get_reference_point(_model_based_pareto.front)
_partition_bounds = prepare_default_non_dominated_partition_bounds(
_reference_pt, _model_based_pareto.front
)
qehvi_builder = BatchMonteCarloExpectedHypervolumeImprovement(sample_size=num_samples_per_point)
qehvi_acq = qehvi_builder.prepare_acquisition_function(model, dataset=_model_based_tr_dataset)
ehvi_acq = expected_hv_improvement(model, _partition_bounds)
test_xs = tf.convert_to_tensor(
list(itertools.product(*[list(tf.linspace(-1, 1, data_num_seg_per_dim))] * input_dim)),
dtype=training_input.dtype,
) # [test_num, input_dim]
test_xs = tf.expand_dims(test_xs, -2) # add Batch dim: q=1
npt.assert_allclose(ehvi_acq(test_xs), qehvi_acq(test_xs), rtol=1e-2, atol=1e-2)
@random_seed
@pytest.mark.parametrize(
"test_input, obj_samples, pareto_front_obs, reference_point, expected_output",
[
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-6.5, -4.5], [-7.0, -4.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[1.75]]),
id="q_2, both points contribute",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-6.5, -4.5], [-6.0, -4.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[1.5]]),
id="q_2, only 1 point contributes",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-2.0, -2.0], [0.0, -0.1]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[0.0]]),
id="q_2, neither contributes",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-6.5, -4.5], [-9.0, -2.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[2.0]]),
id="obj_2_q_2, test input better than current-best first objective",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-6.5, -4.5], [-6.0, -6.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[8.0]]),
id="obj_2_q_2, test input better than current best second objective",
),
pytest.param(
tf.zeros(shape=(1, 3, 1)),
tf.constant([[[-6.5, -4.5], [-9.0, -2.0], [-7.0, -4.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[2.25]]),
id="obj_2_q_3, all points contribute",
),
pytest.param(
tf.zeros(shape=(1, 3, 1)),
tf.constant([[[-6.5, -4.5], [-9.0, -2.0], [-7.0, -5.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[3.5]]),
id="obj_2_q_3, not all points contribute",
),
pytest.param(
tf.zeros(shape=(1, 3, 1)),
tf.constant([[[-0.0, -4.5], [-1.0, -2.0], [-3.0, -0.0]]]),
tf.constant([[-4.0, -5.0], [-5.0, -5.0], [-8.5, -3.5], [-8.5, -3.0], [-9.0, -1.0]]),
tf.constant([0.0, 0.0]),
tf.constant([[0.0]]),
id="obj_2_q_3, none contribute",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-1.0, -1.0, -1.0], [-2.0, -2.0, -2.0]]]),
tf.constant([[-4.0, -2.0, -3.0], [-3.0, -5.0, -1.0], [-2.0, -4.0, -2.0]]),
tf.constant([1.0, 1.0, 1.0]),
tf.constant([[0.0]]),
id="obj_3_q_2, none contribute",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant([[[-1.0, -2.0, -6.0], [-1.0, -3.0, -4.0]]]),
tf.constant([[-4.0, -2.0, -3.0], [-3.0, -5.0, -1.0], [-2.0, -4.0, -2.0]]),
tf.constant([1.0, 1.0, 1.0]),
tf.constant([[22.0]]),
id="obj_3_q_2, all points contribute",
),
pytest.param(
tf.zeros(shape=(1, 2, 1)),
tf.constant(
[[[-2.0, -3.0, -7.0], [-2.0, -4.0, -5.0]], [[-1.0, -2.0, -6.0], [-1.0, -3.0, -4.0]]]
),
tf.constant([[-4.0, -2.0, -3.0], [-3.0, -5.0, -1.0], [-2.0, -4.0, -2.0]]),
tf.constant([1.0, 1.0, 1.0]),
tf.constant([[41.0]]),
id="obj_3_q_2, mc sample size=2",
),
],
)
def test_batch_monte_carlo_expected_hypervolume_improvement_utility_on_specified_samples(
test_input: TensorType,
obj_samples: TensorType,
pareto_front_obs: TensorType,
reference_point: TensorType,
expected_output: TensorType,
) -> None:
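    # fixed samples go through a pseudo sampler so the expected values can be derived by hand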
npt.assert_allclose(
batch_ehvi(
cast(
ReparametrizationSampler[ProbabilisticModel],
PseudoBatchReparametrizationSampler(obj_samples),
),
sampler_jitter=DEFAULTS.JITTER,
partition_bounds=prepare_default_non_dominated_partition_bounds(
reference_point, Pareto(pareto_front_obs).front
),
)(test_input),
expected_output,
rtol=1e-5,
atol=1e-5,
)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_expected_constrained_hypervolume_improvement_raises_for_invalid_batch_size(
at: TensorType,
) -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
builder = ExpectedConstrainedHypervolumeImprovement(NA, pof, tf.constant(0.5))
initial_query_points = tf.constant([[-1.0]])
initial_objective_function_values = tf.constant([[1.0, 1.0]])
data = {NA: Dataset(initial_query_points, initial_objective_function_values)}
echvi = builder.prepare_acquisition_function({NA: QuadraticMeanAndRBFKernel()}, datasets=data)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
echvi(at)
def test_expected_constrained_hypervolume_improvement_can_reproduce_ehvi() -> None:
num_obj = 2
train_x = tf.constant(
[[-2.0], [-1.5], [-1.0], [0.0], [0.5], [1.0], [1.5], [2.0]], dtype=tf.float64
)
obj_model = _mo_test_model(num_obj, *[None] * num_obj)
model_pred_observation = obj_model.predict(train_x)[0]
data = {FOO: Dataset(train_x[:5], model_pred_observation[:5])}
models_ = {FOO: obj_model}
builder = ExpectedConstrainedHypervolumeImprovement(
FOO,
_Certainty(),
0,
reference_point_spec=get_reference_point(Pareto(data[FOO].observations).front),
)
echvi = builder.prepare_acquisition_function(models_, datasets=data)
ehvi = (
ExpectedHypervolumeImprovement()
.using(FOO)
.prepare_acquisition_function(models_, datasets=data)
)
at = tf.constant([[[-0.1]], [[1.23]], [[-6.78]]], dtype=tf.float64)
npt.assert_allclose(echvi(at), ehvi(at))
new_data = {FOO: Dataset(train_x, model_pred_observation)}
up_echvi = builder.update_acquisition_function(echvi, models_, datasets=new_data)
assert up_echvi == echvi
up_ehvi = (
ExpectedHypervolumeImprovement()
.using(FOO)
.prepare_acquisition_function(models_, datasets=new_data)
)
npt.assert_allclose(up_echvi(at), up_ehvi(at))
assert up_echvi._get_tracing_count() == 1 # type: ignore
def custom_get_ref_point_echvi(
observations: TensorType,
) -> TensorType:
return tf.reduce_min(observations, -2)
@pytest.mark.parametrize(
"specify_ref_points",
[
pytest.param(
custom_get_ref_point_echvi,
id="callable_func_input",
),
],
)
def test_expected_constrained_hypervolume_improvement_based_on_specified_ref_points(
specify_ref_points: TensorType | Sequence[float] | Callable[..., TensorType]
) -> None:
num_obj = 2
train_x = tf.constant(
[[-2.0], [-1.5], [-1.0], [0.0], [0.5], [1.0], [1.5], [2.0]], dtype=tf.float64
)
obj_model = _mo_test_model(num_obj, *[None] * num_obj)
model_pred_observation = obj_model.predict(train_x)[0]
class _Certainty(AcquisitionFunctionBuilder[ProbabilisticModelType]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return lambda x: tf.ones_like(tf.squeeze(x, -2))
data = {FOO: Dataset(train_x[:5], model_pred_observation[:5])}
models_ = {FOO: obj_model}
builder = ExpectedConstrainedHypervolumeImprovement( # type: ignore
FOO,
_Certainty(),
0,
reference_point_spec=get_reference_point(Pareto(data[FOO].observations).front),
)
echvi = builder.prepare_acquisition_function(models_, datasets=data)
ehvi = (
ExpectedHypervolumeImprovement()
.using(FOO)
.prepare_acquisition_function(models_, datasets=data)
)
at = tf.constant([[[-0.1]], [[1.23]], [[-6.78]]], dtype=tf.float64)
npt.assert_allclose(echvi(at), ehvi(at))
new_data = {FOO: Dataset(train_x, model_pred_observation)}
up_echvi = builder.update_acquisition_function(echvi, models_, datasets=new_data)
assert up_echvi == echvi
up_ehvi = (
ExpectedHypervolumeImprovement()
.using(FOO)
.prepare_acquisition_function(models_, datasets=new_data)
)
npt.assert_allclose(up_echvi(at), up_ehvi(at))
assert up_echvi._get_tracing_count() == 1 # type: ignore
def test_echvi_is_constraint_when_no_feasible_points() -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
def acquisition(x: TensorType) -> TensorType:
x_ = tf.squeeze(x, -2)
return tf.cast(tf.logical_and(0.0 <= x_, x_ < 1.0), x.dtype)
return acquisition
data = {FOO: Dataset(tf.constant([[-2.0], [1.0]]), tf.constant([[4.0], [1.0]]))}
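    # neither query point is feasible, so ECHVI should reduce to the constraint function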
models_ = {FOO: QuadraticMeanAndRBFKernel()}
echvi = ExpectedConstrainedHypervolumeImprovement(
FOO, _Constraint()
).prepare_acquisition_function(models_, datasets=data)
constraint_fn = _Constraint().prepare_acquisition_function(models_, datasets=data)
xs = tf.linspace([[-10.0]], [[10.0]], 100)
npt.assert_allclose(echvi(xs), constraint_fn(xs))
def test_echvi_raises_for_non_scalar_min_pof() -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
ExpectedConstrainedHypervolumeImprovement(NA, pof, tf.constant([0.0]))
def test_echvi_raises_for_out_of_range_min_pof() -> None:
pof = ProbabilityOfFeasibility(0.0).using(NA)
with pytest.raises(tf.errors.InvalidArgumentError):
ExpectedConstrainedHypervolumeImprovement(NA, pof, 1.5)
def test_echvi_raises_for_empty_data() -> None:
class _Constraint(AcquisitionFunctionBuilder[ProbabilisticModel]):
def prepare_acquisition_function(
self,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> AcquisitionFunction:
return raise_exc
data = {FOO: Dataset(tf.zeros([0, 2]), tf.zeros([0, 1]))}
models_ = {FOO: QuadraticMeanAndRBFKernel()}
builder = ExpectedConstrainedHypervolumeImprovement(FOO, _Constraint())
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(models_, datasets=data)
with pytest.raises(tf.errors.InvalidArgumentError):
builder.prepare_acquisition_function(models_)
def test_hippo_builder_raises_for_empty_data() -> None:
num_obj = 3
dataset = {NA: empty_dataset([2], [num_obj])}
model = {NA: QuadraticMeanAndRBFKernel()}
hippo = cast(GreedyAcquisitionFunctionBuilder[QuadraticMeanAndRBFKernel], HIPPO(NA))
with pytest.raises(tf.errors.InvalidArgumentError):
hippo.prepare_acquisition_function(model, dataset)
@pytest.mark.parametrize("at", [tf.constant([[0.0], [1.0]]), tf.constant([[[0.0], [1.0]]])])
def test_hippo_penalizer_raises_for_invalid_batch_size(at: TensorType) -> None:
pending_points = tf.zeros([1, 2], dtype=tf.float64)
hp = hippo_penalizer(QuadraticMeanAndRBFKernel(), pending_points)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
hp(at)
def test_hippo_penalizer_raises_for_empty_pending_points() -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
hippo_penalizer(QuadraticMeanAndRBFKernel(), None)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
hippo_penalizer(QuadraticMeanAndRBFKernel(), tf.zeros([0, 2]))
def test_hippo_penalizer_update_raises_for_empty_pending_points() -> None:
pending_points = tf.zeros([1, 2], dtype=tf.float64)
hp = hippo_penalizer(QuadraticMeanAndRBFKernel(), pending_points)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
hp.update(None)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
hp.update(tf.zeros([0, 2]))
@pytest.mark.parametrize(
"point_to_penalize", [tf.constant([[[0.0, 1.0]]]), tf.constant([[[3.0, 4.0]]])]
)
def test_hippo_penalizer_penalizes_pending_point(point_to_penalize: TensorType) -> None:
pending_points = tf.constant([[0.0, 1.0], [2.0, 3.0], [3.0, 4.0]])
hp = hippo_penalizer(QuadraticMeanAndRBFKernel(), pending_points)
penalty = hp(point_to_penalize)
# if the point is already collected, it shall be penalized to 0
npt.assert_allclose(penalty, tf.zeros((1, 1)))
@random_seed
@pytest.mark.parametrize(
"base_builder",
[
ExpectedHypervolumeImprovement().using(NA),
ExpectedConstrainedHypervolumeImprovement(NA, _Certainty(), 0.0),
],
)
def test_hippo_penalized_acquisitions_match_base_acquisition(
base_builder: AcquisitionFunctionBuilder[ProbabilisticModel],
) -> None:
data = {NA: Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))}
model = {NA: _mo_test_model(2, *[None] * 2)}
hippo_acq_builder: HIPPO[ProbabilisticModel] = HIPPO(
NA, base_acquisition_function_builder=base_builder
)
hippo_acq = hippo_acq_builder.prepare_acquisition_function(model, data, None)
base_acq = base_builder.prepare_acquisition_function(model, data)
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
hippo_acq_values = hippo_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
npt.assert_array_equal(hippo_acq_values, base_acq_values)
@random_seed
@pytest.mark.parametrize(
"base_builder",
[
ExpectedHypervolumeImprovement().using(NA),
ExpectedConstrainedHypervolumeImprovement(NA, _Certainty(), 0.0),
],
)
def test_hippo_penalized_acquisitions_combine_base_and_penalization_correctly(
base_builder: AcquisitionFunctionBuilder[ProbabilisticModel],
) -> None:
data = {NA: Dataset(tf.zeros([3, 2], dtype=tf.float64), tf.ones([3, 2], dtype=tf.float64))}
model = {NA: _mo_test_model(2, *[None] * 2)}
pending_points = tf.zeros([2, 2], dtype=tf.float64)
hippo_acq_builder: HIPPO[ProbabilisticModel] = HIPPO(
NA, base_acquisition_function_builder=base_builder
)
hippo_acq = hippo_acq_builder.prepare_acquisition_function(model, data, pending_points)
base_acq = base_builder.prepare_acquisition_function(model, data)
penalizer = hippo_penalizer(model[NA], pending_points)
assert hippo_acq._get_tracing_count() == 0 # type: ignore
x_range = tf.linspace(0.0, 1.0, 11)
x_range = tf.cast(x_range, dtype=tf.float64)
xs = tf.reshape(tf.stack(tf.meshgrid(x_range, x_range, indexing="ij"), axis=-1), (-1, 2))
hippo_acq_values = hippo_acq(xs[..., None, :])
base_acq_values = base_acq(xs[..., None, :])
penalty_values = penalizer(xs[..., None, :])
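    # HIPPO multiplies the base acquisition by the penalty; the product is formed in log space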
penalized_base_acq = tf.math.exp(tf.math.log(base_acq_values) + tf.math.log(penalty_values))
npt.assert_array_equal(hippo_acq_values, penalized_base_acq)
assert hippo_acq._get_tracing_count() == 1 # type: ignore
| 35,701 | 35.844169 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/multi_objective/test_dominance.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
from time import perf_counter
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from trieste.acquisition.multi_objective.dominance import non_dominated
_COMPILERS = {
"no_compiler": lambda f: f,
"tf_function": tf.function,
}
@pytest.mark.parametrize(
"scores, pareto_set, nondominated",
[
(
tf.constant(
[
[0.9575, 0.4218],
[0.9649, 0.9157],
[0.1576, 0.7922],
[0.9706, 0.9595],
[0.9572, 0.6557],
[0.4854, 0.0357],
[0.8003, 0.8491],
[0.1419, 0.9340],
]
),
tf.constant([[0.1576, 0.7922], [0.4854, 0.0357], [0.1419, 0.934]]),
tf.constant([False, False, True, False, False, True, False, True]),
),
(
tf.constant(
[
[0.9575, 0.4218],
[0.9649, 0.9157],
[0.1576, 0.7922],
[0.9706, 0.9595],
[0.9572, 0.6557],
[0.4854, 0.0357],
[0.4954, 0.0357],
[0.8003, 0.8491],
[0.1419, 0.9340],
[0.1419, 0.9440],
]
),
tf.constant([[0.1576, 0.7922], [0.4854, 0.0357], [0.1419, 0.934]]),
tf.constant([False, False, True, False, False, True, False, False, True, False]),
),
(
tf.constant(
[
[0.9575, 0.4218],
[0.9649, 0.9157],
[0.1576, 0.7922],
[0.9706, 0.9595],
[0.9572, 0.6557],
[0.4854, 0.0357],
[0.4854, 0.0357],
[0.8003, 0.8491],
[0.1419, 0.9340],
[0.1419, 0.9340],
]
),
tf.constant(
[
[0.1576, 0.7922],
[0.4854, 0.0357],
[0.4854, 0.0357],
[0.1419, 0.934],
[0.1419, 0.934],
]
),
tf.constant([False, False, True, False, False, True, True, False, True, True]),
),
(
tf.constant(
[
[0.90234935, 0.02297473, 0.05389869],
[0.98328614, 0.44182944, 0.6975261],
[0.39555323, 0.3040712, 0.3433497],
[0.72582424, 0.55389977, 0.00330079],
[0.9590585, 0.03233206, 0.2403127],
[0.04540098, 0.22407162, 0.11227596],
]
),
tf.constant(
[
[0.90234935, 0.02297473, 0.05389869],
[0.72582424, 0.55389977, 0.00330079],
[0.04540098, 0.22407162, 0.11227596],
]
),
tf.constant([True, False, False, True, False, True]),
),
(
tf.zeros((0, 3)),
tf.zeros((0, 3)),
tf.ones((0,), dtype=tf.bool),
),
],
)
@pytest.mark.parametrize("compiler_name", _COMPILERS)
def test_dominated_sort(
compiler_name: str, scores: tf.Tensor, pareto_set: tf.Tensor, nondominated: tf.Tensor
) -> None:
compiled_non_dominated = _COMPILERS[compiler_name](non_dominated)
ret_pareto_set, ret_nondominated = compiled_non_dominated(scores)
npt.assert_allclose(tf.sort(ret_pareto_set, 0), tf.sort(pareto_set, 0))
npt.assert_array_equal(ret_nondominated, nondominated)
@pytest.mark.parametrize("num_objectives", [2, 4, 6])
@pytest.mark.parametrize("compiler_name", _COMPILERS)
def test_dominated_scales_ok(compiler_name: str, num_objectives: int) -> None:
num_points = 10_000
compiled_non_dominated = _COMPILERS[compiler_name](non_dominated)
rng = np.random.RandomState(1234)
dataset = tf.Variable(rng.rand(num_points, num_objectives), shape=[None, num_objectives])
before = perf_counter()
front, idx = compiled_non_dominated(dataset)
after = perf_counter()
print()
print(
f"{num_points} x {num_objectives} ({compiler_name})"
f" -> {timedelta(seconds=after - before)}"
)
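    # each front point must not be strictly dominated by any point in the dataset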
for f in front:
assert np.all(np.any(f <= dataset, axis=1))
assert np.all(np.sort(front, axis=0) == np.sort(dataset[idx], axis=0))
| 5,163 | 32.973684 | 93 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/multi_objective/test_pareto.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, SequenceN
from trieste.acquisition.multi_objective.pareto import Pareto, get_reference_point
@pytest.mark.parametrize("reference", [0.0, [0.0], [[0.0]]])
def test_pareto_hypervolume_indicator_raises_for_reference_with_invalid_shape(
reference: SequenceN[float],
) -> None:
pareto = Pareto(tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]]))
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
pareto.hypervolume_indicator(tf.constant(reference))
@pytest.mark.parametrize("reference", [[0.1, -0.65], [-0.7, -0.1]])
def test_pareto_hypervolume_indicator_raises_for_reference_below_anti_ideal_point(
reference: list[float],
) -> None:
pareto = Pareto(tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]]))
with pytest.raises(tf.errors.InvalidArgumentError):
pareto.hypervolume_indicator(tf.constant(reference))
@pytest.mark.parametrize(
"front, reference",
[
(tf.zeros(shape=(0, 2)), [[0.1, -0.65], [-0.7, -0.1]]),
((tf.zeros(shape=(0, 3)), [4.0, 4.0, 4.0])),
],
)
def test_pareto_hypervolume_indicator_raises_for_empty_front(
front: tf.Tensor, reference: list[float]
) -> None:
pareto = Pareto(front)
with pytest.raises(ValueError):
pareto.hypervolume_indicator(tf.constant(reference))
@pytest.mark.parametrize(
"objectives, reference, expected",
[
([[1.0, 0.5]], [2.3, 2.0], 1.95),
([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]], [0.1, -0.1], 0.92),
( # reference point is equal to one pareto point in one dimension
[[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]],
[0.1, -0.6],
0.37,
),
([[2.0, 2.0, 0.0], [2.0, 0.0, 1.0], [3.0, 1.0, 0.0]], [4.0, 4.0, 4.0], 29.0),
],
)
def test_pareto_hypervolume_indicator(
objectives: list[list[float]],
reference: list[float],
expected: float,
) -> None:
pareto = Pareto(tf.constant(objectives))
npt.assert_allclose(pareto.hypervolume_indicator(tf.constant(reference)), expected, 1e-6)
@pytest.mark.parametrize(
"observations",
[
(tf.zeros(shape=(0, 2))),
(tf.zeros(shape=(0, 3))),
(tf.constant([])),
],
)
def test_get_reference_point_raise_when_feed_empty_front(observations: tf.Tensor) -> None:
with pytest.raises(ValueError):
get_reference_point(observations)
@pytest.mark.parametrize(
"observations, expected",
[
(tf.constant([[1.0, 2.0], [3.0, 4.0]]), tf.constant([1.0, 2.0])),
(tf.constant([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0]]), tf.constant([3.0, 3.0])),
(tf.constant([[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 5.0]]), tf.constant([3.0, 3.0])),
],
)
def test_get_reference_point_extract_based_on_pareto_front(
observations: tf.Tensor, expected: tf.Tensor
) -> None:
tf.debugging.assert_equal(get_reference_point(observations), expected)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_raises_too_large_sample_size() -> None:
observations = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
pareto_set = Pareto(observations)
with pytest.raises(ValueError):
pareto_set.sample_diverse_subset(3, allow_repeats=False)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_raises_zero_range() -> None:
observations = tf.constant([[1.0, 1.0], [1.0, 1.0]])
pareto_set = Pareto(observations)
with pytest.raises(ValueError):
pareto_set.sample_diverse_subset(1, bounds_min_delta=0.0)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_get_bounds() -> None:
observations = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
pareto_set = Pareto(observations)
lower_bounds, reference_point = pareto_set._get_bounds(delta_scaling_factor=0.2, min_delta=1e-9)
expected_lower_bounds = tf.constant([-1.4, -1.4])
expected_reference_point = tf.constant([1.4, 1.4])
npt.assert_allclose(expected_lower_bounds, lower_bounds)
npt.assert_allclose(expected_reference_point, reference_point)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_calculate_p() -> None:
observations = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
lower_bound = tf.constant([-2.0, -2.0])
reference_point = tf.constant([2.0, 2.0])
pareto_set = Pareto(observations)
output = pareto_set._calculate_p_matrix(lower_bound, reference_point)
expected_output = tf.constant([[3 / 16, 1 / 16], [1 / 16, 3 / 16]])
npt.assert_array_equal(expected_output, output)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_choose_batch_no_repeats() -> None:
observations = tf.constant([[2.0, -2.0], [1.0, -1.0], [0.0, 0.0], [-1.0, 1.0], [-2.0, 2.0]])
x_star = tf.constant([[0.15], [0.25], [0.2], [0.3], [0.1]])
pareto_set = Pareto(observations)
sample, sample_ids = pareto_set._choose_batch_no_repeats(x_star, sample_size=2)
expected_sample = tf.constant([[-1.0, 1.0], [1.0, -1.0]])
expected_sample_ids = tf.constant([3, 1])
npt.assert_array_equal(expected_sample, sample)
npt.assert_array_equal(expected_sample_ids, sample_ids)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_choose_batch_no_repeats_return_same_front() -> None:
observations = tf.constant([[1.0, -1.0], [0.0, 0.0], [-1.0, 1.0]])
x_star = tf.constant([[0.4], [0.35], [0.25]])
pareto_set = Pareto(observations)
sample, sample_ids = pareto_set._choose_batch_no_repeats(x_star, sample_size=3)
expected_sample = pareto_set.front
expected_sample_ids = tf.constant([0, 1, 2])
npt.assert_array_equal(expected_sample, sample)
npt.assert_array_equal(expected_sample_ids, sample_ids)
@pytest.mark.parametrize(
"x_star,expected_ids",
(
([[0.25], [0.1], [0.09], [0.51], [0.05]], [0, 3, 3]),
([[0.25], [0.24], [0.25], [0.01], [0.25]], [0, 1, 2, 4]),
([[0.1], [0.2], [0.3], [0.4], [0.0]], [1, 2, 3, 3]),
),
)
@pytest.mark.qhsri
def test_pareto_sample_diverse_subset_choose_batch_with_repeats(
x_star: list[list[float]], expected_ids: list[int]
) -> None:
observations = tf.constant([[2.0, -2.0], [1.0, -1.0], [0.0, 0.0], [-1.0, 1.0], [-2.0, 2.0]])
pareto_set = Pareto(observations)
_, sample_ids = pareto_set._choose_batch_with_repeats(np.array(x_star), sample_size=4)
sample_ids_list = list(sample_ids)
for expected_id in expected_ids:
assert expected_id in sample_ids_list
sample_ids_list.remove(expected_id)
| 7,158 | 36.481675 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/multi_objective/test_partition.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import TF_DEBUGGING_ERROR_TYPES, SequenceN
from trieste.acquisition.multi_objective.partition import (
DividedAndConquerNonDominated,
ExactPartition2dNonDominated,
prepare_default_non_dominated_partition_bounds,
)
@pytest.mark.parametrize(
"reference, observations, anti_ref, expected",
[
(
tf.constant([1.0, 1.0]),
None,
tf.constant([-1.0, -1.0]),
(tf.constant([[-1.0, -1.0]]), tf.constant([[1.0, 1.0]])),
),
(
tf.constant([1.0, 1.0]),
None,
tf.constant([1.0, -1.0]),
(tf.constant([[1.0, -1.0]]), tf.constant([[1.0, 1.0]])),
),
(
tf.constant([1.0, 1.0]),
tf.constant([]),
tf.constant([1.0, -1.0]),
(tf.constant([[1.0, -1.0]]), tf.constant([[1.0, 1.0]])),
),
],
)
def test_default_non_dominated_partition_when_no_valid_obs(
reference: tf.Tensor,
observations: Optional[tf.Tensor],
anti_ref: Optional[tf.Tensor],
expected: tuple[tf.Tensor, tf.Tensor],
) -> None:
npt.assert_array_equal(
prepare_default_non_dominated_partition_bounds(reference, observations, anti_ref), expected
)
def test_default_non_dominated_partition_raise_when_obs_below_default_anti_reference() -> None:
objectives = tf.constant(
[
[-1e11, 0.7922],
[0.4854, 0.0357],
[0.1419, 0.9340],
]
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
prepare_default_non_dominated_partition_bounds(tf.constant([1.0, 1.0]), objectives)
@pytest.mark.parametrize(
"ref, obs, anti_ref",
[
(
tf.constant([-1e12, 1.0]),
tf.constant(
[
[0.4854, 0.7922],
[0.4854, 0.0357],
[0.1419, 0.9340],
]
),
None,
),
(tf.constant([-1e12, 1.0]), None, None),
(tf.constant([-1e12, 1.0]), tf.constant([]), None),
],
)
def test_default_non_dominated_partition_raise_when_ref_below_default_anti_reference(
ref: tf.Tensor, obs: Optional[tf.Tensor], anti_ref: Optional[tf.Tensor]
) -> None:
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
prepare_default_non_dominated_partition_bounds(ref, obs, anti_ref)
def test_exact_partition_2d_bounds() -> None:
objectives = tf.constant(
[
[0.1576, 0.7922],
[0.4854, 0.0357],
[0.1419, 0.9340],
]
)
partition_2d = ExactPartition2dNonDominated(objectives)
npt.assert_array_equal(
partition_2d._bounds.lower_idx, tf.constant([[0, 0], [1, 0], [2, 0], [3, 0]])
)
npt.assert_array_equal(
partition_2d._bounds.upper_idx, tf.constant([[1, 4], [2, 1], [3, 2], [4, 3]])
)
npt.assert_allclose(
partition_2d.front, tf.constant([[0.1419, 0.9340], [0.1576, 0.7922], [0.4854, 0.0357]])
)
def test_exact_partition_2d_raise_when_input_is_not_pareto_front() -> None:
objectives = tf.constant(
[
[0.9575, 0.4218],
[0.9649, 0.9157],
[0.1576, 0.7922],
[0.9706, 0.9595],
[0.9572, 0.6557],
[0.4854, 0.0357],
[0.8003, 0.8491],
[0.1419, 0.9340],
]
)
with pytest.raises(tf.errors.InvalidArgumentError):
ExactPartition2dNonDominated(objectives)
@pytest.mark.parametrize(
"reference",
[0.0, [0.0], [[0.0]]],
)
def test_exact_partition_2d_partition_bounds_raises_for_reference_with_invalid_shape(
reference: SequenceN[float],
) -> None:
partition = ExactPartition2dNonDominated(
tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]])
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
partition.partition_bounds(tf.constant([0.0, 0.0]), tf.constant(reference))
@pytest.mark.parametrize("anti_reference", [-10.0, [-10.0], [[-10.0]]])
def test_exact_partition_2d_partition_bounds_raises_for_anti_reference_with_invalid_shape(
anti_reference: SequenceN[float],
) -> None:
partition = ExactPartition2dNonDominated(
tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]])
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
partition.partition_bounds(tf.constant(anti_reference), tf.constant([10.0, 10.0]))
@pytest.mark.parametrize("reference", [[0.1, -0.65], [-0.7, -0.1]])
def test_exact_partition_2d_partition_bounds_raises_for_reference_below_anti_ideal_point(
reference: list[float],
) -> None:
partition = ExactPartition2dNonDominated(
tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]])
)
with pytest.raises(tf.errors.InvalidArgumentError):
partition.partition_bounds(tf.constant([-10.0, -10.0]), tf.constant(reference))
@pytest.mark.parametrize("anti_reference", [[0.1, -0.65], [-0.7, -0.1]])
def test_exact_partition_2d_partition_bounds_raises_for_front_below_anti_reference_point(
anti_reference: list[float],
) -> None:
partition = ExactPartition2dNonDominated(
tf.constant([[-1.0, -0.6], [-0.8, -0.7], [-0.6, -1.1]])
)
with pytest.raises(tf.errors.InvalidArgumentError):
partition.partition_bounds(tf.constant(anti_reference), tf.constant([10.0, 10.0]))
@pytest.mark.parametrize(
"objectives, anti_reference, reference, expected",
[
(
[[1.0, 0.5]],
[-10.0, -8.0],
[2.3, 2.0],
([[-10.0, -8.0], [1.0, -8.0]], [[1.0, 2.0], [2.3, 0.5]]),
),
(
[[-1.0, -0.6], [-0.8, -0.7]],
[-2.0, -1.0],
[0.1, -0.1],
([[-2.0, -1.0], [-1.0, -1.0], [-0.8, -1.0]], [[-1.0, -0.1], [-0.8, -0.6], [0.1, -0.7]]),
),
        (  # reference point is equal to one Pareto point in one dimension
            # anti-ideal point is equal to two Pareto points in one dimension
[[-1.0, -0.6], [-0.8, -0.7]],
[-1.0, -0.7],
[0.1, -0.6],
([[-1.0, -0.7], [-1.0, -0.7], [-0.8, -0.7]], [[-1.0, -0.6], [-0.8, -0.6], [0.1, -0.7]]),
),
],
)
def test_exact_partition_2d_partition_bounds(
objectives: SequenceN[float],
anti_reference: list[float],
reference: list[float],
expected: SequenceN[float],
) -> None:
partition = ExactPartition2dNonDominated(tf.constant(objectives))
npt.assert_allclose(
partition.partition_bounds(tf.constant(anti_reference), tf.constant(reference))[0],
tf.constant(expected[0]),
)
npt.assert_allclose(
partition.partition_bounds(tf.constant(anti_reference), tf.constant(reference))[1],
tf.constant(expected[1]),
)
def test_divide_conquer_non_dominated_raise_when_input_is_not_pareto_front() -> None:
objectives = tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
[0.0, 0.0, 0.0],
]
)
with pytest.raises(tf.errors.InvalidArgumentError):
DividedAndConquerNonDominated(objectives)
@pytest.mark.parametrize(
"reference",
[0.0, [0.0], [[0.0]]],
)
def test_divide_conquer_non_dominated_partition_bounds_raises_for_reference_with_invalid_shape(
reference: SequenceN[float],
) -> None:
partition = DividedAndConquerNonDominated(
tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
]
)
)
with pytest.raises(TF_DEBUGGING_ERROR_TYPES):
partition.partition_bounds(tf.constant([0.0, 0.0, 0.0]), tf.constant(reference))
@pytest.mark.parametrize("reference", [[0.5, 0.65, 4], [11.0, 4.0, 2.0], [11.0, 11.0, 0.0]])
def test_divide_conquer_non_dominated_partition_bounds_raises_for_reference_below_anti_ideal_point(
reference: list[float],
) -> None:
partition = DividedAndConquerNonDominated(
tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
]
)
)
with pytest.raises(tf.errors.InvalidArgumentError):
partition.partition_bounds(tf.constant([-10.0, -10.0, -10.0]), tf.constant(reference))
@pytest.mark.parametrize(
"anti_reference", [[1.0, -2.0, -2.0], [-1.0, 3.0, -2.0], [-1.0, -3.0, 1.0]]
)
def test_divide_conquer_non_dominated_partition_bounds_raises_for_front_below_anti_reference_point(
anti_reference: list[float],
) -> None:
partition = DividedAndConquerNonDominated(
tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
]
)
)
with pytest.raises(tf.errors.InvalidArgumentError):
partition.partition_bounds(tf.constant(anti_reference), tf.constant([10.0, 10.0, 10.0]))
def test_divide_conquer_non_dominated_three_dimension_case() -> None:
objectives = tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
]
)
partition_nd = DividedAndConquerNonDominated(objectives)
npt.assert_array_equal(
partition_nd._bounds.lower_idx,
tf.constant(
[
[3, 2, 0],
[3, 1, 0],
[2, 2, 0],
[2, 1, 0],
[3, 0, 1],
[2, 0, 1],
[2, 0, 0],
[0, 1, 1],
[0, 1, 0],
[0, 0, 0],
]
),
)
npt.assert_array_equal(
partition_nd._bounds.upper_idx,
tf.constant(
[
[4, 4, 2],
[4, 2, 1],
[3, 4, 2],
[3, 2, 1],
[4, 3, 4],
[3, 1, 4],
[4, 1, 1],
[1, 4, 4],
[2, 4, 1],
[2, 1, 4],
]
),
)
npt.assert_allclose(
partition_nd.front,
tf.constant(
[
[0.0, 2.0, 1.0],
[7.0, 6.0, 0.0],
[9.0, 0.0, 1.0],
]
),
)
| 10,918 | 29.415042 | 100 | py |
trieste-develop | trieste-develop/tests/unit/acquisition/multi_objective/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/thirdparty/test_tensorflow.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import tensorflow as tf
def test_raise_is_incompatible_with_tf_function() -> None:
err_msg = "very specific error message 13579"
@tf.function
def f(a: tf.Tensor) -> tf.Tensor:
if a <= tf.constant(0):
raise ValueError(err_msg)
return a
with pytest.raises(ValueError, match=err_msg):
f(tf.constant(1)) # note that 1 should *not* trigger the error branch, but does
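    # A plausible explanation: tf.function/AutoGraph traces both branches of a
    # tensor-dependent conditional when building the graph, so the Python `raise`
    # executes at trace time regardless of the runtime value passed in.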
def test_tf_debugging_is_compatible_with_tf_function() -> None:
err_msg = "very specific error message 2468"
@tf.function
def f(a: tf.Tensor) -> tf.Tensor:
tf.debugging.assert_positive(a, message=err_msg)
return a
f(tf.constant(1))
with pytest.raises(tf.errors.InvalidArgumentError, match=err_msg):
f(tf.constant(-1))
| 1,389 | 30.590909 | 88 | py |
trieste-develop | trieste-develop/tests/thirdparty/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/misc.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import functools
import os
import random
from collections.abc import Container, Mapping
from typing import Any, Callable, NoReturn, Optional, Sequence, TypeVar, Union, cast, overload
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from typing_extensions import Final
from trieste.acquisition.rule import AcquisitionRule
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.objectives import Branin, Hartmann6
from trieste.objectives.utils import mk_observer
from trieste.space import SearchSpace
from trieste.types import Tag, TensorType
from trieste.utils import shapes_equal
TF_DEBUGGING_ERROR_TYPES: Final[tuple[type[Exception], ...]] = (
ValueError,
tf.errors.InvalidArgumentError,
)
""" Error types thrown by TensorFlow's debugging functionality for tensor shapes. """
C = TypeVar("C", bound=Callable[..., object])
""" Type variable bound to `typing.Callable`. """
@overload
def random_seed(f_py: C, seed: int = 0) -> C:
...
@overload
def random_seed(f_py: None = None, seed: int = 0) -> Callable[[C], C]:
...
def random_seed(f_py: Optional[C] = None, seed: int = 0) -> Callable[[C], C] | C:
"""
Decorates function ``f`` with TensorFlow, numpy and Python randomness seeds fixed to ``seed``.
    This decorator can be used with or without the ``seed`` parameter. When used with the default
    seed::
@random_seed
def foo():
pass
or::
@random_seed()
def foo():
pass
    However, if ``seed`` needs to be set to a custom value, the parameter needs to be named::
@random_seed(seed=1)
def foo():
pass
:param f_py: A function to be decorated, used when ``seed`` parameter is not set.
:param seed: A seed to be fixed, defaults to 0.
"""
assert callable(f_py) or f_py is None
def _decorator(f: C) -> C:
"""
:param f: A function.
:return: The function ``f``, but with TensorFlow, numpy and Python randomness seeds fixed.
"""
@functools.wraps(f)
def decorated(*args: Any, **kwargs: Any) -> Any:
os.environ["PYTHONHASHSEED"] = str(seed)
tf.random.set_seed(seed)
np.random.seed(seed)
random.seed(seed)
return f(*args, **kwargs)
return cast(C, decorated)
if f_py is None:
return _decorator
else:
return _decorator(f_py)
T = TypeVar("T")
""" Unbound type variable. """
SequenceN = Union[
Sequence[T],
Sequence[Sequence[T]],
Sequence[Sequence[Sequence[T]]],
Sequence[Sequence[Sequence[Sequence[Any]]]],
]
""" Type alias for a nested sequence (e.g. list or tuple) with array shape. """
def mk_dataset(
query_points: SequenceN[Sequence[float]], observations: SequenceN[Sequence[float]]
) -> Dataset:
"""
:param query_points: The query points.
:param observations: The observations.
:return: A :class:`Dataset` containing the specified ``query_points`` and ``observations`` with
dtype `tf.float64`.
"""
return Dataset(
tf.constant(query_points, dtype=tf.float64), tf.constant(observations, dtype=tf.float64)
)
def empty_dataset(query_point_shape: ShapeLike, observation_shape: ShapeLike) -> Dataset:
"""
:param query_point_shape: The shape of a *single* query point.
:param observation_shape: The shape of a *single* observation.
:return: An empty dataset with points of the specified shapes, and dtype `tf.float64`.
"""
qp = tf.zeros(tf.TensorShape([0]) + query_point_shape, tf.float64)
obs = tf.zeros(tf.TensorShape([0]) + observation_shape, tf.float64)
return Dataset(qp, obs)
def raise_exc(*_: object, **__: object) -> NoReturn:
"""
    Raise an exception. This dummy function can be used wherever a callable of any signature is
    expected but is never actually meant to be called.
:raise Exception: Always.
"""
raise Exception
def quadratic(x: tf.Tensor) -> tf.Tensor:
r"""
The multi-dimensional quadratic function.
:param x: A tensor whose last dimension is of length greater than zero.
:return: The sum :math:`\Sigma x^2` of the squares of ``x``.
:raise ValueError: If ``x`` is a scalar or has empty trailing dimension.
"""
if x.shape == [] or x.shape[-1] == 0:
raise ValueError(f"x must have non-empty trailing dimension, got shape {x.shape}")
return tf.reduce_sum(x**2, axis=-1, keepdims=True)
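# Example for quadratic: quadratic(tf.constant([[1.0, 2.0]])) returns [[5.0]], since
# 1**2 + 2**2 = 5 and the trailing dimension is kept via keepdims=True.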
class FixedAcquisitionRule(AcquisitionRule[TensorType, SearchSpace, ProbabilisticModel]):
"""An acquisition rule that returns the same fixed value on every step."""
def __init__(self, query_points: SequenceN[Sequence[float]]):
"""
:param query_points: The value to return on each step. Will be converted to a tensor with
dtype `tf.float64`.
"""
self._qp = tf.constant(query_points, dtype=tf.float64)
def __repr__(self) -> str:
return f"FixedAcquisitionRule({self._qp!r})"
def acquire(
self,
search_space: SearchSpace,
models: Mapping[Tag, ProbabilisticModel],
datasets: Optional[Mapping[Tag, Dataset]] = None,
) -> TensorType:
"""
:param search_space: Unused.
:param models: Unused.
:param datasets: Unused.
:return: The fixed value specified on initialisation.
"""
return self._qp
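# Minimal usage sketch for FixedAcquisitionRule (all arguments to acquire are ignored):
#   rule = FixedAcquisitionRule([[0.2, 0.3]])
#   rule.acquire(search_space, models)  # always returns [[0.2, 0.3]] as tf.float64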
ShapeLike = Union[tf.TensorShape, Sequence[int]]
""" Type alias for types that can represent tensor shapes. """
def various_shapes(*, excluding_ranks: Container[int] = ()) -> frozenset[tuple[int, ...]]:
"""
:param excluding_ranks: Ranks to exclude from the result.
:return: A reasonably comprehensive variety of tensor shapes, where no shapes will have a rank
in ``excluding_ranks``.
"""
shapes = (
{()}
| {(0,), (1,), (3,)}
| {(0, 0), (1, 0), (0, 1), (3, 4)}
| {(1, 0, 3), (1, 2, 3)}
| {(1, 2, 3, 4, 5, 6)}
)
return frozenset(s for s in shapes if len(s) not in excluding_ranks)
def assert_datasets_allclose(this: Dataset, that: Dataset) -> None:
"""
    Check that the :attr:`query_points` in ``this`` and ``that`` have the same shape and dtype,
    and that all elements are approximately equal. Also check the same for :attr:`observations`.
:param this: A dataset.
:param that: A dataset.
:raise AssertionError: If any of the following are true:
- shapes are not equal
- dtypes are not equal
- elements are not approximately equal.
"""
assert shapes_equal(this.query_points, that.query_points)
assert shapes_equal(this.observations, that.observations)
assert this.query_points.dtype == that.query_points.dtype
assert this.observations.dtype == that.observations.dtype
npt.assert_allclose(this.query_points, that.query_points)
npt.assert_allclose(this.observations, that.observations)
def hartmann_6_dataset(num_query_points: int) -> Dataset:
"""
    Generate an example dataset based on the Hartmann 6 objective function.
:param num_query_points: A number of samples from the objective function.
:return: A dataset.
"""
search_space = Hartmann6.search_space
query_points = search_space.sample(num_query_points)
observer = mk_observer(Hartmann6.objective)
data = observer(query_points)
return data
def branin_dataset(num_query_points: int) -> Dataset:
"""
    Generate an example dataset based on the Branin objective function.
:param num_query_points: A number of samples from the objective function.
:return: A dataset.
"""
search_space = Branin.search_space
query_points = search_space.sample(num_query_points)
observer = mk_observer(Branin.objective)
data = observer(query_points)
return data
| 8,480 | 31.125 | 99 | py |
trieste-develop | trieste-develop/tests/util/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/models/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for tests.
"""
from __future__ import annotations
import tensorflow as tf
def fnc_3x_plus_10(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + 10
def fnc_2sin_x_over_3(x: tf.Tensor) -> tf.Tensor:
return 2.0 * tf.math.sin(x / 3.0)
def binary_line(x: tf.Tensor) -> tf.Tensor:
return tf.stack([1 if xi > 0 else 0 for xi in x])
| 949 | 27.787879 | 74 | py |
trieste-develop | trieste-develop/tests/util/models/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/models/gpflux/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple GPflux models to be used in the tests.
"""
from __future__ import annotations
from typing import Any, Dict, Tuple
import gpflow
import tensorflow as tf
from gpflow.utilities import set_trainable
from gpflux.architectures import Config, build_constant_input_dim_deep_gp
from gpflux.helpers import construct_basic_kernel
from gpflux.layers import GPLayer
from gpflux.models import DeepGP
from trieste.data import Dataset, TensorType
from trieste.models.gpflux import DeepGaussianProcess, build_vanilla_deep_gp
from trieste.models.optimizer import KerasOptimizer
from trieste.space import SearchSpace
from trieste.utils import to_numpy
def single_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
config = Config(
num_inducing=len(x),
inner_layer_qsqrt_factor=1e-5,
likelihood_noise_variance=1e-2,
whiten=True, # whiten = False not supported yet in GPflux for this model
)
return build_constant_input_dim_deep_gp(X=x, num_layers=1, config=config)
def two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
config = Config(
num_inducing=len(x),
inner_layer_qsqrt_factor=1e-5,
likelihood_noise_variance=1e-2,
whiten=True, # whiten = False not supported yet in GPflux for this model
)
return build_constant_input_dim_deep_gp(X=x, num_layers=2, config=config)
def simple_two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
x_shape = x.shape[-1]
num_data = len(x)
Z = x.copy()
kernel_1 = gpflow.kernels.SquaredExponential()
inducing_variable_1 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_1 = GPLayer(
kernel_1,
inducing_variable_1,
num_data=num_data,
num_latent_gps=x_shape,
)
kernel_2 = gpflow.kernels.SquaredExponential()
inducing_variable_2 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_2 = GPLayer(
kernel_2,
inducing_variable_2,
num_data=num_data,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
return DeepGP([gp_layer_1, gp_layer_2], gpflow.likelihoods.Gaussian(0.01))
def separate_independent_kernel_two_layer_dgp_model(x: TensorType) -> DeepGP:
x = to_numpy(x)
x_shape = x.shape[-1]
num_data = len(x)
Z = x.copy()
kernel_list = [
gpflow.kernels.SquaredExponential(
variance=tf.exp(tf.random.normal([], dtype=gpflow.default_float())),
lengthscales=tf.exp(tf.random.normal([], dtype=gpflow.default_float())),
)
for _ in range(x_shape)
]
kernel_1 = construct_basic_kernel(kernel_list)
inducing_variable_1 = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(Z.copy())
)
gp_layer_1 = GPLayer(
kernel_1,
inducing_variable_1,
num_data=num_data,
num_latent_gps=x_shape,
)
kernel_2 = gpflow.kernels.SquaredExponential()
inducing_variable_2 = gpflow.inducing_variables.InducingPoints(Z.copy())
gp_layer_2 = GPLayer(
kernel_2,
inducing_variable_2,
num_data=num_data,
num_latent_gps=1,
mean_function=gpflow.mean_functions.Zero(),
)
return DeepGP([gp_layer_1, gp_layer_2], gpflow.likelihoods.Gaussian(0.01))
def trieste_deep_gaussian_process(
data: Dataset,
search_space: SearchSpace,
num_layers: int,
num_inducing_points: int,
learning_rate: float,
batch_size: int,
epochs: int,
fix_noise: bool = False,
) -> Tuple[DeepGaussianProcess, Dict[str, Any]]:
dgp = build_vanilla_deep_gp(data, search_space, num_layers, num_inducing_points)
if fix_noise:
dgp.likelihood_layer.likelihood.variance.assign(1e-5)
set_trainable(dgp.likelihood_layer, False)
def scheduler(epoch: int, lr: float) -> float:
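        # Drop the learning rate by a factor of 10 halfway through training.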
if epoch == epochs // 2:
return lr * 0.1
else:
return lr
fit_args = {
"batch_size": batch_size,
"epochs": epochs,
"verbose": 0,
"callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
}
optimizer = KerasOptimizer(tf.optimizers.Adam(learning_rate), fit_args)
model = DeepGaussianProcess(dgp, optimizer)
return model, fit_args
def two_layer_trieste_dgp(data: Dataset, search_space: SearchSpace) -> DeepGaussianProcess:
return trieste_deep_gaussian_process(data, search_space, 2, 10, 0.01, 5, 10)[0]
| 5,115 | 30.195122 | 91 | py |
trieste-develop | trieste-develop/tests/util/models/gpflux/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/models/keras/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for creating (Keras) neural network models to be used in the tests.
"""
from __future__ import annotations
from typing import Optional, Tuple
import tensorflow as tf
from packaging.version import Version
from trieste.data import Dataset
from trieste.models.keras import (
DeepEnsemble,
GaussianNetwork,
KerasEnsemble,
get_tensor_spec_from_data,
)
from trieste.models.optimizer import KerasOptimizer
from trieste.types import TensorType
def trieste_keras_ensemble_model(
example_data: Dataset,
ensemble_size: int,
independent_normal: bool = False,
) -> KerasEnsemble:
input_tensor_spec, output_tensor_spec = get_tensor_spec_from_data(example_data)
networks = [
GaussianNetwork(
input_tensor_spec,
output_tensor_spec,
hidden_layer_args=[
{"units": 32, "activation": "selu"},
{"units": 32, "activation": "selu"},
],
independent=independent_normal,
)
for _ in range(ensemble_size)
]
keras_ensemble = KerasEnsemble(networks)
return keras_ensemble
def trieste_deep_ensemble_model(
example_data: Dataset,
ensemble_size: int,
bootstrap_data: bool = False,
independent_normal: bool = False,
) -> Tuple[DeepEnsemble, KerasEnsemble, KerasOptimizer]:
keras_ensemble = trieste_keras_ensemble_model(example_data, ensemble_size, independent_normal)
optimizer = tf.keras.optimizers.Adam()
fit_args = {
"batch_size": 100,
"epochs": 1,
"callbacks": [],
"verbose": 0,
}
optimizer_wrapper = KerasOptimizer(optimizer, fit_args)
model = DeepEnsemble(keras_ensemble, optimizer_wrapper, bootstrap_data)
return model, keras_ensemble, optimizer_wrapper
def keras_optimizer_weights(optimizer: tf.keras.optimizers.Optimizer) -> Optional[TensorType]:
# optimizer weight API was changed in TF 2.11: https://github.com/keras-team/keras/issues/16983
if Version(tf.__version__) < Version("2.11"):
return optimizer.get_weights()
else:
return optimizer.variables[0]
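# Usage sketch (with hypothetical optimizers `opt_a` and `opt_b`): compare
# keras_optimizer_weights(opt_a) against keras_optimizer_weights(opt_b) to check that
# optimizer state was copied or restored, independently of the TF version.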
| 2,717 | 29.539326 | 99 | py |
trieste-develop | trieste-develop/tests/util/models/keras/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/models/gpflow/test_models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import quadratic, random_seed
from tests.util.models.gpflow.models import GaussianProcess
def _example_gaussian_process() -> GaussianProcess:
return GaussianProcess(
[quadratic, lambda x: quadratic(x) / 5.0],
[
tfp.math.psd_kernels.ExponentiatedQuadratic(amplitude=1.6, length_scale=1.0),
tfp.math.psd_kernels.ExponentiatedQuadratic(amplitude=1.6, length_scale=2.0),
],
)
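# The expected means below evaluate the two mean functions (quadratic and quadratic / 5)
# at the query points. The expected covariances follow the ExponentiatedQuadratic kernel,
# amplitude**2 * exp(-||x - x'||**2 / (2 * length_scale**2)): with amplitude 1.6 the
# prefactor is 1.6**2 = 2.56, and the exponents are squared distances divided by 2
# (length_scale 1.0) or by 8 (length_scale 2.0).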
# fmt: off
@pytest.mark.parametrize("xs, expected_mean, expected_cov", [
(
tf.constant([[0.0, -1.0], [-2.0, 3.0], [4.0, 5.0]]),
tf.constant([[1.0, 0.2], [13.0, 2.6], [41.0, 8.2]]),
2.56 / tf.exp([
[[0.0, 10.0, 26.0], [10.0, 0.0, 20.0], [26.0, 20.0, 0.0]],
[[0.0, 2.5, 6.5], [2.5, 0.0, 5.0], [6.5, 5.0, 0.0]],
])
),
(
tf.constant([
[[0.0, -1.0], [-2.0, 3.0], [4.0, 5.0]],
[[-3.0, 2.0], [4.0, 3.0], [-4.0, 6.0]],
]),
tf.constant([[
[1.0, 0.2], [13.0, 2.6], [41.0, 8.2]],
[[13.0, 2.6], [25.0, 5.0], [52.0, 10.4]]
]),
2.56 / tf.exp([
[
[[0.0, 10.0, 26.0], [10.0, 0.0, 20.0], [26.0, 20.0, 0.0]],
[[0.0, 2.5, 6.5], [2.5, 0.0, 5.0], [6.5, 5.0, 0.0]],
],
[
[[0.0, 25.0, 8.5], [25.0, 0.0, 36.5], [8.5, 36.5, 0.0]],
[[0.0, 6.25, 2.125], [6.25, 0.0, 9.125], [2.125, 9.125, 0.0]],
]
])
)
])
# fmt: on
def test_gaussian_process_predict_joint(
xs: tf.Tensor, expected_mean: tf.Tensor, expected_cov: tf.Tensor
) -> None:
mean, cov = _example_gaussian_process().predict_joint(xs)
npt.assert_allclose(mean, expected_mean)
npt.assert_allclose(cov, expected_cov, rtol=2e-6)
# fmt: off
@pytest.mark.parametrize("xs, expected_mean, expected_var", [
(
tf.constant([[0.0, -1.0], [-2.0, 3.0], [4.0, 5.0]]),
tf.constant([[1.0, 0.2], [13.0, 2.6], [41.0, 8.2]]),
tf.fill([3, 2], 2.56)
),
(
tf.constant([
[[0.0, -1.0], [-2.0, 3.0], [4.0, 5.0]],
[[-3.0, 2.0], [4.0, 3.0], [-4.0, 6.0]],
]),
tf.constant([
[[1.0, 0.2], [13.0, 2.6], [41.0, 8.2]],
[[13.0, 2.6], [25.0, 5.0], [52.0, 10.4]]
]),
tf.fill([2, 3, 2], 2.56)
)
])
# fmt: on
def test_gaussian_process_predict(
xs: tf.Tensor, expected_mean: tf.Tensor, expected_var: tf.Tensor
) -> None:
mean, var = _example_gaussian_process().predict(xs)
npt.assert_allclose(mean, expected_mean)
npt.assert_allclose(var, expected_var)
@random_seed
def test_gaussian_process_sample() -> None:
# fmt: off
samples = _example_gaussian_process().sample(tf.constant([
[[0.0, -1.0], [-2.0, 3.0], [4.0, 5.0]],
[[-3.0, 2.0], [4.0, 3.0], [-4.0, 6.0]],
]), 10_000)
npt.assert_allclose(tf.reduce_mean(samples, axis=-3), [
[[1.0, 0.2], [13.0, 2.6], [41.0, 8.2]],
[[13.0, 2.6], [25.0, 5.0], [52.0, 10.4]]
], rtol=0.02)
variance = tf.math.reduce_variance(samples, axis=-3)
npt.assert_allclose(variance, tf.fill([2, 3, 2], 2.56), rtol=0.02)
# fmt: on
def test_gaussian_process_get_observation_noise() -> None:
noise = _example_gaussian_process().get_observation_noise()
npt.assert_equal(noise, tf.constant(1.0))
def test_gaussian_process_covariance_between_points() -> None:
x = tf.reshape(tf.linspace(0.0, 5.0, 4), (-1, 1))
x = tf.cast(x, dtype=tf.float32)
query_points_1 = tf.concat([0.5 * x, 0.5 * x], 0)
query_points_2 = tf.concat([2 * x, 2 * x, 2 * x], 0)
all_query_points = tf.concat([query_points_1, query_points_2], 0)
_, predictive_covariance = _example_gaussian_process().predict_joint(all_query_points)
expected_covariance = predictive_covariance[:, :8, 8:]
actual_covariance = _example_gaussian_process().covariance_between_points(
query_points_1, query_points_2
)
npt.assert_allclose(expected_covariance, actual_covariance, atol=1e-5)
| 4,758 | 33.23741 | 90 | py |
trieste-develop | trieste-develop/tests/util/models/gpflow/models.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable, Sequence
from typing import Optional
import gpflow
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.models import GPR, SGPR, SVGP, VGP, GPModel
from typing_extensions import Protocol
from tests.util.misc import SequenceN, quadratic
from trieste.data import Dataset
from trieste.models import (
ProbabilisticModel,
ReparametrizationSampler,
TrainableProbabilisticModel,
TrajectorySampler,
)
from trieste.models.gpflow import (
BatchReparametrizationSampler,
GPflowPredictor,
IndependentReparametrizationSampler,
RandomFourierFeatureTrajectorySampler,
)
from trieste.models.gpflow.interface import SupportsCovarianceBetweenPoints
from trieste.models.interfaces import (
HasReparamSampler,
HasTrajectorySampler,
SupportsCovarianceWithTopFidelity,
SupportsGetKernel,
SupportsGetObservationNoise,
)
from trieste.models.optimizer import Optimizer
from trieste.types import TensorType
def rbf() -> tfp.math.psd_kernels.ExponentiatedQuadratic:
"""
:return: A :class:`tfp.math.psd_kernels.ExponentiatedQuadratic` with default arguments.
"""
return tfp.math.psd_kernels.ExponentiatedQuadratic()
class PseudoTrainableProbModel(TrainableProbabilisticModel, Protocol):
"""A model that does nothing on :meth:`update` and :meth:`optimize`."""
def update(self, dataset: Dataset) -> None:
return
def optimize(self, dataset: Dataset) -> None:
return
class GaussianMarginal(ProbabilisticModel):
"""A probabilistic model with Gaussian marginal distribution. Assumes events of shape [N]."""
def sample(self, query_points: TensorType, num_samples: int) -> TensorType:
mean, var = self.predict(query_points)
samples = tfp.distributions.Normal(mean, tf.sqrt(var)).sample(num_samples)
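        # Move the leading sample axis produced by tfp to sit just before the last two
        # (points and output) axes, giving samples of shape [..., num_samples, N, L].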
dim_order = tf.range(tf.rank(samples))
return tf.transpose(samples, tf.concat([dim_order[1:-2], [0], dim_order[-2:]], -1))
class GaussianProcess(
GaussianMarginal, SupportsCovarianceBetweenPoints, SupportsGetObservationNoise
):
"""A (static) Gaussian process over a vector random variable."""
def __init__(
self,
mean_functions: Sequence[Callable[[TensorType], TensorType]],
kernels: Sequence[tfp.math.psd_kernels.PositiveSemidefiniteKernel],
noise_variance: float = 1.0,
):
self._mean_functions = mean_functions
self._kernels = kernels
self._noise_variance = noise_variance
def __repr__(self) -> str:
return f"GaussianProcess({self._mean_functions!r}, {self._kernels!r})"
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
mean, cov = self.predict_joint(query_points[..., None, :])
return tf.squeeze(mean, -2), tf.squeeze(cov, [-2, -1])
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
means = [f(query_points) for f in self._mean_functions]
covs = [k.tensor(query_points, query_points, 1, 1)[..., None, :, :] for k in self._kernels]
return tf.concat(means, axis=-1), tf.concat(covs, axis=-3)
def get_observation_noise(self) -> TensorType:
return tf.constant(self._noise_variance)
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
covs = [
k.tensor(query_points_1, query_points_2, 1, 1)[..., None, :, :] for k in self._kernels
]
return tf.concat(covs, axis=-3)
class GaussianProcessWithoutNoise(GaussianMarginal, HasReparamSampler):
"""A (static) Gaussian process over a vector random variable with independent reparam sampler
but without noise variance."""
def __init__(
self,
mean_functions: Sequence[Callable[[TensorType], TensorType]],
kernels: Sequence[tfp.math.psd_kernels.PositiveSemidefiniteKernel],
):
self._mean_functions = mean_functions
self._kernels = kernels
def __repr__(self) -> str:
return f"GaussianProcessWithoutNoise({self._mean_functions!r}, {self._kernels!r})"
def predict(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
mean, cov = self.predict_joint(query_points[..., None, :])
return tf.squeeze(mean, -2), tf.squeeze(cov, [-2, -1])
def predict_joint(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
means = [f(query_points) for f in self._mean_functions]
covs = [k.tensor(query_points, query_points, 1, 1)[..., None, :, :] for k in self._kernels]
return tf.concat(means, axis=-1), tf.concat(covs, axis=-3)
def covariance_between_points(
self, query_points_1: TensorType, query_points_2: TensorType
) -> TensorType:
covs = [
k.tensor(query_points_1, query_points_2, 1, 1)[..., None, :, :] for k in self._kernels
]
return tf.concat(covs, axis=-3)
def reparam_sampler(
self: GaussianProcessWithoutNoise, num_samples: int
) -> ReparametrizationSampler[GaussianProcessWithoutNoise]:
return IndependentReparametrizationSampler(num_samples, self)
class GaussianProcessWithSamplers(GaussianProcess, HasReparamSampler):
"""A (static) Gaussian process over a vector random variable with independent reparam sampler"""
def reparam_sampler(
self, num_samples: int
) -> ReparametrizationSampler[GaussianProcessWithSamplers]:
return IndependentReparametrizationSampler(num_samples, self)
class GaussianProcessWithBatchSamplers(GaussianProcess, HasReparamSampler):
"""A (static) Gaussian process over a vector random variable with a batch reparam sampler"""
def reparam_sampler(
self, num_samples: int
) -> ReparametrizationSampler[GaussianProcessWithBatchSamplers]:
return BatchReparametrizationSampler(num_samples, self)
class QuadraticMeanAndRBFKernel(GaussianProcess, SupportsGetKernel, SupportsGetObservationNoise):
r"""A Gaussian process with scalar quadratic mean and RBF kernel."""
def __init__(
self,
*,
x_shift: float | SequenceN[float] | TensorType = 0,
kernel_amplitude: float | TensorType | None = None,
noise_variance: float = 1.0,
):
self.kernel = tfp.math.psd_kernels.ExponentiatedQuadratic(kernel_amplitude)
self.mean_function = lambda x: quadratic(x - tf.cast(x_shift, dtype=x.dtype))
super().__init__([self.mean_function], [self.kernel], noise_variance)
def __repr__(self) -> str:
return "QuadraticMeanAndRBFKernel()"
def get_kernel(self) -> tfp.math.psd_kernels.PositiveSemidefiniteKernel:
return self.kernel
def get_mean_function(self) -> Callable[[TensorType], TensorType]:
return self.mean_function
def mock_data() -> tuple[tf.Tensor, tf.Tensor]:
return (
tf.constant([[1.1], [2.2], [3.3], [4.4]], gpflow.default_float()),
tf.constant([[1.2], [3.4], [5.6], [7.8]], gpflow.default_float()),
)
class QuadraticMeanAndRBFKernelWithSamplers(
QuadraticMeanAndRBFKernel, HasTrajectorySampler, HasReparamSampler
):
r"""
A Gaussian process with scalar quadratic mean, an RBF kernel and
trajectory_sampler and reparam_sampler methods.
"""
def __init__(
self,
dataset: Dataset,
*,
x_shift: float | SequenceN[float] | TensorType = 0,
kernel_amplitude: float | TensorType | None = None,
noise_variance: float = 1.0,
):
super().__init__(
x_shift=x_shift, kernel_amplitude=kernel_amplitude, noise_variance=noise_variance
)
        self._dataset = (  # mimic the fact that our models store their data as variables
tf.Variable(
dataset.query_points, trainable=False, shape=[None, *dataset.query_points.shape[1:]]
),
tf.Variable(
dataset.observations, trainable=False, shape=[None, *dataset.observations.shape[1:]]
),
)
def trajectory_sampler(self) -> TrajectorySampler[QuadraticMeanAndRBFKernelWithSamplers]:
return RandomFourierFeatureTrajectorySampler(self, 100)
def reparam_sampler(
self, num_samples: int
) -> ReparametrizationSampler[QuadraticMeanAndRBFKernelWithSamplers]:
return IndependentReparametrizationSampler(num_samples, self)
def get_internal_data(self) -> Dataset:
return Dataset(self._dataset[0], self._dataset[1])
def update(self, dataset: Dataset) -> None:
self._dataset[0].assign(dataset.query_points)
self._dataset[1].assign(dataset.observations)
def optimize(self, dataset: Dataset) -> None:
pass
class MultiFidelityQuadraticMeanAndRBFKernel(
QuadraticMeanAndRBFKernel, SupportsCovarianceWithTopFidelity
):
r"""
A Gaussian process with scalar quadratic mean, an RBF kernel and
trajectory_sampler and reparam_sampler methods.
"""
def __init__(
self,
*,
x_shift: float | SequenceN[float] | TensorType = 0,
kernel_amplitude: float | TensorType | None = None,
noise_variance: float = 1.0,
):
super().__init__(
x_shift=x_shift, kernel_amplitude=kernel_amplitude, noise_variance=noise_variance
)
@property
def num_fidelities(self) -> int:
return 5
def covariance_with_top_fidelity(self, x: TensorType) -> TensorType:
mean, _ = self.predict(x)
return tf.ones_like(mean, dtype=mean.dtype) # dummy covariances of correct shape
def predict_y(self, query_points: TensorType) -> tuple[TensorType, TensorType]:
fmean, fvar = self.predict(query_points)
yvar = fvar + tf.constant(1.0, dtype=fmean.dtype) # dummy noise variance
return fmean, yvar
class MultiFidelityQuadraticMeanAndRBFKernelWithSamplers(
QuadraticMeanAndRBFKernelWithSamplers, SupportsCovarianceWithTopFidelity
):
r"""
A Gaussian process with scalar quadratic mean, an RBF kernel and
trajectory_sampler and reparam_sampler methods.
"""
def __init__(
self,
dataset: Dataset,
*,
x_shift: float | SequenceN[float] | TensorType = 0,
kernel_amplitude: float | TensorType | None = None,
noise_variance: float = 1.0,
):
super().__init__(
dataset,
x_shift=x_shift,
kernel_amplitude=kernel_amplitude,
noise_variance=noise_variance,
)
@property
def num_fidelities(self) -> int:
return 5
def covariance_with_top_fidelity(self, x: TensorType) -> TensorType:
mean, _ = self.predict(x)
return tf.ones_like(mean, dtype=mean.dtype) # dummy covariances of correct shape
class QuadraticMeanAndRBFKernelWithBatchSamplers(
QuadraticMeanAndRBFKernel, HasTrajectorySampler, HasReparamSampler
):
r"""
A Gaussian process with scalar quadratic mean, an RBF kernel and
trajectory_sampler and batch reparam_sampler methods.
"""
def __init__(
self,
dataset: Dataset,
*,
x_shift: float | SequenceN[float] | TensorType = 0,
kernel_amplitude: float | TensorType | None = None,
noise_variance: float = 1.0,
):
super().__init__(
x_shift=x_shift, kernel_amplitude=kernel_amplitude, noise_variance=noise_variance
)
        self._dataset = (  # mimic the fact that our models store their data as variables
tf.Variable(
dataset.query_points, trainable=False, shape=[None, *dataset.query_points.shape[1:]]
),
tf.Variable(
dataset.observations, trainable=False, shape=[None, *dataset.observations.shape[1:]]
),
)
def trajectory_sampler(self) -> TrajectorySampler[QuadraticMeanAndRBFKernelWithBatchSamplers]:
return RandomFourierFeatureTrajectorySampler(self, 100)
def reparam_sampler(
self, num_samples: int
) -> ReparametrizationSampler[QuadraticMeanAndRBFKernelWithBatchSamplers]:
return BatchReparametrizationSampler(num_samples, self)
def get_internal_data(self) -> Dataset:
return Dataset(self._dataset[0], self._dataset[1])
def update(self, dataset: Dataset) -> None:
self._dataset[0].assign(dataset.query_points)
self._dataset[1].assign(dataset.observations)
class ModelFactoryType(Protocol):
def __call__(
self, x: TensorType, y: TensorType, optimizer: Optimizer | None = None
) -> tuple[GPflowPredictor, Callable[[TensorType, TensorType], GPModel]]:
pass
def gpr_model(x: tf.Tensor, y: tf.Tensor) -> GPR:
return GPR((x, y), gpflow.kernels.Matern32())
def sgpr_model(x: tf.Tensor, y: tf.Tensor, num_latent_gps: int = 1) -> SGPR:
return SGPR((x, y), gpflow.kernels.Matern32(), x[:2], num_latent_gps=num_latent_gps)
def svgp_model(x: tf.Tensor, y: tf.Tensor, num_latent_gps: int = 1) -> SVGP:
return SVGP(
gpflow.kernels.Matern32(),
gpflow.likelihoods.Gaussian(),
x[:2],
num_data=len(x),
num_latent_gps=num_latent_gps,
)
def quadratic_mean_rbf_kernel_model(dataset: Dataset) -> QuadraticMeanAndRBFKernelWithSamplers:
model = QuadraticMeanAndRBFKernelWithSamplers(
noise_variance=tf.constant(0.9, dtype=tf.float64), dataset=dataset
)
model.kernel = (
gpflow.kernels.RBF()
) # need a gpflow kernel object for random feature decompositions
return model
def svgp_model_with_mean(
x: tf.Tensor, y: tf.Tensor, whiten: bool, num_inducing_points: int, num_latent_gps: int = 1
) -> SVGP:
mean_function = gpflow.mean_functions.Linear(
A=0.37 * np.ones((1, 1), dtype=gpflow.default_float()),
b=0.19 * np.ones((1,), dtype=gpflow.default_float()),
)
q_mu = np.random.randn(num_inducing_points, 1)
q_sqrt = np.tril(np.random.randn(1, num_inducing_points, num_inducing_points))
m = SVGP(
gpflow.kernels.Matern32(variance=0.91),
gpflow.likelihoods.Gaussian(variance=0.23),
x[:num_inducing_points],
num_data=len(x),
num_latent_gps=num_latent_gps,
mean_function=mean_function,
whiten=whiten,
q_mu=q_mu,
q_sqrt=q_sqrt,
)
gpflow.set_trainable(mean_function, False)
gpflow.set_trainable(m.inducing_variable, False)
return m
def vgp_model(x: tf.Tensor, y: tf.Tensor, num_latent_gps: int = 1) -> VGP:
likelihood = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Matern32()
m = VGP((x, y), kernel, likelihood, num_latent_gps=num_latent_gps)
return m
def vgp_matern_model(x: tf.Tensor, y: tf.Tensor) -> VGP:
likelihood = gpflow.likelihoods.Gaussian()
kernel = gpflow.kernels.Matern32(lengthscales=0.2)
m = VGP((x, y), kernel, likelihood)
return m
def svgp_model_by_type(
x: tf.Tensor,
type: str,
whiten: bool,
num_inducing_points: int = 3,
noise_var: Optional[float] = None,
mean_function: Optional[gpflow.mean_functions.MeanFunction] = None,
) -> SVGP:
num_latent_gps = 2
ker1 = gpflow.kernels.Matern32(variance=0.8, lengthscales=0.2)
ker2 = gpflow.kernels.Matern52(variance=0.3, lengthscales=0.7)
if type == "shared+shared":
kernel = gpflow.kernels.SharedIndependent(ker1, output_dim=2)
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(x[:num_inducing_points])
)
elif type == "separate+shared":
kernel = gpflow.kernels.SeparateIndependent([ker1, ker2])
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(x[:num_inducing_points])
)
elif type == "separate+separate":
kernel = gpflow.kernels.SeparateIndependent([ker1, ker2])
Zs = [
x[(num_inducing_points * i) : (num_inducing_points * i + num_inducing_points)]
for i in range(2)
]
iv_list = [gpflow.inducing_variables.InducingPoints(Z) for Z in Zs]
iv = gpflow.inducing_variables.SeparateIndependentInducingVariables(iv_list)
else:
if "single" in type:
num_latent_gps = 1
kernel = ker1
iv = x[:num_inducing_points]
return SVGP(
kernel,
gpflow.likelihoods.Gaussian(noise_var),
iv,
num_data=len(x),
num_latent_gps=num_latent_gps,
whiten=whiten,
mean_function=mean_function,
)
def two_output_sgpr_model(x: tf.Tensor, y: tf.Tensor, type: str = "separate+separate") -> SGPR:
ker1 = gpflow.kernels.Matern32()
ker2 = gpflow.kernels.Matern52()
if type == "shared+shared":
kernel = gpflow.kernels.SharedIndependent(ker1, output_dim=2)
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(x[:3])
)
elif type == "separate+shared":
kernel = gpflow.kernels.SeparateIndependent([ker1, ker2])
iv = gpflow.inducing_variables.SharedIndependentInducingVariables(
gpflow.inducing_variables.InducingPoints(x[:3])
)
elif type == "separate+separate":
kernel = gpflow.kernels.SeparateIndependent([ker1, ker2])
Zs = [x[(3 * i) : (3 * i + 3)] for i in range(2)]
iv_list = [gpflow.inducing_variables.InducingPoints(Z) for Z in Zs]
iv = gpflow.inducing_variables.SeparateIndependentInducingVariables(iv_list)
else:
kernel = ker1
iv = x[:3]
return SGPR((x, y), kernel, iv, num_latent_gps=2)
def vgp_model_bernoulli(x: tf.Tensor, y: tf.Tensor) -> VGP:
likelihood = gpflow.likelihoods.Bernoulli()
kernel = gpflow.kernels.Matern32(lengthscales=0.2)
m = VGP((x, y), kernel, likelihood)
return m
| 18,548 | 35.017476 | 100 | py |
trieste-develop | trieste-develop/tests/util/models/gpflow/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/util/acquisition/sampler.py | import tensorflow as tf
from tests.util.models.gpflow.models import QuadraticMeanAndRBFKernel
from trieste.models.gpflow import BatchReparametrizationSampler
from trieste.types import TensorType
from trieste.utils import DEFAULTS
class PseudoBatchReparametrizationSampler(BatchReparametrizationSampler):
"""A Sampler that return the specified sample as deterministic samples`."""
def __init__(self, samples: TensorType):
"""
        :param samples: The samples to return, of shape `[S, B, L]`, where `S` is the
            `sample_size`, `B` the number of points per batch, and `L` the dimension of
            the model's predictive distribution.
"""
tf.debugging.assert_shapes(
[(samples, ["S", "B", "L"])],
message="This sampler takes samples of shape "
"[sample_size, batch_points, output_dimension].",
)
self._samples = samples # [S, B, L]
super().__init__(1, QuadraticMeanAndRBFKernel())
def __repr__(self) -> str:
""""""
return f"{self.__class__.__name__}({self._samples!r})"
def sample(self, at: TensorType, *, jitter: float = DEFAULTS.JITTER) -> TensorType:
"""
:param at: Batches of query points at which to sample the predictive distribution, with
shape `[..., B, D]`, for batches of size `B` of points of dimension `D`.
        :param jitter: Unused beyond validation; retained for interface compatibility.
:return: The samples, of shape `[..., S, B, L]`, where `S` is the `sample_size`, `B` the
number of points per batch, and `L` the dimension of the model's predictive
distribution.
"""
tf.debugging.assert_rank_at_least(at, 2)
tf.debugging.assert_greater_equal(jitter, 0.0)
batch_size = at.shape[-2]
tf.debugging.assert_positive(batch_size)
tf.assert_equal(batch_size, self._samples.shape[-2]) # assert B is equivalent
return tf.broadcast_to(self._samples, [*at.shape[:-2], *self._samples.shape])
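# Usage sketch: fixed samples of shape [S=2, B=1, L=1] are broadcast over any batch of
# query points whose batch size matches B, e.g.
#   sampler = PseudoBatchReparametrizationSampler(tf.constant([[[0.0]], [[1.0]]]))
#   sampler.sample(tf.zeros([5, 1, 3]))  # shape [5, 2, 1, 1]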
| 1,971 | 40.083333 | 96 | py |
trieste-develop | trieste-develop/tests/util/acquisition/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/integration/test_ask_tell_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import copy
import pickle
import tempfile
from typing import Callable
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition import LocalPenalization
from trieste.acquisition.rule import (
AcquisitionRule,
AsynchronousGreedy,
AsynchronousRuleState,
EfficientGlobalOptimization,
TrustRegion,
)
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.bayesian_optimizer import OptimizationResult, Record
from trieste.logging import set_step_number, tensorboard_writer
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
from trieste.objectives import ScaledBranin, SimpleQuadratic
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box, SearchSpace
from trieste.types import State, TensorType
# Optimizer parameters for testing against the branin function.
# We use a copy of these for a quicker test against a simple quadratic function
# (copying is necessary as some of the acquisition rules are stateful).
OPTIMIZER_PARAMS = (
"num_steps, reload_state, acquisition_rule_fn",
[
pytest.param(
20, False, lambda: EfficientGlobalOptimization(), id="EfficientGlobalOptimization"
),
pytest.param(
20,
True,
lambda: EfficientGlobalOptimization(),
id="EfficientGlobalOptimization/reload_state",
),
pytest.param(15, False, lambda: TrustRegion(), id="TrustRegion"),
pytest.param(16, True, lambda: TrustRegion(), id="TrustRegion/reload_state"),
pytest.param(
10,
False,
lambda: EfficientGlobalOptimization(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization",
),
pytest.param(
30,
False,
lambda: AsynchronousGreedy(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
),
id="LocalPenalization/AsynchronousGreedy",
),
],
)
@random_seed
@pytest.mark.slow # to run this, add --runslow yes to the pytest command
@pytest.mark.parametrize(*OPTIMIZER_PARAMS)
def test_ask_tell_optimizer_finds_minima_of_the_scaled_branin_function(
num_steps: int,
reload_state: bool,
acquisition_rule_fn: Callable[
[], AcquisitionRule[TensorType, SearchSpace, TrainableProbabilisticModel]
]
| Callable[
[],
AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State],
Box,
TrainableProbabilisticModel,
],
],
) -> None:
_test_ask_tell_optimization_finds_minima(True, num_steps, reload_state, acquisition_rule_fn)
@random_seed
@pytest.mark.parametrize(*copy.deepcopy(OPTIMIZER_PARAMS))
def test_ask_tell_optimizer_finds_minima_of_simple_quadratic(
num_steps: int,
reload_state: bool,
acquisition_rule_fn: Callable[
[], AcquisitionRule[TensorType, SearchSpace, TrainableProbabilisticModel]
]
| Callable[
[],
AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State],
Box,
TrainableProbabilisticModel,
],
],
) -> None:
    # for speed reasons we sometimes test with a simple quadratic defined on the same search
    # space as Branin; we currently assume that every rule should be able to solve this in 5 steps
_test_ask_tell_optimization_finds_minima(
False, min(num_steps, 5), reload_state, acquisition_rule_fn
)
def _test_ask_tell_optimization_finds_minima(
optimize_branin: bool,
num_steps: int,
reload_state: bool,
acquisition_rule_fn: Callable[
[], AcquisitionRule[TensorType, SearchSpace, TrainableProbabilisticModel]
]
| Callable[
[],
AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State],
Box,
TrainableProbabilisticModel,
],
],
) -> None:
    # For the case when the optimization state is saved and reloaded on each iteration,
    # we need to use a new acquisition function object to imitate real-life usage.
    # Hence an acquisition rule factory method is passed in, instead of a rule object itself;
    # it is then called to create a new rule whenever needed in the test.
search_space = ScaledBranin.search_space
initial_query_points = search_space.sample(5)
observer = mk_observer(ScaledBranin.objective if optimize_branin else SimpleQuadratic.objective)
initial_data = observer(initial_query_points)
model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-7)
)
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
with tensorboard_writer(summary_writer):
set_step_number(0)
ask_tell = AskTellOptimizer(search_space, initial_data, model, acquisition_rule_fn())
for i in range(1, num_steps + 1):
                # two scenarios are tested here, depending on the `reload_state` parameter:
                # in the first, the same optimizer object is always used;
                # in the second, a new optimizer is created at each step from the saved state
set_step_number(i)
new_point = ask_tell.ask()
if reload_state:
state: Record[
None | State[TensorType, AsynchronousRuleState | TrustRegion.State]
] = ask_tell.to_record()
written_state = pickle.dumps(state)
new_data_point = observer(new_point)
if reload_state:
state = pickle.loads(written_state)
ask_tell = AskTellOptimizer.from_record(
state, search_space, acquisition_rule_fn()
)
ask_tell.tell(new_data_point)
result: OptimizationResult[
None | State[TensorType, AsynchronousRuleState | TrustRegion.State]
] = ask_tell.to_result()
dataset = result.try_get_final_dataset()
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
best_y = dataset.observations[arg_min_idx]
best_x = dataset.query_points[arg_min_idx]
if optimize_branin:
relative_minimizer_err = tf.abs(
(best_x - ScaledBranin.minimizers) / ScaledBranin.minimizers
)
# these accuracies are the current best for the given number of optimization steps,
            # which makes this a regression test
assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.05, axis=-1), axis=0)
npt.assert_allclose(best_y, ScaledBranin.minimum, rtol=0.005)
else:
absolute_minimizer_err = tf.abs(best_x - SimpleQuadratic.minimizers)
assert tf.reduce_any(tf.reduce_all(absolute_minimizer_err < 0.05, axis=-1), axis=0)
npt.assert_allclose(best_y, SimpleQuadratic.minimum, rtol=0.05)
| 7,921 | 36.367925 | 100 | py |
trieste-develop | trieste-develop/tests/integration/test_bayesian_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import tempfile
from functools import partial
from pathlib import Path
from typing import Any, List, Mapping, Optional, Tuple, Type, cast
import dill
import gpflow
import numpy.testing as npt
import pytest
import tensorflow as tf
from _pytest.mark import ParameterSet
from tests.util.misc import random_seed
from trieste.acquisition import (
GIBBON,
AcquisitionFunctionClass,
AugmentedExpectedImprovement,
BatchExpectedImprovement,
BatchMonteCarloExpectedImprovement,
Fantasizer,
GreedyAcquisitionFunctionBuilder,
GreedyContinuousThompsonSampling,
LocalPenalization,
MinValueEntropySearch,
MonteCarloAugmentedExpectedImprovement,
MonteCarloExpectedImprovement,
MultipleOptimismNegativeLowerConfidenceBound,
ParallelContinuousThompsonSampling,
)
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import (
TURBO,
AcquisitionRule,
AsynchronousGreedy,
AsynchronousOptimization,
AsynchronousRuleState,
BatchHypervolumeSharpeRatioIndicator,
DiscreteThompsonSampling,
EfficientGlobalOptimization,
TrustRegion,
)
from trieste.acquisition.sampler import ThompsonSamplerFromTrajectory
from trieste.bayesian_optimizer import (
BayesianOptimizer,
FrozenRecord,
OptimizationResult,
TrainableProbabilisticModelType,
stop_at_minimum,
)
from trieste.logging import tensorboard_writer
from trieste.models import TrainableProbabilisticModel, TrajectoryFunctionClass
from trieste.models.gpflow import (
ConditionalImprovementReduction,
GaussianProcessRegression,
GPflowPredictor,
SparseGaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
build_gpr,
build_sgpr,
build_svgp,
)
from trieste.models.gpflux import DeepGaussianProcess, build_vanilla_deep_gp
from trieste.models.keras import DeepEnsemble, build_keras_ensemble
from trieste.models.optimizer import KerasOptimizer, Optimizer
from trieste.objectives import ScaledBranin, SimpleQuadratic
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box, SearchSpace
from trieste.types import State, TensorType
try:
import pymoo
except ImportError: # pragma: no cover (tested but not by coverage)
pymoo = None
# Optimizer parameters for testing GPR against the branin function.
# We also use these for a quicker test against a simple quadratic function
# (regenerating is necessary as some of the acquisition rules are stateful).
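# They are consumed below via @pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS()).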
def GPR_OPTIMIZER_PARAMS() -> Tuple[str, List[ParameterSet]]:
return (
"num_steps, acquisition_rule",
[
pytest.param(20, EfficientGlobalOptimization(), id="EfficientGlobalOptimization"),
pytest.param(
30,
EfficientGlobalOptimization(AugmentedExpectedImprovement().using(OBJECTIVE)),
id="AugmentedExpectedImprovement",
),
pytest.param(
20,
EfficientGlobalOptimization(
MonteCarloExpectedImprovement(int(1e3)).using(OBJECTIVE),
generate_continuous_optimizer(100),
),
id="MonteCarloExpectedImprovement",
),
pytest.param(
24,
EfficientGlobalOptimization(
MinValueEntropySearch(
ScaledBranin.search_space,
min_value_sampler=ThompsonSamplerFromTrajectory(sample_min_value=True),
).using(OBJECTIVE)
),
id="MinValueEntropySearch",
),
pytest.param(
12,
EfficientGlobalOptimization(
BatchExpectedImprovement(sample_size=100).using(OBJECTIVE),
num_query_points=3,
),
id="BatchExpectedImprovement",
),
pytest.param(
12,
EfficientGlobalOptimization(
BatchMonteCarloExpectedImprovement(sample_size=500).using(OBJECTIVE),
num_query_points=3,
),
id="BatchMonteCarloExpectedImprovement",
),
pytest.param(
12, AsynchronousOptimization(num_query_points=3), id="AsynchronousOptimization"
),
pytest.param(
15,
EfficientGlobalOptimization(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization",
),
pytest.param(
15,
AsynchronousGreedy(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization/AsynchronousGreedy",
),
pytest.param(
10,
EfficientGlobalOptimization(
GIBBON(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=2,
),
id="GIBBON",
),
pytest.param(
20,
EfficientGlobalOptimization(
MultipleOptimismNegativeLowerConfidenceBound(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="MultipleOptimismNegativeLowerConfidenceBound",
),
pytest.param(20, TrustRegion(), id="TrustRegion"),
pytest.param(
15,
TrustRegion(
EfficientGlobalOptimization(
MinValueEntropySearch(
ScaledBranin.search_space,
).using(OBJECTIVE)
)
),
id="TrustRegion/MinValueEntropySearch",
),
pytest.param(
10,
TURBO(ScaledBranin.search_space, rule=DiscreteThompsonSampling(500, 3)),
id="Turbo",
),
pytest.param(15, DiscreteThompsonSampling(500, 5), id="DiscreteThompsonSampling"),
pytest.param(
15,
EfficientGlobalOptimization(
Fantasizer(),
num_query_points=3,
),
id="Fantasizer",
),
pytest.param(
10,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=5,
),
id="GreedyContinuousThompsonSampling",
),
pytest.param(
10,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=5,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
15,
BatchHypervolumeSharpeRatioIndicator() if pymoo else None,
id="BatchHypevolumeSharpeRatioIndicator",
marks=pytest.mark.qhsri,
),
],
)
@random_seed
@pytest.mark.slow # to run this, add --runslow yes to the pytest command
@pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS())
def test_bayesian_optimizer_with_gpr_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, GaussianProcessRegression]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State], Box, GaussianProcessRegression
],
) -> None:
_test_optimizer_finds_minimum(
GaussianProcessRegression, num_steps, acquisition_rule, optimize_branin=True
)
@random_seed
@pytest.mark.parametrize(*GPR_OPTIMIZER_PARAMS())
def test_bayesian_optimizer_with_gpr_finds_minima_of_simple_quadratic(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, GaussianProcessRegression]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State], Box, GaussianProcessRegression
],
) -> None:
    # for speed reasons we sometimes test with a simple quadratic defined on the same search
    # space as branin; we currently assume that every rule should be able to solve this in 6 steps
_test_optimizer_finds_minimum(GaussianProcessRegression, min(num_steps, 6), acquisition_rule)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_vgp_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
VariationalGaussianProcess,
10,
EfficientGlobalOptimization[SearchSpace, VariationalGaussianProcess](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
)
@random_seed
@pytest.mark.parametrize("use_natgrads", [False, True])
def test_bayesian_optimizer_with_vgp_finds_minima_of_simple_quadratic(use_natgrads: bool) -> None:
    # regression test for [#406]; natgrads don't work well as a model of the objective,
    # so don't bother checking the results, just that it doesn't crash
_test_optimizer_finds_minimum(
VariationalGaussianProcess,
None if use_natgrads else 5,
EfficientGlobalOptimization[SearchSpace, GPflowPredictor](),
model_args={"use_natgrads": use_natgrads},
)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_svgp_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
SparseVariational,
40,
EfficientGlobalOptimization[SearchSpace, SparseVariational](),
optimize_branin=True,
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
_test_optimizer_finds_minimum(
SparseVariational,
25,
EfficientGlobalOptimization[SearchSpace, SparseVariational](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
optimize_branin=True,
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
@random_seed
def test_bayesian_optimizer_with_svgp_finds_minima_of_simple_quadratic() -> None:
_test_optimizer_finds_minimum(
SparseVariational,
5,
EfficientGlobalOptimization[SearchSpace, SparseVariational](),
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
_test_optimizer_finds_minimum(
SparseVariational,
5,
EfficientGlobalOptimization[SearchSpace, SparseVariational](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
model_args={"optimizer": Optimizer(gpflow.optimizers.Scipy(), compile=True)},
)
@random_seed
@pytest.mark.slow
def test_bayesian_optimizer_with_sgpr_finds_minima_of_scaled_branin() -> None:
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
9,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](),
optimize_branin=True,
)
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
20,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](
builder=ParallelContinuousThompsonSampling(), num_query_points=5
),
optimize_branin=True,
)
@random_seed
def test_bayesian_optimizer_with_sgpr_finds_minima_of_simple_quadratic() -> None:
_test_optimizer_finds_minimum(
SparseGaussianProcessRegression,
5,
EfficientGlobalOptimization[SearchSpace, SparseGaussianProcessRegression](),
)
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(25, DiscreteThompsonSampling(1000, 8), id="DiscreteThompsonSampling"),
pytest.param(
25,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=4,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
12,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=4,
),
id="GreedyContinuousThompsonSampling",
marks=pytest.mark.skip(reason="too fragile"),
),
],
)
def test_bayesian_optimizer_with_dgp_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepGaussianProcess],
) -> None:
_test_optimizer_finds_minimum(
DeepGaussianProcess, num_steps, acquisition_rule, optimize_branin=True
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(5, DiscreteThompsonSampling(1000, 1), id="DiscreteThompsonSampling"),
pytest.param(
5,
EfficientGlobalOptimization(
MonteCarloExpectedImprovement(int(1e2)), generate_continuous_optimizer(100)
),
id="MonteCarloExpectedImprovement",
),
pytest.param(
5,
EfficientGlobalOptimization(
MonteCarloAugmentedExpectedImprovement(int(1e2)), generate_continuous_optimizer(100)
),
id="MonteCarloAugmentedExpectedImprovement",
),
pytest.param(
2,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=5,
),
id="ParallelContinuousThompsonSampling",
),
pytest.param(
2,
EfficientGlobalOptimization(
GreedyContinuousThompsonSampling(),
num_query_points=5,
),
id="GreedyContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_dgp_finds_minima_of_simple_quadratic(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepGaussianProcess],
) -> None:
_test_optimizer_finds_minimum(DeepGaussianProcess, num_steps, acquisition_rule)
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(
60,
EfficientGlobalOptimization(),
id="EfficientGlobalOptimization",
marks=pytest.mark.skip(reason="too fragile"),
),
pytest.param(
30,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=4,
),
id="ParallelContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_deep_ensemble_finds_minima_of_scaled_branin(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble],
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
optimize_branin=True,
model_args={"bootstrap": True, "diversify": False},
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(5, EfficientGlobalOptimization(), id="EfficientGlobalOptimization"),
pytest.param(10, DiscreteThompsonSampling(1000, 1), id="DiscreteThompsonSampling"),
pytest.param(
5,
DiscreteThompsonSampling(1000, 1, thompson_sampler=ThompsonSamplerFromTrajectory()),
id="DiscreteThompsonSampling/ThompsonSamplerFromTrajectory",
),
],
)
def test_bayesian_optimizer_with_deep_ensemble_finds_minima_of_simple_quadratic(
num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble]
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
)
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(
5,
EfficientGlobalOptimization(
ParallelContinuousThompsonSampling(),
num_query_points=3,
),
id="ParallelContinuousThompsonSampling",
),
],
)
def test_bayesian_optimizer_with_PCTS_and_deep_ensemble_finds_minima_of_simple_quadratic(
num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace, DeepEnsemble]
) -> None:
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
model_args={"diversify": False},
)
_test_optimizer_finds_minimum(
DeepEnsemble,
num_steps,
acquisition_rule,
model_args={"diversify": True},
)
def _test_optimizer_finds_minimum(
model_type: Type[TrainableProbabilisticModelType],
num_steps: Optional[int],
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, TrainableProbabilisticModelType]
| AcquisitionRule[
State[TensorType, AsynchronousRuleState | TrustRegion.State],
Box,
TrainableProbabilisticModelType,
],
optimize_branin: bool = False,
model_args: Optional[Mapping[str, Any]] = None,
check_regret: bool = False,
) -> None:
model_args = model_args or {}
if optimize_branin:
search_space = ScaledBranin.search_space
minimizers = ScaledBranin.minimizers
minima = ScaledBranin.minimum
rtol_level = 0.005
num_initial_query_points = 5
else:
search_space = SimpleQuadratic.search_space
minimizers = SimpleQuadratic.minimizers
minima = SimpleQuadratic.minimum
rtol_level = 0.05
num_initial_query_points = 10
if model_type in [SparseVariational, DeepEnsemble]:
num_initial_query_points = 20
elif model_type in [DeepGaussianProcess]:
num_initial_query_points = 25
initial_query_points = search_space.sample(num_initial_query_points)
observer = mk_observer(ScaledBranin.objective if optimize_branin else SimpleQuadratic.objective)
initial_data = observer(initial_query_points)
model: TrainableProbabilisticModel # (really TPMType, but that's too complicated for mypy)
if model_type is GaussianProcessRegression:
if "LocalPenalization" in acquisition_rule.__repr__():
likelihood_variance = 1e-3
else:
likelihood_variance = 1e-5
gpr = build_gpr(initial_data, search_space, likelihood_variance=likelihood_variance)
model = GaussianProcessRegression(gpr, **model_args)
elif model_type is SparseGaussianProcessRegression:
sgpr = build_sgpr(initial_data, search_space, num_inducing_points=50)
model = SparseGaussianProcessRegression(
sgpr,
**model_args,
inducing_point_selector=ConditionalImprovementReduction(),
)
elif model_type is VariationalGaussianProcess:
empirical_variance = tf.math.reduce_variance(initial_data.observations)
kernel = gpflow.kernels.Matern52(variance=empirical_variance, lengthscales=[0.2, 0.2])
likelihood = gpflow.likelihoods.Gaussian(1e-3)
vgp = gpflow.models.VGP(initial_data.astuple(), kernel, likelihood)
gpflow.utilities.set_trainable(vgp.likelihood, False)
model = VariationalGaussianProcess(vgp, **model_args)
elif model_type is SparseVariational:
svgp = build_svgp(initial_data, search_space, num_inducing_points=50)
model = SparseVariational(
svgp,
**model_args,
inducing_point_selector=ConditionalImprovementReduction(),
)
elif model_type is DeepGaussianProcess:
model = DeepGaussianProcess(
partial(build_vanilla_deep_gp, initial_data, search_space), **model_args
)
elif model_type is DeepEnsemble:
keras_ensemble = build_keras_ensemble(initial_data, 5, 3, 25, "selu")
fit_args = {
"batch_size": 20,
"epochs": 200,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=25, restore_best_weights=True
)
],
"verbose": 0,
}
de_optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args)
model = DeepEnsemble(keras_ensemble, de_optimizer, **model_args)
else:
raise ValueError(f"Unsupported model_type '{model_type}'")
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
with tensorboard_writer(summary_writer):
result = BayesianOptimizer(observer, search_space).optimize(
num_steps or 2,
initial_data,
cast(TrainableProbabilisticModelType, model),
acquisition_rule,
track_state=True,
track_path=Path(tmpdirname) / "history",
early_stop_callback=stop_at_minimum(
# stop as soon as we find the minimum (but always run at least one step)
minima,
minimizers,
minimum_rtol=rtol_level,
minimum_step_number=2,
),
fit_model=not isinstance(acquisition_rule, TURBO),
fit_initial_model=False,
)
# check history saved ok
assert len(result.history) <= (num_steps or 2)
assert len(result.loaded_history) == len(result.history)
loaded_result: OptimizationResult[None] = OptimizationResult.from_path(
Path(tmpdirname) / "history"
)
assert loaded_result.final_result.is_ok
assert len(loaded_result.history) == len(result.history)
if num_steps is None:
# this test is just being run to check for crashes, not performance
pass
elif check_regret:
        # this just checks that the new observations are mostly better than the initial ones
assert isinstance(result.history[0], FrozenRecord)
initial_observations = result.history[0].load().dataset.observations
best_initial = tf.math.reduce_min(initial_observations)
better_than_initial = 0
num_points = len(initial_observations)
for i in range(1, len(result.history)):
step_history = result.history[i]
assert isinstance(step_history, FrozenRecord)
step_observations = step_history.load().dataset.observations
new_observations = step_observations[num_points:]
if tf.math.reduce_min(new_observations) < best_initial:
better_than_initial += 1
num_points = len(step_observations)
assert better_than_initial / len(result.history) > 0.6
else:
# this actually checks that we solved the problem
best_x, best_y, _ = result.try_get_optimal_point()
minimizer_err = tf.abs((best_x - minimizers) / minimizers)
assert tf.reduce_any(tf.reduce_all(minimizer_err < 0.05, axis=-1), axis=0)
npt.assert_allclose(best_y, minima, rtol=rtol_level)
if isinstance(acquisition_rule, EfficientGlobalOptimization):
acq_function = acquisition_rule.acquisition_function
assert acq_function is not None
# check that acquisition functions defined as classes aren't retraced unnecessarily
# they should be retraced for the optimizer's starting grid, L-BFGS, and logging
# (and possibly once more due to variable creation)
if isinstance(acq_function, (AcquisitionFunctionClass, TrajectoryFunctionClass)):
assert acq_function.__call__._get_tracing_count() in {3, 4} # type: ignore
# update trajectory function if necessary, so we can test it
if isinstance(acq_function, TrajectoryFunctionClass):
sampler = (
acquisition_rule._builder.single_builder._trajectory_sampler # type: ignore
)
sampler.update_trajectory(acq_function)
# check that acquisition functions can be saved and reloaded
acq_function_copy = dill.loads(dill.dumps(acq_function))
# and that the copy gives the same values as the original
batch_size = (
1
if isinstance(acquisition_rule._builder, GreedyAcquisitionFunctionBuilder)
else acquisition_rule._num_query_points
)
random_batch = tf.expand_dims(search_space.sample(batch_size), 0)
npt.assert_allclose(
acq_function(random_batch), acq_function_copy(random_batch), rtol=5e-7
)
| 26,051 | 36.058321 | 100 | py |
trieste-develop | trieste-develop/tests/integration/test_constrained_bayesian_optimization.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import math
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition import (
ExpectedConstrainedImprovement,
ExpectedImprovement,
ProbabilityOfFeasibility,
)
from trieste.acquisition.function import FastConstraintsFeasibility
from trieste.acquisition.interface import AcquisitionFunctionBuilder
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import ProbabilisticModel
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
from trieste.objectives import ConstrainedScaledBranin, ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import Tag, TensorType
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_function_builder",
[
pytest.param(12, ExpectedConstrainedImprovement, id="ExpectedConstrainedImprovement"),
],
)
def test_optimizer_finds_minima_of_Gardners_Simulation_1(
num_steps: int,
acquisition_function_builder: type[ExpectedConstrainedImprovement[ProbabilisticModel]],
) -> None:
"""
    Test the convergence of constrained BO algorithms on the
synthetic "simulation 1" experiment of :cite:`gardner14`.
"""
search_space = Box([0, 0], [6, 6])
def objective(input_data: TensorType) -> TensorType:
x, y = input_data[..., -2], input_data[..., -1]
z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
return z[:, None]
def constraint(input_data: TensorType) -> TensorType:
x, y = input_data[:, -2], input_data[:, -1]
z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
return z[:, None]
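    # the constraint above simplifies to cos(x + y), so the region of low constraint values
    # (compared against the 0.5 threshold used below) forms diagonal bands across the box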
MINIMUM = -2.0
MINIMIZER = [math.pi * 1.5, 0.0]
CONSTRAINT = "CONSTRAINT"
# observe both objective and constraint data
def observer(query_points: TensorType) -> dict[Tag, Dataset]:
return {
OBJECTIVE: Dataset(query_points, objective(query_points)),
CONSTRAINT: Dataset(query_points, constraint(query_points)),
}
num_initial_points = 6
initial_data = observer(search_space.sample(num_initial_points))
models = {
OBJECTIVE: GaussianProcessRegression(build_gpr(initial_data[OBJECTIVE], search_space)),
CONSTRAINT: GaussianProcessRegression(build_gpr(initial_data[CONSTRAINT], search_space)),
}
pof = ProbabilityOfFeasibility(threshold=0.5)
acq = acquisition_function_builder(OBJECTIVE, pof.using(CONSTRAINT))
rule: EfficientGlobalOptimization[Box, ProbabilisticModel] = EfficientGlobalOptimization(acq)
dataset = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, models, rule)
.try_get_final_datasets()[OBJECTIVE]
)
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
best_y = dataset.observations[arg_min_idx]
best_x = dataset.query_points[arg_min_idx]
relative_minimizer_err = tf.abs(best_x - MINIMIZER)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
assert tf.reduce_all(relative_minimizer_err < 0.03, axis=-1)
npt.assert_allclose(best_y, MINIMUM, rtol=0.005)
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_function_builder",
[
pytest.param(12, ExpectedImprovement, id="ExpectedImprovement"),
pytest.param(12, ExpectedConstrainedImprovement, id="ExpectedConstrainedImprovement"),
],
)
def test_constrained_optimizer_finds_minima_of_custom_problem(
num_steps: int,
acquisition_function_builder: type[AcquisitionFunctionBuilder[ProbabilisticModel]],
) -> None:
"""
    Test the convergence of constrained algorithms on the constrained scaled Branin problem.
"""
observer = mk_observer(ConstrainedScaledBranin.objective)
search_space = ConstrainedScaledBranin.search_space
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
MINIMUM = ConstrainedScaledBranin.minimum
MINIMIZER = ConstrainedScaledBranin.minimizers
OBJECTIVE = "OBJECTIVE"
model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-7)
)
if acquisition_function_builder is ExpectedConstrainedImprovement:
feas = FastConstraintsFeasibility(search_space) # Search space with constraints.
eci = acquisition_function_builder(OBJECTIVE, feas.using(OBJECTIVE)) # type: ignore
rule: EfficientGlobalOptimization[Box, ProbabilisticModel] = EfficientGlobalOptimization(
eci
)
# Note: use the search space without constraints for the penalty method.
bo_search_space = ScaledBranin.search_space
else:
ei = acquisition_function_builder(search_space) # type: ignore
rule = EfficientGlobalOptimization(ei)
bo_search_space = search_space
dataset = (
BayesianOptimizer(observer, bo_search_space)
.optimize(num_steps, initial_data, model, rule)
.try_get_final_datasets()[OBJECTIVE]
)
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
best_y = dataset.observations[arg_min_idx]
best_x = dataset.query_points[arg_min_idx]
relative_minimizer_err = tf.abs(best_x - MINIMIZER)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
assert tf.reduce_all(relative_minimizer_err < 0.03, axis=-1)
npt.assert_allclose(best_y, MINIMUM, rtol=0.1)
| 6,438 | 36.005747 | 99 | py |
trieste-develop | trieste-develop/tests/integration/test_multi_objective_bayesian_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition import (
HIPPO,
BatchMonteCarloExpectedHypervolumeImprovement,
ExpectedHypervolumeImprovement,
)
from trieste.acquisition.multi_objective.pareto import Pareto, get_reference_point
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import (
AcquisitionRule,
AsynchronousOptimization,
BatchHypervolumeSharpeRatioIndicator,
EfficientGlobalOptimization,
)
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.logging import set_summary_filter, tensorboard_writer
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
from trieste.models.interfaces import (
TrainableModelStack,
TrainablePredictJointReparamModelStack,
TrainableProbabilisticModel,
)
from trieste.objectives.multi_objectives import VLMOP2
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box
from trieste.types import TensorType
try:
import pymoo
except ImportError: # pragma: no cover (tested but not by coverage)
pymoo = None
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule, convergence_threshold",
[
pytest.param(
20,
EfficientGlobalOptimization(
ExpectedHypervolumeImprovement(tf.constant([1.1, 1.1], dtype=tf.float64)).using(
OBJECTIVE
)
),
-3.65,
id="ehvi_fixed_reference_pts",
),
pytest.param(
20,
EfficientGlobalOptimization(ExpectedHypervolumeImprovement().using(OBJECTIVE)),
-3.65,
id="ExpectedHypervolumeImprovement",
),
pytest.param(
15,
EfficientGlobalOptimization(
BatchMonteCarloExpectedHypervolumeImprovement(sample_size=500).using(OBJECTIVE),
num_query_points=2,
optimizer=generate_continuous_optimizer(num_initial_samples=500),
),
-3.44,
id="BatchMonteCarloExpectedHypervolumeImprovement/2",
),
pytest.param(
15,
EfficientGlobalOptimization(
BatchMonteCarloExpectedHypervolumeImprovement(
sample_size=500,
reference_point_spec=tf.constant([1.1, 1.1], dtype=tf.float64),
).using(OBJECTIVE),
num_query_points=2,
optimizer=generate_continuous_optimizer(num_initial_samples=500),
),
-3.44,
id="qehvi_vlmop2_q_2_fixed_reference_pts",
),
pytest.param(
10,
EfficientGlobalOptimization(
BatchMonteCarloExpectedHypervolumeImprovement(sample_size=250).using(OBJECTIVE),
num_query_points=4,
optimizer=generate_continuous_optimizer(num_initial_samples=500),
),
-3.2095,
id="BatchMonteCarloExpectedHypervolumeImprovement/4",
),
pytest.param(
10,
EfficientGlobalOptimization(
HIPPO(),
num_query_points=4,
optimizer=generate_continuous_optimizer(num_initial_samples=500),
),
-3.2095,
id="HIPPO/4",
),
pytest.param(
10,
AsynchronousOptimization(
BatchMonteCarloExpectedHypervolumeImprovement(sample_size=250).using(OBJECTIVE),
num_query_points=4,
optimizer=generate_continuous_optimizer(num_initial_samples=500),
),
-3.2095,
id="BatchMonteCarloExpectedHypervolumeImprovement/4",
),
pytest.param(
15,
BatchHypervolumeSharpeRatioIndicator(num_query_points=20) if pymoo else None,
-3.2095,
id="BatchHypervolumeSharpeRatioIndicator",
marks=pytest.mark.qhsri,
),
],
)
def test_multi_objective_optimizer_finds_pareto_front_of_the_VLMOP2_function(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, Box, TrainableProbabilisticModel],
convergence_threshold: float,
) -> None:
problem = VLMOP2(2)
search_space = problem.search_space
def build_stacked_independent_objectives_model(data: Dataset) -> TrainableModelStack:
gprs = []
for idx in range(2):
single_obj_data = Dataset(
data.query_points, tf.gather(data.observations, [idx], axis=1)
)
gpr = build_gpr(single_obj_data, search_space, likelihood_variance=1e-5)
gprs.append((GaussianProcessRegression(gpr), 1))
return TrainablePredictJointReparamModelStack(*gprs)
observer = mk_observer(problem.objective, OBJECTIVE)
initial_query_points = search_space.sample(10)
initial_data = observer(initial_query_points)
model = build_stacked_independent_objectives_model(initial_data[OBJECTIVE])
with tempfile.TemporaryDirectory() as tmpdirname:
summary_writer = tf.summary.create_file_writer(tmpdirname)
set_summary_filter(lambda x: True)
with tensorboard_writer(summary_writer):
dataset = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, {OBJECTIVE: model}, acquisition_rule)
.try_get_final_datasets()[OBJECTIVE]
)
    # A small log-hypervolume difference corresponds to a successful optimization.
ideal_pf = problem.gen_pareto_optimal_points(100)
ref_point = get_reference_point(ideal_pf)
obs_pareto = Pareto(dataset.observations)
if obs_pareto.front.shape[0] > 0:
obs_hv = obs_pareto.hypervolume_indicator(ref_point)
else:
obs_hv = 0
ideal_hv = Pareto(ideal_pf).hypervolume_indicator(ref_point)
assert tf.math.log(ideal_hv - obs_hv) < convergence_threshold
| 6,701 | 35.032258 | 96 | py |
trieste-develop | trieste-develop/tests/integration/test_active_learning.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for various forms of active learning implemented in Trieste.
"""
from __future__ import annotations
from typing import Callable
import gpflow
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from tests.util.misc import random_seed
from trieste.acquisition import LocalPenalization
from trieste.acquisition.function import (
BayesianActiveLearningByDisagreement,
ExpectedFeasibility,
IntegratedVarianceReduction,
PredictiveVariance,
)
from trieste.acquisition.function.function import MakePositive
from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.data import Dataset
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflow import (
GaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
build_gpr,
)
from trieste.models.gpflow.builders import build_svgp, build_vgp_classifier
from trieste.models.interfaces import FastUpdateModel, SupportsPredictJoint
from trieste.objectives import Branin, ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.observer import Observer
from trieste.space import Box, SearchSpace
from trieste.types import TensorType
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
(50, EfficientGlobalOptimization[SearchSpace, SupportsPredictJoint](PredictiveVariance())),
(
70,
EfficientGlobalOptimization(
IntegratedVarianceReduction(ScaledBranin.search_space.sample_sobol(1000))
),
),
],
)
def test_optimizer_learns_scaled_branin_function(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, SupportsPredictJoint],
) -> None:
"""
Ensure that the objective function is effectively learned, such that the final model
fits well and predictions are close to actual objective values.
"""
search_space = ScaledBranin.search_space
num_initial_points = 6
initial_query_points = search_space.sample_halton(num_initial_points)
observer = mk_observer(ScaledBranin.objective)
initial_data = observer(initial_query_points)
    # we set a performance criterion at 2% of the range;
    # the max absolute error needs to be better than this criterion
test_query_points = search_space.sample_sobol(10000 * search_space.dimension)
test_data = observer(test_query_points)
test_range = tf.reduce_max(test_data.observations) - tf.reduce_min(test_data.observations)
criterion = 0.02 * test_range
# we expect a model with initial data to fail the criterion
initial_model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-5)
)
initial_model.optimize(initial_data)
initial_predicted_means, _ = initial_model.model.predict_f(test_query_points)
initial_accuracy = tf.reduce_max(tf.abs(initial_predicted_means - test_data.observations))
assert not initial_accuracy < criterion
# after active learning the model should be much more accurate
model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-5)
)
final_model = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, model, acquisition_rule)
.try_get_final_model()
)
final_predicted_means, _ = final_model.model.predict_f(test_query_points) # type: ignore
final_accuracy = tf.reduce_max(tf.abs(final_predicted_means - test_data.observations))
assert initial_accuracy > final_accuracy
assert final_accuracy < criterion
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, acquisition_rule, threshold",
[
pytest.param(
50,
EfficientGlobalOptimization(ExpectedFeasibility(80, delta=1)),
80,
id="ExpectedFeasibility/80/1",
),
pytest.param(
50,
EfficientGlobalOptimization(ExpectedFeasibility(80, delta=2)),
80,
id="ExpectedFeasibility/80/2",
),
pytest.param(
70,
EfficientGlobalOptimization(ExpectedFeasibility(20, delta=1)),
20,
id="ExpectedFeasibility/20",
),
pytest.param(
25,
EfficientGlobalOptimization(
IntegratedVarianceReduction(Branin.search_space.sample_sobol(2000), 80.0),
num_query_points=3,
),
80.0,
id="IntegratedVarianceReduction/80",
),
pytest.param(
25,
EfficientGlobalOptimization(
IntegratedVarianceReduction(Branin.search_space.sample_sobol(2000), [78.0, 82.0]),
num_query_points=3,
),
80.0,
id="IntegratedVarianceReduction/[78, 82]",
),
pytest.param(
25,
EfficientGlobalOptimization(
LocalPenalization(
Branin.search_space,
base_acquisition_function_builder=MakePositive(
ExpectedFeasibility(80, delta=1)
),
),
num_query_points=3,
),
80.0,
id="LocalPenalization/MakePositive(ExpectedFeasibility)",
),
],
)
def test_optimizer_learns_feasibility_set_of_thresholded_branin_function(
num_steps: int,
acquisition_rule: AcquisitionRule[TensorType, SearchSpace, FastUpdateModel],
threshold: int,
) -> None:
"""
Ensure that the feasible set is sufficiently well learned, such that the final model
    classifies with a high degree of certainty whether points in the search space are
    in the feasible set or not.
"""
search_space = Branin.search_space
num_initial_points = 6
initial_query_points = search_space.sample_halton(num_initial_points)
observer = mk_observer(Branin.objective)
initial_data = observer(initial_query_points)
    # we set a performance criterion of 0.001 probability of required precision per point
    # for global points, and 0.01 for points close to the boundary
n_global = 10000 * search_space.dimension
n_boundary = 2000 * search_space.dimension
global_test, boundary_test = _get_feasible_set_test_data(
search_space, observer, n_global, n_boundary, threshold, range_pct=0.03
)
global_criterion = 0.001 * (1 - 0.001) * tf.cast(n_global, tf.float64)
boundary_criterion = 0.01 * (1 - 0.01) * tf.cast(n_boundary, tf.float64)
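    # (each test point contributes p * (1 - p) to the accuracy measure used below, so these
    # are the totals expected if every point were classified with probability 0.999 / 0.99)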
# we expect a model with initial data to fail the criteria
initial_model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-3)
)
initial_model.optimize(initial_data)
initial_accuracy_global = _get_excursion_accuracy(global_test, initial_model, threshold)
initial_accuracy_boundary = _get_excursion_accuracy(boundary_test, initial_model, threshold)
assert not initial_accuracy_global < global_criterion
assert not initial_accuracy_boundary < boundary_criterion
# after active learning the model should be much more accurate
model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-3)
)
final_model = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, model, acquisition_rule)
.try_get_final_model()
)
final_accuracy_global = _get_excursion_accuracy(global_test, final_model, threshold)
final_accuracy_boundary = _get_excursion_accuracy(boundary_test, final_model, threshold)
assert initial_accuracy_global > final_accuracy_global
assert initial_accuracy_boundary > final_accuracy_boundary
assert final_accuracy_global < global_criterion
assert final_accuracy_boundary < boundary_criterion
def _excursion_probability(
x: TensorType, model: TrainableProbabilisticModel, threshold: int
) -> TensorType:
mean, variance = model.model.predict_f(x) # type: ignore
normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
t = (mean - threshold) / tf.sqrt(variance)
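    # for f(x) ~ N(mean, variance), Phi(t) with t = (mean - threshold) / sqrt(variance)
    # is the probability that f(x) exceeds the threshold, i.e. the excursion probability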
return normal.cdf(t)
def _get_excursion_accuracy(
x: TensorType, model: TrainableProbabilisticModel, threshold: int
) -> float:
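    # the sum of per-point Bernoulli variances p * (1 - p) is a total classification
    # uncertainty, so smaller values correspond to a more decisive feasibility model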
prob = _excursion_probability(x, model, threshold)
accuracy = tf.reduce_sum(prob * (1 - prob))
return accuracy
def _get_feasible_set_test_data(
search_space: Box,
observer: Observer,
n_global: int,
n_boundary: int,
threshold: float,
range_pct: float = 0.01,
) -> tuple[TensorType, TensorType]:
boundary_done = False
global_done = False
boundary_points = tf.constant(0, dtype=tf.float64, shape=(0, search_space.dimension))
global_points = tf.constant(0, dtype=tf.float64, shape=(0, search_space.dimension))
while not boundary_done and not global_done:
test_query_points = search_space.sample(100000)
test_data = observer(test_query_points)
threshold_deviation = range_pct * (
tf.reduce_max(test_data.observations) # type: ignore
- tf.reduce_min(test_data.observations) # type: ignore
)
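        # the mask selects points whose objective lies within +/- threshold_deviation of the
        # threshold, i.e. points near the boundary of the feasible set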
mask = tf.reduce_all(
tf.concat(
[
test_data.observations > threshold - threshold_deviation, # type: ignore
test_data.observations < threshold + threshold_deviation, # type: ignore
],
axis=1,
),
axis=1,
)
boundary_points = tf.concat(
[boundary_points, tf.boolean_mask(test_query_points, mask)], axis=0
)
global_points = tf.concat(
[global_points, tf.boolean_mask(test_query_points, tf.logical_not(mask))], axis=0
)
if boundary_points.shape[0] > n_boundary:
boundary_done = True
if global_points.shape[0] > n_global:
global_done = True
return (
global_points[:n_global,],
boundary_points[:n_boundary,],
)
def vgp_classification_model(
initial_data: Dataset, search_space: Box
) -> VariationalGaussianProcess:
return VariationalGaussianProcess(
build_vgp_classifier(initial_data, search_space, noise_free=True)
)
def svgp_classification_model(initial_data: Dataset, search_space: Box) -> SparseVariational:
return SparseVariational(build_svgp(initial_data, search_space, classification=True))
@random_seed
@pytest.mark.slow
@pytest.mark.parametrize(
"num_steps, model_builder",
[
(20, vgp_classification_model),
(70, svgp_classification_model),
],
)
def test_bald_learner_learns_circle_function(
num_steps: int,
model_builder: Callable[[Dataset, Box], VariationalGaussianProcess | SparseVariational],
) -> None:
search_space = Box([-1, -1], [1, 1])
def circle(x: TensorType) -> TensorType:
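        # binary labels: 1 outside the circle of squared radius 0.5 centred at the origin, else 0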
return tf.cast((tf.reduce_sum(tf.square(x), axis=1, keepdims=True) - 0.5) > 0, tf.float64)
def ilink(f: TensorType) -> TensorType:
return gpflow.likelihoods.Bernoulli().invlink(f).numpy()
num_initial_points = 10
initial_query_points = search_space.sample(num_initial_points)
observer = mk_observer(circle)
initial_data = observer(initial_query_points)
    # we set a performance criterion at 20% error;
    # the predictive error needs to be better than this criterion
test_query_points = search_space.sample_sobol(10000 * search_space.dimension)
test_data = observer(test_query_points)
criterion = 0.2
# we expect a model with initial data to fail the criterion
initial_model = model_builder(initial_data, search_space)
initial_model.optimize(initial_data)
initial_predicted_means, _ = ilink(initial_model.model.predict_f(test_query_points))
initial_error = tf.reduce_mean(tf.abs(initial_predicted_means - test_data.observations))
assert not initial_error < criterion
# after active learning the model should be much more accurate
model = model_builder(initial_data, search_space)
acq = BayesianActiveLearningByDisagreement()
rule = EfficientGlobalOptimization(acq) # type: ignore
final_model = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, model, rule)
.try_get_final_model()
)
final_predicted_means, _ = ilink(final_model.model.predict_f(test_query_points)) # type: ignore
final_error = tf.reduce_mean(tf.abs(final_predicted_means - test_data.observations))
assert initial_error > final_error
assert final_error < criterion
| 13,398 | 35.709589 | 100 | py |
trieste-develop | trieste-develop/tests/integration/test_mixed_space_bayesian_optimization.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy.testing as npt
import pytest
import tensorflow as tf
from tests.util.misc import random_seed
from trieste.acquisition import (
AcquisitionFunctionClass,
BatchMonteCarloExpectedImprovement,
LocalPenalization,
)
from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
from trieste.objectives import ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.observer import OBJECTIVE
from trieste.space import Box, DiscreteSearchSpace, TaggedProductSearchSpace
from trieste.types import TensorType
@random_seed
@pytest.mark.parametrize(
"num_steps, acquisition_rule",
[
pytest.param(25, EfficientGlobalOptimization(), id="EfficientGlobalOptimization"),
pytest.param(
5,
EfficientGlobalOptimization(
BatchMonteCarloExpectedImprovement(sample_size=500).using(OBJECTIVE),
num_query_points=3,
),
id="BatchMonteCarloExpectedImprovement",
),
pytest.param(
8,
EfficientGlobalOptimization(
LocalPenalization(
ScaledBranin.search_space,
).using(OBJECTIVE),
num_query_points=3,
),
id="LocalPenalization",
),
],
)
def test_optimizer_finds_minima_of_the_scaled_branin_function(
num_steps: int,
acquisition_rule: AcquisitionRule[
TensorType, TaggedProductSearchSpace, TrainableProbabilisticModel
],
) -> None:
search_space = TaggedProductSearchSpace(
spaces=[Box([0], [1]), DiscreteSearchSpace(tf.linspace(0, 1, 15)[:, None])],
tags=["continuous", "discrete"],
)
initial_query_points = search_space.sample(5)
observer = mk_observer(ScaledBranin.objective)
initial_data = observer(initial_query_points)
model = GaussianProcessRegression(
build_gpr(initial_data, search_space, likelihood_variance=1e-8)
)
dataset = (
BayesianOptimizer(observer, search_space)
.optimize(num_steps, initial_data, model, acquisition_rule)
.try_get_final_dataset()
)
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
best_y = dataset.observations[arg_min_idx]
best_x = dataset.query_points[arg_min_idx]
relative_minimizer_err = tf.abs((best_x - ScaledBranin.minimizers) / ScaledBranin.minimizers)
    # these accuracies are the current best for the given number of optimization steps,
    # which makes this a regression test
assert tf.reduce_any(tf.reduce_all(relative_minimizer_err < 0.1, axis=-1), axis=0)
npt.assert_allclose(best_y, ScaledBranin.minimum, rtol=0.005)
    # check that acquisition functions defined as classes aren't being retraced unnecessarily;
    # they should be retraced once for the optimizer's starting grid and once for L-BFGS.
if isinstance(acquisition_rule, EfficientGlobalOptimization):
acquisition_function = acquisition_rule._acquisition_function
if isinstance(acquisition_function, AcquisitionFunctionClass):
assert acquisition_function.__call__._get_tracing_count() <= 4 # type: ignore
| 3,993 | 37.776699 | 99 | py |
trieste-develop | trieste-develop/tests/integration/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/integration/test_multifidelity_bayesian_optimization.py | # Copyright 2023 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy.testing as npt
import pytest
import tensorflow as tf
import trieste
from tests.util.misc import random_seed
from trieste.acquisition.combination import Product
from trieste.acquisition.function.entropy import MUMBO, CostWeighting
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.data import (
Dataset,
add_fidelity_column,
check_and_extract_fidelity_query_points,
get_dataset_for_fidelity,
)
from trieste.models.gpflow import (
MultifidelityAutoregressive,
build_multifidelity_autoregressive_models,
)
from trieste.objectives import (
Linear2Fidelity,
Linear3Fidelity,
Linear5Fidelity,
SingleObjectiveMultifidelityTestProblem,
)
from trieste.objectives.utils import mk_observer
from trieste.observer import SingleObserver
from trieste.space import TaggedProductSearchSpace
from trieste.types import TensorType
def _build_observer(problem: SingleObjectiveMultifidelityTestProblem) -> SingleObserver:
objective_function = problem.objective
def noisy_objective(x: TensorType) -> TensorType:
_, fidelities = check_and_extract_fidelity_query_points(x)
y = objective_function(x)
not_lowest_fidelity = fidelities > 0
noise = tf.random.normal(y.shape, stddev=2e-2, dtype=y.dtype)
y = tf.where(not_lowest_fidelity, y + noise, y)
return y
return mk_observer(noisy_objective)
def _build_nested_multifidelity_dataset(
problem: SingleObjectiveMultifidelityTestProblem, observer: SingleObserver
) -> Dataset:
num_fidelities = problem.num_fidelities
initial_sample_sizes = [10 + 2 * (num_fidelities - i) for i in range(num_fidelities)]
fidelity_samples = list()
lowest_fidelity_sample = problem.search_space.sample(initial_sample_sizes[0])
lowest_fidelity_sample = add_fidelity_column(lowest_fidelity_sample, 0)
fidelity_samples.append(lowest_fidelity_sample)
for i in range(1, num_fidelities):
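        # nested design: each fidelity level queries a random subset of the points
        # already evaluated at the fidelity level below it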
previous_fidelity_points = fidelity_samples[i - 1][:, :-1]
indices = tf.range(tf.shape(previous_fidelity_points)[0])
random_indices = tf.random.shuffle(indices)[: initial_sample_sizes[i]]
random_points = tf.gather(previous_fidelity_points, random_indices)
sample_points = add_fidelity_column(random_points, i)
fidelity_samples.append(sample_points)
query_points = tf.concat(fidelity_samples, axis=0)
dataset = observer(query_points)
return dataset
@random_seed
@pytest.mark.parametrize("problem", ((Linear2Fidelity), (Linear3Fidelity), (Linear5Fidelity)))
def test_multifidelity_bo_finds_minima_of_linear_problem(
problem: SingleObjectiveMultifidelityTestProblem,
) -> None:
observer = _build_observer(problem)
initial_data = _build_nested_multifidelity_dataset(problem, observer)
costs = [2.0 * (n + 1) for n in range(problem.num_fidelities)]
input_search_space = problem.search_space # Does not include fidelities
search_space = problem.fidelity_search_space # Includes fidelities
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
initial_data,
num_fidelities=problem.num_fidelities,
input_search_space=input_search_space,
kernel_priors=True,
)
)
model.update(initial_data)
model.optimize(initial_data)
bo = trieste.bayesian_optimizer.BayesianOptimizer[TaggedProductSearchSpace](
observer, search_space
)
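    # MUMBO measures information gain about the top-fidelity minimum, while CostWeighting
    # (roughly) downweights each candidate by its fidelity's query cost, so the Product
    # acts as an information-per-unit-cost acquisition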
acq_builder = Product(
# (Reducer doesn't support model type generics)
MUMBO(search_space).using("OBJECTIVE"),
CostWeighting(costs).using("OBJECTIVE"),
)
optimizer = generate_continuous_optimizer(num_initial_samples=10_000, num_optimization_runs=10)
rule = trieste.acquisition.rule.EfficientGlobalOptimization[
TaggedProductSearchSpace, MultifidelityAutoregressive
](builder=acq_builder, optimizer=optimizer)
num_steps = 5
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)
query_points_on_top = get_dataset_for_fidelity(
result.try_get_final_dataset(), model.num_fidelities - 1
)
arg_min_idx = tf.squeeze(tf.argmin(query_points_on_top.observations, axis=0))
best_x, best_y = (
query_points_on_top.query_points[arg_min_idx],
query_points_on_top.observations[arg_min_idx],
)
# check we solve the problem
minimizer_err = tf.abs((best_x - problem.minimizers) / problem.minimizers)
assert tf.reduce_any(tf.reduce_all(minimizer_err < 0.05, axis=-1), axis=0)
npt.assert_allclose(best_y, problem.minimum, rtol=0.1)
| 5,238 | 38.097015 | 99 | py |
trieste-develop | trieste-develop/tests/integration/models/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/integration/models/gpflux/test_predictions.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from tests.util.misc import hartmann_6_dataset, random_seed
from trieste.models.gpflux import DeepGaussianProcess, build_vanilla_deep_gp
from trieste.objectives import Hartmann6
@pytest.mark.slow
@random_seed
@pytest.mark.parametrize("depth", [2, 3])
def test_dgp_model_close_to_actuals(depth: int) -> None:
dataset_size = 50
num_inducing = 50
example_data = hartmann_6_dataset(dataset_size)
dgp = build_vanilla_deep_gp(
example_data,
Hartmann6.search_space,
depth,
num_inducing,
likelihood_variance=1e-5,
trainable_likelihood=False,
)
model = DeepGaussianProcess(dgp)
model.optimize(example_data)
predicted_means, _ = model.predict(example_data.query_points)
np.testing.assert_allclose(predicted_means, example_data.observations, atol=0.2, rtol=0.2)
| 1,463 | 31.533333 | 94 | py |
trieste-develop | trieste-develop/tests/integration/models/gpflux/__init__.py | 0 | 0 | 0 | py |
|
trieste-develop | trieste-develop/tests/integration/models/multifidelity/test_multifidelity_models.py | import gpflow
import numpy as np
import numpy.testing as npt
import tensorflow as tf
from tensorflow.keras.metrics import mean_squared_error
import trieste
from trieste.data import (
Dataset,
add_fidelity_column,
check_and_extract_fidelity_query_points,
split_dataset_by_fidelity,
)
from trieste.models.gpflow import GaussianProcessRegression
from trieste.models.gpflow.builders import (
build_gpr,
build_multifidelity_autoregressive_models,
build_multifidelity_nonlinear_autoregressive_models,
)
from trieste.models.gpflow.models import (
MultifidelityAutoregressive,
MultifidelityNonlinearAutoregressive,
)
from trieste.objectives.utils import mk_observer
from trieste.space import Box
from trieste.types import TensorType
def noisy_linear_multifidelity(x: TensorType) -> TensorType:
x_input, x_fidelity = check_and_extract_fidelity_query_points(x)
f = 0.5 * ((6.0 * x_input - 2.0) ** 2) * tf.math.sin(12.0 * x_input - 4.0) + 10.0 * (
x_input - 1.0
)
f = f + x_fidelity * (f - 20.0 * (x_input - 1.0))
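    # each fidelity step adds another copy of (f - 20 * (x - 1)), i.e. fidelity n observes
    # (n + 1) * f - 20 * n * (x - 1), so successive fidelities scale f by (n + 1) / n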
noise = tf.random.normal(f.shape, stddev=1e-1, dtype=f.dtype)
f = tf.where(x_fidelity > 0, f + noise, f)
return f
def noisy_nonlinear_multifidelity(x: TensorType) -> TensorType:
x_input, x_fidelity = check_and_extract_fidelity_query_points(x)
    # Check we only have fidelity = 0 or 1:
    # bad_fidelities is True for any element that is neither 0 nor 1
bad_fidelities = tf.math.logical_and(x_fidelity != 0, x_fidelity != 1)
if tf.math.count_nonzero(bad_fidelities) > 0:
raise ValueError("Nonlinear simulator only supports 2 fidelities (0 and 1)")
else:
f = tf.math.sin(8 * np.pi * x_input)
fh = (x_input - tf.sqrt(tf.Variable(2.0, dtype=tf.float64))) * tf.square(f)
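        # the high-fidelity signal is a nonlinear (squared) transform of the low-fidelity
        # one, which a purely linear autoregressive model cannot represent exactly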
f = tf.where(x_fidelity > 0, fh, f)
noise = tf.random.normal(f.shape, stddev=1e-2, dtype=f.dtype)
f = tf.where(x_fidelity > 0, f + noise, f)
return f
def test_multifidelity_autoregressive_results_close() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 4
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 3 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_linear_multifidelity)
initial_data = observer(initial_sample)
data = split_dataset_by_fidelity(initial_data, n_fidelities)
gprs = [
GaussianProcessRegression(
build_gpr(
data[fidelity], input_search_space, likelihood_variance=1e-6, kernel_priors=False
)
)
for fidelity in range(n_fidelities)
]
model = MultifidelityAutoregressive(gprs)
model.update(initial_data)
model.optimize(initial_data)
test_xs = tf.linspace(0, 1, 11)[:, None]
test_xs_w_fid = add_fidelity_column(test_xs, fidelity=3)
predictions = model.predict(test_xs_w_fid)[0]
gt_obs = observer(test_xs_w_fid).observations
npt.assert_allclose(predictions, gt_obs, rtol=0.20)
def test_multifidelity_nonlinear_autoregressive_results_better_than_linear() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 2
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 10 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_nonlinear_multifidelity)
initial_data = observer(initial_sample)
nonlinear_model = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
initial_data, n_fidelities, input_search_space
)
)
linear_model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space)
)
    mses = []
for model in [nonlinear_model, linear_model]:
model.update(initial_data)
model.optimize(initial_data)
test_xs = tf.linspace(0, 1, 111)[:, None]
test_xs_w_fid = add_fidelity_column(test_xs, fidelity=1)
predictions = model.predict(test_xs_w_fid)[0]
gt_obs = observer(test_xs_w_fid).observations
mses.append(tf.reduce_sum(mean_squared_error(gt_obs, predictions)))
assert mses[0] < mses[1]
def test_multifidelity_autoregressive_gets_expected_rhos() -> None:
input_dim = 1
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
n_fidelities = 4
input_search_space = trieste.space.Box(lb, ub)
n_samples_per_fidelity = [
2 ** ((n_fidelities - fidelity) + 1) + 3 for fidelity in range(n_fidelities)
]
xs = [tf.linspace(0, 1, samples)[:, None] for samples in n_samples_per_fidelity]
initial_samples_list = [tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
observer = mk_observer(noisy_linear_multifidelity)
initial_data = observer(initial_sample)
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(initial_data, n_fidelities, input_search_space)
)
model.update(initial_data)
model.optimize(initial_data)
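    # The simulator gives f_m = (m + 1) * f_0 - 20 * m * (x - 1) at fidelity m, so
    # the learnt AR1 scaling between consecutive fidelities should be close to
    # (m + 1) / m, with 1.0 at the lowest fidelity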
expected_rho = [1.0] + [(fidelity + 1) / fidelity for fidelity in range(1, n_fidelities)]
rhos = [float(rho.numpy()) for rho in model.rho]
npt.assert_allclose(np.array(expected_rho), np.array(rhos), rtol=0.30)
def test_multifidelity_autoregressive_predict_lf_are_consistent_with_multiple_fidelities() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
# Add some high fidelity points to check that predict on different fids works
    test_locations_60 = tf.Variable(np.linspace(0, 10, 60), dtype=tf.float64)[:, None]
    lf_test_locations = add_fidelity_column(test_locations_60, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
second_batch = tf.Variable(np.linspace(0.5, 10.5, 92), dtype=tf.float64)[:, None]
second_batch_test_locations = add_fidelity_column(second_batch, 1)
concat_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_multibatch_test_locations = tf.concat(
[concat_test_locations[None, ...], second_batch_test_locations[None, ...]], axis=0
)
prediction_mean, prediction_var = model.predict(concat_multibatch_test_locations)
lf_prediction_mean, lf_prediction_var = (
prediction_mean[0, :60],
prediction_var[0, :60],
)
(
lf_prediction_direct_mean,
lf_prediction_direct_var,
    ) = model.lowest_fidelity_signal_model.predict(test_locations_60)
npt.assert_allclose(lf_prediction_mean, lf_prediction_direct_mean, rtol=1e-7)
npt.assert_allclose(lf_prediction_var, lf_prediction_direct_var, rtol=1e-7)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_rho_zero() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
model.rho[1] = 0.0 # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_prediction = model.predict(hf_test_locations)
hf_prediction_direct = model.fidelity_residual_models[1].predict(test_locations)
npt.assert_array_equal(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_lf_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.lowest_fidelity_signal_model = GaussianProcessRegression(gpr)
# Add some low fidelity points to check that predict on different fids works
test_locations_30 = tf.Variable(np.linspace(0, 10, 30), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_30, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
concatenated_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_prediction, _ = model.predict(concatenated_test_locations)
hf_prediction = concat_prediction[30:]
hf_prediction_direct, _ = model.fidelity_residual_models[1].predict(test_locations_32)
npt.assert_allclose(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_predict_hf_is_consistent_when_hf_residual_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.fidelity_residual_models[1] = GaussianProcessRegression(gpr) # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_prediction, _ = model.predict(hf_test_locations)
hf_prediction_direct = (
model.rho[1] * model.lowest_fidelity_signal_model.predict(test_locations)[0]
)
npt.assert_allclose(hf_prediction, hf_prediction_direct)
def test_multifidelity_autoregressive_sample_lf_are_consistent_with_multiple_fidelities() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
# Add some high fidelity points to check that predict on different fids works
test_locations_31 = tf.Variable(np.linspace(0, 10, 31), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_31, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
second_batch = tf.Variable(np.linspace(0.5, 10.5, 63), dtype=tf.float64)[:, None]
second_batch_test_locations = add_fidelity_column(second_batch, 1)
concat_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_multibatch_test_locations = tf.concat(
[concat_test_locations[None, ...], second_batch_test_locations[None, ...]], axis=0
)
concat_samples = model.sample(concat_multibatch_test_locations, 100_000)
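    # concat_samples has shape [batch=2, num_samples, num_points, 1]; slice out
    # the low-fidelity block of the first batch for comparison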
lf_samples = concat_samples[0, :, :31]
lf_samples_direct = model.lowest_fidelity_signal_model.sample(test_locations_31, 100_000)
lf_samples_mean = tf.reduce_mean(lf_samples, axis=0)
lf_samples_var = tf.math.reduce_variance(lf_samples, axis=0)
lf_samples_direct_mean = tf.reduce_mean(lf_samples_direct, axis=0)
lf_samples_direct_var = tf.math.reduce_variance(lf_samples_direct, axis=0)
npt.assert_allclose(lf_samples_mean, lf_samples_direct_mean, atol=1e-4)
npt.assert_allclose(lf_samples_var, lf_samples_direct_var, atol=1e-4)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_rho_zero() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
model.rho[1] = 0.0 # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_samples = model.sample(hf_test_locations, 100_000)
hf_samples_direct = model.fidelity_residual_models[1].sample(test_locations, 100_000)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-2)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-2)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_lf_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.lowest_fidelity_signal_model = GaussianProcessRegression(gpr)
# Add some low fidelity points to check that predict on different fids works
test_locations_30 = tf.Variable(np.linspace(0, 10, 30), dtype=tf.float64)[:, None]
lf_test_locations = add_fidelity_column(test_locations_30, 0)
test_locations_32 = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations_32, 1)
concatenated_test_locations = tf.concat([lf_test_locations, hf_test_locations], axis=0)
concat_samples = model.sample(concatenated_test_locations, 100_000)
hf_samples = concat_samples[:, 30:]
hf_samples_direct = model.fidelity_residual_models[1].sample(test_locations_32, 100_000)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-2)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-2)
def test_multifidelity_autoregressive_sample_hf_is_consistent_when_hf_residual_is_flat() -> None:
xs_low = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
xs_high = tf.Variable(np.linspace(0, 10, 10), dtype=tf.float64)[:, None]
lf_obs = tf.sin(xs_low)
hf_obs = 2 * tf.sin(xs_high) + tf.random.normal(
xs_high.shape, mean=0, stddev=1e-1, dtype=tf.float64
)
lf_query_points = add_fidelity_column(xs_low, 0)
hf_query_points = add_fidelity_column(xs_high, 1)
lf_dataset = Dataset(lf_query_points, lf_obs)
hf_dataset = Dataset(hf_query_points, hf_obs)
dataset = lf_dataset + hf_dataset
search_space = Box([0.0], [10.0])
model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
dataset, num_fidelities=2, input_search_space=search_space
)
)
model.update(dataset)
flat_dataset_qps = tf.Variable(np.linspace(0, 10, 100), dtype=tf.float64)[:, None]
flat_dataset_obs = tf.zeros_like(flat_dataset_qps)
flat_dataset = Dataset(flat_dataset_qps, flat_dataset_obs)
kernel = gpflow.kernels.Matern52()
gpr = gpflow.models.GPR(flat_dataset.astuple(), kernel, noise_variance=1e-5)
model.fidelity_residual_models[1] = GaussianProcessRegression(gpr) # type: ignore
test_locations = tf.Variable(np.linspace(0, 10, 32), dtype=tf.float64)[:, None]
hf_test_locations = add_fidelity_column(test_locations, 1)
hf_samples = model.sample(hf_test_locations, 100_000)
hf_samples_direct = model.rho[1] * model.lowest_fidelity_signal_model.sample(
test_locations, 100_000
)
hf_samples_mean = tf.reduce_mean(hf_samples, axis=0)
hf_samples_var = tf.math.reduce_variance(hf_samples, axis=0)
hf_samples_direct_mean = tf.reduce_mean(hf_samples_direct, axis=0)
hf_samples_direct_var = tf.math.reduce_variance(hf_samples_direct, axis=0)
npt.assert_allclose(hf_samples_mean, hf_samples_direct_mean, atol=1e-4)
npt.assert_allclose(hf_samples_var, hf_samples_direct_var, atol=1e-4)
| 21,553 | 37.148673 | 99 | py |
trieste-develop | trieste-develop/tests/integration/models/keras/test_predictions.py | # Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import numpy as np
import pytest
import tensorflow as tf
from tests.util.misc import hartmann_6_dataset, random_seed
from trieste.models.keras import DeepEnsemble, build_keras_ensemble
from trieste.models.optimizer import KerasOptimizer
@pytest.mark.slow
@random_seed
def test_neural_network_ensemble_predictions_close_to_actuals() -> None:
dataset_size = 2000
example_data = hartmann_6_dataset(dataset_size)
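    # ensemble of 5 networks, each with 3 hidden layers of 250 units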
keras_ensemble = build_keras_ensemble(example_data, 5, 3, 250)
fit_args = {
"batch_size": 128,
"epochs": 1500,
"callbacks": [
tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=100, restore_best_weights=True
)
],
"verbose": 0,
}
model = DeepEnsemble(
keras_ensemble,
KerasOptimizer(tf.keras.optimizers.Adam(), fit_args),
)
model.optimize(example_data)
predicted_means, _ = model.predict(example_data.query_points)
np.testing.assert_allclose(predicted_means, example_data.observations, atol=0.2, rtol=0.2)
| 1,688 | 32.117647 | 94 | py |
trieste-develop | trieste-develop/docs/conf.py | # Copyright 2020 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('../'))
# -- Project information -----------------------------------------------------
from pathlib import Path
project = "trieste"
# fmt: off
copyright = (
'Copyright 2020 The Trieste Contributors\n'
'\n'
'Licensed under the Apache License, Version 2.0 (the "License");\n'
'you may not use this file except in compliance with the License.\n'
'You may obtain a copy of the License at\n'
'\n'
' http://www.apache.org/licenses/LICENSE-2.0\n'
'\n'
'Unless required by applicable law or agreed to in writing, software\n'
'distributed under the License is distributed on an "AS IS" BASIS,\n'
'WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
'See the License for the specific language governing permissions and\n'
'limitations under the License.\n'
)
# fmt: on
author = "The Trieste Contributors"
# The full version, including alpha/beta/rc tags
release = Path("../trieste/VERSION").read_text().strip()
# -- General configuration ---------------------------------------------------
extensions = [
"sphinx.ext.viewcode",
"sphinx.ext.autosectionlabel",
"sphinx.ext.doctest",
]
add_module_names = False
autosectionlabel_prefix_document = True
# sphinx-autoapi
extensions.append("autoapi.extension")
autoapi_dirs = ["../trieste"]
autoapi_add_toctree_entry = False
autoapi_keep_files = True
autoapi_python_class_content = "both"
autoapi_options = [
"members",
"private-members",
"special-members",
"imported-members",
"show-inheritance",
]
# TODO: remove once https://github.com/sphinx-doc/sphinx/issues/4961 is fixed
suppress_warnings = ["ref.python"]
# intersphinx
extensions.append("sphinx.ext.intersphinx")
intersphinx_mapping = {
"python": ("https://docs.python.org/3/", None),
}
# nbsphinx
extensions.append("nbsphinx")
nbsphinx_custom_formats = {
".pct.py": ["jupytext.reads", {"fmt": "py:percent"}],
}
# sphinxcontrib-bibtex
extensions.append("sphinxcontrib.bibtex")
bibtex_bibfiles = ["refs.bib"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "pydata_sphinx_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = ["css/custom.css"]
# library logo location
html_logo = "_static/logo.png"
# If True, show link to rst source on rendered HTML pages
html_show_sourcelink = False
# theme-specific options. see theme docs for more info
html_theme_options = {
"show_prev_next": False,
"github_url": "https://github.com/secondmind-labs/trieste",
"switcher": {
"json_url": "https://secondmind-labs.github.io/trieste/versions.json",
"version_match": release,
},
"navbar_end": ["version-switcher", "navbar-icon-links"],
}
| 4,622 | 32.992647 | 80 | py |
trieste-develop | trieste-develop/docs/notebooks/asynchronous_greedy_multiprocessing.pct.py | # %% [markdown]
# # Asynchronous Bayesian optimization with Trieste
#
# In this notebook we demonstrate Trieste's ability to perform asynchronous Bayesian optimisation, which is suitable for scenarios where the objective function can be run for several points in parallel but where observations might return at different times. To avoid wasting resources waiting for the evaluation of the whole batch, we immediately request the next point asynchronously, taking into account points that are still being evaluated. Besides saving resources, the asynchronous approach can also potentially [improve sample efficiency](https://arxiv.org/abs/1901.10452) in comparison with synchronous batch strategies, although this is highly dependent on the use case.
#
# To contrast this approach with regular [batch optimization](batch_optimization.ipynb), this notebook also shows how to run a parallel synchronous batch approach.
# %%
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
tf.get_logger().setLevel("ERROR")
import numpy as np
import time
import timeit
# %% [markdown]
# First, let's define a simple objective that will emulate evaluations taking variable time. We will be using a classic Bayesian optimisation benchmark function [Branin](https://www.sfu.ca/~ssurjano/branin.html) with a sleep call inserted in the middle of the calculation to emulate delay. Our sleep delay is a scaled sum of all input values to make sure delays are uneven.
# %%
from trieste.objectives import ScaledBranin
def objective(points, sleep=True):
if points.shape[1] != 2:
raise ValueError(
f"Incorrect input shape, expected (*, 2), got {points.shape}"
)
observations = []
for point in points:
observation = ScaledBranin.objective(point)
if sleep:
# insert some artificial delay
# increases linearly with the absolute value of points
# which means our evaluations will take different time
delay = 3 * np.sum(point)
pid = os.getpid()
print(
f"Process {pid}: Objective: pretends like it's doing something for {delay:.2}s",
flush=True,
)
time.sleep(delay)
observations.append(observation)
return np.array(observations)
# test the defined objective function
objective(np.array([[0.1, 0.5]]), sleep=False)
# %% [markdown]
# As always, we need to prepare the model and some initial data to kick-start the optimization process.
# %%
from trieste.space import Box
from trieste.data import Dataset
search_space = Box([0, 0], [1, 1])
num_initial_points = 3
initial_query_points = search_space.sample(num_initial_points)
initial_observations = objective(initial_query_points.numpy(), sleep=False)
initial_data = Dataset(
query_points=initial_query_points,
observations=tf.constant(initial_observations, dtype=tf.float64),
)
import gpflow
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
# We set the likelihood variance to a small number because
# we are dealing with a noise-free problem.
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# these imports will be used later for optimization
from trieste.acquisition import LocalPenalization
from trieste.acquisition.rule import (
AsynchronousGreedy,
EfficientGlobalOptimization,
)
from trieste.ask_tell_optimization import AskTellOptimizer
# %% [markdown]
# ## Multiprocessing setup
#
# To keep this notebook as reproducible as possible, we will only be using Python's multiprocessing package here. In this section we will explain our setup and define some common code to be used later.
#
# In both synchronous and asynchronous scenarios we will have a fixed set of worker processes performing observations. We will also have a main process responsible for the optimization process with Trieste. When Trieste suggests a new point, it is inserted into a points queue. One of the workers picks this point from the queue, performs the observation, and inserts the output into the observations queue. The main process then picks up the observation from the queue, at which moment it either waits for the rest of the points in the batch to come back (synchronous scenario) or immediately suggests a new point (asynchronous scenario). This process continues either for a certain number of iterations or until we accumulate the necessary number of observations.
#
# The overall setup is illustrated in this diagram:
# 
# %%
# Necessary multiprocessing primitives
from multiprocessing import Manager, Process
# %% [markdown]
# We now define several common functions to implement the described setup. First we define a worker function that runs observations in a separate process. The worker takes both queues as input, reads the next point from the points queue, makes an observation, and inserts the output into the observations queue.
# %%
def observer_proc(points_queue, observations_queue):
pid = os.getpid()
while True:
point_to_observe = points_queue.get()
if point_to_observe is None:
return
print(
f"Process {pid}: Observer : observing data at point {point_to_observe}",
flush=True,
)
new_observation = objective(point_to_observe, sleep=enable_sleep_delays)
new_data = (point_to_observe, new_observation)
print(f"Process {pid}: Observer : observed data {new_data}", flush=True)
observations_queue.put(new_data)
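# %% [markdown]
# Note that a worker exits cleanly when it reads a `None` sentinel from the points queue. In this notebook we stop the workers forcibly with `terminate_processes` (defined below), but a graceful alternative would be to enqueue one sentinel per worker, e.g. `for _ in range(num_workers): pq.put(None)`.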
# %% [markdown]
# Next we define two helper functions: one to create a certain number of worker processes, and another to terminate them once we are done.
# %%
def create_worker_processes(n_workers, points_queue, observations_queue):
observer_processes = []
for i in range(n_workers):
worker_proc = Process(
            target=observer_proc, args=(points_queue, observations_queue)
)
worker_proc.daemon = True
worker_proc.start()
observer_processes.append(worker_proc)
return observer_processes
def terminate_processes(processes):
for prc in processes:
prc.terminate()
prc.join()
prc.close()
# %% [markdown]
# Finally we set some common parameters. See comments below for explanation of what each one means.
# %%
# Number of worker processes to run simultaneously
# Setting this to 1 will turn both setups into non-batch sequential optimization
num_workers = 3
# Number of iterations to run the synchronous scenario for
num_iterations = 10
# Number of observations to collect in the asynchronous scenario
num_observations = num_workers * num_iterations
# Set this flag to False to disable sleep delays in case you want the notebook to execute quickly
enable_sleep_delays = True
# %% [markdown]
# ## Asynchronous optimization
# This section runs the asynchronous optimization routine. We first set up the [ask/tell optimizer](ask_tell_optimization.ipynb), since we cannot hand over the evaluation of the objective to Trieste. Next we create thread-safe queues for points and observations, and run the optimization loop.
#
# Crucially, even though we are using the batch acquisition function Local Penalization, we specify a batch size of 1. This is because we don't really want a batch. Since the number of workers we have is fixed, whenever we see a new observation we only need one point back. However, this can only be done with acquisition functions that implement greedy batch collection strategies, because they are able to take into account points that are currently being observed (in Trieste we call them "pending"). Trieste currently provides two such functions: Local Penalization and GIBBON. Notice that we use the **AsynchronousGreedy** rule, specifically designed for using greedy batch acquisition functions in asynchronous scenarios.
# %%
# setup Ask Tell BO
local_penalization_acq = LocalPenalization(search_space, num_samples=2000)
local_penalization_rule = AsynchronousGreedy(builder=local_penalization_acq) # type: ignore
async_bo = AskTellOptimizer(
search_space, initial_data, model, local_penalization_rule
)
# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []
# counter to keep track of collected observations
points_observed = 0
start = timeit.default_timer()
try:
observer_processes = create_worker_processes(num_workers, pq, oq)
# init the queue with first batch of points
for _ in range(num_workers):
point = async_bo.ask()
pq.put(np.atleast_2d(point.numpy()))
while points_observed < num_observations:
# keep asking queue for new observations until one arrives
try:
new_data = oq.get_nowait()
print(
f"Process {pid}: Main : received data {new_data}",
flush=True,
)
        except Exception:  # the queue is empty, no new observations yet
continue
# new_data is a tuple of (point, observation value)
        # here we turn it into a Dataset and tell Trieste about it
points_observed += 1
new_data = Dataset(
query_points=tf.constant(new_data[0], dtype=tf.float64),
observations=tf.constant(new_data[1], dtype=tf.float64),
)
async_bo.tell(new_data)
# now we can ask Trieste for one more point
# and feed that back into the points queue
point = async_bo.ask()
print(f"Process {pid}: Main : acquired point {point}", flush=True)
        pq.put(np.atleast_2d(point.numpy()))
finally:
terminate_processes(observer_processes)
stop = timeit.default_timer()
# Collect the observations, compute the running time
async_lp_observations = (
async_bo.to_result().try_get_final_dataset().observations
- ScaledBranin.minimum
)
async_lp_time = stop - start
print(f"Got {len(async_lp_observations)} observations in {async_lp_time:.2f}s")
# %% [markdown]
# ## Synchronous parallel optimization
#
# This section runs the synchronous parallel optimization with Trieste. We again use the Local Penalization acquisition function, but this time with a batch size equal to the number of workers we have available. Once Trieste suggests the batch, we add all points to the points queue, and workers immediately pick them up, one point per worker. Therefore all points in the batch are evaluated in parallel.
# %%
# setup Ask Tell BO
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
local_penalization_acq = LocalPenalization(search_space, num_samples=2000)
local_penalization_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=num_workers, builder=local_penalization_acq
)
sync_bo = AskTellOptimizer(
search_space, initial_data, model, local_penalization_rule
)
# retrieve process id for nice logging
pid = os.getpid()
# create point and observation queues
m = Manager()
pq = m.Queue()
oq = m.Queue()
# keep track of all workers we have launched
observer_processes = []
start = timeit.default_timer()
try:
observer_processes = create_worker_processes(num_workers, pq, oq)
# BO loop starts here
for i in range(num_iterations):
print(f"Process {pid}: Main : iteration {i} starts", flush=True)
# get a batch of points from Trieste, send them to points queue
# each worker picks up a point and processes it
points = sync_bo.ask()
for point in points.numpy():
pq.put(point.reshape(1, -1)) # reshape is to make point a 2d array
# now we wait for all workers to finish
# we create an empty dataset and wait
        # until we have collected as many observations in it
# as there were points in the batch
all_new_data = Dataset(
tf.zeros((0, initial_data.query_points.shape[1]), tf.float64),
tf.zeros((0, initial_data.observations.shape[1]), tf.float64),
)
while len(all_new_data) < num_workers:
# this line blocks the process until new data is available in the queue
new_data = oq.get()
print(
f"Process {pid}: Main : received data {new_data}",
flush=True,
)
new_data = Dataset(
query_points=tf.constant(new_data[0], dtype=tf.float64),
observations=tf.constant(new_data[1], dtype=tf.float64),
)
all_new_data = all_new_data + new_data
        # tell Trieste about the new batch of observations
sync_bo.tell(all_new_data)
finally:
terminate_processes(observer_processes)
stop = timeit.default_timer()
# Collect the observations, compute the running time
sync_lp_observations = (
sync_bo.to_result().try_get_final_dataset().observations
- ScaledBranin.minimum
)
sync_lp_time = stop - start
print(f"Got {len(sync_lp_observations)} observations in {sync_lp_time:.2f}s")
# %% [markdown]
# ## Comparison
# To compare the outcomes of the sync and async runs, let's plot their respective regrets side by side and print out the running time. For this toy problem we expect the async scenario to run a little faster on machines with multiple CPUs.
# %%
from trieste.experimental.plotting import plot_regret
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2)
sync_lp_min_idx = tf.squeeze(tf.argmin(sync_lp_observations, axis=0))
async_lp_min_idx = tf.squeeze(tf.argmin(async_lp_observations, axis=0))
plot_regret(
sync_lp_observations.numpy(),
ax[0],
num_init=len(initial_data),
idx_best=sync_lp_min_idx,
)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title(
f"Sync LP, {len(sync_lp_observations)} points, time {sync_lp_time:.2f}"
)
plot_regret(
async_lp_observations.numpy(),
ax[1],
num_init=len(initial_data),
idx_best=async_lp_min_idx,
)
ax[1].set_yscale("log")
ax[1].set_ylabel("Regret")
ax[1].set_ylim(0.0000001, 100)
ax[1].set_xlabel("# evaluations")
ax[1].set_title(
f"Async LP, {len(async_lp_observations)} points, time {async_lp_time:.2f}s"
)
fig.tight_layout()
| 14,534 | 38.604905 | 757 | py |
trieste-develop | trieste-develop/docs/notebooks/batch_optimization.pct.py | # %% [markdown]
# # Batch Bayesian Optimization with Batch Expected Improvement, Local Penalization, Kriging Believer and GIBBON
# %% [markdown]
# Sometimes it is practically convenient to query several points at a time. This notebook demonstrates four ways to perform batch Bayesian optimization with Trieste.
# %%
import numpy as np
import tensorflow as tf
from trieste.experimental.plotting import plot_acq_function_2d
import matplotlib.pyplot as plt
import trieste
np.random.seed(1234)
tf.random.set_seed(1234)
# %% [markdown]
# ## Describe the problem
#
# In this example, we consider the same problem presented in our `expected_improvement` notebook, i.e. seeking the minimizer of the two-dimensional Branin function.
#
# We begin our optimization after collecting five function evaluations from random locations in the search space.
# %%
from trieste.objectives import ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.space import Box
observer = mk_observer(ScaledBranin.objective)
search_space = Box([0, 0], [1, 1])
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Surrogate model
#
# Just like in purely sequential optimization, we fit a surrogate Gaussian process model to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model-building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
import gpflow
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# ## Batch acquisition functions.
# To perform batch BO, we must define a batch acquisition function. Four batch acquisition functions supported in Trieste are `BatchMonteCarloExpectedImprovement`, `LocalPenalization` (see <cite data-cite="Gonzalez:2016"/>), `Fantasizer` (see <cite data-cite="ginsbourger2010kriging"/>) and `GIBBON` (see <cite data-cite="Moss:2021"/>).
#
# Although all these acquisition functions recommend batches of diverse query points, the batches are chosen in very different ways. `BatchMonteCarloExpectedImprovement` jointly allocates the batch of points as those with the largest expected improvement over our current best solution. In contrast, the `LocalPenalization` greedily builds the batch, sequentially adding the maximizers of the standard (non-batch) `ExpectedImprovement` function penalized around the current pending batch points. `Fantasizer` works similarly, but instead of penalizing the acquisition model, it iteratively updates the predictive equations after "fantasizing" observations at the previously chosen query points. `GIBBON` also builds batches in a greedy manner but seeks batches that provide a large reduction in our uncertainty around the maximum value of the objective function.
#
# In practice, `BatchMonteCarloExpectedImprovement` can be expected to have superior performance for small batch sizes and low dimension (`batch_size` < 4) but scales poorly for larger batches, especially in high dimension. `Fantasizer` complexity scales cubically with the batch size, which also limits its use to small batches. `LocalPenalization` is computationally the cheapest and may be the best fit for large batches.
#
# Note that all these acquisition functions have controllable parameters. In particular, `BatchMonteCarloExpectedImprovement` is computed using a Monte-Carlo method (so it requires a `sample_size`), but uses a reparametrisation trick to make it deterministic. The `LocalPenalization` has parameters controlling the degree of penalization that must be estimated from a random sample of `num_samples` model predictions (we recommend at least 1_000 for each search space dimension). Similarly, `GIBBON` requires a `grid_size` parameter that controls its approximation accuracy (which should also be larger than 1_000 for each search space dimension). `Fantasizer` requires a method for "fantasizing" the observations, which can be done by sampling from the GP posterior or by using the GP posterior mean (a.k.a "kriging believer" heuristic, our default setup).
#
# %% [markdown]
# First, we collect the batch of ten points recommended by `BatchMonteCarloExpectedImprovement` ...
# %%
from trieste.acquisition.function import BatchMonteCarloExpectedImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization
monte_carlo_sample_size = 1000
batch_ei_acq = BatchMonteCarloExpectedImprovement(
sample_size=monte_carlo_sample_size, jitter=1e-5
)
batch_size = 10
batch_ei_acq_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=batch_size, builder=batch_ei_acq
)
points_chosen_by_batch_ei = batch_ei_acq_rule.acquire_single(
search_space, model, dataset=initial_data
)
# %% [markdown]
# then we do the same with `LocalPenalization` ...
# %%
from trieste.acquisition import LocalPenalization
sample_size = 2000
local_penalization_acq = LocalPenalization(
search_space, num_samples=sample_size
)
local_penalization_acq_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=batch_size, builder=local_penalization_acq
)
points_chosen_by_local_penalization = (
local_penalization_acq_rule.acquire_single(
search_space, model, dataset=initial_data
)
)
# %% [markdown]
# then with `Fantasizer` ...
# %%
from trieste.acquisition import Fantasizer
kriging_believer_acq = Fantasizer()
kriging_believer_acq_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=batch_size, builder=kriging_believer_acq
)
points_chosen_by_kriging_believer = kriging_believer_acq_rule.acquire_single(
search_space, model, dataset=initial_data
)
# %% [markdown]
# and finally we use `GIBBON`.
# %%
from trieste.acquisition.function import GIBBON
gibbon_acq = GIBBON(search_space, grid_size=sample_size)
gibbon_acq_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=batch_size, builder=gibbon_acq
)
points_chosen_by_gibbon = gibbon_acq_rule.acquire_single(
search_space, model, dataset=initial_data
)
# %% [markdown]
# We can now visualize the batch of 10 points chosen by each of these methods overlaid on the standard `ExpectedImprovement` acquisition function. `BatchMonteCarloExpectedImprovement` and `Fantasizer` choose a more diverse set of points, whereas `LocalPenalization` and `GIBBON` focus evaluations in the most promising areas of the space.
# %%
from trieste.acquisition.function import ExpectedImprovement
# plot standard EI acquisition function
ei = ExpectedImprovement()
ei_acq_function = ei.prepare_acquisition_function(model, dataset=initial_data)
plot_acq_function_2d(
ei_acq_function,
[0, 0],
[1, 1],
contour=True,
)
plt.scatter(
points_chosen_by_batch_ei[:, 0],
points_chosen_by_batch_ei[:, 1],
color="red",
lw=5,
label="Batch-EI",
marker="*",
zorder=1,
)
plt.scatter(
points_chosen_by_local_penalization[:, 0],
points_chosen_by_local_penalization[:, 1],
color="black",
lw=10,
label="Local \nPenalization",
marker="+",
)
plt.scatter(
points_chosen_by_kriging_believer[:, 0],
points_chosen_by_kriging_believer[:, 1],
color="blue",
lw=10,
label="Kriging \nBeliever",
marker="o",
)
plt.scatter(
points_chosen_by_gibbon[:, 0],
points_chosen_by_gibbon[:, 1],
color="purple",
lw=10,
label="GIBBON",
marker="X",
)
plt.legend(bbox_to_anchor=(1.2, 1), loc="upper left")
plt.xlabel(r"$x_1$")
plt.ylabel(r"$x_2$")
cbar = plt.colorbar()
cbar.set_label("EI", rotation=270)
# %% [markdown]
# ## Run the batch optimization loop
# We can now run a batch Bayesian optimization loop by defining a `BayesianOptimizer` with one of our batch acquisition functions.
#
# We reuse the same `EfficientGlobalOptimization` rule as in the purely sequential case; however, we pass in one of the batch acquisition functions and set `num_query_points` > 1.
#
# We'll run each method for ten steps with batches of three points.
# %% [markdown]
# First we run ten steps of `BatchMonteCarloExpectedImprovement` ...
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
batch_ei_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=3, builder=batch_ei_acq
)
num_steps = 10
qei_result = bo.optimize(
num_steps, initial_data, model, acquisition_rule=batch_ei_rule
)
# %% [markdown]
# then we repeat the same optimization with `LocalPenalization`...
# %%
local_penalization_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=3, builder=local_penalization_acq
)
local_penalization_result = bo.optimize(
num_steps, initial_data, model, acquisition_rule=local_penalization_rule
)
# %% [markdown]
# then with `Fantasizer`...
# %%
kriging_believer_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=3, builder=kriging_believer_acq
)
kriging_believer_result = bo.optimize(
num_steps, initial_data, model, acquisition_rule=kriging_believer_rule
)
# %% [markdown]
# and finally with the `GIBBON` acquisition function.
# %%
gibbon_rule = EfficientGlobalOptimization( # type: ignore
num_query_points=3, builder=gibbon_acq
)
gibbon_result = bo.optimize(
num_steps, initial_data, model, acquisition_rule=gibbon_rule
)
# %% [markdown]
# We can visualize the performance of each of these methods by plotting the trajectory of the regret (suboptimality) of the best observed solution as the optimization progresses. We denote this trajectory with the orange line, the start of the optimization loop with the blue line, and the best overall point with a purple dot.
#
# For this particular problem (and random seed), we see that `GIBBON` provides the fastest initial optimization but all methods have roughly similar overall performance.
# %%
from trieste.experimental.plotting import plot_regret
qei_observations = (
qei_result.try_get_final_dataset().observations - ScaledBranin.minimum
)
qei_min_idx = tf.squeeze(tf.argmin(qei_observations, axis=0))
local_penalization_observations = (
local_penalization_result.try_get_final_dataset().observations
- ScaledBranin.minimum
)
local_penalization_min_idx = tf.squeeze(
tf.argmin(local_penalization_observations, axis=0)
)
kriging_believer_observations = (
kriging_believer_result.try_get_final_dataset().observations
- ScaledBranin.minimum
)
kriging_believer_min_idx = tf.squeeze(
tf.argmin(kriging_believer_observations, axis=0)
)
gibbon_observations = (
gibbon_result.try_get_final_dataset().observations - ScaledBranin.minimum
)
gibbon_min_idx = tf.squeeze(tf.argmin(gibbon_observations, axis=0))
fig, ax = plt.subplots(4, 1)
plot_regret(qei_observations.numpy(), ax[0], num_init=5, idx_best=qei_min_idx)
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.0000001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title("Batch-EI")
plot_regret(
local_penalization_observations.numpy(),
ax[1],
num_init=5,
idx_best=local_penalization_min_idx,
)
ax[1].set_yscale("log")
ax[1].set_xlabel("# evaluations")
ax[1].set_ylim(0.0000001, 100)
ax[1].set_title("Local Penalization")
plot_regret(
kriging_believer_observations.numpy(),
ax[2],
num_init=5,
idx_best=kriging_believer_min_idx,
)
ax[2].set_yscale("log")
ax[2].set_xlabel("# evaluations")
ax[2].set_ylim(0.0000001, 100)
ax[2].set_title("Kriging Believer")
plot_regret(
    gibbon_observations.numpy(), ax[3], num_init=5, idx_best=gibbon_min_idx
)
ax[3].set_yscale("log")
ax[3].set_ylabel("Regret")
ax[3].set_ylim(0.0000001, 100)
ax[3].set_xlabel("# evaluations")
ax[3].set_title("GIBBON")
fig.tight_layout()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 12,094 | 37.275316 | 861 | py |
trieste-develop | trieste-develop/docs/notebooks/rembo.pct.py | # %% [markdown]
# # High-dimensional Bayesian optimization with Random EMbedding Bayesian Optimization (REMBO).
# This notebook demonstrates a simple method for optimizing a high-dimensional (100-D) problem, where standard BO methods have trouble.
# %%
import math
import gpflow
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this example, we augment the standard two-dimensional Michalewicz function with 98 dummy dimensions to obtain a 100-dimensional problem over the hypercube $[0, \pi]^{100}$.
#
# We compare three approaches to optimizing this problem. The first uses a standard GP model over all 100 dimensions, using expected improvement as our acquisition function. As standard Gaussian process models have trouble modeling high dimensional data, we do not expect this approach to perform well. Therefore, we compare this to two Random EMbedding Bayesian Optimization (REMBO; see <cite data-cite="wang2013bayesian"/>) approaches.
#
# Instead of training a GP model and optimizing an acquisition function on the high-dimensional space directly, REMBO constructs a low-dimensional search space, performing the modeling and acquisition on this space. In order to transfer to the high-dimensional space, REMBO uses a static random projection matrix $A \in \mathbb{R}^{D \times d}$ to project query points from the lower, $d$-dimensional space to the original higher, $D$-dimensional space.
#
# As the lower dimension $d$ is a choice, we compare $d = 2$ and $d = 5$. While $d = 2$ should be sufficient (as the problem is intrinsically two-dimensional), a higher dimension may improve the chance of a good random embedding being found, at the cost of making it more difficult to find good areas of the lower-dimensional search space.
#
# We run each method 5 times to ensure that the results are not due to luck.
# %%
from trieste.objectives.single_objectives import Michalewicz2
from trieste.space import Box
from trieste.models.gpflow import GaussianProcessRegression
# Set the dimension of the full problem
D = 100
num_initial_points = 2
num_steps = 48
num_seeds = 5
objective = Michalewicz2.objective
minimum = Michalewicz2.minimum
search_space = (
Box([0.0], [math.pi]) ** D
) # manually construct the high-dimensional search space
# We simply add dummy dimensions to create the new objective
def high_dim_objective(x):
tf.debugging.assert_shapes([(x, (..., D))])
return objective(x[..., :2])
def build_model(data, d):
# add a bit of noise, since there's a risk the variance could be zero for Michalewicz
variance = tf.math.reduce_variance(data.observations) + 1e-4
kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2] * d)
prior_scale = tf.cast(1.0, dtype=tf.float64)
kernel.variance.prior = tfp.distributions.LogNormal(
tf.cast(-2.0, dtype=tf.float64), prior_scale
)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(kernel.lengthscales), prior_scale
)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr, num_kernel_samples=100)
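# %% [markdown]
# Before running the optimization, here is a minimal standalone sketch of the random embedding itself (illustrative only; `A_demo`, `y_demo` and `x_demo` are not used below). A fixed random matrix $A \in \mathbb{R}^{D \times d}$ maps low-dimensional query points into the full space, and the result is clipped to the unit box before rescaling.
# %%
A_demo = tf.random.normal([D, 2], dtype=gpflow.default_float())  # D x d projection
y_demo = tf.random.uniform([3, 2], -1.0, 1.0, dtype=gpflow.default_float())
x_demo = tf.clip_by_value(
    tf.matmul(y_demo, A_demo, transpose_b=True), clip_value_min=-1, clip_value_max=1
)
print(x_demo.shape)  # (3, 100): three query points embedded in the 100-D cube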
# %% [markdown]
# ## Run standard Bayesian optimization
# We run the process 5 times - note that this takes a while!
# %%
import trieste
final_datasets = [] # to store the results
observer = trieste.objectives.utils.mk_observer(high_dim_objective)
for _ in range(num_seeds):
# Sample initial points
initial_query_points = search_space.sample_sobol(num_initial_points)
initial_data = observer(initial_query_points)
# Build the model over the high-dimensional space
model = build_model(initial_data, d=D)
# Set up the optimizer and run the loop
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
# Store the results
result = bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
final_datasets.append(dataset)
# %% [markdown]
# We now show how to implement REMBO by providing a new observer that wraps the original objective and acts on a projection of the input data.
# %%
def make_REMBO_observer_and_search_space(
full_dim, low_dim, objective, search_space
):
assert isinstance(search_space, Box)
A = tf.random.normal(
[full_dim, low_dim], dtype=gpflow.default_float()
) # sample projection matrix
new_search_space = Box(
[-math.sqrt(low_dim)] * low_dim, [math.sqrt(low_dim)] * low_dim
) # recommendation from REMBO paper
def new_objective(y):
tf.debugging.assert_shapes([(y, (..., low_dim))])
rescaled_search_space = Box(
[-1.0] * full_dim, [1.0] * full_dim
) # REMBO assumes the original space has bounds [-1, 1]^full_dim
scaling = (search_space.upper - search_space.lower) / (
rescaled_search_space.upper - rescaled_search_space.lower
)
x = tf.clip_by_value(
tf.matmul(y, A, transpose_b=True),
clip_value_min=-1,
clip_value_max=1,
) # project into the new box
x_rescaled = (
x - rescaled_search_space.lower
) * scaling + search_space.lower # rescale to match the original search space
return objective(x_rescaled)
observer = trieste.objectives.utils.mk_observer(new_objective)
return observer, new_search_space
# %% [markdown]
# Using the new observer, the process remains the same as before, except that now we must choose $d$ and build a model for that dimension. We run the same experiment for $d=2$ and $d=5$.
# %%
d = 2
rembo_2_final_datasets = []
for _ in range(num_seeds):
rembo_observer, rembo_search_space = make_REMBO_observer_and_search_space(
D, d, high_dim_objective, search_space
)
initial_query_points = rembo_search_space.sample_sobol(num_initial_points)
initial_data = rembo_observer(initial_query_points)
model = build_model(initial_data, d=d)
bo = trieste.bayesian_optimizer.BayesianOptimizer(
rembo_observer, rembo_search_space
)
result = bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
rembo_2_final_datasets.append(dataset)
# %% [markdown]
# We repeat the above but with d=5 - this might help find more suitable projections.
# %%
d = 5
rembo_5_final_datasets = []
for _ in range(num_seeds):
rembo_observer, rembo_search_space = make_REMBO_observer_and_search_space(
D, d, high_dim_objective, search_space
)
initial_query_points = rembo_search_space.sample_sobol(num_initial_points)
initial_data = rembo_observer(initial_query_points)
model = build_model(initial_data, d=d)
bo = trieste.bayesian_optimizer.BayesianOptimizer(
rembo_observer, rembo_search_space
)
result = bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
rembo_5_final_datasets.append(dataset)
# %% [markdown]
# We produce a regret plot below for each method.
# %%
import matplotlib.pyplot as plt
_, ax = plt.subplots(1, 3)
for i in range(num_seeds):
observations = final_datasets[i].observations.numpy()
suboptimality = observations - minimum.numpy()
ax[0].plot(np.minimum.accumulate(suboptimality))
ax[0].axvline(x=num_initial_points - 0.5, color="tab:blue")
ax[0].set_yscale("log")
ax[0].set_ylabel("Regret")
ax[0].set_ylim(0.001, 100)
ax[0].set_xlabel("# evaluations")
ax[0].set_title("Full-D BO")
rembo_observations = rembo_2_final_datasets[i].observations.numpy()
suboptimality = rembo_observations - minimum.numpy()
ax[1].plot(np.minimum.accumulate(suboptimality))
ax[1].axvline(x=num_initial_points - 0.5, color="tab:blue")
ax[1].set_yscale("log")
ax[1].set_ylim(0.001, 100)
ax[1].set_yticks([])
ax[1].set_xlabel("# evaluations")
ax[1].set_title("REMBO: d=2")
rembo_5_observations = rembo_5_final_datasets[i].observations.numpy()
suboptimality = rembo_5_observations - minimum.numpy()
ax[2].plot(np.minimum.accumulate(suboptimality))
ax[2].axvline(x=num_initial_points - 0.5, color="tab:blue")
ax[2].set_yscale("log")
ax[2].set_ylim(0.001, 100)
ax[2].set_yticks([])
ax[2].set_xlabel("# evaluations")
ax[2].set_title("REMBO: d=5")
# %% [markdown]
# We see that REMBO with $d=2$ generally performs the best, whereas both the full-dimensional approach and $d=5$ struggle more.
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 8,735 | 34.368421 | 453 | py |
trieste-develop | trieste-develop/docs/notebooks/explicit_constraints.pct.py | # %% [markdown]
# # Explicit Constraints
# %% [markdown]
# This notebook demonstrates ways to perform Bayesian optimization with Trieste in the presence of explicit input constraints.
# %%
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import trieste
np.random.seed(1234)
tf.random.set_seed(1234)
# %% [markdown]
# ## Describe the problem
#
# In this example, we consider the same problem presented in our [EI notebook](expected_improvement.ipynb), i.e. seeking the minimizer of the two-dimensional Branin function, but with input constraints.
#
# There are 3 linear constraints with respective lower/upper limits (i.e. 6 linear inequality constraints). There are 2 non-linear constraints with respective lower/upper limits (i.e. 4 non-linear inequality constraints).
#
# We begin our optimization after collecting five function evaluations from random locations in the search space.
# %%
from trieste.acquisition.function import fast_constraints_feasibility
from trieste.objectives import ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.space import Box, LinearConstraint, NonlinearConstraint
observer = mk_observer(ScaledBranin.objective)
def _nlc_func0(x):
c0 = x[..., 0] - 0.2 - tf.sin(x[..., 1])
c0 = tf.expand_dims(c0, axis=-1)
return c0
def _nlc_func1(x):
c1 = x[..., 0] - tf.cos(x[..., 1])
c1 = tf.expand_dims(c1, axis=-1)
return c1
constraints = [
LinearConstraint(
A=tf.constant([[-1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]),
lb=tf.constant([-0.4, 0.15, 0.2]),
ub=tf.constant([0.5, 0.9, 0.9]),
),
NonlinearConstraint(_nlc_func0, tf.constant(-1.0), tf.constant(0.0)),
NonlinearConstraint(_nlc_func1, tf.constant(-0.8), tf.constant(0.0)),
]
unconstrained_search_space = Box([0, 0], [1, 1])
constrained_search_space = Box([0, 0], [1, 1], constraints=constraints) # type: ignore
num_initial_points = 5
initial_query_points = constrained_search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# We wrap the objective and constraint functions as methods on the `Sim` class. This gives us a convenient way to visualise the objective function, as well as the constrained objective. We get the constrained objective by masking out regions where the constraint function is above the threshold.
# %%
from trieste.experimental.plotting import plot_objective_and_constraints
class Sim:
threshold = 0.5
@staticmethod
def objective(input_data):
return ScaledBranin.objective(input_data)
@staticmethod
def constraint(input_data):
# `fast_constraints_feasibility` returns the feasibility, so we invert it. The plotting
# function masks out values above the threshold.
return 1.0 - fast_constraints_feasibility(constrained_search_space)(
input_data
)
plot_objective_and_constraints(constrained_search_space, Sim)
plt.show()
# %% [markdown]
# In addition to the normal sampling methods, the search space provides sampling methods that return feasible points only. Here we demonstrate sampling 200 feasible points from the Halton sequence.
# We can visualise the sampled points along with the objective function and the constraints.
# %%
from trieste.experimental.plotting import plot_function_2d
[xi, xj] = np.meshgrid(
np.linspace(
constrained_search_space.lower[0],
constrained_search_space.upper[0],
100,
),
np.linspace(
constrained_search_space.lower[1],
constrained_search_space.upper[1],
100,
),
)
xplot = np.vstack(
(xi.ravel(), xj.ravel())
).T # Change our input grid to list of coordinates.
constraint_values = np.reshape(
constrained_search_space.is_feasible(xplot), xi.shape
)
_, ax = plot_function_2d(
ScaledBranin.objective,
constrained_search_space.lower,
constrained_search_space.upper,
contour=True,
)
points = constrained_search_space.sample_halton_feasible(200)
ax[0, 0].scatter(
points[:, 0],
points[:, 1],
s=15,
c="tab:orange",
edgecolors="black",
marker="o",
)
ax[0, 0].contourf(
xi,
xj,
constraint_values,
levels=1,
colors=[(0.2, 0.2, 0.2, 0.7), (1, 1, 1, 0)],
zorder=1,
)
ax[0, 0].set_xlabel(r"$x_1$")
ax[0, 0].set_ylabel(r"$x_2$")
plt.show()
# %% [markdown]
# ## Surrogate model
#
# We fit a surrogate Gaussian process model to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
gpflow_model = build_gpr(
initial_data, constrained_search_space, likelihood_variance=1e-7
)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# ## Constrained optimization method
#
# ### Acquisition function (constrained optimization)
#
# We can construct the _expected improvement_ acquisition function as usual. However, in order for the acquisition function to handle the constraints, the constrained search space must be passed as a constructor argument. Without the constrained search space, the acquisition function would be unconstrained _expected improvement_.
# %%
from trieste.acquisition.function import ExpectedImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization
ei = ExpectedImprovement(constrained_search_space)
rule = EfficientGlobalOptimization(ei) # type: ignore
# %% [markdown]
# ### Run the optimization loop (constrained optimization)
#
# We can now run the optimization loop. As the search space contains constraints, the optimizer will automatically switch to using the _scipy_ _trust-constr_ (trust region) method to optimize the acquisition function.
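# %% [markdown]
# For intuition, the cell below shows what such a constrained optimization looks like at the _scipy_ level, on a standalone toy quadratic rather than the actual acquisition function (an illustrative sketch, not Trieste's internal code):
# %%
import scipy.optimize as spo
toy_result = spo.minimize(
    lambda x: (x[0] - 1.0) ** 2 + x[1] ** 2,  # toy objective, minimum at (1, 0)
    x0=[0.5, 0.5],
    method="trust-constr",
    bounds=spo.Bounds([0.0, 0.0], [1.0, 1.0]),
    # First linear constraint from above: -x1 + x2 must lie in [-0.4, 0.5].
    constraints=[spo.LinearConstraint([[-1.0, 1.0]], lb=[-0.4], ub=[0.5])],
)
print(toy_result.x)  # the constraint is active, so this lies on its boundary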
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(
observer, constrained_search_space
)
num_steps = 15
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)
# %% [markdown]
# We can now get the best point found by the optimizer. Note this isn’t necessarily the point that was last evaluated.
# %%
query_point, observation, arg_min_idx = result.try_get_optimal_point()
print(f"query point: {query_point}")
print(f"observation: {observation}")
# %% [markdown]
# We obtain the final objective and constraint data using `.try_get_final_datasets()`. We can visualise how the optimizer performed by plotting all the acquired observations, along with the true function values and optima.
#
# The crosses are the 5 initial points that were sampled from the entire search space. The green circles are the acquired observations by the optimizer. The purple circle is the best point found.
# %%
from trieste.experimental.plotting import plot_bo_points, plot_function_2d
def plot_bo_results():
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(
ScaledBranin.objective,
constrained_search_space.lower,
constrained_search_space.upper,
contour=True,
figsize=(8, 6),
)
plot_bo_points(
query_points,
ax[0, 0],
num_initial_points,
arg_min_idx,
c_pass="green",
c_best="purple",
)
ax[0, 0].contourf(
xi,
xj,
constraint_values,
levels=1,
colors=[(0.2, 0.2, 0.2, 0.7), (1, 1, 1, 0)],
zorder=2,
)
ax[0, 0].set_xlabel(r"$x_1$")
ax[0, 0].set_ylabel(r"$x_2$")
plt.show()
plot_bo_results()
# %% [markdown]
# ## Penalty method
#
# ### Acquisition function (penalty method)
#
# We recommend using the constrained optimization method above for cases where it can be used, as it should be more efficient. However, there are setups where that method cannot be used, e.g. when using batches and some acquisition functions. For such cases, an alternative is to construct the standard _expected constrained improvement_ (similar to the [inequality-constraints notebook](inequality_constraints.ipynb)), except that instead of using probability of feasibility with respect to the constraint model, we construct feasibility from the explicit input constraints. This feasibility is calculated by passing each constraint's residuals (with respect to its limits) through a smooth step function and taking the product, as sketched below.
#
# For this method, the `FastConstraintsFeasibility` class should be passed constraints via the search space. `ExpectedConstrainedImprovement` and `BayesianOptimizer` should be set up as usual without the constrained search space.
#
# Note: this method penalises the expected improvement acquisition outside the feasible region. The optimizer uses the default _scipy_ _L-BFGS-B_ method to find the maximum of the acquisition function.
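# %% [markdown]
# As a rough illustration of this feasibility construction (a minimal sketch that uses a sigmoid with an arbitrary steepness as the smooth step function; not Trieste's actual implementation), here is how the residuals of the first linear constraint above could be combined:
# %%
def illustrative_feasibility(x, steepness=100.0):
    # Residuals of the first linear constraint: -x1 + x2 must lie in [-0.4, 0.5].
    ax = x[..., 1] - x[..., 0]
    residuals = tf.stack([ax - (-0.4), 0.5 - ax], axis=-1)
    # The smooth step maps positive residuals towards 1 and negative ones towards 0.
    smooth_step = tf.sigmoid(steepness * residuals)
    # Feasibility is the product over the individual inequality residuals.
    return tf.reduce_prod(smooth_step, axis=-1)
print(illustrative_feasibility(tf.constant([[0.5, 0.5], [0.1, 0.9]])))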
# %%
from trieste.acquisition.function import (
ExpectedConstrainedImprovement,
FastConstraintsFeasibility,
)
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.observer import OBJECTIVE
feas = FastConstraintsFeasibility(
constrained_search_space
) # Search space with constraints.
eci = ExpectedConstrainedImprovement(OBJECTIVE, feas.using(OBJECTIVE))
rule = EfficientGlobalOptimization(eci)
# %% [markdown]
# ### Run the optimization loop (penalty method)
#
# We can now run the optimization loop.
# %%
# Note: use the search space without constraints for the penalty method.
bo = trieste.bayesian_optimizer.BayesianOptimizer(
observer, unconstrained_search_space
)
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)
# %% [markdown]
# We can now get the best point found by the optimizer as before.
# %%
query_point, observation, arg_min_idx = result.try_get_optimal_point()
print(f"query point: {query_point}")
print(f"observation: {observation}")
# %% [markdown]
# Plot the results as before.
# %%
plot_bo_results()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 10,251 | 33.173333 | 724 | py |
trieste-develop | trieste-develop/docs/notebooks/failure_ego.pct.py | # %% [markdown]
# # EGO with a failure region
# %%
from __future__ import annotations
import numpy as np
import tensorflow as tf
np.random.seed(1234)
tf.random.set_seed(1234)
# %% [markdown]
# ## The problem
#
# This notebook is similar to the [EI notebook](expected_improvement.ipynb), where we look to find the minimum value of the two-dimensional Branin function over the hypercube $[0, 1]^2$. But here we constrain the problem by adding an area of the search space in which the objective fails to evaluate.
#
# We represent this setup with a function `masked_branin` that produces null values when evaluated in the disk with center $(0.5, 0.4)$ and radius $0.3$. It's important to remember that while _we_ know where this _failure region_ is, this function is a black box from the optimizer's point of view: the optimizer must learn it.
# %%
import trieste
def masked_branin(x):
mask_nan = np.sqrt((x[:, 0] - 0.5) ** 2 + (x[:, 1] - 0.4) ** 2) < 0.3
y = np.array(trieste.objectives.Branin.objective(x))
y[mask_nan] = np.nan
return tf.convert_to_tensor(y.reshape(-1, 1), x.dtype)
# %% [markdown]
# As mentioned, we'll search over the hypercube $[0, 1]^2$ ...
# %%
from trieste.space import Box
search_space = Box([0, 0], [1, 1])
# %% [markdown]
# ... where the `masked_branin` now looks as follows. The white area in the centre shows the failure
# region.
# %%
from trieste.experimental.plotting import plot_function_plotly
fig = plot_function_plotly(
masked_branin, search_space.lower, search_space.upper
)
fig.show()
# %% [markdown]
# ## Define the data sets
#
# We'll work with two data sets
#
# - one containing only those query_points and observations where the observations are finite.
# We'll label this with `OBJECTIVE`.
# - the other containing all the query points, but whose observations indicate if evaluating the
# observer failed at that point, using `0` if the evaluation failed, else `1`. We'll label this
# with `FAILURE`.
#
# Let's define an observer that outputs the data in these formats.
# %%
OBJECTIVE = "OBJECTIVE"
FAILURE = "FAILURE"
def observer(x):
y = masked_branin(x)
mask = np.isfinite(y).reshape(-1)
return {
OBJECTIVE: trieste.data.Dataset(x[mask], y[mask]),
FAILURE: trieste.data.Dataset(x, tf.cast(np.isfinite(y), tf.float64)),
}
# %% [markdown]
# We can evaluate the observer at points sampled from the search space.
# %%
num_init_points = 15
initial_data = observer(search_space.sample(num_init_points))
# %% [markdown]
# ## Build GPflow models
#
# We'll model the data on the objective with a regression model, and the data on which points failed with a classification model. The regression model will be a `GaussianProcessRegression` wrapping a GPflow `GPR` model, and the classification model a `VariationalGaussianProcess` wrapping a GPflow `VGP` model with a Bernoulli likelihood. The `GPR` and `VGP` models are built using Trieste's convenient model building functions `build_gpr` and `build_vgp_classifier`. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import build_gpr, build_vgp_classifier
regression_model = build_gpr(
initial_data[OBJECTIVE], search_space, likelihood_variance=1e-7
)
classification_model = build_vgp_classifier(
initial_data[FAILURE], search_space, noise_free=True
)
# %% [markdown]
# ## Build Trieste models
#
# We now specify how Trieste will use our GPflow models within the BO loop.
#
# For our `VGP` model we will use a non-default optimization: alternating Adam steps (to optimize the kernel parameters) and NatGrad steps (to optimize the variational parameters). For this we need to use the `BatchOptimizer` wrapper and set the `use_natgrads` model argument to `True` in our `VariationalGaussianProcess` model wrapper.
# %% [markdown]
# We'll train the GPR model with the default Scipy-based L-BFGS optimizer, and the VGP model with the custom algorithm above.
# %%
from trieste.models import TrainableProbabilisticModel
from trieste.models.gpflow.models import (
GaussianProcessRegression,
VariationalGaussianProcess,
)
from trieste.models.optimizer import BatchOptimizer
from trieste.types import Tag
models: dict[Tag, TrainableProbabilisticModel] = {
OBJECTIVE: GaussianProcessRegression(regression_model),
FAILURE: VariationalGaussianProcess(
classification_model,
BatchOptimizer(tf.optimizers.Adam(1e-3)),
use_natgrads=True,
),
}
# %% [markdown]
# ## Create a custom acquisition function
#
# We'll need a custom acquisition function for this problem. This function is the product of the expected improvement for the objective data and the predictive mean for the failure data. We can specify which data and model to use in each acquisition function builder with the `OBJECTIVE` and `FAILURE` labels. We'll optimize the function using `EfficientGlobalOptimization`.
# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.acquisition import (
SingleModelAcquisitionBuilder,
ExpectedImprovement,
Product,
)
class ProbabilityOfValidity(SingleModelAcquisitionBuilder):
def prepare_acquisition_function(self, model, dataset=None):
def acquisition(at):
mean, _ = model.predict_y(tf.squeeze(at, -2))
return mean
return acquisition
ei = ExpectedImprovement()
pov = ProbabilityOfValidity()
acq_fn = Product(ei.using(OBJECTIVE), pov.using(FAILURE))
rule = EfficientGlobalOptimization(acq_fn) # type: ignore
# %% [markdown]
# ## Run the optimizer
#
# Now, we run the Bayesian optimization loop for twenty steps, and print the location of the query point corresponding to the minimum observation.
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 20
result = bo.optimize(
num_steps, initial_data, models, rule
).final_result.unwrap()
arg_min_idx = tf.squeeze(
tf.argmin(result.datasets[OBJECTIVE].observations, axis=0)
)
print(f"query point: {result.datasets[OBJECTIVE].query_points[arg_min_idx, :]}")
# %% [markdown]
# We can visualise where the optimizer queried on a contour plot of the Branin with the failure region. The minimum observation can be seen along the bottom axis towards the right, outside of the failure region.
# %%
import matplotlib.pyplot as plt
from trieste.experimental.plotting import (
plot_gp_2d,
plot_function_2d,
plot_bo_points,
)
mask_fail = (
result.datasets[FAILURE].observations.numpy().flatten().astype(int) == 0
)
fig, ax = plot_function_2d(
masked_branin,
search_space.lower,
search_space.upper,
grid_density=20,
contour=True,
)
plot_bo_points(
result.datasets[FAILURE].query_points.numpy(),
ax=ax[0, 0],
num_init=num_init_points,
mask_fail=mask_fail,
)
plt.show()
# %% [markdown]
# We can also plot the mean and variance of the predictive distribution over the search space, first for the objective data and model ...
# %%
from trieste.experimental.plotting import (
plot_model_predictions_plotly,
add_bo_points_plotly,
)
arg_min_idx = tf.squeeze(
tf.argmin(result.datasets[OBJECTIVE].observations, axis=0)
)
fig = plot_model_predictions_plotly(
result.models[OBJECTIVE],
search_space.lower,
search_space.upper,
)
fig = add_bo_points_plotly(
x=result.datasets[OBJECTIVE].query_points[:, 0].numpy(),
y=result.datasets[OBJECTIVE].query_points[:, 1].numpy(),
z=result.datasets[OBJECTIVE].observations.numpy().flatten(),
num_init=num_init_points,
idx_best=arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# ... and then for the failure data and model
# %%
fig, ax = plot_gp_2d(
result.models[FAILURE].model, # type: ignore
search_space.lower,
search_space.upper,
grid_density=20,
contour=True,
figsize=(12, 5),
predict_y=True,
)
plot_bo_points(
result.datasets[FAILURE].query_points.numpy(),
num_init=num_init_points,
ax=ax[0, 0],
mask_fail=mask_fail,
)
plot_bo_points(
result.datasets[FAILURE].query_points.numpy(),
num_init=num_init_points,
ax=ax[0, 1],
mask_fail=mask_fail,
)
plt.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 8,385 | 30.059259 | 571 | py |
trieste-develop | trieste-develop/docs/notebooks/inequality_constraints.pct.py | # %% [markdown]
# # Inequality constraints
# %%
import numpy as np
import tensorflow as tf
np.random.seed(1799)
tf.random.set_seed(1799)
# %% [markdown]
# ## The problem
#
# In this tutorial, we replicate one of the results of <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, specifically their synthetic experiment "simulation 1", which consists of an objective function with a single constraint, defined over a two-dimensional input domain. We'll start by defining the problem parameters. The constraint is satisfied when `constraint(input_data) <= threshold`.
# %%
from trieste.space import Box
class Sim:
threshold = 0.5
@staticmethod
def objective(input_data):
x, y = input_data[..., -2], input_data[..., -1]
z = tf.cos(2.0 * x) * tf.cos(y) + tf.sin(x)
return z[:, None]
@staticmethod
def constraint(input_data):
x, y = input_data[:, -2], input_data[:, -1]
z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
return z[:, None]
search_space = Box([0, 0], [6, 6])
# %% [markdown]
# The objective and constraint functions are accessible as methods on the `Sim` class. Let's visualise these functions, as well as the constrained objective. We get the constrained objective by masking out regions where the constraint function is above the threshold.
# %%
import trieste
import matplotlib.pyplot as plt
from trieste.experimental.plotting import plot_objective_and_constraints
plot_objective_and_constraints(search_space, Sim)
plt.show()
# %% [markdown]
# We'll make an observer that outputs both the objective and constraint data. Since the observer is outputting multiple datasets, we have to label them so that the optimization process knows which is which.
# %%
from trieste.data import Dataset
OBJECTIVE = "OBJECTIVE"
CONSTRAINT = "CONSTRAINT"
def observer(query_points):
return {
OBJECTIVE: Dataset(query_points, Sim.objective(query_points)),
CONSTRAINT: Dataset(query_points, Sim.constraint(query_points)),
}
# %% [markdown]
# Let's randomly sample some initial data from the observer ...
# %%
num_initial_points = 5
initial_data = observer(search_space.sample(num_initial_points))
# %% [markdown]
# ... and visualise those points on the constrained objective. Note how the generated data is labelled, like the observer.
# %%
from trieste.experimental.plotting import plot_init_query_points
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
)
plt.show()
# %% [markdown]
# ## Modelling the two functions
#
# We'll model the objective and constraint data with their own Gaussian process regression models, as implemented in GPflow. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import build_gpr, GaussianProcessRegression
def create_bo_model(data):
gpr = build_gpr(data, search_space, likelihood_variance=1e-7)
return GaussianProcessRegression(gpr)
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
# %% [markdown]
# ## Define the acquisition process
#
# We can construct the _expected constrained improvement_ acquisition function defined in <cite data-cite="gardner14">[Gardner et al.](http://proceedings.mlr.press/v32/gardner14.html)</cite>, where they use the probability of feasibility with respect to the constraint model.
# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
eci = trieste.acquisition.ExpectedConstrainedImprovement(
OBJECTIVE, pof.using(CONSTRAINT)
)
rule = EfficientGlobalOptimization(eci) # type: ignore
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop. We obtain the final objective and constraint data using `.try_get_final_datasets()`.
# %%
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
data = bo.optimize(
num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()
# %% [markdown]
# To conclude this section, we visualise the resulting data. Orange dots show the new points queried during optimization. Notice the concentration of these points in regions near the local minima.
# %%
constraint_data = data[CONSTRAINT]
new_query_points = constraint_data.query_points[-num_steps:]
new_observations = constraint_data.observations[-num_steps:]
new_data = (new_query_points, new_observations)
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
new_data,
)
plt.show()
# %% [markdown]
# ## Batch-sequential strategy
#
# We'll now look at a batch-sequential approach to the same problem. Sometimes it's beneficial to query several points at a time instead of one. The acquisition function we used earlier, built by `ExpectedConstrainedImprovement`, only supports a batch size of 1, so we'll need a new acquisition function builder for larger batch sizes. We can implement this using the reparametrization trick with the Monte-Carlo sampler `BatchReparametrizationSampler`. Note that when we do this, we must initialise the sampler *outside* the acquisition function (here `batch_efi`). This is crucial: a given instance of a sampler produces repeatable, continuous samples, and we can use this to create a repeatable continuous acquisition function. Using a new sampler on each call would not result in a repeatable continuous acquisition function.
# %%
class BatchExpectedConstrainedImprovement(
trieste.acquisition.AcquisitionFunctionBuilder
):
def __init__(self, sample_size, threshold):
self._sample_size = sample_size
self._threshold = threshold
def prepare_acquisition_function(self, models, datasets):
objective_model = models[OBJECTIVE]
objective_dataset = datasets[OBJECTIVE]
samplers = {
tag: trieste.models.gpflow.BatchReparametrizationSampler(
self._sample_size, model
)
for tag, model in models.items()
}
pf = trieste.acquisition.probability_below_threshold(
models[CONSTRAINT], self._threshold
)(tf.expand_dims(objective_dataset.query_points, 1))
is_feasible = pf >= 0.5
mean, _ = objective_model.predict(objective_dataset.query_points)
eta = tf.reduce_min(tf.boolean_mask(mean, is_feasible), axis=0)
def batch_efi(at):
samples = {
tag: tf.squeeze(sampler.sample(at), -1)
for tag, sampler in samplers.items()
}
feasible_mask = samples[CONSTRAINT] < self._threshold # [N, S, B]
improvement = tf.where(
feasible_mask, tf.maximum(eta - samples[OBJECTIVE], 0.0), 0.0
) # [N, S, B]
batch_improvement = tf.reduce_max(improvement, axis=-1) # [N, S]
return tf.reduce_mean(
batch_improvement, axis=-1, keepdims=True
) # [N, 1]
return batch_efi
num_query_points = 4
sample_size = 50
batch_eci = BatchExpectedConstrainedImprovement(sample_size, Sim.threshold)
batch_rule = EfficientGlobalOptimization( # type: ignore
batch_eci, num_query_points=num_query_points
)
# %% [markdown]
# We can now run the BO loop as before; note that here we also query twenty points, but in five batches of four points.
# %%
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
num_steps = 5
batch_data = bo.optimize(
num_steps, initial_data, initial_models, batch_rule, track_state=False
).try_get_final_datasets()
# %% [markdown]
# We visualise the resulting data as before.
# %%
batch_constraint_data = batch_data[CONSTRAINT]
new_batch_data = (
batch_constraint_data.query_points[-num_query_points * num_steps :],
batch_constraint_data.observations[-num_query_points * num_steps :],
)
plot_init_query_points(
search_space,
Sim,
initial_data[OBJECTIVE].astuple(),
initial_data[CONSTRAINT].astuple(),
new_batch_data,
)
plt.show()
# %% [markdown]
# Finally, we compare the regret from the non-batch strategy (left panel) to the regret from the batch strategy (right panel).
# In the following plots each marker represents a query point. The x-axis is the index of the query point (where the first queried point has index 0), and the y-axis is the observed value. The vertical blue line denotes the end of initialisation/start of optimisation. Green points satisfy the constraint, red points do not.
# %%
from trieste.experimental.plotting import plot_regret
mask_fail = constraint_data.observations.numpy() > Sim.threshold
batch_mask_fail = batch_constraint_data.observations.numpy() > Sim.threshold
fig, ax = plt.subplots(1, 2, sharey="all")
plot_regret(
data[OBJECTIVE].observations.numpy(),
ax[0],
num_init=num_initial_points,
mask_fail=mask_fail.flatten(),
)
plot_regret(
batch_data[OBJECTIVE].observations.numpy(),
ax[1],
num_init=num_initial_points,
mask_fail=batch_mask_fail.flatten(),
)
# %% [markdown]
# ## Constrained optimization with more than one constraint
#
# We'll now show how to use a reducer to combine multiple constraints. The new problem `Sim2` inherits the objective and first constraint from the previous problem, and adds a second constraint. We start by adding an output to our observer, and creating a set of three models.
# %%
class Sim2(Sim):
threshold2 = 0.5
@staticmethod
def constraint2(input_data):
x, y = input_data[:, -2], input_data[:, -1]
z = tf.sin(x) * tf.cos(y) - tf.cos(x) * tf.sin(y)
return z[:, None]
CONSTRAINT2 = "CONSTRAINT2"
def observer_two_constraints(query_points):
return {
OBJECTIVE: Dataset(query_points, Sim2.objective(query_points)),
CONSTRAINT: Dataset(query_points, Sim2.constraint(query_points)),
CONSTRAINT2: Dataset(query_points, Sim2.constraint2(query_points)),
}
num_initial_points = 10
initial_data = observer_two_constraints(search_space.sample(num_initial_points))
initial_models = trieste.utils.map_values(create_bo_model, initial_data)
# %% [markdown]
# Now, the probability that the two constraints are feasible is the product of the two feasibilities. Hence, we combine the two `ProbabilityOfFeasibility` functions into one quantity by using a `Product` `Reducer`:
# %%
from trieste.acquisition.combination import Product
pof1 = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim2.threshold)
pof2 = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim2.threshold2)
pof = Product(pof1.using(CONSTRAINT), pof2.using(CONSTRAINT2)) # type: ignore
# %% [markdown]
# We can now run the BO loop as before, and visualize the results:
# %%
eci = trieste.acquisition.ExpectedConstrainedImprovement(OBJECTIVE, pof) # type: ignore
rule = EfficientGlobalOptimization(eci)
num_steps = 20
bo = trieste.bayesian_optimizer.BayesianOptimizer(
observer_two_constraints, search_space
)
data = bo.optimize(
num_steps, initial_data, initial_models, rule, track_state=False
).try_get_final_datasets()
constraint_data = data[CONSTRAINT]
new_query_points = constraint_data.query_points[-num_steps:]
new_observations = constraint_data.observations[-num_steps:]
new_data = (new_query_points, new_observations)
def masked_objective(x):
mask_nan = np.logical_or(
Sim2.constraint(x) > Sim2.threshold,
Sim2.constraint2(x) > Sim2.threshold2,
)
y = np.array(Sim2.objective(x))
y[mask_nan] = np.nan
return tf.convert_to_tensor(y.reshape(-1, 1), x.dtype)
# Note: compare the raw float observations to the thresholds when masking failures.
mask_fail1 = (
    data[CONSTRAINT].observations.numpy().flatten() > Sim2.threshold
)
mask_fail2 = (
    data[CONSTRAINT2].observations.numpy().flatten() > Sim2.threshold2
)
mask_fail = np.logical_or(mask_fail1, mask_fail2)
import matplotlib.pyplot as plt
from trieste.experimental.plotting import plot_function_2d, plot_bo_points
fig, ax = plot_function_2d(
masked_objective,
search_space.lower,
search_space.upper,
contour=True,
)
plot_bo_points(
data[OBJECTIVE].query_points.numpy(),
ax=ax[0, 0],
num_init=num_initial_points,
mask_fail=mask_fail,
)
plt.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 12,770 | 34.085165 | 829 | py |
trieste-develop | trieste-develop/docs/notebooks/thompson_sampling.pct.py | # %% [markdown]
# # Batch-sequential optimization with Thompson sampling
# %%
import numpy as np
import tensorflow as tf
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Define the problem and model
#
# You can use Thompson sampling for Bayesian optimization in much the same way as we used EGO and EI in the tutorial _Introduction_. Since the setup is much the same as in that tutorial, we'll skip over most of the detail.
#
# We'll use a continuous bounded search space, and evaluate the observer at ten random points.
# %%
import trieste
from trieste.objectives import Branin
branin = Branin.objective
search_space = Branin.search_space
num_initial_data_points = 10
initial_query_points = search_space.sample(num_initial_data_points)
observer = trieste.objectives.utils.mk_observer(branin)
initial_data = observer(initial_query_points)
# %% [markdown]
# We'll use Gaussian process regression to model the function, as implemented in GPflow. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# ## Create the Thompson sampling acquisition rule
#
# We achieve Bayesian optimization with Thompson sampling by specifying `DiscreteThompsonSampling` as the acquisition rule. Unlike the `EfficientGlobalOptimization` acquisition rule, `DiscreteThompsonSampling` does not use an acquisition function. Instead, in each optimization step, the rule draws `num_query_points` samples from the model posterior at `num_search_space_samples` points in the search space. It then returns the `num_query_points` points among those that minimise the sampled posterior.
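# %% [markdown]
# For intuition, the cell below sketches a single step of this rule. It is a simplification: we draw independent samples from the marginal posterior at each candidate point, whereas the actual rule samples from the model posterior jointly over the candidates.
# %%
def sketch_thompson_step(model, num_candidates=1000, batch_size=10):
    candidates = search_space.sample(num_candidates)
    mean, variance = model.predict(candidates)
    # One (approximate) posterior sample of the objective per requested query point.
    samples = mean + tf.sqrt(variance) * tf.random.normal(
        [batch_size, num_candidates, 1], dtype=mean.dtype
    )
    # Each sampled function votes for the candidate that minimises it.
    min_idx = tf.argmin(tf.squeeze(samples, axis=-1), axis=-1)
    return tf.gather(candidates, min_idx)
print(sketch_thompson_step(model).shape)  # 10 query points in 2D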
# %%
num_search_space_samples = 1000
num_query_points = 10
acq_rule = trieste.acquisition.rule.DiscreteThompsonSampling(
num_search_space_samples=num_search_space_samples,
num_query_points=num_query_points,
)
# %% [markdown]
# ## Run the optimization loop
#
# All that remains is to pass the Thompson sampling rule to the `BayesianOptimizer`. Once the optimization loop is complete, the optimizer will return `num_query_points` new query points for every step in the loop. With five steps, that's fifty points.
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 5
result = bo.optimize(
num_steps, initial_data, model, acq_rule, track_state=False
)
dataset = result.try_get_final_dataset()
# %% [markdown]
# ## Visualising the result
#
# We can take a look at where we queried the observer, both the original query points (crosses) and new query points (dots), and where they lie with respect to the contours of the Branin.
# %%
from trieste.experimental.plotting import plot_function_2d, plot_bo_points
arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
_, ax = plot_function_2d(
branin,
search_space.lower,
search_space.upper,
grid_density=40,
contour=True,
)
plot_bo_points(query_points, ax[0, 0], num_initial_data_points, arg_min_idx)
# %% [markdown]
# We can also visualise the observations on a three-dimensional plot of the Branin. We'll add the contours of the mean and variance of the model's predictive distribution as translucent surfaces.
# %%
from trieste.experimental.plotting import (
plot_model_predictions_plotly,
add_bo_points_plotly,
)
fig = plot_model_predictions_plotly(
result.try_get_final_model(),
search_space.lower,
search_space.upper,
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_data_points,
idx_best=arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 4,301 | 35.151261 | 500 | py |
trieste-develop | trieste-develop/docs/notebooks/feasible_sets.pct.py | # %% [markdown]
# # Bayesian active learning of failure or feasibility regions
#
# When designing a system it is important to identify design parameters that may affect the reliability of the system and cause failures, or lead to unsatisfactory performance. Consider designing a communication network where some design parameters lead to unacceptably long delays for users. A designer of the system would then decide what the maximum acceptable delay is, and want to identify a *failure region* in the parameter space that would lead to longer delays, or conversely, a *feasible region* with safe performance.
#
# When evaluating the system is expensive (e.g. lengthy computer simulations), identification of the failure region needs to be performed with a limited number of evaluations. Traditional Monte Carlo based methods are not suitable here as they require too many evaluations. Bayesian active learning methods, however, are well suited for the task. Here we show how Trieste can be used to identify failure or feasible regions with the help of acquisition functions designed with this goal in mind.
#
# %%
# %matplotlib inline
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
tf.get_logger().setLevel("ERROR")
import numpy as np
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## A toy problem
#
# Throughout the tutorial we will use the standard Branin function as a stand-in for an expensive-to-evaluate system. We create a failure region by thresholding the value at 80: the part of the space where the value exceeds 80 is considered the failure region. This region needs to be learned as efficiently as possible by the active learning algorithm.
#
# Note that if we are interested in a feasibility region instead, it is simply the complement of the failure region, i.e. the part of the space where the value is below 80.
#
# We illustrate the thresholded Branin function below; note that no values are observed above the threshold of 80.
# %%
from trieste.objectives import Branin
from trieste.experimental.plotting import plot_function_plotly
branin = Branin.objective
search_space = Branin.search_space
# threshold is arbitrary, but has to be within the range of the function
threshold = 80.0
# define a modified branin function
def thresholded_branin(x):
y = np.array(branin(x))
y[y > threshold] = np.nan
return tf.convert_to_tensor(y.reshape(-1, 1), x.dtype)
# illustrate the thresholded branin function
fig = plot_function_plotly(
thresholded_branin, search_space.lower, search_space.upper
)
fig.show()
# %% [markdown]
# We start with a small initial dataset where our expensive-to-evaluate function is evaluated on points coming from a space-filling Halton sequence.
# %%
import trieste
observer = trieste.objectives.utils.mk_observer(branin)
num_initial_points = 6
initial_query_points = search_space.sample_halton(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Probabilistic model of the objective function
#
# Just like in sequential optimization, we use a probabilistic model of the objective function. Acquisition functions will exploit the predictive posterior of the model to identify the failure region. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
import gpflow
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# ## Active learning with Expected feasibility acquisition function
#
# The problem of identifying a failure or feasibility region of a (expensive-to-evaluate) function $f$ can be formalized as estimating the excursion set, $\Gamma^* = \{ x \in X: f(x) \ge T\}$, or estimating the contour line, $C^* = \{ x \in X: f(x) = T\}$, for some threshold $T$ (see <cite data-cite="bect2012sequential"/> for more details).
#
# It turns out that Gaussian processes can be used as classifiers for identifying where the excursion probability is larger than 1/2, and this idea is used to build many sequential sampling strategies. Here we introduce the expected feasibility acquisition function, which implements two related sampling strategies called the *bichon* criterion (<cite data-cite="bichon2008efficient"/>) and the *ranjan* criterion (<cite data-cite="ranjan2008sequential"/>). <cite data-cite="bect2012sequential"/> provides a common expression for these two criteria: $$\mathbb{E}[\max(0, (\alpha s(x))^\delta - |T - f(x)|^\delta)]$$
#
# Here $m(x)$ and $s(x)$ are the mean and standard deviation of the predictive posterior of the Gaussian process model, and the expectation is taken over the posterior distribution of the objective $f(x)$. The bichon criterion is obtained when $\delta = 1$ while the ranjan criterion is obtained when $\delta = 2$. $\alpha>0$ is another parameter that acts as a percentage of the standard deviation of the posterior around the current boundary estimate where we want to sample. The goal is to sample a point with a mean close to the threshold $T$ and a high variance, so that the positive difference in the equation above is as large as possible.
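# %% [markdown]
# For intuition, the cell below estimates this criterion by Monte Carlo, drawing samples of $f(x)$ from the model's marginal posterior. This is only a minimal sketch of the quantity (Trieste's `ExpectedFeasibility` uses closed-form expressions instead), and since the model has not been trained yet at this point, the printed values are purely illustrative:
# %%
def sketch_expected_feasibility(x, model, alpha=1.0, delta=1, num_samples=10000):
    mean, variance = model.predict(x)
    std = tf.sqrt(variance)
    # Monte Carlo estimate of E[max(0, (alpha * s(x))^delta - |T - f(x)|^delta)].
    f_samples = mean + std * tf.random.normal(
        [num_samples, x.shape[0], 1], dtype=mean.dtype
    )
    integrand = tf.maximum(
        (alpha * std) ** delta - tf.abs(threshold - f_samples) ** delta, 0.0
    )
    return tf.reduce_mean(integrand, axis=0)
print(sketch_expected_feasibility(search_space.sample(5), model))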
# %% [markdown]
# We now illustrate the `ExpectedFeasibility` acquisition function using the bichon criterion. Performance for the ranjan criterion is typically very similar. `ExpectedFeasibility` takes the threshold as an input and has two parameters, `alpha` and `delta`, following the description above.
#
# Note that even though below we use `ExpectedFeasibility` with the `EfficientGlobalOptimization` rule in the `BayesianOptimizer` routine, we are actually performing active learning. The only relevant difference between the two is the nature of the acquisition function: optimization acquisition functions are designed to find the optimum of a function, while active learning ones are designed to learn the function (or some aspect of it, as here).
# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.acquisition.function import ExpectedFeasibility
# Bichon criterion
delta = 1
# set up the acquisition rule and initialize the Bayesian optimizer
acq = ExpectedFeasibility(threshold, delta=delta)
rule = EfficientGlobalOptimization(builder=acq) # type: ignore
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 10
result = bo.optimize(num_steps, initial_data, model, rule)
# %% [markdown]
# Let's illustrate the results.
#
# To identify the failure or feasibility region we compute the excursion probability using our Gaussian process model: $$\Phi\left(\frac{m(x) - T}{s(x)}\right)$$ where $\Phi$ is the standard normal cumulative distribution function, and $m(x)$ and $s(x)$ are the mean and standard deviation of the predictive posterior of the model given the data.
#
# We plot a two-dimensional contour map of our thresholded Branin function as a reference, an excursion probability map using the model fitted to the initial data alone, and an updated excursion probability map after all the active learning steps.
#
# We first define helper functions for computing excursion probabilities and plotting, and then plot the thresholded Branin function as a reference. The white area represents the failure region.
# %%
from trieste.experimental.plotting import plot_bo_points, plot_function_2d
import tensorflow_probability as tfp
def excursion_probability(x, model, threshold=80):
mean, variance = model.model.predict_f(x)
normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
threshold = tf.cast(threshold, x.dtype)
if tf.size(threshold) == 1:
t = (mean - threshold) / tf.sqrt(variance)
return normal.cdf(t)
else:
t0 = (mean - threshold[0]) / tf.sqrt(variance)
t1 = (mean - threshold[1]) / tf.sqrt(variance)
return normal.cdf(t1) - normal.cdf(t0)
def plot_excursion_probability(
title, model=None, query_points=None, threshold=80.0
):
if model is None:
objective_function = thresholded_branin
else:
def objective_function(x):
return excursion_probability(x, model, threshold)
_, ax = plot_function_2d(
objective_function,
search_space.lower - 0.01,
search_space.upper + 0.01,
contour=True,
colorbar=True,
figsize=(10, 6),
title=[title],
xlabel="$X_1$",
ylabel="$X_2$",
fill=True,
)
if query_points is not None:
plot_bo_points(query_points, ax[0, 0], num_initial_points)
plot_excursion_probability("Excursion set, Branin function")
# %% [markdown]
# Next we illustrate the excursion probability map using the model fitted to the initial data alone. On the figure below we can see that the failure region boundary has been identified with some accuracy in the upper right corner, but not in the lower left corner. It is also loosely defined, as indicated by a slow decrease and increase away from the 0.5 excursion probability contour.
# %%
# extracting the data to illustrate the points
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# fitting the model only to the initial data
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
initial_model = GaussianProcessRegression(gpflow_model)
initial_model.optimize(initial_data)
plot_excursion_probability(
"Probability of excursion, initial data",
initial_model,
    query_points[:num_initial_points],
)
# %% [markdown]
# Next we examine an updated excursion probability map after the 10 active learning steps. We can now see that the model is much more accurate and confident, as indicated by a good match with the reference thresholded Branin function and sharp decrease/increase away from the 0.5 excursion probability contour.
# %%
updated_model = result.try_get_final_model()
plot_excursion_probability(
"Updated probability of excursion", updated_model, query_points
)
# %% [markdown]
# We can also examine what would happen if we continued for many more active learning steps. One would expect query points to be allocated closer and closer to the boundary, with the uncertainty continuing to collapse. Indeed, in the figure below we observe exactly that: with 10 more observations the model represents the failure region boundary precisely, and most of the additional query points lie close to the threshold line.
# %%
num_steps = 10
result = bo.optimize(num_steps, dataset, model, rule)
final_model = result.try_get_final_model()
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
plot_excursion_probability(
"Final probability of excursion", final_model, query_points
)
# %% [markdown]
# ## Active learning with Integrated Variance Reduction acquisition function
#
# An alternative to the `ExpectedFeasibility` acquisition function is called `IntegratedVarianceReduction`. This acquisition has the advantage of taking into account reduction of uncertainty in a region of the search space when choosing the next point to sample, instead of considering only the sampling point. This makes it more expensive to compute than `ExpectedFeasibility`, since it involves computing an integral over a set of integration points. This integration region is determined by the user, with the `integration_points` parameter. Another advantage is that `IntegratedVarianceReduction` can produce batches of points, which becomes useful when parallel evaluations are possible.
#
# Below we perform 10 active learning steps with a batch size of 2 using the `IntegratedVarianceReduction` acquisition function and, as above, plot the excursion probability of the final model.
# %%
from trieste.acquisition.function import IntegratedVarianceReduction
# Choose integration points uniformly over the design space
integration_points = search_space.sample_halton(1000)
acq_ivr = IntegratedVarianceReduction(
integration_points=integration_points,
threshold=threshold,
)
# Set a batch size greater than 1 with the 'num_query_points' parameter
rule_ivr = EfficientGlobalOptimization(builder=acq_ivr, num_query_points=2) # type: ignore
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 10
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
result_ivr = bo.optimize(num_steps, initial_data, model, rule_ivr)
final_model_ivr = result_ivr.try_get_final_model()
dataset_ivr = result_ivr.try_get_final_dataset()
query_points_ivr = dataset_ivr.query_points.numpy()
plot_excursion_probability(
"Final probability of excursion", final_model_ivr, query_points_ivr
)
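# %% [markdown]
# For intuition, the cell below sketches the quantity that IVR targets: how much observing a candidate point would reduce the posterior variance at the integration points, computed here with the standard Gaussian conditioning formula for a noise-free observation (an illustrative sketch, not Trieste's implementation):
# %%
def sketch_variance_reduction(candidate, model, num_integration_points=100):
    points = tf.concat(
        [search_space.sample_halton(num_integration_points), candidate], axis=0
    )
    # Joint posterior covariance over [integration points, candidate].
    _, cov = model.model.predict_f(points, full_cov=True)
    cov = cov[0]  # drop the leading output dimension
    cross = cov[:-1, -1]  # covariance between integration points and candidate
    # Conditioning on a noise-free observation at the candidate shrinks the
    # variance at each integration point by cov(u, x)^2 / var(x).
    reduction = tf.square(cross) / cov[-1, -1]
    return tf.reduce_mean(reduction)
print(sketch_variance_reduction(search_space.sample(1), final_model_ivr))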
# %% [markdown]
# One can also specify a range of thresholds rather than a single value, by passing a minimum and a maximum threshold as the `threshold` parameter. The resulting query points are likely to be more spread out than previously, as the aim is now to estimate the whole region between the thresholds well, rather than a single contour line.
# %%
thresholds = [50.0, 110.0]
acq_range = IntegratedVarianceReduction(
integration_points=integration_points, threshold=thresholds
)
rule_range = EfficientGlobalOptimization(builder=acq_range, num_query_points=2) # type: ignore
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
result_range = bo.optimize(num_steps, initial_data, model, rule_range)
# %% [markdown]
# We can now illustrate the probability that a point in the search space belongs to the threshold interval rather than the probability that points exceed a single threshold. We compare probability maps obtained with the `IntegratedVarianceReduction` (IVR) when optimising for the threshold range and for the single threshold at the center of the range, as well as to a probability map for the `ExpectedFeasibility` function obtained with a single threshold. As expected, the `IntegratedVarianceReduction` with threshold range spreads query points a bit more, which leads to a sharper probability boundary.
# %%
final_model_range = result_range.try_get_final_model()
dataset_range = result_range.try_get_final_dataset()
query_points_range = dataset_range.query_points.numpy()
plot_excursion_probability(
"Probability of being in the range (IVR range of thresholds)",
final_model_range,
query_points_range,
threshold=thresholds,
)
plot_excursion_probability(
"Probability of being in the range (IVR single threshold)",
final_model_ivr,
query_points_ivr,
threshold=thresholds,
)
plot_excursion_probability(
"Probability of being in the range (EF)",
final_model,
query_points,
threshold=thresholds,
)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 15,240 | 48.644951 | 692 | py |
trieste-develop | trieste-develop/docs/notebooks/multi_objective_ehvi.pct.py | # -*- coding: utf-8 -*-
# %% [markdown]
# # Multi-objective optimization with Expected HyperVolume Improvement
# %%
import math
import gpflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from trieste.experimental.plotting import (
plot_bo_points,
plot_function_2d,
plot_mobo_history,
plot_mobo_points_in_obj_space,
)
# %%
import trieste
from trieste.acquisition.function import ExpectedHypervolumeImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.data import Dataset
from trieste.models import TrainableModelStack
from trieste.models.gpflow import build_gpr, GaussianProcessRegression
from trieste.space import Box, SearchSpace
from trieste.objectives.multi_objectives import VLMOP2
from trieste.acquisition.multi_objective.pareto import (
Pareto,
get_reference_point,
)
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this tutorial, we provide a multi-objective optimization example using the expected hypervolume improvement acquisition function.
# We consider the VLMOP2 problem --- a synthetic benchmark problem with two objectives and input dimensionality of two. We start by defining the problem parameters.
# %%
vlmop2 = VLMOP2(2)
observer = trieste.objectives.utils.mk_observer(vlmop2.objective)
# %%
mins = [-2, -2]
maxs = [2, 2]
search_space = Box(mins, maxs)
num_objective = 2
# %% [markdown]
# Let's randomly sample some initial data from the observer ...
# %%
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ... and visualise the data across the design space: each figure contains the contour lines of each objective function.
# %%
_, ax = plot_function_2d(
vlmop2.objective,
mins,
maxs,
contour=True,
title=["Obj 1", "Obj 2"],
figsize=(12, 6),
colorbar=True,
xlabel="$X_1$",
ylabel="$X_2$",
)
plot_bo_points(initial_query_points, ax=ax[0, 0], num_init=num_initial_points)
plot_bo_points(initial_query_points, ax=ax[0, 1], num_init=num_initial_points)
plt.show()
# %% [markdown]
# ... and in the objective space. The `plot_mobo_points_in_obj_space` function automatically searches for non-dominated points and colours them purple; a rough sketch of this filtering follows.
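# %% [markdown]
# For intuition, here is the non-dominated filtering as an illustrative helper (not the library's implementation): a point is non-dominated if no other point is at least as good in every objective and strictly better in at least one.
# %%
def sketch_non_dominated_mask(observations):
    obs = np.asarray(observations)  # [N, K] objectives to be minimised
    mask = np.ones(len(obs), dtype=bool)
    for i in range(len(obs)):
        others_dominate = np.all(obs <= obs[i], axis=1) & np.any(
            obs < obs[i], axis=1
        )
        mask[i] = not np.any(others_dominate)
    return mask
print(sketch_non_dominated_mask(initial_data.observations))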
# %%
plot_mobo_points_in_obj_space(initial_data.observations)
plt.show()
# %% [markdown]
# ## Modelling the two functions
#
# In this example we model the two objective functions individually with their own Gaussian process models; for problems where the objective functions are similar it may make sense to build a joint model instead.
#
# We use the `TrainableModelStack` model wrapper to stack these two independent GPs into a single model that works as an (independent) multi-output model. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
def build_stacked_independent_objectives_model(
data: Dataset, num_output: int, search_space: SearchSpace
) -> TrainableModelStack:
gprs = []
for idx in range(num_output):
single_obj_data = Dataset(
data.query_points, tf.gather(data.observations, [idx], axis=1)
)
gpr = build_gpr(single_obj_data, search_space, likelihood_variance=1e-7)
gprs.append((GaussianProcessRegression(gpr), 1))
return TrainableModelStack(*gprs)
# %%
model = build_stacked_independent_objectives_model(
initial_data, num_objective, search_space
)
# %% [markdown]
# ## Define the acquisition function
# Here we utilize the [EHVI](https://link.springer.com/article/10.1007/s10898-019-00798-7) acquisition function, `ExpectedHypervolumeImprovement`:
# %%
ehvi = ExpectedHypervolumeImprovement()
rule: EfficientGlobalOptimization = EfficientGlobalOptimization(builder=ehvi)
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the optimization loop
# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(num_steps, initial_data, model, acquisition_rule=rule)
# %% [markdown]
# To conclude, we visualize the queried data across the design space.
# We represent the initial points as crosses and the points obtained by our optimization loop as dots.
# %%
dataset = result.try_get_final_dataset()
data_query_points = dataset.query_points
data_observations = dataset.observations
_, ax = plot_function_2d(
vlmop2.objective,
mins,
maxs,
contour=True,
figsize=(12, 6),
title=["Obj 1", "Obj 2"],
xlabel="$X_1$",
ylabel="$X_2$",
colorbar=True,
)
plot_bo_points(data_query_points, ax=ax[0, 0], num_init=num_initial_points)
plot_bo_points(data_query_points, ax=ax[0, 1], num_init=num_initial_points)
plt.show()
# %% [markdown]
# Visualize in objective space. Purple dots denote the non-dominated points.
# %%
plot_mobo_points_in_obj_space(data_observations, num_init=num_initial_points)
plt.show()
# %% [markdown]
# We can also visualize how a performance metric evolved with respect to the number of BO iterations.
# First, we need to define a performance metric. Many metrics have been considered for multi-objective optimization. Here, we use the log hypervolume difference, defined as the difference between the hypervolume of the actual Pareto front and the hypervolume of the approximate Pareto front based on the BO-obtained data.
# %% [markdown]
#
# $$
# \log_{10}\ \text{HV}_{\text{diff}} = \log_{10}(\text{HV}_{\text{actual}} - \text{HV}_{\text{BO-obtained}})
# $$
#
# %% [markdown]
# First we need to calculate $\text{HV}_{\text{actual}}$ based on the actual Pareto front. For some multi-objective synthetic functions like VLMOP2, the actual Pareto front has a clear definition, so we can use `gen_pareto_optimal_points` to sample near-uniformly from the actual Pareto front, and then use these generated Pareto optimal points to (approximately) calculate the hypervolume of the actual Pareto frontier:
# %%
actual_pf = vlmop2.gen_pareto_optimal_points(100) # gen 100 pf points
ref_point = get_reference_point(data_observations)
idea_hv = Pareto(
tf.cast(actual_pf, dtype=data_observations.dtype)
).hypervolume_indicator(ref_point)
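# %% [markdown]
# For intuition, with two objectives the hypervolume indicator can be computed by sorting the front by the first objective and summing the rectangular slabs each point dominates up to the reference point. The helper below is an illustrative sketch, not the library's implementation; it should approximately match `idea_hv`:
# %%
def sketch_hypervolume_2d(front, ref):
    # front: [N, 2] mutually non-dominated points (minimisation); ref: [2].
    front = np.asarray(sorted(np.asarray(front).tolist()))
    ref = np.asarray(ref)
    right_edges = np.append(front[1:, 0], ref[0])  # right edge of each slab
    return float(np.sum((right_edges - front[:, 0]) * (ref[1] - front[:, 1])))
print(sketch_hypervolume_2d(actual_pf, ref_point), float(idea_hv))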
# %% [markdown]
# Then we define the metric function:
# %%
def log_hv(observations):
obs_hv = Pareto(observations).hypervolume_indicator(ref_point)
return math.log10(idea_hv - obs_hv)
# %% [markdown]
# Finally, we can plot the convergence of our performance metric over the course of the optimization.
# The blue vertical line in the figure denotes the time after which BO starts.
# %%
fig, ax = plot_mobo_history(
data_observations, log_hv, num_init=num_initial_points
)
ax.set_xlabel("Iterations")
ax.set_ylabel("log HV difference")
plt.show()
# %% [markdown]
# ## Batch multi-objective optimization
#
# EHVI can be extended to the case of batches (i.e. querying several points at a time) using the `Fantasizer`. `Fantasizer` works by greedily optimising a base acquisition function, then "fantasizing" the observations at the chosen query points and updating the predictive equations of the models as if the fantasized data had been added to the models. The only changes needed here are to wrap the `ExpectedHypervolumeImprovement` in a `Fantasizer` object, and to set the rule argument `num_query_points` to a value greater than one. Here, we choose 10 batches of size 3, so the observation budget is the same as before.
# %%
model = build_stacked_independent_objectives_model(
initial_data, num_objective, search_space
)
from trieste.acquisition.function import Fantasizer
batch_ehvi = Fantasizer(ExpectedHypervolumeImprovement())
batch_rule: EfficientGlobalOptimization = EfficientGlobalOptimization(
builder=batch_ehvi, num_query_points=3
)
num_steps = 10
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
batch_result = bo.optimize(
num_steps, initial_data, model, acquisition_rule=batch_rule
)
# %% [markdown]
# We can have a look at the results, as in the previous case. For this relatively simple problem, the greedy heuristic works quite well, and the performance is similar to the non-batch run.
# %%
dataset = batch_result.try_get_final_dataset()
batch_data_query_points = dataset.query_points
batch_data_observations = dataset.observations
_, ax = plot_function_2d(
vlmop2.objective,
mins,
maxs,
contour=True,
figsize=(12, 6),
title=["Obj 1", "Obj 2"],
xlabel="$X_1$",
ylabel="$X_2$",
colorbar=True,
)
plot_bo_points(
batch_data_query_points, ax=ax[0, 0], num_init=num_initial_points
)
plot_bo_points(
batch_data_query_points, ax=ax[0, 1], num_init=num_initial_points
)
plt.show()
plot_mobo_points_in_obj_space(
batch_data_observations, num_init=num_initial_points
)
plt.show()
fig, ax = plot_mobo_history(
batch_data_observations, log_hv, num_init=num_initial_points
)
ax.set_xlabel("Iterations")
ax.set_ylabel("log HV difference")
plt.show()
# %% [markdown]
# ## Multi-objective optimization with constraints
#
# EHVI can be adapted to the case of constraints, as we show below. We start by defining a problem with the same objectives as above, but with an inequality constraint, and we define the corresponding `Observer`.
# %%
class Sim:
threshold = 0.75
@staticmethod
def objective(input_data):
return vlmop2.objective(input_data)
@staticmethod
def constraint(input_data):
x, y = input_data[:, -2], input_data[:, -1]
z = tf.cos(x) * tf.cos(y) - tf.sin(x) * tf.sin(y)
return z[:, None]
OBJECTIVE = "OBJECTIVE"
CONSTRAINT = "CONSTRAINT"
def observer_cst(query_points):
return {
OBJECTIVE: Dataset(query_points, Sim.objective(query_points)),
CONSTRAINT: Dataset(query_points, Sim.constraint(query_points)),
}
num_initial_points = 10
initial_query_points = search_space.sample(num_initial_points)
initial_data_with_cst = observer_cst(initial_query_points)
# %% [markdown]
# As previously, we visualise the data both across the design space, where each figure contains the contour lines of one objective function, and in the objective space. `plot_mobo_points_in_obj_space` automatically searches for non-dominated points and colours them in purple; points that violate the constraint are shown in red.
# %%
from trieste.experimental.plotting import plot_2obj_cst_query_points
plot_2obj_cst_query_points(
search_space,
Sim,
initial_data_with_cst[OBJECTIVE].astuple(),
initial_data_with_cst[CONSTRAINT].astuple(),
)
plt.show()
mask_fail = (
initial_data_with_cst[CONSTRAINT].observations.numpy() > Sim.threshold
)
plot_mobo_points_in_obj_space(
initial_data_with_cst[OBJECTIVE].observations, mask_fail=mask_fail[:, 0]
)
plt.show()
# %% [markdown]
# We use the same model wrapper to build and stack the two GP models of the objective:
# %%
objective_model = build_stacked_independent_objectives_model(
initial_data_with_cst[OBJECTIVE], num_objective, search_space
)
# %% [markdown]
# We also create a single model of the constraint. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
gpflow_model = build_gpr(
initial_data_with_cst[CONSTRAINT], search_space, likelihood_variance=1e-7
)
constraint_model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# We store both sets of models in a dictionary:
# %%
models = {OBJECTIVE: objective_model, CONSTRAINT: constraint_model}
# %% [markdown]
# ## Acquisition function for multiple objectives and constraints
# We utilize the `ExpectedConstrainedHypervolumeImprovement` acquisition function, which is the product of EHVI (based on the feasible Pareto set) with the probability of feasibility:
# %%
from trieste.acquisition.function import (
ExpectedConstrainedHypervolumeImprovement,
)
pof = trieste.acquisition.ProbabilityOfFeasibility(threshold=Sim.threshold)
echvi = ExpectedConstrainedHypervolumeImprovement(
OBJECTIVE, pof.using(CONSTRAINT)
)
rule = EfficientGlobalOptimization(builder=echvi)
# %% [markdown]
# We can now run the optimization loop.
# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer_cst, search_space)
result = bo.optimize(
num_steps, initial_data_with_cst, models, acquisition_rule=rule
)
# %% [markdown]
# As previously, we visualize the queried data across the design space.
# We represent the initial points as crosses and the points obtained by our optimization loop as dots.
# %%
objective_dataset = result.final_result.unwrap().datasets[OBJECTIVE]
constraint_dataset = result.final_result.unwrap().datasets[CONSTRAINT]
data_query_points = objective_dataset.query_points
data_observations = objective_dataset.observations
plot_2obj_cst_query_points(
search_space,
Sim,
objective_dataset.astuple(),
constraint_dataset.astuple(),
)
plt.show()
# %% [markdown]
# Finally, we visualize them in the objective space. Purple dots denote the non-dominated points, and red ones the points that violate the constraint.
# %%
mask_fail = constraint_dataset.observations.numpy() > Sim.threshold
plot_mobo_points_in_obj_space(
data_observations, num_init=num_initial_points, mask_fail=mask_fail[:, 0]
)
plt.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 13,422 | 31.344578 | 623 | py |
trieste-develop | trieste-develop/docs/notebooks/data_transformation.pct.py | # -*- coding: utf-8 -*-
# %% [markdown]
# # Data transformation with the help of Ask-Tell interface.
# %%
import os
import gpflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from trieste.experimental.plotting import plot_regret
import trieste
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.data import Dataset
from trieste.models.gpflow import GaussianProcessRegression
from trieste.objectives import Trid10
from trieste.objectives.utils import mk_observer
from trieste.space import Box
np.random.seed(1794)
tf.random.set_seed(1794)
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
tf.get_logger().setLevel("ERROR")
# %% [markdown]
# ## Describe the problem
#
# In this notebook, we show how to perform data transformations during Bayesian optimization. These are often required by the models: a very common example is normalising the data before fitting the model, using either min-max or standard normalization. This is usually done for numerical stability, or to improve or speed up convergence.
#
# In regression problems it is easy to perform data transformations, as you do them once before training. In Bayesian optimization this is more complex, as data is added with each iteration and needs to be transformed as well before the model is updated. At the moment Trieste cannot do such transformations for the user. Luckily, this can easily be done by using the [Ask-Tell interface](ask_tell_optimization.ipynb), as it provides greater control of the optimization loop. The disadvantage is that it is up to the user to take care of all the data transformations.
#
# As an example, we will be searching for a minimum of a 10-dimensional [Trid function](https://www.sfu.ca/~ssurjano/trid.html). The Trid function values vary over a wide range, from around $10^5$ down to the global minimum $f(x^*) = -210$. This large variation range makes it difficult for Bayesian optimization with Gaussian processes to find the global minimum. However, with data normalisation it becomes possible (see <cite data-cite="hebbal2019bayesian">[Hebbal et al. 2019](https://arxiv.org/abs/1905.03350)</cite>).
# %%
function = Trid10.objective
F_MINIMUM = Trid10.minimum
search_space = Trid10.search_space
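# %% [markdown]
# As a quick, purely illustrative check of the scale mentioned above, we can evaluate the function at a few random points and compare with the global minimum:
# %%
print(function(search_space.sample(5)))  # typically of order 1e4 to 1e5
print(F_MINIMUM)  # -210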
# %% [markdown]
# ## Collect initial points
#
# We set up the observer as usual over the Trid function search space, using Sobol sampling to sample the initial points.
# %%
num_initial_points = 50
observer = mk_observer(function)
initial_query_points = search_space.sample_sobol(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Model the objective function
#
# The Bayesian optimization procedure estimates the next best points to query by using a probabilistic model of the objective. We'll use a Gaussian process (GP) model, built using GPflow. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model and pass it to the `GaussianProcessRegression` wrapper.
#
# Here, as the first example, we model the objective function using the original data, without performing any data transformation. In the next example we will model it using normalised data. We also put priors on the parameters of our GP model's kernel in order to stabilize model fitting. We found the priors below to be highly effective for objective functions defined over the unit hypercube and with an output normalised to have zero mean and unit variance. Since the non-normalised data from the original objective function comes with a different scaling, we rescale the priors based on the approximate standard deviations of the inputs and outputs.
# %%
def build_gp_model(data, x_std=1.0, y_std=0.1):
dim = data.query_points.shape[-1]
empirical_variance = tf.math.reduce_variance(data.observations)
prior_lengthscales = [0.2 * x_std * np.sqrt(dim)] * dim
prior_scale = tf.cast(1.0, dtype=tf.float64)
x_std = tf.cast(x_std, dtype=tf.float64)
y_std = tf.cast(y_std, dtype=tf.float64)
kernel = gpflow.kernels.Matern52(
variance=empirical_variance,
lengthscales=prior_lengthscales,
)
kernel.variance.prior = tfp.distributions.LogNormal(
tf.math.log(y_std), prior_scale
)
kernel.lengthscales.prior = tfp.distributions.LogNormal(
tf.math.log(kernel.lengthscales), prior_scale
)
gpr = gpflow.models.GPR(
data.astuple(),
kernel,
mean_function=gpflow.mean_functions.Constant(),
noise_variance=1e-5,
)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr)
model = build_gp_model(initial_data, 20, 10000)
# %% [markdown]
# ## Run the optimization loop
#
# We can now run the Bayesian optimization loop by defining a `BayesianOptimizer` and calling its `optimize` method.
#
# The optimizer uses an acquisition rule to choose where in the search space to try on each optimization step. We'll be using the expected improvement acquisition function; it is used by default, so there is no need to specify it (an explicit equivalent is sketched below).
#
# We'll run the optimizer for 100 steps. Note: this may take a while!
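# %% [markdown]
# As a purely illustrative aside, relying on the default is equivalent to constructing the rule explicitly and passing it to `optimize` via its `acquisition_rule` argument (we do not use this below):
# %%
from trieste.acquisition.function import ExpectedImprovement
from trieste.acquisition.rule import EfficientGlobalOptimization
explicit_rule = EfficientGlobalOptimization(ExpectedImprovement())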
# %%
num_steps = 100
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(num_steps, initial_data, model)
dataset = result.try_get_final_dataset()
# %% [markdown]
# ## Explore the results
#
# We can now get the best point found by the optimizer. Note this isn't necessarily the point that was last evaluated. We will also plot regret for each optimization step.
#
# We can see that the optimization did not get close to the global optimum of -210.
# %%
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
print(f"query point: {query_points[arg_min_idx, :]}")
print(f"observation: {observations[arg_min_idx, :]}")
# %% [markdown]
# We can plot regret for each optimization step to illustrate the performance more completely.
# %%
def plot_regret_with_min(dataset):
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
suboptimality = observations - F_MINIMUM.numpy()
ax = plt.gca()
plot_regret(
suboptimality, ax, num_init=num_initial_points, idx_best=arg_min_idx
)
ax.set_yscale("log")
ax.set_ylabel("Regret")
ax.set_ylim(0.001, 100000)
ax.set_xlabel("# evaluations")
plot_regret_with_min(dataset)
# %% [markdown]
# # Data transformation with the help of Ask-Tell interface
#
# We will now show how data normalization can improve results achieved by Bayesian optimization.
#
# We first write a simple function for standardising the data, that is, scaling it to have zero mean and unit variance. We also return the mean and standard deviation parameters, as we will use them to transform new points.
# %%
def normalise(x, mean=None, std=None):
if mean is None:
mean = tf.math.reduce_mean(x, 0, True)
if std is None:
std = tf.math.sqrt(tf.math.reduce_variance(x, 0, True))
return (x - mean) / std, mean, std
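# %% [markdown]
# The inverse transform, which we will need later to map normalised query points back to the original space, is simply `x * std + mean`. A quick round-trip check (purely illustrative):
# %%
x_check, x_check_mean, x_check_std = normalise(initial_data.query_points)
recovered = x_check * x_check_std + x_check_mean
print(tf.reduce_max(tf.abs(recovered - initial_data.query_points)))  # ~0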
# %% [markdown]
#
# Note that we also need to modify the search space, from the original $[-100, 100]$ for all 10 dimensions to the normalised space. For illustration, $[-1,1]$ will suffice here.
# %%
search_space = Box([-1], [1]) ** 10
# %% [markdown]
#
# Next we have to define our own Bayesian optimization loop, where the Ask-Tell optimizer performs the optimisation and we take care of data transformation and model fitting.
#
# We are using a simple approach whereby we normalize the initial data and use the mean and standard deviation estimated from that initial normalization to transform the new points that the Bayesian optimization loop adds to the dataset.
# %%
x_sta, x_mean, x_std = normalise(initial_data.query_points)
y_sta, y_mean, y_std = normalise(initial_data.observations)
normalised_data = Dataset(query_points=x_sta, observations=y_sta)
dataset = initial_data
for step in range(num_steps):
    if step == 0:
        model = build_gp_model(normalised_data)
    else:
        model.update(normalised_data)
    model.optimize(normalised_data)
# Asking for a new point to observe
ask_tell = AskTellOptimizer(search_space, normalised_data, model)
query_point = ask_tell.ask()
# Transforming the query point back to the non-normalised space
query_point = x_std * query_point + x_mean
# Evaluating the function at the new query point
new_data_point = observer(query_point)
dataset = dataset + new_data_point
# Normalize the dataset with the new query point and observation
x_sta, _, _ = normalise(dataset.query_points, x_mean, x_std)
y_sta, _, _ = normalise(dataset.observations, y_mean, y_std)
normalised_data = Dataset(query_points=x_sta, observations=y_sta)
# %% [markdown]
#
# We inspect again the best point found by the optimizer and plot regret for each optimization step.
#
# We can see that the optimization now gets almost to the global optimum of -210.
# %%
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
plot_regret_with_min(dataset)
print(f"query point: {query_points[arg_min_idx, :]}")
print(f"observation: {observations[arg_min_idx, :]}")
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 9,732 | 38.565041 | 642 | py |
trieste-develop | trieste-develop/docs/notebooks/active_learning.pct.py | # %% [markdown]
# # Active Learning
# %% [markdown]
# Sometimes, we may just want to learn a black-box function, rather than optimizing it. This goal is known as active learning and corresponds to choosing query points that reduce our model uncertainty. This notebook demonstrates how to perform Bayesian active learning using Trieste.
# %%
# %matplotlib inline
import numpy as np
import tensorflow as tf
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this example, we will perform active learning for the scaled Branin function.
# %%
from trieste.objectives import ScaledBranin
from trieste.experimental.plotting import plot_function_plotly
scaled_branin = ScaledBranin.objective
search_space = ScaledBranin.search_space
fig = plot_function_plotly(
scaled_branin,
search_space.lower,
search_space.upper,
)
fig.show()
# %% [markdown]
# We begin our Bayesian active learning from a small initial design built from a space-filling Halton sequence.
# %%
import trieste
observer = trieste.objectives.utils.mk_observer(scaled_branin)
num_initial_points = 4
initial_query_points = search_space.sample_halton(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Surrogate model
#
# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow `GPR` model using Trieste's convenient model-building function `build_gpr` and pass it to the `GaussianProcessRegression` wrapper. Note that we set the likelihood variance to a small number because we are dealing with a noise-free problem.
# %%
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# ## Active learning using predictive variance
#
# For our first active learning example, we will use a simple acquisition function known as `PredictiveVariance`, which chooses points for which we are highly uncertain (i.e. the predictive posterior covariance matrix at these points has a large determinant), as discussed in <cite data-cite="MacKay1992"/>. Note that this also implies that our model needs to have a `predict_joint` method to be able to return the full covariance, which is likely to be expensive to compute.
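#
# Concretely, the acquisition value for a (batch of) query point(s) $x_{1:B}$ can be sketched as
#
# $$ \alpha(x_{1:B}) = \det\left(\Sigma_{1:B}\right), $$
#
# where $\Sigma_{1:B}$ is the model's predictive covariance matrix at $x_{1:B}$.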
#
# We will now demonstrate how to choose individual query points using `PredictiveVariance` before moving on to batch active learning. For both cases, we can utilize Trieste's `BayesianOptimizer` to do the active learning steps.
#
# %%
from trieste.acquisition.function import PredictiveVariance
from trieste.acquisition.optimizer import generate_continuous_optimizer
from trieste.acquisition.rule import EfficientGlobalOptimization
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(builder=acq) # type: ignore
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
# %% [markdown]
# To plot the contour of the variance of our model at each step, we can set the `track_state` parameter to `True` in `bo.optimize()`; this will make Trieste record our model at each iteration.
# %%
bo_iter = 5
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# Then we can retrieve our final dataset from the active learning steps.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Finally, we can check the performance of our `PredictiveVariance` active learning acquisition function by plotting the predictive variance landscape of our model. We can see how it samples regions for which our model is highly uncertain.
# %%
from trieste.experimental.plotting import plot_bo_points, plot_function_2d
def plot_active_learning_query(
result, bo_iter, num_initial_points, query_points, num_query=1
):
for i in range(bo_iter):
def pred_var(x):
_, var = result.history[i].models["OBJECTIVE"].model.predict_f(x)
return var
_, ax = plot_function_2d(
pred_var,
search_space.lower - 0.01,
search_space.upper + 0.01,
contour=True,
colorbar=True,
figsize=(10, 6),
title=[
"Variance contour with queried points at iter:" + str(i + 1)
],
xlabel="$X_1$",
ylabel="$X_2$",
)
plot_bo_points(
query_points[: num_initial_points + (i * num_query)],
ax[0, 0],
num_initial_points,
)
plot_active_learning_query(result, bo_iter, num_initial_points, query_points)
# %% [markdown]
# ## Batch active learning using predictive variance
#
# In cases where we can evaluate the black-box function in parallel, it is useful to produce a batch of points rather than a single point. The `PredictiveVariance` acquisition function can also perform batch active learning; we must simply pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is that it tends to target high-variance areas less accurately than sequentially drawing one point at a time.
# %%
bo_iter = 5
num_query = 3
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
num_query_points=num_query,
builder=acq,
optimizer=generate_continuous_optimizer(num_optimization_runs=1),
)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(bo_iter, initial_data, model, rule, track_state=True)
# %% [markdown]
# After that, we can retrieve our final dataset.
# %%
dataset = result.try_get_final_dataset()
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
# %% [markdown]
# Now we can visualize the batch predictive variance using our plotting function.
# %%
plot_active_learning_query(
result, bo_iter, num_initial_points, query_points, num_query
)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 6,410 | 35.426136 | 473 | py |
trieste-develop | trieste-develop/docs/notebooks/ask_tell_optimization.pct.py | # %% [markdown]
# # Ask-Tell Optimization Interface
# %% [markdown]
# In this notebook we will illustrate the use of an Ask-Tell interface in Trieste. It is useful for cases where you want to have greater control of the optimization loop, or when letting Trieste manage this loop is impossible.
#
# First, some code to set up the problem we will be using throughout the notebook. If you would like more details about this problem setup, please refer to [introductory EI notebook](expected_improvement.ipynb).
# %%
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import gpflow
from trieste.ask_tell_optimization import AskTellOptimizer
from trieste.bayesian_optimizer import Record
from trieste.data import Dataset
from trieste.models.gpflow.models import GaussianProcessRegression
from trieste.objectives import ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.space import Box
from trieste.experimental.plotting import plot_regret
np.random.seed(1234)
tf.random.set_seed(1234)
search_space = Box([0, 0], [1, 1])
n_steps = 5
def build_model(data, kernel_func=None):
"""kernel_func should be a function that takes variance as a single input parameter"""
variance = tf.math.reduce_variance(data.observations)
if kernel_func is None:
kernel = gpflow.kernels.Matern52(variance=variance)
else:
kernel = kernel_func(variance)
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
return GaussianProcessRegression(gpr)
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
observer = mk_observer(ScaledBranin.objective)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Timing acquisition function: simple use case for Ask-Tell
#
# Let's say we are very concerned with the performance of the acquisition function, and want a simple way of measuring its performance over the course of the optimization. At the time of writing, Trieste's regular optimizer does not provide such customization functionality, and this is where Ask-Tell comes in handy.
# %%
import timeit
model = build_model(initial_data)
ask_tell = AskTellOptimizer(search_space, initial_data, model)
for step in range(n_steps):
start = timeit.default_timer()
new_point = ask_tell.ask()
stop = timeit.default_timer()
print(f"Time at step {step + 1}: {stop - start}")
new_data = observer(new_point)
ask_tell.tell(new_data)
# %% [markdown]
# Once ask-tell optimization is over, you can extract an optimization result object and perform whatever analysis you need, just like with regular Trieste optimization interface. For instance, here we will plot regret for each optimization step.
# %%
def plot_ask_tell_regret(ask_tell_result):
observations = ask_tell_result.try_get_final_dataset().observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
suboptimality = observations - ScaledBranin.minimum.numpy()
ax = plt.gca()
plot_regret(
suboptimality, ax, num_init=num_initial_points, idx_best=arg_min_idx
)
ax.set_yscale("log")
ax.set_ylabel("Regret")
ax.set_ylim(0.001, 100)
ax.set_xlabel("# evaluations")
plot_ask_tell_regret(ask_tell.to_result())
# %% [markdown]
# ## Model selection: using only Ask part
#
# We now turn to a slightly more complex use case. Let's suppose we want to dynamically switch between two models during the optimization loop depending on some criterion, e.g. because we want to be able to train a model outside of Trieste. In this case we can use only the Ask part of the Ask-Tell interface.
# %%
model1 = build_model(
initial_data, kernel_func=lambda v: gpflow.kernels.RBF(variance=v)
)
model2 = build_model(
initial_data, kernel_func=lambda v: gpflow.kernels.Matern32(variance=v)
)
dataset = initial_data
for step in range(n_steps):
# this criterion is meaningless
# but hopefully illustrates the idea!
if step % 2 == 0:
print("Using model 1")
model = model1
else:
print("Using model 2")
model = model2
print("Asking for new point to observe")
ask_tell = AskTellOptimizer(search_space, dataset, model)
new_point = ask_tell.ask()
new_data_point = observer(new_point)
dataset = dataset + new_data_point
print("Training models externally")
model1.update(dataset)
model1.optimize(dataset)
model2.update(dataset)
model2.optimize(dataset)
plot_ask_tell_regret(ask_tell.to_result())
# %% [markdown]
# ## External experiment: storing optimizer state
#
# Now let's suppose you are optimizing a process that takes hours or even days to complete, e.g. a lab experiment or a hyperparameter optimization of a big machine learning model. This time you cannot even express the objective function in Python code. Instead you would like to ask Trieste what configuration to run next, go to the lab, perform the experiment, collect data, feed it back to Trieste and ask for the next configuration, and so on. It would be very convenient to be able to store intermediate optimization state to disk or database or other storage, so that your machine can be switched off while you are waiting for observation results.
#
# In this section we'll show how you could do this with Ask-Tell in Trieste. Of course we cannot perform a real physical experiment within this notebook, so we will just mimic it by using pickle to write the optimization state and read it back.
# %%
import pickle
model = build_model(initial_data)
ask_tell = AskTellOptimizer(search_space, initial_data, model)
for step in range(n_steps):
print(f"Ask Trieste for configuration #{step}")
new_config = ask_tell.ask()
print("Saving Trieste state to re-use later")
state: Record[None] = ask_tell.to_record()
saved_state = pickle.dumps(state)
print(f"In the lab running the experiment #{step}.")
new_datapoint = ScaledBranin.objective(new_config)
print("Back from the lab")
print("Restore optimizer from the saved state")
loaded_state = pickle.loads(saved_state)
ask_tell = AskTellOptimizer.from_record(loaded_state, search_space)
ask_tell.tell(Dataset(new_config, new_datapoint))
plot_ask_tell_regret(ask_tell.to_result())
# %% [markdown]
# In some more complicated scenarios we may also wish to serialise the acquisition function, rather than creating a new one from the models and data, as it may contain stochastic internal data (for example with continuous Thompson sampling, which uses trajectory samplers). This is not an issue here (where we used the default `EfficientGlobalOptimization` rule and `ExpectedImprovement` function) but we can demonstrate it nevertheless:
# %%
from trieste.acquisition.rule import EfficientGlobalOptimization
# (recreate acquisition function and extract default rule)
ask_tell.ask()
rule: EfficientGlobalOptimization = ask_tell._acquisition_rule # type: ignore
# save acquisition function
acq_fn = rule.acquisition_function
saved_acq_fn = pickle.dumps(acq_fn)
# regenerate asktell with loaded acquisition function
loaded_acq_fn = pickle.loads(saved_acq_fn)
rule = EfficientGlobalOptimization(initial_acquisition_function=loaded_acq_fn)
ask_tell = AskTellOptimizer.from_record(loaded_state, search_space, rule)
# %% [markdown]
# A word of warning: these serialization techniques are not guaranteed to work smoothly with every TensorFlow-based model, so apply them to your own problems with caution.
| 7,545 | 38.507853 | 652 | py |
trieste-develop | trieste-develop/docs/notebooks/multifidelity_modelling.pct.py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.1
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Multifidelity Modelling with Autoregressive Model
#
# This tutorial demonstrates the usage of the `MultifidelityAutoregressive` model for fitting multifidelity data. This is an implementation of the AR1 model initially described in <cite data-cite="Kennedy2000"/>.
# %%
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## Describe the problem
#
# In this tutorial we will consider the scenario where we have a simulator that can be run at three fidelities, with the ability to get cheap but coarse results at the lowest fidelity, more expensive but more refined results at a middle fidelity and very accurate but very expensive results at the highest fidelity.
#
# We define the true functions for the fidelities as:
#
# $$ f_{i} : [0, 1] \rightarrow \mathbb{R} $$
#
# $$ f_0(x) = \frac{(6x - 2)^2 \sin(12x - 4)}{2} + 10(x-1), \quad x \in [0,1] $$
#
#
# $$ f_i(x) = f_{i-1}(x) + i(f_{i-1}(x) - 20(x - 1)), \quad x \in [0,1] , \quad i \in \mathbb{N} $$
#
# Note that noise is optionally added to the observations at all but the lowest fidelity. There are a few modelling assumptions:
# 1. The lowest fidelity is noise-free
# 2. The data is cascading, i.e. any point that has an observation at a high fidelity also has one at all the lower fidelities.
# %%
# Define the multifidelity simulator
def linear_simulator(x_input, fidelity, add_noise=False):
f = 0.5 * ((6.0 * x_input - 2.0) ** 2) * tf.math.sin(
12.0 * x_input - 4.0
) + 10.0 * (x_input - 1.0)
f = f + fidelity * (f - 20.0 * (x_input - 1.0))
if add_noise:
noise = tf.random.normal(f.shape, stddev=1e-1, dtype=f.dtype)
else:
noise = 0
f = tf.where(fidelity > 0, f + noise, f)
return f
# Plot the fidelities
x = np.linspace(0, 1, 400)
y0 = linear_simulator(x, 0)
y1 = linear_simulator(x, 1)
y2 = linear_simulator(x, 2)
plt.plot(y0, label="Fidelity 0")
plt.plot(y1, label="Fidelity 1")
plt.plot(y2, label="Fidelity 2")
plt.legend()
plt.show()
# %% [markdown]
# Trieste handles fidelities by adding an extra column to the data containing the fidelity information of the query point. The function `check_and_extract_fidelity_query_points` will check that the fidelity column is valid, and if so, will separate the query points and the fidelity information.
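# %% [markdown]
# For example (a purely illustrative check), a batch of two query points at $x=0.5$, one at fidelity 0 and one at fidelity 2, carries the fidelity in a final column:
# %%
from trieste.data import check_and_extract_fidelity_query_points
example_points = tf.constant([[0.5, 0.0], [0.5, 2.0]], dtype=tf.float64)
example_inputs, example_fidelities = check_and_extract_fidelity_query_points(
    example_points
)
print(example_inputs)  # -> [[0.5], [0.5]]
print(example_fidelities)  # -> [[0.], [2.]]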
# %%
from trieste.data import Dataset, check_and_extract_fidelity_query_points
# Create an observer class to deal with multifidelity input query points
class Observer:
def __init__(self, simulator):
self.simulator = simulator
def __call__(self, x, add_noise=True):
# Extract raw input and fidelity columns
x_input, x_fidelity = check_and_extract_fidelity_query_points(x)
        # note: this assumes that the simulator broadcasts, i.e. accepts matrix inputs.
        # If not, you need to replace this with a for loop over all rows of the input
observations = self.simulator(x_input, x_fidelity, add_noise)
return Dataset(query_points=x, observations=observations)
# Instantiate the observer
observer = Observer(linear_simulator)
# %% [markdown]
# Now we can define the other parameters of our problem, such as the input dimension, search space and number of fidelities.
# %%
from trieste.space import Box
input_dim = 1
n_fidelities = 3
lb = np.zeros(input_dim)
ub = np.ones(input_dim)
input_search_space = Box(lb, ub)
# %% [markdown]
# ## Create initial dataset
# %%
from trieste.data import add_fidelity_column
# Define sample sizes of low, mid and high fidelities
sample_sizes = [18, 12, 6]
xs = [tf.linspace(0, 1, sample_sizes[0])[:, None]]
# Take a subsample of each lower fidelity to sample at the next fidelity up
for fidelity in range(1, n_fidelities):
samples = tf.Variable(
np.random.choice(
xs[fidelity - 1][:, 0], size=sample_sizes[fidelity], replace=False
)
)[:, None]
xs.append(samples)
# Add fidelity columns to training data
initial_samples_list = [add_fidelity_column(x, i) for i, x in enumerate(xs)]
initial_sample = tf.concat(initial_samples_list, 0)
initial_data = observer(initial_sample, add_noise=True)
# %% [markdown]
# We can plot the initial data. We separate the dataset into individual fidelities using the `split_dataset_by_fidelity` function.
# %%
from trieste.data import split_dataset_by_fidelity
data = split_dataset_by_fidelity(initial_data, num_fidelities=n_fidelities)
plt.scatter(data[0].query_points, data[0].observations, label="Fidelity 0")
plt.scatter(data[1].query_points, data[1].observations, label="Fidelity 1")
plt.scatter(data[2].query_points, data[2].observations, label="Fidelity 2")
plt.legend()
plt.show()
# %% [markdown]
# ## Fit AR(1) model
# %% [markdown]
# Now we can fit the `MultifidelityAutoregressive` model to this data. We use `build_multifidelity_autoregressive_models` to create the sub-models required by the multifidelity model.
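# %% [markdown]
# As a brief sketch of the model structure (following <cite data-cite="Kennedy2000"/>), the AR(1) model assumes each fidelity is a scaled version of the fidelity below plus an independent GP discrepancy:
#
# $$ f_{t}(x) = \rho_{t-1} \, f_{t-1}(x) + \delta_{t}(x), $$
#
# where $\rho_{t-1}$ is a learned scaling parameter and $\delta_{t}$ is an independent Gaussian process.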
# %%
from trieste.models.gpflow import (
MultifidelityAutoregressive,
build_multifidelity_autoregressive_models,
)
# Initialise model
multifidelity_model = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
initial_data, n_fidelities, input_search_space
)
)
# Update and optimize model
multifidelity_model.update(initial_data)
multifidelity_model.optimize(initial_data)
# %% [markdown]
# ## Plot Results
#
# Now we can plot the results to have a look at the fit. The `MultifidelityAutoregressive.predict` method requires data with a fidelity column that specifies the fidelity at which each data point should be predicted. We use the `add_fidelity_column` function to add this.
# %%
X = tf.linspace(0, 1, 200)[:, None]
X_list = [add_fidelity_column(X, i) for i in range(n_fidelities)]
predictions = [multifidelity_model.predict(x) for x in X_list]
fig, ax = plt.subplots(1, 1, figsize=(10, 7))
pred_colors = ["tab:blue", "tab:orange", "tab:green"]
gt_colors = ["tab:red", "tab:purple", "tab:brown"]
for fidelity, prediction in enumerate(predictions):
mean, var = prediction
ax.plot(
X,
mean,
label=f"Predicted fidelity {fidelity}",
color=pred_colors[fidelity],
)
ax.plot(
X,
mean + 1.96 * tf.math.sqrt(var),
alpha=0.2,
color=pred_colors[fidelity],
)
ax.plot(
X,
mean - 1.96 * tf.math.sqrt(var),
alpha=0.2,
color=pred_colors[fidelity],
)
ax.plot(
X,
observer(X_list[fidelity], add_noise=False).observations,
label=f"True fidelity {fidelity}",
color=gt_colors[fidelity],
)
ax.scatter(
data[fidelity].query_points,
data[fidelity].observations,
label=f"fidelity {fidelity} data",
color=gt_colors[fidelity],
)
ax.legend(loc="center left", bbox_to_anchor=(1, 0.5))
plt.show()
# %% [markdown]
# ## Comparison with naive model fit on high fidelity
#
# We can compare with a model that was fit just on the high fidelity data, and see the gains from using the low fidelity data.
# %%
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
from trieste.data import add_fidelity_column
# Get high fidelity data
hf_data = data[2]
# Fit simple gpr model to high fidelity data
gpr_model = GaussianProcessRegression(build_gpr(hf_data, input_search_space))
gpr_model.update(hf_data)
gpr_model.optimize(hf_data)
X = tf.linspace(0, 1, 200)[:, None]
# Turn X into high fidelity query points for the multifidelity model
X_for_multifid = add_fidelity_column(X, 2)
gpr_predictions = gpr_model.predict(X)
multifidelity_predictions = multifidelity_model.predict(X_for_multifid)
fig, ax = plt.subplots(1, 1, figsize=(10, 7))
"tab:blue", "tab:orange", "tab:green"
# Plot gpr results
mean, var = gpr_predictions
ax.plot(X, mean, label=f"GPR", color="tab:blue")
ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue")
ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:blue")
# Plot multifidelity results
mean, var = multifidelity_predictions
ax.plot(X, mean, label="MultifidelityAutoregressive", color="tab:orange")
ax.plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange")
ax.plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2, color="tab:orange")
# Plot true function
ax.plot(
X,
observer(X_for_multifid, add_noise=False).observations,
label=f"True function",
color="tab:green",
)
# Scatter the data
ax.scatter(
    hf_data.query_points, hf_data.observations, label="Data", color="tab:green"
)
plt.legend()
plt.show()
# %% [markdown]
# It's clear that there is a large benefit to being able to make use of the low fidelity data, and this is particularly noticeable in the greatly reduced confidence intervals.
# %% [markdown]
# ## A more complex model for non-linear problems
# %% [markdown]
# A more complex multifidelity model (NARGP, here `MultifidelityNonlinearAutoregressive`), originally proposed in <cite data-cite="perdikaris2017nonlinear"/>, is also available, to tackle the case where the relation between fidelities is strongly non-linear.
#
# We start by defining a new multi-fidelity problem, with two fidelities, for $x \in [0,1]$:
#
# $$ f_0(x) = \sin(8 \pi x) $$
#
#
# $$ f_1(x) = (x - \sqrt{2}) \, f_0(x)^2 $$
#
# Contrary to the previous case, the high-fidelity level follows the square of the low-fidelity one. As the low fidelity values oscillate between positive and negative, this makes inferring the relationship particularly difficult for the AR(1) model, as we see below.
#
# As previously, we create an observer, and some initial data.
# %%
def nonlinear_simulator(x_input, fidelity, add_noise):
bad_fidelities = tf.math.logical_and(fidelity != 0, fidelity != 1)
if tf.math.count_nonzero(bad_fidelities) > 0:
raise ValueError(
"Nonlinear simulator only supports 2 fidelities (0 and 1)"
)
else:
f = tf.math.sin(8 * np.pi * x_input)
fh = (
x_input - tf.sqrt(tf.Variable(2.0, dtype=tf.float64))
) * tf.square(f)
f = tf.where(fidelity > 0, fh, f)
if add_noise:
f += tf.random.normal(f.shape, stddev=1e-2, dtype=f.dtype)
return f
observation_noise = True
observer = Observer(nonlinear_simulator)
n_fidelities = 2
sample_sizes = [50, 14]
xs = [tf.linspace(0, 1, sample_sizes[0])[:, None]]
xh = tf.Variable(
np.random.choice(xs[0][:, 0], size=sample_sizes[1], replace=False)
)[:, None]
xs.append(xh)
initial_samples_list = [
tf.concat([x, tf.ones_like(x) * i], 1) for i, x in enumerate(xs)
]
initial_sample = tf.concat(initial_samples_list, 0)
initial_data = observer(initial_sample, add_noise=observation_noise)
# %% [markdown]
# We create an AR(1) model as before, and use `build_multifidelity_nonlinear_autoregressive_models` to create the NARGP. We then train both models on the same data.
# %%
from trieste.models.gpflow import (
MultifidelityNonlinearAutoregressive,
build_multifidelity_nonlinear_autoregressive_models,
)
ar1 = MultifidelityAutoregressive(
build_multifidelity_autoregressive_models(
initial_data, n_fidelities, input_search_space
)
)
nargp = MultifidelityNonlinearAutoregressive(
build_multifidelity_nonlinear_autoregressive_models(
initial_data, n_fidelities, input_search_space
)
)
ar1.update(initial_data)
ar1.optimize(initial_data)
nargp.update(initial_data)
nargp.optimize(initial_data)
# %% [markdown]
# Now we can plot the two model predictions.
# %%
data = split_dataset_by_fidelity(initial_data, n_fidelities)
X = tf.linspace(0, 1, 200)[:, None]
X_list = [tf.concat([X, tf.ones_like(X) * i], 1) for i in range(n_fidelities)]
predictions_ar1 = [ar1.predict(x) for x in X_list]
predictions_nargp = [nargp.predict(x) for x in X_list]
fig, ax = plt.subplots(2, 1, figsize=(10, 7))
for ax_id, model_predictions in enumerate([predictions_ar1, predictions_nargp]):
for fidelity, prediction in enumerate(model_predictions):
mean, var = prediction
ax[ax_id].plot(X, mean, label=f"Predicted fidelity {fidelity}")
ax[ax_id].plot(X, mean + 1.96 * tf.math.sqrt(var), alpha=0.2)
ax[ax_id].plot(X, mean - 1.96 * tf.math.sqrt(var), alpha=0.2)
ax[ax_id].plot(
X,
observer(X_list[fidelity], add_noise=False).observations,
label=f"True fidelity {fidelity}",
)
ax[ax_id].scatter(
data[fidelity].query_points,
data[fidelity].observations,
label=f"fidelity {fidelity} data",
)
ax[ax_id].title.set_text(
"MultifidelityAutoregressive" if ax_id == 0 else "NARGP"
)
ax[ax_id].legend(loc="center left", bbox_to_anchor=(1, 0.5))
fig.suptitle("Non-Linear Problem")
plt.show()
# %% [markdown]
# The AR(1) model is incapable of using the lower fidelity data, and its prediction for the high fidelity level simply returns to the prior where there is no high-fidelity data. In contrast, the NARGP model clearly captures the non-linear relationship and is able to accurately predict the high-fidelity level everywhere.
| 13,491 | 31.747573 | 321 | py |
trieste-develop | trieste-develop/docs/notebooks/qhsri-tutorial.pct.py | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.14.1
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %% [markdown]
# # Batch HSRI Tutorial
# %% [markdown]
# Batch Hypervolume Sharpe Ratio Indicator (qHSRI) is a method proposed by Binois et al. (see <cite data-cite="binois2021portfolio"/>) for picking a batch of query points during Bayesian optimisation. It makes use of the Sharpe ratio, a portfolio selection criterion from finance that carefully balances risk and reward.
#
# This tutorial will first cover the main points of how the `Trieste` implementation of qHSRI works under the hood, and then demonstrate how to use the `trieste.acquisition.rule.BatchHypervolumeSharpeRatioIndicator` acquisition rule.
#
# Some of the dependencies for `BatchHypervolumeRatioIndicator` are not included in `Trieste` by default, and instead can be installed via `pip install trieste[qhsri]`.
#
# First we will set up our problem and get our initial datapoints. For this walkthrough we will use the noiseless scaled Branin objective.
# %%
import tensorflow as tf
import matplotlib.pyplot as plt
from trieste.objectives import ScaledBranin
from trieste.objectives.utils import mk_observer
from trieste.space import Box
tf.random.set_seed(1)
# Create the observer
observer = mk_observer(ScaledBranin.objective)
# Define Search space
search_space = Box([0, 0], [1, 1])
# Set initial number of query points
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %% [markdown]
# We can now fit a GP to our initial data.
# %%
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
# Set up model
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-7)
model = GaussianProcessRegression(gpflow_model)
# %% [markdown]
# Now consider how we might want to select a batch of $q$ query points to observe. It would be useful for some of these to be "safe bets" that we think are very likely to provide good values (i.e. low predicted mean). It would also be valuable for some of these to sample parts of the space where we have no idea what the observed value will be (i.e. high variance). This problem is very similar to that encountered in building financial portfolios, where you want a mix of high risk/high reward and low risk/low reward assets. You would also want to know how much of your total capital to invest in each asset.
#
# To visualise the different trade-offs, we can sample from the input space, compute the predictive mean and variance at those locations, and plot the mean against minus the standard deviation (so that both quantities need to be minimised).
# %%
uniform_points = search_space.sample(1000)
uniform_pts_mean, uniform_pts_var = model.predict(uniform_points)
uniform_pts_std = -tf.sqrt(uniform_pts_var)
plt.scatter(uniform_points[:, 0], uniform_points[:, 1])
plt.title("Uniformly sampled points")
plt.show()
plt.close()
plt.scatter(uniform_pts_mean, uniform_pts_std)
plt.title("Mean vs negative std of uniformly sampled points")
plt.xlabel("Mean")
plt.ylabel("Negative std")
plt.show()
# %% [markdown]
# Since we only want the best points in terms of the risk-reward tradeoff, we can remove all the points that are not optimal in the Pareto sense, i.e. the points that are dominated by another point. A point `a` is dominated by another point `b` if `b.mean` <= `a.mean` and `b.std` >= `a.std`.
#
# There is a function in trieste that lets us calculate this non-dominated set. Let's find the non-dominated points and plot them on the above chart.
# %%
from trieste.acquisition.multi_objective.dominance import non_dominated
uniform_non_dominated = non_dominated(
tf.concat([uniform_pts_mean, uniform_pts_std], axis=1)
)[0]
plt.scatter(uniform_pts_mean, uniform_pts_std)
plt.scatter(uniform_non_dominated[:, 0], uniform_non_dominated[:, 1], c="r")
plt.title("Mean vs negative std of uniformly sampled points")
plt.xlabel("Mean")
plt.ylabel("Negative std")
plt.show()
print(f"There are {len(uniform_non_dominated)} non-dominated points")
# %% [markdown]
# We can see that there are only a few non-dominated points to choose from when selecting the next batch. This set of non-dominated points is the Pareto front in the optimisation task of minimising the mean and maximising the standard deviation.
#
# This means we can do better by using optimisation rather than random sampling to generate the points from which we will select our batch.
#
# The `BatchHypervolumeSharpeRatioIndicator` acquisition rule makes use of the `_MeanStdTradeoff` class, which expresses the optimisation problem in the pymoo framework. Pymoo is then used to run the optimisation.
#
# NSGA-II is a genetic algorithm, and so we need to define a population size and number of generations.
# %%
import numpy as np
from trieste.acquisition.rule import _MeanStdTradeoff
from pymoo.algorithms.moo.nsga2 import NSGA2
from pymoo.optimize import minimize
problem = _MeanStdTradeoff(model, search_space)
algorithm = NSGA2(pop_size=500)
result = minimize(problem, algorithm, ("n_gen", 200), seed=1, verbose=False)
optimised_points, optimised_mean_std = result.X, result.F
# %% [markdown]
# Now we can plot the points that have been optimised for mean and standard deviation, and their means and stds.
# %%
plt.scatter(result.X[:, 0], result.X[:, 1])
plt.title("Points optimised for mean and std")
plt.show()
plt.scatter(result.F[:, 0], result.F[:, 1])
plt.title("Mean vs std of optimised points")
plt.show()
# %% [markdown]
# We can check the non-dominated points again, and see that the outcome of NSGA-II is much better than the randomly sampled ones.
# %%
optimised_non_dominated = non_dominated(result.F)[0]
plt.scatter(result.F[:, 0], result.F[:, 1])
plt.scatter(optimised_non_dominated[:, 0], optimised_non_dominated[:, 1], c="r")
plt.show()
print(f"There are {len(optimised_non_dominated)} non-dominated points")
# %% [markdown]
#
# The Sharpe ratio is used to get an optimal mix of low mean and high standard deviation points from this Pareto front, so that these can then be observed.
#
# A portfolio with the maximum Sharpe ratio is defined as:
#
# $$ \max_{x \in [0,1]^n} h(x) = {{r^{T}x-r_{f}} \over {\sqrt{x^{T}Qx}}} \;\;\; s.t \sum^{n}_{i=1}x_i = 1 $$
#
# where $x_i$ are the weights for each asset $i$, $r_i$ is the expected return of asset $i$ and $Q_{i,j}$ is the covariance of assets $i$ and $j$. $r_f$ is the return of a risk-free asset, which we assume does not exist in this case. Note that weighting assets with high expected rewards will increase the Sharpe ratio, as will weighting assets with low covariance.
#
# This problem can be converted into a quadratic programming problem and solved to give a diverse sample from the Pareto front.
#
# The `trieste.acquisition.multi_objective.Pareto` class has a `sample_diverse_subset` method that implements this.
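# %% [markdown]
# To make the formula concrete, here is a toy two-asset example (purely illustrative, with no risk-free asset, i.e. $r_f = 0$):
# %%
toy_returns = np.array([1.0, 0.5])  # expected returns r
toy_cov = np.array([[1.0, 0.2], [0.2, 0.5]])  # asset covariance Q
toy_weights = np.array([0.3, 0.7])  # portfolio weights x, summing to one
print(
    (toy_returns @ toy_weights)
    / np.sqrt(toy_weights @ toy_cov @ toy_weights)
)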
# %%
from trieste.acquisition.multi_objective import Pareto
# Since we've already ensured the set is non-dominated we don't need to repeat this
front = Pareto(optimised_non_dominated, already_non_dominated=True)
sampled_points, _ = front.sample_diverse_subset(
sample_size=5, allow_repeats=False
)
# %% [markdown]
# Now we can see which points we selected from the Pareto front.
# %%
plt.scatter(
optimised_non_dominated[:, 0],
optimised_non_dominated[:, 1],
label="Pareto front",
)
plt.scatter(sampled_points[:, 0], sampled_points[:, 1], label="selected points")
plt.legend()
plt.title("Pareto front and selected points for observation")
plt.show()
# %% [markdown]
# These points can then be observed and the model updated. This acquisition method has been implemented in `trieste` as an acquisition rule, `trieste.acquisition.rule.BatchHypervolumeSharpeRatioIndicator`.
# %% [markdown]
# ## Using the Acquisition rule for Bayesian Optimisation
#
# The `BatchHypervolumeSharpeRatioIndicator` can be used in the same way as other batch acquisition rules. We set up the problem as before, and then run `optimize` with the `BatchHypervolumeSharpeRatioIndicator` rule.
# %%
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.acquisition.rule import BatchHypervolumeSharpeRatioIndicator
# Create observer
observer = mk_observer(ScaledBranin.objective)
# Define Search space
search_space = Box([0, 0], [1, 1])
# Set initial number of query points
num_initial_points = 5
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
from trieste.models.gpflow import GaussianProcessRegression, build_gpr
# Set up model
gpflow_model = build_gpr(initial_data, search_space, likelihood_variance=1e-3)
model = GaussianProcessRegression(gpflow_model)
bo = BayesianOptimizer(observer=observer, search_space=search_space)
results = bo.optimize(
acquisition_rule=BatchHypervolumeSharpeRatioIndicator(num_query_points=10),
num_steps=8,
datasets=initial_data,
models=model,
)
# %% [markdown]
# We can now plot the regret of the observations, and see that the regret has decreased from the initial sample.
# %%
from trieste.experimental.plotting import plot_regret
observations = (
results.try_get_final_dataset().observations - ScaledBranin.minimum
)
min_idx = tf.squeeze(tf.argmin(observations, axis=0))
min_regret = tf.reduce_min(observations)
fig, ax = plt.subplots(1, 1)
plot_regret(observations.numpy(), ax, num_init=5, idx_best=min_idx)
ax.set_yscale("log")
ax.set_ylabel("Regret")
ax.set_ylim(0.00001, 100)
ax.set_xlabel("# evaluations")
ax.set_title("Qhsri")
plt.show()
| 9,874 | 40.145833 | 616 | py |
trieste-develop | trieste-develop/docs/notebooks/visualizing_with_tensorboard.pct.py | # %% [markdown]
# # Tracking and visualizing optimizations using Tensorboard
# %%
import numpy as np
import tensorflow as tf
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# We often wish to track or visualize the Bayesian optimization process, either during or following execution. This tutorial shows how to do this using the [TensorBoard](https://www.tensorflow.org/tensorboard) visualization toolkit. By default we keep track of the evolution of the found minimizer and report timings for key parts of each optimization step.
# %% [markdown]
# ## Set up the problem
#
# For this tutorial, we'll use the same set up as before.
# %%
import trieste
search_space = trieste.space.Box([0, 0], [1, 1])
observer = trieste.objectives.utils.mk_observer(
trieste.objectives.ScaledBranin.objective
)
initial_query_points = search_space.sample_sobol(5)
initial_data = observer(initial_query_points)
gpr = trieste.models.gpflow.build_gpr(initial_data, search_space)
model = trieste.models.gpflow.GaussianProcessRegression(gpr)
# %% [markdown]
# ## Setting up TensorBoard
#
# Before running the optimization loop, we decide where TensorBoard summary logs should be stored, and set a summary writer to do this.
# %%
# Clear any logs from previous runs
# !rm -rf logs/tensorboard
summary_writer = tf.summary.create_file_writer("logs/tensorboard/experiment1")
trieste.logging.set_tensorboard_writer(summary_writer)
# %% [markdown]
# We can now also load the TensorBoard extension, though at this point there will not be any data to display. To run this notebook locally, remove the %%script line.
# %% magic_args="echo Loading TensorBoard..." language="script"
# %load_ext tensorboard
# %tensorboard --logdir "logs/tensorboard"
# %% [markdown]
# 
# %% [markdown]
# ## Running and tracking the Bayesian Optimizer
#
# By setting the summary writer, we tell Trieste to log relevant information during optimization. While the optimization is running, we can refresh TensorBoard to see its progress.
# %%
num_steps = 15
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result, history = bo.optimize(num_steps, initial_data, model).astuple()
# %% [markdown]
# The data remains available when the optimization is over.
#
# 
#
# In addition to these scalar plots, there may also be histograms, textual metadata and image plots.
# %% [markdown]
# ## Deciding what to log
#
# When monitoring is enabled, Trieste decides what information is interesting enough to log. This includes objective and acquisition function values, query points, and some model parameters (note that model logs start at step 0 if models are fit to the initial data, i.e. `fit_initial_model=True`). Some data summaries, such as query point pairplots, are not generated by default as they can affect running time. You can control which summaries to generate and display by overriding the default summary filter. By default this shows any summary apart from those whose name starts with an underscore character.
# %%
trieste.logging.set_summary_filter(lambda name: True) # enable all summaries
# %% [markdown]
# Furthermore, some summaries such as pairplots also require special plotting tools, which can be installed by running `pip install trieste[plotting]`. A warning message will be printed if you try to generate a summary without the necessary tools.
#
# 
# %% [markdown]
# ## Logging additional model parameters
#
# Where trieste's monitoring is insufficient, you can also add your own logs. To log additional model parameters, you can define your own model subclass and override the `log` method. You can use the various `tf.summary` method wrappers in `trieste.logging` to ensure that your logs are filtered correctly. For example, the following GPR subclass also logs the average lengthscale at each step. Note that the `dataset` argument is required but here unused; one could use it to log additional data-based model summary statistics.
# %%
class GPRExtraLogging(trieste.models.gpflow.GaussianProcessRegression):
def log(self, dataset):
super().log(dataset)
summary_writer = trieste.logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(
step=trieste.logging.get_step_number()
):
trieste.logging.scalar(
"kernel.lengthscales.mean",
np.mean(self.get_kernel().lengthscales),
)
model = GPRExtraLogging(gpr)
# %% [markdown]
# Running with this model now also produces logs for the mean lengthscale. We mark this optimization run as a separate experiment by creating a new summary writer. Tensorboard will conveniently offer an automatic comparison of the experiments.
# %%
summary_writer = tf.summary.create_file_writer("logs/tensorboard/experiment2")
trieste.logging.set_tensorboard_writer(summary_writer)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result, history = bo.optimize(num_steps, initial_data, model).astuple()
# %% [markdown]
# 
# %% [markdown]
# ## Logging additional acquisition rule metrics
#
#
# Similarly, it is possible to log additional metrics connected to the acquisition rule by overriding the rule's `acquire` method (or any other method used while evaluating the rule). For example, the following class also logs the mean coordinates of the selected points:
# %%
class EGOExtraLogging(trieste.acquisition.rule.EfficientGlobalOptimization):
def acquire(self, search_space, models, datasets=None):
points = super().acquire(search_space, models, datasets)
summary_writer = trieste.logging.get_tensorboard_writer()
if summary_writer:
with summary_writer.as_default(
step=trieste.logging.get_step_number()
):
trieste.logging.scalar(
"EGO.points_selected.mean", tf.math.reduce_mean(points)
)
return points
summary_writer = tf.summary.create_file_writer("logs/tensorboard/experiment3")
trieste.logging.set_tensorboard_writer(summary_writer)
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result, history = bo.optimize( # type: ignore
num_steps, initial_data, model, acquisition_rule=EGOExtraLogging()
).astuple()
# %% [markdown]
# 
# %% [markdown]
# ## Using Tensorboard with Ask-Tell Optimization
#
# To use Tensorboard logging with the [Ask-Tell interface](ask_tell_optimization.ipynb), you must also explicitly set the optimization step number before calling ask or tell methods. If fitting the model on initialization of the Ask-Tell interface (default), then you must also set the optimization step number before initializing the interface to log initial model fitting as a different step.
# %%
summary_writer = tf.summary.create_file_writer("logs/tensorboard/experiment4")
trieste.logging.set_tensorboard_writer(summary_writer)
trieste.logging.set_step_number(0)
ask_tell = trieste.ask_tell_optimization.AskTellOptimizer(
search_space, initial_data, model
)
for step in range(1, num_steps + 1):
trieste.logging.set_step_number(step)
new_point = ask_tell.ask()
new_data = observer(new_point)
ask_tell.tell(new_data)
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 7,672 | 41.865922 | 609 | py |
trieste-develop | trieste-develop/docs/notebooks/deep_ensembles.pct.py | # %% [markdown]
# # Bayesian optimization with deep ensembles
#
# Gaussian processes as surrogate models are hard to beat on smaller datasets and optimization budgets. However, they scale poorly with the amount of data, cannot easily capture non-stationarities, and are rather slow at prediction time. Here we show how uncertainty-aware neural networks can be an effective alternative to Gaussian processes in Bayesian optimisation, in particular for large budgets, non-stationary objective functions, or when predictions need to be made quickly.
#
# Check out our tutorial on [Deep Gaussian Processes for Bayesian optimization](deep_gaussian_processes.ipynb) as another alternative model type supported by Trieste that can model non-stationary functions (but also deal well with small datasets).
#
# Let's start by importing some essential packages and modules.
# %%
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import numpy as np
import tensorflow as tf
import trieste
# silence TF warnings and info messages, only print errors
# https://stackoverflow.com/questions/35911252/disable-tensorflow-debugging-information
tf.get_logger().setLevel("ERROR")
np.random.seed(1794)
tf.random.set_seed(1794)
# %% [markdown]
# ## Deep ensembles
#
# Deep neural networks typically output only mean predictions, not posterior distributions as probabilistic models such as Gaussian processes do. Posterior distributions encode not only mean predictions but also *epistemic* uncertainty: the type of uncertainty that stems from model misspecification and that can be eliminated with further data. Aleatoric uncertainty, which stems from the stochasticity of the data generating process, is not contained in the posterior but can be learned from the data. Bayesian optimization requires probabilistic models because epistemic uncertainty plays a key role in balancing exploration and exploitation.
#
# Recently, however, there has been some development of uncertainty-aware deep neural networks. Ensembles of deep neural networks, introduced by <cite data-cite="lakshminarayanan2016simple"/>, are one such type of network. The main ingredients are probabilistic feed-forward networks as ensemble members, whose final layer parameterizes a Gaussian distribution, training with maximum likelihood instead of the typical root mean square error, and different random initializations of the weights to generate diversity among the networks.
#
# Monte Carlo dropout (<cite data-cite="gal2016dropout"/>), Bayes-by-backprop (<cite data-cite="blundell2015weight"/>) and evidential deep regression (<cite data-cite="amini2019deep"/>) are some of the other types of uncertainty-aware deep neural networks. Systematic comparisons, however, show that deep ensembles represent uncertainty best, and they are probably the simplest of the major alternatives (see, for example, <cite data-cite="osband2021epistemic"/>). Good estimates of uncertainty make deep ensembles a potentially attractive model for Bayesian optimization.
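# %% [markdown]
# To make the main ingredients of deep ensembles concrete, here is a minimal, illustrative sketch of our own (not Trieste's implementation) of the loss used to train each ensemble member: the network outputs a mean and a variance for each input and is trained by minimizing the Gaussian negative log likelihood rather than the usual squared error.
# %%
def gaussian_nll(y_true, mean, variance):
    # per-point Gaussian negative log likelihood, averaged over the batch
    return tf.reduce_mean(
        0.5 * tf.math.log(2.0 * np.pi * variance)
        + 0.5 * tf.square(y_true - mean) / variance
    )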
# %% [markdown]
# ### How good is uncertainty representation of deep ensembles?
#
# We will use a simple one-dimensional toy problem introduced by <cite data-cite="hernandez2015probabilistic"/>, which was used in <cite data-cite="lakshminarayanan2016simple"/> to provide some illustrative evidence that deep ensembles do a good job of estimating uncertainty. We will replicate this exercise here.
#
# The toy problem is a simple cubic function with some normally distributed noise around it. We will sample 20 input points from the [-4,4] interval and use them as training data later on.
# %%
from trieste.space import Box
from trieste.data import Dataset
def objective(x, error=True):
y = tf.pow(x, 3)
if error:
y += tf.random.normal(x.shape, 0, 3, dtype=x.dtype)
return y
num_points = 20
# we define the [-4,4] interval using a `Box` search space that has convenient sampling methods
search_space = Box([-4], [4])
inputs = search_space.sample_sobol(num_points)
outputs = objective(inputs)
data = Dataset(inputs, outputs)
# %% [markdown]
# Next we define a deep ensemble model and train it. Trieste supports neural network models defined as TensorFlow's Keras models. Since creating ensemble models in Keras can be somewhat involved, Trieste provides some basic architectures. Here we use the `build_keras_ensemble` function, which builds a simple ensemble of neural networks in Keras where each network has the same architecture: the same number of hidden layers, nodes in the hidden layers, and activation function. It uses sensible defaults for many parameters and returns a model of the `KerasEnsemble` class.
#
# As with other supported types of models (e.g. Gaussian process models from GPflow), we cannot use `KerasEnsemble` directly in Bayesian optimization routines; we need to pass it through an appropriate wrapper, in this case the `DeepEnsemble` wrapper. One difference with respect to other model types is that we need to use a Keras-specific optimizer wrapper, `KerasOptimizer`, where we specify a stochastic optimizer (Adam is used by default, but other stochastic optimizers from TensorFlow can be used), the objective function (here the negative log likelihood), and optionally custom arguments for the Keras `fit` method (here we modify the default arguments; check the [Keras API documentation](https://keras.io/api/models/model_training_apis/#fit-method) for a list of possible arguments).
#
# For the cubic function toy problem we use the same architecture as in <cite data-cite="lakshminarayanan2016simple"/>: an ensemble of 5 networks, where each network has one hidden layer with 100 nodes. All other implementation details, including those concerning training of the networks, were not reported, so we made sensible choices for them.
# %%
from trieste.models.keras import (
DeepEnsemble,
KerasPredictor,
build_keras_ensemble,
)
from trieste.models.optimizer import KerasOptimizer
def build_cubic_model(data: Dataset) -> DeepEnsemble:
ensemble_size = 5
num_hidden_layers = 1
num_nodes = 100
keras_ensemble = build_keras_ensemble(
data, ensemble_size, num_hidden_layers, num_nodes
)
fit_args = {
"batch_size": 10,
"epochs": 1000,
"verbose": 0,
}
optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.01), fit_args)
return DeepEnsemble(keras_ensemble, optimizer)
# building and optimizing the model
model = build_cubic_model(data)
model.optimize(data)
# %% [markdown]
# Let's illustrate the results of the model training. We create a test set that includes points outside the interval on which the model has been trained. These extrapolation points are a good test of the model's representation of uncertainty. What would we expect to see? Bayesian inference provides a reference frame: predictive uncertainty should increase the farther we are from the training data, and the predictive mean should revert to the prior mean (assuming a standard zero mean function).
#
# We can see in the figure below that the predictive distribution of deep ensembles indeed exhibits these features. The figure also replicates fairly well Figure 1 (rightmost panel) from <cite data-cite="lakshminarayanan2016simple"/> and provides a reasonable match to a Bayesian neural network trained on the same toy problem with Hamiltonian Monte Carlo (the gold standard, which is usually very expensive), as illustrated in Figure 1 (upper right panel) of <cite data-cite="hernandez2015probabilistic"/>. This gives us some assurance that deep ensembles might provide uncertainty that is good enough for trading off between exploration and exploitation in Bayesian optimization.
# %%
import matplotlib.pyplot as plt
# test data that includes extrapolation points
test_points = tf.linspace(-6, 6, 1000)
# generating a plot with ground truth function, mean prediction and 3 standard
# deviations around it
plt.scatter(inputs, outputs, marker=".", alpha=0.6, color="red", label="data")
plt.plot(
test_points, objective(test_points, False), color="blue", label="function"
)
y_hat, y_var = model.predict(test_points)
y_hat_minus_3sd = y_hat - 3 * tf.math.sqrt(y_var)
y_hat_plus_3sd = y_hat + 3 * tf.math.sqrt(y_var)
plt.plot(test_points, y_hat, color="gray", label="model $\mu$")
plt.fill_between(
test_points,
tf.squeeze(y_hat_minus_3sd),
tf.squeeze(y_hat_plus_3sd),
color="gray",
alpha=0.5,
label="$\mu -/+ 3SD$",
)
plt.ylim([-100, 100])
plt.show()
# %% [markdown]
# ## Non-stationary toy problem
#
# Now we turn to a somewhat more serious synthetic optimization problem. We want to find the minimum of the two-dimensional version of the [Michalewicz function](https://www.sfu.ca/~ssurjano/michal.html). Even though we stated that deep ensembles should be used with larger budgets, here we will show them on a small dataset, to keep the problem feasible within the scope of this tutorial.
# The Michalewicz function is defined on the search space $[0, \pi]^2$. Below we plot the function over this space. The Michalewicz function is an interesting case for deep ensembles as it features sharp ridges that are difficult to capture with Gaussian processes: lengthscale parameters in typical kernels cannot easily capture both the ridges (requiring smaller lengthscales) and the fairly flat areas everywhere else (requiring larger lengthscales).
# %%
from trieste.objectives import Michalewicz2
from trieste.experimental.plotting import plot_function_plotly
search_space = Michalewicz2.search_space
function = Michalewicz2.objective
MINIMUM = Michalewicz2.minimum
MINIMIZER = Michalewicz2.minimizers
# we illustrate the 2-dimensional Michalewicz function
fig = plot_function_plotly(
function, search_space.lower, search_space.upper, grid_density=20
)
fig.show()
# %% [markdown]
# ## Initial design
#
# We set up the observer as usual, sampling the initial points at random from the search space.
# %%
from trieste.objectives.utils import mk_observer
num_initial_points = 20
initial_query_points = search_space.sample(num_initial_points)
observer = mk_observer(function)
initial_data = observer(initial_query_points)
# %% [markdown]
# ## Modelling the objective function
#
# The Bayesian optimization procedure estimates the next best points to query by using a probabilistic model of the objective. Here we use a deep ensemble instead of a typical probabilistic model. As above, we use the `build_keras_ensemble` function to build a simple ensemble of neural networks in Keras and wrap it with a `DeepEnsemble` wrapper so it can be used in Trieste's Bayesian optimization loop.
#
# Some notes on choosing the model architecture are necessary. Unfortunately, choosing an architecture that works well for small datasets, a common setting in Bayesian optimization, is not easy. Here we demonstrate that it can work with smaller datasets, but choosing the architecture and model optimization parameters was a lengthy process that does not necessarily generalize to other problems. Hence, we advise using deep ensembles with larger datasets, and ideally with large batches, so that the model is not retrained after adding a single point.
#
# We can offer some practical advice, however. Architecture parameters like the ensemble size, the number of hidden layers, the number of nodes in the layers and so on affect the capacity of the model. If the model is too large for the amount of data, it will be difficult to train and the result will be a poor model that cannot be used for optimizing the objective function. Hence, with small datasets like the one used here, we advise erring on the smaller side: one or two hidden layers, and up to 25 nodes per layer. If we suspect the objective function is more complex, these numbers should be increased slightly. With regards to model optimization, we advise using a large number of epochs, typically at least 1000, and potentially higher learning rates. Ideally, the capacity should be increased every once in a while so that a larger amount of data can be used more effectively. Unfortunately, there is almost no research literature to guide us in how to do this properly.
#
# An interesting alternative to a manual architecture search is to use a separate Bayesian optimization process to optimize the architecture and model optimizer parameters (see the recent work by <cite data-cite="kadra2021well"/>). This inner optimization is much faster, as evaluating model performance is cheap; it would still slow down the original optimization, so it is worthwhile only if evaluating the objective function is much more costly.
#
# Below we adapt the `build_model` function slightly for the Michalewicz function. Since it is a more complex function, we increase the number of hidden layers but keep the number of nodes per layer on the lower side. Note the large number of epochs.
# %%
def build_model(data: Dataset) -> DeepEnsemble:
ensemble_size = 5
num_hidden_layers = 3
num_nodes = 25
keras_ensemble = build_keras_ensemble(
data, ensemble_size, num_hidden_layers, num_nodes
)
fit_args = {
"batch_size": 10,
"epochs": 1000,
"callbacks": [
tf.keras.callbacks.EarlyStopping(monitor="loss", patience=100)
],
"verbose": 0,
}
optimizer = KerasOptimizer(tf.keras.optimizers.Adam(0.001), fit_args)
return DeepEnsemble(keras_ensemble, optimizer)
# building and optimizing the model
model = build_model(initial_data)
# %% [markdown]
# ## Run the optimization loop
#
# In Bayesian optimization we use an acquisition function to choose where in the search space to evaluate the objective function in each optimization step. The deep ensemble model uses probabilistic neural networks whose outputs are ultimately approximated with a single Gaussian distribution, which acts as a predictive posterior distribution. This means that any acquisition function that requires only the predictive mean and variance can be used. For example, the predictive mean and variance are sufficient for standard acquisition functions such as Expected improvement (see `ExpectedImprovement`), Lower confidence bound (see `NegativeLowerConfidenceBound`) or Thompson sampling (see `ExactThompsonSampling`). Acquisition functions with additional requirements cannot be used (e.g. those needing the covariance between sets of query points, such as the entropy-based acquisition function `GIBBON`).
#
# Here we will illustrate deep ensembles with a Thompson sampling acquisition function. We use a discrete Thompson sampling strategy that samples a fixed number of points (`grid_size`) from the search space and takes a certain number of samples at each point based on the model posterior (`num_samples`; if this is more than 1, this is a batch strategy).
# %%
from trieste.acquisition.rule import DiscreteThompsonSampling
grid_size = 2000
num_samples = 4
# note that `DiscreteThompsonSampling` by default uses `ExactThompsonSampler`
acquisition_rule = DiscreteThompsonSampling(grid_size, num_samples)
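# %% [markdown]
# As an aside, since only the predictive mean and variance are required, standard acquisition functions could be used instead; for example (a sketch, not executed in this notebook):
# %%
# acquisition_rule = trieste.acquisition.rule.EfficientGlobalOptimization(
#     trieste.acquisition.ExpectedImprovement()
# )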
# %% [markdown]
# We can now run the Bayesian optimization loop by defining a `BayesianOptimizer` and calling its `optimize` method.
#
# Note that the optimization might take a while!
# %%
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
num_steps = 25
# The Keras interface does not currently support using `track_state=True` which saves the model
# in each iteration. This will be addressed in a future update.
result = bo.optimize(
num_steps,
initial_data,
model,
acquisition_rule=acquisition_rule,
track_state=False,
)
dataset = result.try_get_final_dataset()
# %% [markdown]
# ## Explore the results
#
# We can now get the best point found by the optimizer. Note this isn't necessarily the point that was last evaluated.
# %%
query_points = dataset.query_points.numpy()
observations = dataset.observations.numpy()
arg_min_idx = tf.squeeze(tf.argmin(observations, axis=0))
print(f"Minimizer query point: {query_points[arg_min_idx, :]}")
print(f"Minimum observation: {observations[arg_min_idx, :]}")
print(f"True minimum: {MINIMUM}")
# %% [markdown]
# We can visualise how the optimizer performed as a three-dimensional plot. Crosses mark the initial data points, while dots mark the points chosen during the Bayesian optimization run. You can see that there are some samples in the flat regions of the space, while most of the points explore the ridges, in particular in the vicinity of the minimum point.
# %%
from trieste.experimental.plotting import add_bo_points_plotly
fig = plot_function_plotly(
function,
search_space.lower,
search_space.upper,
alpha=0.7,
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
)
fig.show()
# %% [markdown]
# We can visualise the model over the objective function by plotting the mean and 95% confidence intervals of its predictive distribution. Since it is not easy to choose the architecture of a deep ensemble, we advise always checking plots like these to verify that the model is modelling the objective function well. In this case we can see that the model was able to capture the relevant parts of the objective function.
# %%
import matplotlib.pyplot as plt
from trieste.experimental.plotting import plot_model_predictions_plotly
fig = plot_model_predictions_plotly(
result.try_get_final_model(),
search_space.lower,
search_space.upper,
)
fig = add_bo_points_plotly(
x=query_points[:, 0],
y=query_points[:, 1],
z=observations[:, 0],
num_init=num_initial_points,
idx_best=arg_min_idx,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# Finally, let's plot the regret over time, i.e. the difference between the minimum of the objective function and the lowest observation found by the Bayesian optimization so far. Below you can see two plots. The left-hand plot shows the regret over time: the observations (crosses and dots), the current best (orange line), and the start of the optimization loop (blue line). The right-hand plot shows the two-dimensional search space, with the locations of the initial points (crosses again) and the points allocated by the Bayesian optimization (dots). The best point is shown in each plot (purple dot), and on the left plot you can see that we come very close to 0, the minimum of the objective function.
# %%
from trieste.experimental.plotting import plot_regret, plot_bo_points
suboptimality = observations - MINIMUM.numpy()
fig, ax = plt.subplots(1, 2)
plot_regret(
suboptimality,
ax[0],
num_init=num_initial_points,
idx_best=arg_min_idx,
)
plot_bo_points(
query_points, ax[1], num_init=num_initial_points, idx_best=arg_min_idx
)
ax[0].set_title("Minimum achieved")
ax[0].set_ylabel("Regret")
ax[0].set_xlabel("# evaluations")
ax[1].set_ylabel("$x_2$")
ax[1].set_xlabel("$x_1$")
ax[1].set_title("Points in the search space")
fig.show()
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 19,205 | 52.798319 | 984 | py |
trieste-develop | trieste-develop/docs/notebooks/openai_gym_lunar_lander.pct.py | # %% [markdown]
# # Trieste meets OpenAI Gym
#
# This notebook demonstrates how to use Trieste to apply Bayesian optimization to a problem that is slightly more practical than the classical optimization benchmarks used in other tutorials. We will use OpenAI Gym, which is a popular toolkit for reinforcement learning (RL) algorithms.
#
# Concretely, we are going to take the [Lunar Lander](https://gym.openai.com/envs/LunarLander-v2/) environment, define a search space and describe it as an optimization problem, and use Trieste to find an optimal solution for the problem. And hopefully avoid too many landers crashing on the Moon surface along the way.
# %%
import tensorflow as tf
import numpy as np
import trieste
import gpflow
import gym
env_name = "LunarLander-v2"
env = gym.make(env_name)
seed = 1793
np.random.seed(seed)
tf.random.set_seed(seed)
env.reset(seed=seed)
# %% [markdown]
# ### Introduction
#
# Let's start by discussing the problem itself. In the Lunar Lander environment we control a space module that needs to land on the Moon's surface. The surface is piecewise linear and generated randomly, but always has a flat landing pad in the middle (marked with flags in the renders). The module starts at the top with some random initial speed and direction. We control three engines on the module: one on each side, and one at the bottom. At each step of the simulation we can choose to either fire one of the engines or do nothing. The ultimate goal of the simulation is to land safely on the marked landing pad.
#
# As usual in RL settings, the environment calculates reward points. Landing in the designated area gives the biggest reward, and landing safely elsewhere is also rewarded, but with fewer points. Crashing or flying off the screen results in a big negative reward. A few points are also deducted along the way for firing up the engines, thus motivating lower fuel usage. Additionally, to keep the running time manageable, we penalize simulations that take too long by stopping them after a certain number of steps and deducting points from the reward.
#
# ### Optimization problem
#
# Now let's see how this task can be formulated as an optimization problem. We will be following an approach used by Turbo <cite data-cite="eriksson2019scalable"/> and BOSH <cite data-cite="Moss2020BOSHBO"/> papers. The environment comes with a heuristic controller that makes decisions based on the current position and velocity of the module. This controller can be tuned by modifying 12 of its internal numerical parameters. These parameters form our optimization search space. The objective is the same as in the original RL setup: maximize the reward. Therefore we will be using Trieste to learn how to land the module safely on the designated pad, without taking too much time and wasting too much fuel.
#
# The original code for the heuristic controller can be found in [OpenAI Gym GitHub repo](https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py). Here is the parametrized version, taken from the [repository](https://github.com/uber-research/TuRBO) of the Turbo paper:
# %%
# controller code is copied verbatim from https://github.com/uber-research/TuRBO
# s is the state of the environment, an array of shape (1, 8)
# for details on its content see https://github.com/openai/gym/blob/master/gym/envs/box2d/lunar_lander.py
# w is the array of controller parameters, of shape (1, 12)
def heuristic_Controller(s, w):
angle_targ = s[0] * w[0] + s[2] * w[1]
if angle_targ > w[2]:
angle_targ = w[2]
if angle_targ < -w[2]:
angle_targ = -w[2]
hover_targ = w[3] * np.abs(s[0])
angle_todo = (angle_targ - s[4]) * w[4] - (s[5]) * w[5]
hover_todo = (hover_targ - s[1]) * w[6] - (s[3]) * w[7]
if s[6] or s[7]:
angle_todo = w[8]
hover_todo = -(s[3]) * w[9]
a = 0
if hover_todo > np.abs(angle_todo) and hover_todo > w[10]:
a = 2
elif angle_todo < -w[11]:
a = 3
elif angle_todo > +w[11]:
a = 1
return a
steps_limit = 1000
timeout_reward = -100
# this wrapper runs a single simulation of the landing
# for a given controller parameters values
# and computes the reward
# to keep running time reasonable simulation is stopped after `steps_limit` steps
def demo_heuristic_lander(env, w, print_reward=False):
total_reward = 0
steps = 0
s = env.reset()[0]
while True:
if steps > steps_limit:
            total_reward += timeout_reward  # penalize timing out
break
a = heuristic_Controller(s, w)
        s, r, terminated, truncated, _ = env.step(a)
total_reward += r
steps += 1
        if terminated or truncated:
            break
if print_reward:
print(f"Total reward: {total_reward}")
return total_reward
# %% [markdown]
# In the original OpenAI Gym Lunar Lander code the controller parameters have fixed values: the smallest parameter is set to 0.05 and the biggest to 1.0. We will therefore use the same search range for each parameter, from 0.0 to 1.2.
# %%
search_space = trieste.space.Box([0.0] * 12, [1.2] * 12)
# %% [markdown]
# Let's see what kind of reward we might get by just using random parameters from this search space. Typical reward values for the Lunar Lander environment range from -250 (terrible crash) to 250 (excellent landing).
# %%
for _ in range(10):
sample_w = search_space.sample(1).numpy()[0]
demo_heuristic_lander(env, sample_w, print_reward=True)
# %% [markdown]
# As you can see, most of the random sets of parameters result in a negative reward, so picking a value from this search space at random can lead to various unwanted behaviors. Here we show some examples of the landing not going according to plan. Each of these examples was created with a sample of the parameter values from the search space.
#
# **Warning:** all the videos in this notebook were pre-generated. Creating renders of OpenAI Gym environments requires various dependencies depending on software setups and operating systems, so we have chosen not to do it here in the interest of transferability of this notebook. For those interested in reproducing these videos, we have saved the input parameters and the code we used to generate them in the Trieste repository, in the folder next to this notebook. However because of the stochastic nature of the environment and the optimization described here, your results might differ slightly from those shown here.
# %%
import io
import base64
from IPython.display import HTML
def load_video(filename):
video = io.open("./lunar_lander_videos/" + filename, "r+b").read()
encoded = base64.b64encode(video)
return HTML(
data="""
<video width="360" height="auto" alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>""".format(
encoded.decode("ascii")
)
)
# %% [markdown]
# #### Crash
# %%
load_video("crash.mp4")
# %% [markdown]
# #### Timeout
# %%
load_video("timeout.mp4")
# %% [markdown]
# #### Flying out of bounds
# %%
load_video("out_of_bounds.mp4")
# %% [markdown]
# #### Slamming on the surface
# This is a very common failure mode in this environment - going too fast and slamming on the surface.
# %%
load_video("slam.mp4")
# %% [markdown]
# Since the reward is stochastic, our goal is to maximize its expectation (or rather here minimize its negated expectation since Trieste only deals with minimization problems). To decrease the observation noise, our observer returns the average of the reward over 10 runs.
# %%
N_RUNS = 10
def lander_objective(x):
# for each point compute average reward over n_runs runs
all_rewards = []
for w in x.numpy():
rewards = [demo_heuristic_lander(env, w) for _ in range(N_RUNS)]
all_rewards.append(rewards)
rewards_tensor = tf.convert_to_tensor(all_rewards, dtype=tf.float64)
    # trieste minimizes, and we want to maximize
return -1 * tf.reshape(tf.math.reduce_mean(rewards_tensor, axis=1), (-1, 1))
observer = trieste.objectives.utils.mk_observer(lander_objective)
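# %% [markdown]
# As a quick, optional sanity check, we can evaluate the observer at a single random point (note that this runs 10 simulations, and that the output is the negated average reward, so lower is better):
# %%
print(observer(search_space.sample(1)))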
# %% [markdown]
# ### Solving the optimization problem with Trieste
#
# Here we follow the usual steps required to solve an optimization problem with Trieste: generate some initial data, create a surrogate model, define an acquisition function and rule, and run the optimization. The optimization step may take a few minutes to complete.
#
# We use a standard Gaussian process with an RBF kernel, and Augmented Expected Improvement <cite data-cite="Huang:2006"/> as an acquisition function, since it can handle higher noise.
# %%
num_initial_points = 2 * search_space.dimension
initial_query_points = search_space.sample(num_initial_points)
initial_data = observer(initial_query_points)
# %%
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
kernel = gpflow.kernels.RBF(variance=variance)
gpr = gpflow.models.GPR(data.astuple(), kernel)
gpflow.set_trainable(gpr.likelihood, False)
# Since we are running multiple simulations per observation,
# it is possible to account for variations in observation noise
# by using a different likelihood variance for each observation.
# This is possible to model with VGP, as described here:
    # https://gpflow.readthedocs.io/en/master/notebooks/advanced/varying_noise.html
# In the interest of brevity we have chosen not to do it in this notebook.
return trieste.models.gpflow.GaussianProcessRegression(gpr)
model = build_model(initial_data)
# %%
acq_fn = trieste.acquisition.function.AugmentedExpectedImprovement()
rule = trieste.acquisition.rule.EfficientGlobalOptimization(acq_fn) # type: ignore
# %%
N_OPTIMIZATION_STEPS = 200
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
result = bo.optimize(
N_OPTIMIZATION_STEPS, initial_data, model, rule
).final_result.unwrap()
# %% [markdown]
# ### Analyzing the results
#
# First, let's plot the observations of the expected reward, to check that Trieste indeed found a better configuration of the controller. Remember that we flipped the sign of the reward.
# %%
from trieste.experimental import plotting
import matplotlib.pyplot as plt
ax = plt.gca()
plotting.plot_regret(
result.dataset.observations.numpy(), ax, num_init=len(initial_data)
)
# %% [markdown]
# Here we choose the query point that gives the best predicted expected reward according to our model. When running the simulation at this point, we expect to see mostly large positive rewards.
# %%
mean = result.model.predict(result.dataset.query_points)[0]
w_best = result.dataset.query_points[np.argmin(mean), :]
for _ in range(10):
demo_heuristic_lander(env, w_best.numpy(), print_reward=True)
# %% [markdown]
# Finally, let's have a look at what the good controller configuration looks like in action.
#
# **Warning:** as mentioned above, this video was also pre-generated.
# %%
load_video("success.mp4")
# %%
| 11,033 | 41.114504 | 709 | py |
trieste-develop | trieste-develop/docs/notebooks/active_learning_for_binary_classification.pct.py | # %% [markdown]
# # Active Learning for Gaussian Process Classification Model
# %%
import gpflow
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import trieste
from trieste.acquisition.function import BayesianActiveLearningByDisagreement
from trieste.acquisition.rule import OBJECTIVE
from trieste.models.gpflow.models import VariationalGaussianProcess
from trieste.objectives.utils import mk_observer
np.random.seed(1793)
tf.random.set_seed(1793)
# %% [markdown]
# ## The problem
# %% [markdown]
# In Trieste, it is also possible to query the most interesting points for learning the problem, i.e. we want to use as little data as possible to construct the best possible model (active learning). In this tutorial we perform active learning for a binary classification problem using Bayesian Active Learning by Disagreement (BALD) with a Gaussian process classification model.
#
# We will illustrate the BALD algorithm on a synthetic binary classification problem where one class takes the shape of a circle in the search space. The input space is continuous, so we can use a continuous optimiser for our BALD acquisition function.
# %%
search_space = trieste.space.Box([-1, -1], [1, 1])
input_dim = 2
def circle(x):
return tf.cast(
(tf.reduce_sum(tf.square(x), axis=1, keepdims=True) - 0.5) > 0,
tf.float64,
)
# %% [markdown]
# Let's first illustrate what this two-dimensional problem looks like. Class 1 is the area outside the circle and class 0 is the area inside the circle.
# %%
from trieste.experimental.plotting import plot_function_2d
_, ax = plot_function_2d(
circle,
search_space.lower,
search_space.upper,
contour=True,
title=["Circle classification problem (1 outside, 0 inside)"],
xlabel="$X_1$",
ylabel="$X_2$",
fill=True,
)
plt.show()
# %% [markdown]
# Let's generate some data for our initial model. Here we randomly sample a small number of data points.
# %%
num_initial_points = 5
X = search_space.sample(num_initial_points)
observer = mk_observer(circle)
initial_data = observer(X)
# %% [markdown]
# ## Modelling the binary classification task
# %% [markdown]
# For the binary classification model, we use a variational Gaussian process with a Bernoulli likelihood. For more details on this model, see <cite data-cite="Nickisch08a">[Nickisch et al.](https://www.jmlr.org/papers/volume9/nickisch08a/nickisch08a.pdf)</cite>. Here we use Trieste's GPflow model builder `build_vgp_classifier`.
# You can also build a sparse variational Gaussian process (SVGP) classification model via the `build_svgp` function and the `SparseVariational` class; SVGP is preferable for bigger amounts of data.
# %%
from trieste.models.gpflow import VariationalGaussianProcess
from trieste.models.gpflow.builders import build_vgp_classifier
model = VariationalGaussianProcess(
build_vgp_classifier(initial_data, search_space, noise_free=True)
)
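# %% [markdown]
# For larger datasets, a sparse model could be built instead, along the following lines (a sketch, not run here; the `classification=True` flag makes the builder use a Bernoulli likelihood):
# %%
# from trieste.models.gpflow import SparseVariational
# from trieste.models.gpflow.builders import build_svgp
# model = SparseVariational(build_svgp(initial_data, search_space, classification=True))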
# %% [markdown]
# Let's see the model landscape using only these initial data.
# %%
from trieste.experimental.plotting import (
plot_model_predictions_plotly,
add_bo_points_plotly,
)
model.update(initial_data)
model.optimize(initial_data)
fig = plot_model_predictions_plotly(
model,
search_space.lower,
search_space.upper,
)
fig = add_bo_points_plotly(
x=initial_data.query_points[:, 0],
y=initial_data.query_points[:, 1],
z=initial_data.observations[:, 0],
num_init=num_initial_points,
fig=fig,
figrow=1,
figcol=1,
)
fig.show()
# %% [markdown]
# ## The acquisition process
#
# We can construct the BALD acquisition function which maximises information gain about the model parameters, by maximising the mutual information between predictions and model posterior:
#
# $$\mathbb{I}\left[y, \boldsymbol{\theta} \mid \mathbf{x}, \mathcal{D}\right]=\mathbb{H}\left[y \mid \mathbf{x}, \mathcal{D}\right]-\mathbb{E}_{p\left(\boldsymbol{\theta} \mid \mathcal{D}\right)}[\mathbb{H}[y \mid \mathbf{x}, \boldsymbol{\theta}]]$$
#
# See <cite data-cite="houlsby2011bayesian">[Houlsby et al.](https://arxiv.org/pdf/1112.5745.pdf)</cite> for more details. Then, Trieste's `EfficientGlobalOptimization` is used for the query rule:
# %%
acq = BayesianActiveLearningByDisagreement()
rule = trieste.acquisition.rule.EfficientGlobalOptimization(acq) # type: ignore
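# %% [markdown]
# To build some intuition for what BALD maximises, here is a small Monte Carlo sketch of our own (not the implementation Trieste uses): for a binary output, the score is the entropy of the averaged predictive probability minus the average entropy of the individual probability samples, so it is largest where the samples disagree about the class.
# %%
def binary_entropy(p):
    p = tf.clip_by_value(p, 1e-9, 1.0 - 1e-9)
    return -p * tf.math.log(p) - (1.0 - p) * tf.math.log(1.0 - p)
def bald_score_mc(p_samples):
    # p_samples: [S, N] Monte Carlo samples of p(y=1 | x, theta) at N points
    marginal = binary_entropy(tf.reduce_mean(p_samples, axis=0))
    conditional = tf.reduce_mean(binary_entropy(p_samples), axis=0)
    return marginal - conditional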
# %% [markdown]
# ## Run the active learning loop
# Let's run our active learning iteration:
# %%
num_steps = 30
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
results = bo.optimize(num_steps, initial_data, model, rule)
final_dataset = results.try_get_final_datasets()[OBJECTIVE]
final_model = results.try_get_final_models()[OBJECTIVE]
# %% [markdown]
# ## Visualising the result
# Now, we can visualize our model after the active learning run. Points marked with a cross are initial points while circles are points queried by the optimizer.
# %% Plot BO results
from trieste.experimental.plotting import plot_bo_points
_, ax = plot_function_2d(
lambda x: gpflow.likelihoods.Bernoulli().invlink(final_model.predict(x)[0]),
search_space.lower,
search_space.upper,
contour=True,
colorbar=True,
title=["Predictive mean of the final model"],
xlabel="$X_1$",
ylabel="$X_2$",
fill=True,
)
plot_bo_points(final_dataset.query_points, ax[0, 0], num_initial_points)
plt.show()
# %% [markdown]
# As expected, BALD queries important regions, such as points near the domain boundary and the class boundary.
# %% [markdown]
# ## LICENSE
#
# [Apache License 2.0](https://github.com/secondmind-labs/trieste/blob/develop/LICENSE)
| 5,625 | 33.304878 | 379 | py |