#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from collections.abc import Mapping
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_ask(server, request):
"""Returns dictionary with two entries:
"config" -- dictionary with config (keys are strings, values are floats)
"is_finished" -- bool, true if the strat is finished
"""
logger.debug("got ask message!")
if server._pregen_asks:
params = server._pregen_asks.pop()
else:
        # Some clients may still send "message" as an empty string, so we need to check whether it's a dict.
msg = request["message"]
if isinstance(msg, Mapping):
params = ask(server, **msg)
else:
params = ask(server)
new_config = {"config": params, "is_finished": server.strat.finished}
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="ask", request=request
)
return new_config
def ask(server, num_points=1):
"""get the next point to query from the model
Returns:
dict -- new config dict (keys are strings, values are floats)
"""
if server.skip_computations:
        # HACK to make sure strategies finish correctly
server.strat._strat._count += 1
if server.strat._strat.finished:
server.strat._make_next_strat()
return None
if not server.use_ax:
        # indexing by [0] is a temporary HACK while the server side
        # doesn't handle batched asks
next_x = server.strat.gen()[0]
return server._tensor_to_config(next_x)
next_x = server.strat.gen(num_points)
return next_x
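# --- Example (hypothetical sketch, not part of the library) ---
# The wire format handle_ask expects, inferred from the handler above: an
# "ask" carries no parameters (clients may send an empty "message"), and the
# response pairs the generated config with the strategy's finished flag. The
# parameter name "intensity" is an illustrative assumption.
EXAMPLE_ASK_REQUEST = {"type": "ask", "message": ""}
EXAMPLE_ASK_RESPONSE = {"config": {"intensity": [0.5]}, "is_finished": False}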
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .handle_ask import handle_ask
from .handle_can_model import handle_can_model
from .handle_exit import handle_exit
from .handle_finish_strategy import handle_finish_strategy
from .handle_get_config import handle_get_config
from .handle_info import handle_info
from .handle_params import handle_params
from .handle_query import handle_query
from .handle_resume import handle_resume
from .handle_setup import handle_setup
from .handle_tell import handle_tell
MESSAGE_MAP = {
"setup": handle_setup,
"ask": handle_ask,
"tell": handle_tell,
"query": handle_query,
"parameters": handle_params,
"can_model": handle_can_model,
"exit": handle_exit,
"get_config": handle_get_config,
"finish_strategy": handle_finish_strategy,
"info": handle_info,
"resume": handle_resume,
}
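# --- Usage sketch (hypothetical, not part of the library) ---
# How a server loop might dispatch an incoming request through MESSAGE_MAP.
# The request envelope ({"type": ..., "message": ...}) is inferred from the
# handlers above; `server` is assumed to be an AEPsychServer instance.
def dispatch_example(server, request):
    handler = MESSAGE_MAP[request["type"]]
    return handler(server, request)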
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
def handle_finish_strategy(self, request):
self.strat.finish()
return f"finished strategy {self.strat.name}"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
import numpy as np
logger = utils_logging.getLogger(logging.INFO)
def handle_query(server, request):
logger.debug("got query message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="query", request=request
)
response = query(server, **request["message"])
return response
def query(
server,
query_type="max",
probability_space=False,
x=None,
y=None,
constraints=None,
):
if server.skip_computations:
return None
constraints = constraints or {}
response = {
"query_type": query_type,
"probability_space": probability_space,
"constraints": constraints,
}
if query_type == "max":
fmax, fmax_loc = server.strat.get_max(constraints)
response["y"] = fmax.item()
response["x"] = server._tensor_to_config(fmax_loc)
elif query_type == "min":
fmin, fmin_loc = server.strat.get_min(constraints)
response["y"] = fmin.item()
response["x"] = server._tensor_to_config(fmin_loc)
elif query_type == "prediction":
# returns the model value at x
        if x is None:  # TODO: ensure x is between lb and ub
raise RuntimeError("Cannot query model at location = None!")
mean, _var = server.strat.predict(
server._config_to_tensor(x).unsqueeze(axis=0),
probability_space=probability_space,
)
response["x"] = x
response["y"] = mean.item()
elif query_type == "inverse":
        # expect constraints to be a dictionary; values are float arrays of size 1 (exact) or 2 (lower/upper bound)
constraints = {server.parnames.index(k): v for k, v in constraints.items()}
nearest_y, nearest_loc = server.strat.inv_query(
y, constraints, probability_space=probability_space
)
response["y"] = nearest_y
response["x"] = server._tensor_to_config(nearest_loc)
else:
raise RuntimeError("unknown query type!")
# ensure all x values are arrays
response["x"] = {
k: np.array([v]) if np.array(v).ndim == 0 else v
for k, v in response["x"].items()
}
return response
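# --- Example messages (hypothetical sketch, not part of the library) ---
# Request shapes accepted by handle_query, inferred from query()'s signature.
# The parameter name ("intensity") and all values are illustrative assumptions.
EXAMPLE_MAX_QUERY = {"type": "query", "message": {"query_type": "max"}}
EXAMPLE_PREDICTION_QUERY = {
    "type": "query",
    "message": {"query_type": "prediction", "x": {"intensity": [0.5]}},
}
EXAMPLE_INVERSE_QUERY = {
    "type": "query",
    "message": {"query_type": "inverse", "y": 0.75, "probability_space": True},
}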
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_get_config(server, request):
msg = request["message"]
section = msg.get("section", None)
prop = msg.get("property", None)
# If section and property are not specified, return the whole config
if section is None and prop is None:
return server.config.to_dict(deduplicate=False)
    # If only one of section and property is specified, raise an error
if section is None and prop is not None:
raise RuntimeError("Message contains a property but not a section!")
if section is not None and prop is None:
raise RuntimeError("Message contains a section but not a property!")
# If both section and property are specified, return only the relevant value from the config
return server.config.to_dict(deduplicate=False)[section][prop]
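# --- Example messages (hypothetical sketch, not part of the library) ---
# Either request the whole config, or a single (section, property) pair;
# supplying only one of the two raises a RuntimeError (see above). Section
# and property names here are illustrative assumptions.
EXAMPLE_FULL_CONFIG_REQUEST = {"type": "get_config", "message": {}}
EXAMPLE_SINGLE_VALUE_REQUEST = {
    "type": "get_config",
    "message": {"section": "common", "property": "parnames"},
}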
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_can_model(server, request):
# Check if the strategy has finished initialization; i.e.,
# if it has a model and data to fit (strat.can_fit)
logger.debug("got can_model message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="can_model", request=request
)
return {"can_model": server.strat.can_fit}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import io
import logging
from collections.abc import Iterable
import aepsych.utils_logging as utils_logging
import dill
import pandas as pd
import torch
logger = utils_logging.getLogger(logging.INFO)
DEFAULT_DESC = "default description"
DEFAULT_NAME = "default name"
def handle_tell(server, request):
logger.debug("got tell message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="tell", request=request
)
# Batch update mode
    if isinstance(request["message"], list):
for msg in request["message"]:
tell(server, **msg)
else:
tell(server, **request["message"])
    if server.strat is not None and server.strat.finished:
logger.info("Recording strat because the experiment is complete.")
buffer = io.BytesIO()
torch.save(server.strat, buffer, pickle_module=dill)
buffer.seek(0)
server.db.record_strat(master_table=server._db_master_record, strat=buffer)
return "acq"
def flatten_tell_record(server, rec):
out = {}
out["response"] = int(rec.message_contents["message"]["outcome"])
out.update(
pd.json_normalize(rec.message_contents["message"]["config"], sep="_").to_dict(
orient="records"
)[0]
)
if rec.extra_info is not None:
out.update(rec.extra_info)
return out
def tell(server, outcome, config=None, model_data=True, trial_index=-1):
"""tell the model which input was run and what the outcome was
Arguments:
server: The AEPsych server object.
outcome: The outcome of the trial. If using the legacy backend, this must be an int or a float. If using the Ax
backend, this may be an int or float if using a single outcome, or if using multiple outcomes, it must be a
dictionary mapping outcome names to values.
config: A dictionary mapping parameter names to values. This must be provided if using the legacy backend. If
using the Ax backend, this should be provided only for trials that do not already have a trial_index.
model_data: If True, the data from this trial will be added to the model. If False, the trial will be recorded in
the db, but will not be modeled.
trial_index: The trial_index for the trial as provided by the ask response when using the Ax backend. Ignored by
the legacy backend.
"""
if config is None:
config = {}
if not server.is_performing_replay:
_record_tell(server, outcome, config, model_data)
if model_data:
if not server.use_ax:
x = server._config_to_tensor(config)
server.strat.add_data(x, outcome)
else:
assert (
config or trial_index >= 0
), "Must supply a trial parameterization or a trial index!"
if trial_index >= 0:
server.strat.complete_existing_trial(trial_index, outcome)
else:
server.strat.complete_new_trial(config, outcome)
def _record_tell(server, outcome, config, model_data):
server._db_raw_record = server.db.record_raw(
master_table=server._db_master_record,
model_data=bool(model_data),
)
for param_name, param_value in config.items():
        if isinstance(param_value, Iterable) and not isinstance(param_value, str):
if len(param_value) == 1:
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name),
param_value=str(param_value[0]),
)
else:
for i, v in enumerate(param_value):
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name) + "_stimuli" + str(i),
param_value=str(v),
)
else:
server.db.record_param(
raw_table=server._db_raw_record,
param_name=str(param_name),
param_value=str(param_value),
)
if isinstance(outcome, dict):
for key in outcome.keys():
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name=key,
outcome_value=float(outcome[key]),
)
    # Check if we get single or multiple outcomes
    # Multiple outcomes come in the form of iterables that aren't strings or single-element tensors
    elif isinstance(outcome, Iterable) and not isinstance(outcome, str):
        for i, outcome_value in enumerate(outcome):
            if isinstance(outcome_value, Iterable) and not isinstance(outcome_value, str):
if isinstance(outcome_value, torch.Tensor) and outcome_value.dim() < 2:
outcome_value = outcome_value.item()
elif len(outcome_value) == 1:
outcome_value = outcome_value[0]
else:
raise ValueError(
"Multi-outcome values must be a list of lists of length 1!"
)
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name="outcome_" + str(i),
outcome_value=float(outcome_value),
)
else:
server.db.record_outcome(
raw_table=server._db_raw_record,
outcome_name="outcome",
outcome_value=float(outcome),
)
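# --- Example messages (hypothetical sketch, not part of the library) ---
# A single tell and a batched tell, with shapes inferred from handle_tell and
# tell above. Parameter names and outcome values are illustrative assumptions.
EXAMPLE_TELL_REQUEST = {
    "type": "tell",
    "message": {"config": {"intensity": [0.5]}, "outcome": 1},
}
EXAMPLE_BATCH_TELL_REQUEST = {
    "type": "tell",
    "message": [
        {"config": {"intensity": [0.4]}, "outcome": 0},
        {"config": {"intensity": [0.6]}, "outcome": 1},
    ],
}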
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_exit(server, request):
# Make local server write strats into DB and close the connection
termination_type = "Normal termination"
logger.info("Got termination message!")
server.write_strats(termination_type)
server.exit_server_loop = True
return "Terminate"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Any, Dict
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_info(server, request: Dict[str, Any]) -> Dict[str, Any]:
"""Handles info message from the client.
Args:
request (Dict[str, Any]): The info message from the client
Returns:
Dict[str, Any]: Returns dictionary containing the current state of the experiment
"""
logger.debug("got info message!")
ret_val = info(server)
return ret_val
def info(server) -> Dict[str, Any]:
"""Returns details about the current state of the server and experiments
Returns:
Dict: Dict containing server and experiment details
"""
current_strat_model = (
server.config.get(server.strat.name, "model", fallback="model not set")
if server.config and ("model" in server.config.get_section(server.strat.name))
else "model not set"
)
current_strat_acqf = (
server.config.get(server.strat.name, "acqf", fallback="acqf not set")
if server.config and ("acqf" in server.config.get_section(server.strat.name))
else "acqf not set"
)
response = {
"db_name": server.db._db_name,
"exp_id": server._db_master_record.experiment_id,
"strat_count": server.n_strats,
"all_strat_names": server.strat_names,
"current_strat_index": server.strat_id,
"current_strat_name": server.strat.name,
"current_strat_data_pts": server.strat.x.shape[0]
if server.strat.x is not None
else 0,
"current_strat_model": current_strat_model,
"current_strat_acqf": current_strat_acqf,
"current_strat_finished": server.strat.finished,
}
logger.debug(f"Current state of server: {response}")
return response
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_resume(server, request):
logger.debug("got resume message!")
strat_id = int(request["message"]["strat_id"])
server.strat_id = strat_id
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="resume", request=request
)
return server.strat_id
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
from aepsych.config import Config
from aepsych.strategy import AEPsychStrategy, SequentialStrategy
from aepsych.version import __version__
logger = utils_logging.getLogger(logging.INFO)
DEFAULT_DESC = "default description"
DEFAULT_NAME = "default name"
def _configure(server, config):
server._pregen_asks = (
[]
) # TODO: Allow each strategy to have its own stack of pre-generated asks
parnames = config._str_to_list(config.get("common", "parnames"), element_type=str)
server.parnames = parnames
server.config = config
server.use_ax = config.getboolean("common", "use_ax", fallback=False)
server.enable_pregen = config.getboolean("common", "pregen_asks", fallback=False)
if server.use_ax:
server.trial_index = -1
server.strat = AEPsychStrategy.from_config(config)
server.strat_id = server.n_strats - 1
else:
server.strat = SequentialStrategy.from_config(config)
server.strat_id = server.n_strats - 1 # 0-index strats
return server.strat_id
def configure(server, config=None, **config_args):
# To preserve backwards compatibility, config_args is still usable for unittests and old functions.
# But if config is specified, the server will use that rather than create a new config object.
if config is None:
usedconfig = Config(**config_args)
else:
usedconfig = config
if "experiment" in usedconfig:
logger.warning(
'The "experiment" section is being deprecated from configs. Please put everything in the "experiment" section in the "common" section instead.'
)
for i in usedconfig["experiment"]:
usedconfig["common"][i] = usedconfig["experiment"][i]
del usedconfig["experiment"]
version = usedconfig.version
if version < __version__:
try:
usedconfig.convert_to_latest()
server.db.perform_updates()
logger.warning(
f"Config version {version} is less than AEPsych version {__version__}. The config was automatically modified to be compatible. Check the config table in the db to see the changes."
)
except RuntimeError:
logger.warning(
f"Config version {version} is less than AEPsych version {__version__}, but couldn't automatically update the config! Trying to configure the server anyway..."
)
server.db.record_config(master_table=server._db_master_record, config=usedconfig)
return _configure(server, usedconfig)
def handle_setup(server, request):
logger.debug("got setup message!")
    ### make a temporary config object to derive metadata, since the server records the setup in the db before configuring
if (
"config_str" in request["message"].keys()
or "config_dict" in request["message"].keys()
):
tempconfig = Config(**request["message"])
if not server.is_performing_replay:
experiment_id = None
if server._db_master_record is not None:
experiment_id = server._db_master_record.experiment_id
if "metadata" in tempconfig.keys():
cdesc = (
tempconfig["metadata"]["experiment_description"]
if ("experiment_description" in tempconfig["metadata"].keys())
else DEFAULT_DESC
)
cname = (
tempconfig["metadata"]["experiment_name"]
if ("experiment_name" in tempconfig["metadata"].keys())
else DEFAULT_NAME
)
cid = (
tempconfig["metadata"]["experiment_id"]
if ("experiment_id" in tempconfig["metadata"].keys())
else None
)
server._db_master_record = server.db.record_setup(
description=cdesc,
name=cname,
request=request,
id=cid,
extra_metadata=tempconfig.jsonifyMetadata(),
)
            ### if the metadata section does not exist, record defaults
else:
server._db_master_record = server.db.record_setup(
description=DEFAULT_DESC,
name=DEFAULT_NAME,
request=request,
id=experiment_id,
)
strat_id = configure(server, config=tempconfig)
else:
raise RuntimeError("Missing a configure message!")
return strat_id
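# --- Example message (hypothetical sketch, not part of the library) ---
# A minimal setup request carrying an inline config string. Real configs also
# define strategies, models, and acquisition functions; this fragment only
# illustrates the envelope handle_setup expects, and the parameter name and
# bounds are illustrative assumptions.
EXAMPLE_SETUP_REQUEST = {
    "type": "setup",
    "message": {
        "config_str": "[common]\nparnames = [intensity]\nlb = [0]\nub = [1]"
    },
}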
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import aepsych.utils_logging as utils_logging
logger = utils_logging.getLogger(logging.INFO)
def handle_params(server, request):
logger.debug("got parameters message!")
if not server.is_performing_replay:
server.db.record_message(
master_table=server._db_master_record, type="parameters", request=request
)
config_setup = {
server.parnames[i]: [server.strat.lb[i].item(), server.strat.ub[i].item()]
for i in range(len(server.parnames))
}
return config_setup
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Any, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.acquisition.objective import FloorLogitObjective
from aepsych.acquisition.objective.semi_p import SemiPThresholdObjective
from aepsych.config import Config
from aepsych.likelihoods import BernoulliObjectiveLikelihood, LinearBernoulliLikelihood
from aepsych.models import GPClassificationModel
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from botorch.optim.fit import fit_gpytorch_mll_scipy
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import RBFKernel, ScaleKernel
from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
from gpytorch.means import ConstantMean, ZeroMean
from gpytorch.priors import GammaPrior
from torch import Tensor
from torch.distributions import Normal
# TODO: Implement a covar factory and analytic method for getting the lse
logger = getLogger()
def _hadamard_mvn_approx(x_intensity, slope_mean, slope_cov, offset_mean, offset_cov):
"""
MVN approximation to the hadamard product of GPs (from the SemiP paper, extending the
zero-mean results in https://mathoverflow.net/questions/293955/normal-approximation-to-the-pointwise-hadamard-schur-product-of-two-multivariat)
"""
offset_mean = offset_mean + x_intensity
mean_x = offset_mean * slope_mean
# Same as torch.diag_embed(slope_mean) @ offset_cov @ torch.diag_embed(slope_mean), but more efficient
term1 = slope_mean.unsqueeze(-1) * offset_cov * slope_mean.unsqueeze(-2)
# Same as torch.diag_embed(offset_mean) @ slope_cov @ torch.diag_embed(offset_mean), but more efficient
term2 = offset_mean.unsqueeze(-1) * slope_cov * offset_mean.unsqueeze(-2)
term3 = slope_cov * offset_cov
cov_x = term1 + term2 + term3
return mean_x, cov_x
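# --- Sanity check (hypothetical sketch, not part of the library) ---
# Verifies that the broadcasted terms used above match the explicit
# diag_embed formulation they replace. Sizes and the seed are arbitrary.
def _check_hadamard_terms():
    torch.manual_seed(0)
    slope_mean = torch.randn(5)
    offset_cov = torch.randn(5, 5)
    offset_cov = offset_cov @ offset_cov.T  # make it a valid covariance
    explicit = torch.diag_embed(slope_mean) @ offset_cov @ torch.diag_embed(slope_mean)
    broadcast = slope_mean.unsqueeze(-1) * offset_cov * slope_mean.unsqueeze(-2)
    assert torch.allclose(explicit, broadcast)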
def semi_p_posterior_transform(posterior):
batch_mean = posterior.mvn.mean
batch_cov = posterior.mvn.covariance_matrix
offset_mean = batch_mean[..., 0, :]
slope_mean = batch_mean[..., 1, :]
offset_cov = batch_cov[..., 0, :, :]
slope_cov = batch_cov[..., 1, :, :]
Xi = posterior.Xi
approx_mean, approx_cov = _hadamard_mvn_approx(
x_intensity=Xi,
slope_mean=slope_mean,
slope_cov=slope_cov,
offset_mean=offset_mean,
offset_cov=offset_cov,
)
approx_mvn = MultivariateNormal(mean=approx_mean, covariance_matrix=approx_cov)
return GPyTorchPosterior(mvn=approx_mvn)
class SemiPPosterior(GPyTorchPosterior):
def __init__(
self,
mvn: MultivariateNormal,
likelihood: LinearBernoulliLikelihood,
Xi: torch.Tensor,
):
super().__init__(distribution=mvn)
self.likelihood = likelihood
self.Xi = Xi
def rsample_from_base_samples(
self,
sample_shape: torch.Size,
base_samples: Tensor,
) -> Tensor:
r"""Sample from the posterior (with gradients) using base samples.
This is intended to be used with a sampler that produces the corresponding base
samples, and enables acquisition optimization via Sample Average Approximation.
"""
return (
super()
.rsample_from_base_samples(
sample_shape=sample_shape, base_samples=base_samples
)
.squeeze(-1)
)
def rsample(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = (
super()
.rsample(sample_shape=sample_shape, base_samples=base_samples)
.squeeze(-1)
)
        # kcsamps is of shape nsamp x 2 x n, or nsamp x b x 2 x n
return kcsamps
def sample_p(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return self.likelihood.p(function_samples=kcsamps, Xi=self.Xi).squeeze(-1)
def sample_f(
self,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
kcsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return self.likelihood.f(function_samples=kcsamps, Xi=self.Xi).squeeze(-1)
def sample_thresholds(
self,
threshold_level: float,
sample_shape: Optional[torch.Size] = None,
base_samples: Optional[torch.Tensor] = None,
):
fsamps = self.rsample(sample_shape=sample_shape, base_samples=base_samples)
return SemiPThresholdObjective(
likelihood=self.likelihood, target=threshold_level
)(samples=fsamps, X=self.Xi)
class SemiParametricGPModel(GPClassificationModel):
"""
Semiparametric GP model for psychophysics.
    Implements a semi-parametric model with a functional form like :math:`k(x_c)(x_i + c(x_c))`,
for scalar intensity dimension :math:`x_i` and vector-valued context dimensions :math:`x_c`,
with k and c having a GP prior. In contrast to HadamardSemiPModel, this version uses a batched GP
directly, which is about 2-3x slower but does not use the MVN approximation.
Intended for use with a BernoulliObjectiveLikelihood with flexible link function such as
Logistic or Gumbel nonlinearity with a floor.
"""
_num_outputs = 1
_batch_shape = 2
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
stim_dim: int = 0,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Any] = None,
slope_mean: float = 2,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""
Initialize SemiParametricGP.
        Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
            likelihood (gpytorch.likelihoods.Likelihood, optional): The likelihood function to use. If None, defaults to a
                linear-Bernoulli likelihood with probit link.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
lb, ub, dim = _process_bounds(lb, ub, dim)
self.stim_dim = stim_dim
self.context_dims = list(range(dim))
self.context_dims.pop(stim_dim)
if mean_module is None:
mean_module = ConstantMean(batch_shape=torch.Size([2]))
mean_module.requires_grad_(False)
mean_module.constant.copy_(
torch.tensor([0.0, slope_mean]) # offset mean is 0, slope mean is 2
)
if covar_module is None:
covar_module = ScaleKernel(
RBFKernel(
ard_num_dims=dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=self.context_dims, # Operate only on x_s
batch_shape=torch.Size([2]),
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
likelihood = likelihood or LinearBernoulliLikelihood()
assert isinstance(
likelihood, LinearBernoulliLikelihood
), "SemiP model only supports linear Bernoulli likelihoods!"
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
@classmethod
def from_config(cls, config: Config) -> SemiParametricGPModel:
"""Alternate constructor for SemiParametricGPModel model.
This is used when we recursively build a full sampling strategy
from a configuration.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
SemiParametricGPModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=100)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
stim_dim = config.getint(classname, "stim_dim", fallback=0)
slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
return cls(
lb=lb,
ub=ub,
stim_dim=stim_dim,
dim=dim,
likelihood=likelihood,
slope_mean=slope_mean,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
warmstart_hyperparams: bool = False,
warmstart_induc: bool = False,
**kwargs,
) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.LongTensor): Responses.
warmstart_hyperparams (bool): Whether to reuse the previous hyperparameters (True) or fit from scratch
(False). Defaults to False.
warmstart_induc (bool): Whether to reuse the previous inducing points or fit from scratch (False).
Defaults to False.
kwargs: Keyword arguments passed to `optimizer=fit_gpytorch_mll_scipy`.
"""
super().fit(
train_x=train_x,
train_y=train_y,
optimizer=fit_gpytorch_mll_scipy,
warmstart_hyperparams=warmstart_hyperparams,
warmstart_induc=warmstart_induc,
closure_kwargs={"Xi": train_x[..., self.stim_dim]},
**kwargs,
)
def sample(
self,
x: Union[torch.Tensor, np.ndarray],
num_samples: int,
probability_space=False,
) -> torch.Tensor:
"""Sample from underlying model.
        Args:
            x ((n x d) torch.Tensor): Points at which to sample.
            num_samples (int): Number of samples to return.
            probability_space (bool): Whether to sample in probability space. Defaults to False.
Returns:
(num_samples x n) torch.Tensor: Posterior samples
"""
post = self.posterior(x)
if probability_space is True:
samps = post.sample_p(torch.Size([num_samples])).detach()
else:
samps = post.sample_f(torch.Size([num_samples])).detach()
assert samps.shape == (num_samples, 1, x.shape[0])
return samps.squeeze(1)
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
            Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at query points.
"""
with torch.no_grad():
samps = self.sample(
x, num_samples=10000, probability_space=probability_space
)
m, v = samps.mean(0), samps.var(0)
return promote_0d(m), promote_0d(v)
def posterior(self, X, posterior_transform=None):
        # Assume X is (b) x n x d
        if X.ndim > 3:
            raise ValueError(f"X must have 2 or 3 dimensions, but got {X.ndim}.")
# Add in the extra 2 batch for the 2 GPs in this model
Xnew = X.unsqueeze(-3).expand(
X.shape[:-2] # (b)
+ torch.Size([2]) # For the two GPs
+ X.shape[-2:] # n x d
)
# The shape of Xnew is: (b) x 2 x n x d
posterior = SemiPPosterior(
mvn=self(Xnew),
likelihood=self.likelihood,
Xi=X[..., self.stim_dim],
)
if posterior_transform is not None:
return posterior_transform(posterior)
else:
return posterior
class HadamardSemiPModel(GPClassificationModel):
"""
Semiparametric GP model for psychophysics, with a MVN approximation to the elementwise
product of GPs.
    Implements a semi-parametric model with a functional form like :math:`k(x_c)(x_i + c(x_c))`,
    for scalar intensity dimension :math:`x_i` and vector-valued context dimensions :math:`x_c`,
    with k and c having a GP prior. In contrast to SemiParametricGPModel, this version approximates
    the product as a single multivariate normal, which should be faster (the approximation is exact
    if one of the GPs' variances goes to zero).
Intended for use with a BernoulliObjectiveLikelihood with flexible link function such as
Logistic or Gumbel nonlinearity with a floor.
"""
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
stim_dim: int = 0,
slope_mean_module: Optional[gpytorch.means.Mean] = None,
slope_covar_module: Optional[gpytorch.kernels.Kernel] = None,
offset_mean_module: Optional[gpytorch.means.Mean] = None,
offset_covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
slope_mean: float = 2,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""
Initialize HadamardSemiPModel.
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
stim_dim (int): Index of the intensity (monotonic) dimension. Defaults to 0.
slope_mean_module (gpytorch.means.Mean, optional): Mean module to use (default: constant mean) for slope.
slope_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for slope.
offset_mean_module (gpytorch.means.Mean, optional): Mean module to use (default: constant mean) for offset.
offset_covar_module (gpytorch.kernels.Kernel, optional): Covariance kernel to use (default: scaled RBF) for offset.
            likelihood (gpytorch.likelihoods.Likelihood, optional): Defaults to Bernoulli with logistic link and a floor of 0.5.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
super().__init__(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
self.stim_dim = stim_dim
if slope_mean_module is None:
self.slope_mean_module = ConstantMean()
self.slope_mean_module.requires_grad_(False)
self.slope_mean_module.constant.copy_(
torch.tensor(slope_mean)
) # magic number to shift the slope prior to be generally positive.
else:
self.slope_mean_module = slope_mean_module
        self.offset_mean_module = offset_mean_module or ZeroMean()
context_dims = list(range(self.dim))
context_dims.pop(stim_dim)
self.slope_covar_module = slope_covar_module or ScaleKernel(
RBFKernel(
ard_num_dims=self.dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=context_dims, # Operate only on x_s
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
self.offset_covar_module = offset_covar_module or ScaleKernel(
RBFKernel(
ard_num_dims=self.dim - 1,
lengthscale_prior=GammaPrior(3, 6),
active_dims=context_dims, # Operate only on x_s
),
outputscale_prior=GammaPrior(1.5, 1.0),
)
self.likelihood = likelihood or BernoulliObjectiveLikelihood(
objective=FloorLogitObjective()
)
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
def forward(self, x: torch.Tensor) -> MultivariateNormal:
"""Forward pass for semip GP.
        Generates a k(c + x[:, stim_dim]) = k*c + k*x[:, stim_dim] MVN object, where k and c are the
        slope and offset GPs and x[:, stim_dim] are the intensity stimulus
        locations, which act as a constant offset within the product.
        Args:
            x (torch.Tensor): Points at which to sample.
        Returns:
            MultivariateNormal: The approximate product MVN evaluated at x.
"""
transformed_x = self.normalize_inputs(x)
# TODO: make slope prop to intensity width.
slope_mean = self.slope_mean_module(transformed_x)
# kc mvn
offset_mean = self.offset_mean_module(transformed_x)
slope_cov = self.slope_covar_module(transformed_x)
offset_cov = self.offset_covar_module(transformed_x)
mean_x, cov_x = _hadamard_mvn_approx(
x_intensity=transformed_x[..., self.stim_dim],
slope_mean=slope_mean,
slope_cov=slope_cov,
offset_mean=offset_mean,
offset_cov=offset_cov,
)
return MultivariateNormal(mean_x, cov_x)
@classmethod
def from_config(cls, config: Config) -> HadamardSemiPModel:
"""Alternate constructor for HadamardSemiPModel model.
This is used when we recursively build a full sampling strategy
from a configuration.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
HadamardSemiPModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=100)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
slope_mean_module = config.getobj(classname, "slope_mean_module", fallback=None)
slope_covar_module = config.getobj(
classname, "slope_covar_module", fallback=None
)
offset_mean_module = config.getobj(
classname, "offset_mean_module", fallback=None
)
offset_covar_module = config.getobj(
classname, "offset_covar_module", fallback=None
)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
slope_mean = config.getfloat(classname, "slope_mean", fallback=2)
stim_dim = config.getint(classname, "stim_dim", fallback=0)
return cls(
lb=lb,
ub=ub,
stim_dim=stim_dim,
dim=dim,
slope_mean_module=slope_mean_module,
slope_covar_module=slope_covar_module,
offset_mean_module=offset_mean_module,
offset_covar_module=offset_covar_module,
likelihood=likelihood,
slope_mean=slope_mean,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
            Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at query points.
"""
if probability_space:
if hasattr(self.likelihood, "objective"):
fsamps = self.sample(x, 1000)
psamps = self.likelihood.objective(fsamps)
return psamps.mean(0).squeeze(), psamps.var(0).squeeze()
elif isinstance(self.likelihood, BernoulliLikelihood): # default to probit
fsamps = self.sample(x, 1000)
psamps = Normal(0, 1).cdf(fsamps)
return psamps.mean(0).squeeze(), psamps.var(0).squeeze()
else:
raise NotImplementedError(
f"p-space sampling not defined if likelihood ({self.likelihood}) does not have a link!"
)
else:
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return fmean, fvar
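# --- Usage sketch (hypothetical, not part of the library) ---
# Fitting SemiParametricGPModel on synthetic 2-d binary data, with dim 0 as
# the intensity dimension. The bounds, sample counts, and response-generating
# process are illustrative assumptions, not library defaults.
def _semi_p_example():
    torch.manual_seed(0)
    lb, ub = torch.zeros(2), torch.ones(2)
    train_x = torch.rand(30, 2)
    # detection probability increases with the intensity dimension
    train_y = torch.bernoulli(train_x[:, 0])
    model = SemiParametricGPModel(lb=lb, ub=ub)
    model.fit(train_x, train_y)
    pmean, pvar = model.predict(train_x[:5], probability_space=True)
    return pmean, pvar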
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
from typing import Any, Dict, Optional, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from botorch.fit import fit_gpytorch_mll
from botorch.models import PairwiseGP, PairwiseLaplaceMarginalLogLikelihood
from botorch.models.transforms.input import Normalize
from scipy.stats import norm
logger = getLogger()
class PairwiseProbitModel(PairwiseGP, AEPsychMixin):
_num_outputs = 1
stimuli_per_trial = 2
outcome_type = "binary"
def _pairs_to_comparisons(self, x, y):
"""
Takes x, y structured as pairs and judgments and
returns pairs and comparisons as PairwiseGP requires
"""
# This needs to take a unique over the feature dim by flattening
# over pairs but not instances/batches. This is actually tensor
# matricization over the feature dimension but awkward in numpy
unique_coords = torch.unique(
torch.transpose(x, 1, 0).reshape(self.dim, -1), dim=1
)
def _get_index_of_equal_row(arr, x, dim=0):
return torch.all(torch.eq(arr, x[:, None]), dim=dim).nonzero().item()
comparisons = []
for pair, judgement in zip(x, y):
comparison = (
_get_index_of_equal_row(unique_coords, pair[..., 0]),
_get_index_of_equal_row(unique_coords, pair[..., 1]),
)
if judgement == 0:
comparisons.append(comparison)
else:
comparisons.append(comparison[::-1])
return unique_coords.T, torch.LongTensor(comparisons)
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
max_fit_time: Optional[float] = None,
):
self.lb, self.ub, dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
bounds = torch.stack((self.lb, self.ub))
input_transform = Normalize(d=dim, bounds=bounds)
if covar_module is None:
config = Config(
config_dict={
"default_mean_covar_factory": {
"lb": str(self.lb.tolist()),
"ub": str(self.ub.tolist()),
}
}
) # type: ignore
_, covar_module = default_mean_covar_factory(config)
super().__init__(
datapoints=None,
comparisons=None,
covar_module=covar_module,
jitter=1e-3,
input_transform=input_transform,
)
self.dim = dim # The Pairwise constructor sets self.dim = None.
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
**kwargs,
):
self.train()
mll = PairwiseLaplaceMarginalLogLikelihood(self.likelihood, self)
datapoints, comparisons = self._pairs_to_comparisons(train_x, train_y)
self.set_train_data(datapoints, comparisons)
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
if max_fit_time is not None:
            # figure out how long a single evaluation takes
starttime = time.time()
_ = mll(self(datapoints), comparisons)
single_eval_time = time.time() - starttime
n_eval = int(max_fit_time / single_eval_time)
optimizer_kwargs["maxfun"] = n_eval
logger.info(f"fit maxfun is {n_eval}")
logger.info("Starting fit...")
starttime = time.time()
fit_gpytorch_mll(mll, **kwargs, **optimizer_kwargs)
logger.info(f"Fit done, time={time.time()-starttime}")
def update(
self, train_x: torch.Tensor, train_y: torch.Tensor, warmstart: bool = True
):
"""Perform a warm-start update of the model from previous fit."""
self.fit(train_x, train_y)
def predict(
self, x, probability_space=False, num_samples=1000, rereference="x_min"
):
if rereference is not None:
samps = self.sample(x, num_samples, rereference)
fmean, fvar = samps.mean(0).squeeze(), samps.var(0).squeeze()
else:
post = self.posterior(x)
fmean, fvar = post.mean.squeeze(), post.variance.squeeze()
if probability_space:
return (
promote_0d(norm.cdf(fmean)),
promote_0d(norm.cdf(fvar)),
)
else:
return fmean, fvar
def predict_probability(
self, x, probability_space=False, num_samples=1000, rereference="x_min"
):
return self.predict(
x, probability_space=True, num_samples=num_samples, rereference=rereference
)
def sample(self, x, num_samples, rereference="x_min"):
if len(x.shape) < 2:
x = x.reshape(-1, 1)
if rereference is None:
return self.posterior(x).rsample(torch.Size([num_samples]))
if rereference == "x_min":
x_ref = self.lb
elif rereference == "x_max":
x_ref = self.ub
elif rereference == "f_max":
x_ref = torch.Tensor(self.get_max()[1])
elif rereference == "f_min":
x_ref = torch.Tensor(self.get_min()[1])
else:
raise RuntimeError(
f"Unknown rereference type {rereference}! Options: x_min, x_max, f_min, f_max."
)
x_stack = torch.vstack([x, x_ref])
samps = self.posterior(x_stack).rsample(torch.Size([num_samples]))
samps, samps_ref = torch.split(samps, [samps.shape[1] - 1, 1], dim=1)
if rereference == "x_min" or rereference == "f_min":
return samps - samps_ref
else:
return -samps + samps_ref
@classmethod
def from_config(cls, config):
classname = cls.__name__
mean_covar_factory = config.getobj(
"PairwiseProbitModel",
"mean_covar_factory",
fallback=default_mean_covar_factory,
)
# no way of passing mean into PairwiseGP right now
_, covar = mean_covar_factory(config)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = lb.shape[0]
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
return cls(lb=lb, ub=ub, dim=dim, covar_module=covar, max_fit_time=max_fit_time)
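# --- Usage sketch (hypothetical, not part of the library) ---
# Shapes inferred from _pairs_to_comparisons above: train_x is
# (n_pairs x dim x 2), and train_y[i] in {0, 1} encodes which stimulus in
# pair i was judged greater. Data values are illustrative assumptions.
def _pairwise_example():
    lb, ub = torch.zeros(1), torch.ones(1)
    model = PairwiseProbitModel(lb=lb, ub=ub)
    train_x = torch.tensor([[[0.1, 0.9]], [[0.8, 0.2]]])  # two 1-d pairs
    train_y = torch.tensor([0.0, 1.0])
    model.fit(train_x, train_y)
    fmean, fvar = model.predict(torch.tensor([[0.5]]))
    return fmean, fvar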
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gpytorch
import torch
from aepsych.likelihoods import OrdinalLikelihood
from aepsych.models import GPClassificationModel
class OrdinalGPModel(GPClassificationModel):
"""
Convenience wrapper for GPClassificationModel that hardcodes
an ordinal likelihood, better priors for this setting, and
adds a convenience method for computing outcome probabilities.
TODO: at some point we should refactor posteriors so that things like
OrdinalPosterior and MonotonicPosterior don't have to have their own
model classes.
"""
outcome_type = "ordinal"
def __init__(self, likelihood=None, *args, **kwargs):
covar_module = kwargs.pop("covar_module", None)
dim = kwargs.get("dim")
if covar_module is None:
ls_prior = gpytorch.priors.GammaPrior(concentration=1.5, rate=3.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
ls_constraint = gpytorch.constraints.Positive(
transform=None, initial_value=ls_prior_mode
)
# no outputscale due to shift identifiability in d.
covar_module = gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
)
if likelihood is None:
likelihood = OrdinalLikelihood(n_levels=5)
super().__init__(
*args,
covar_module=covar_module,
likelihood=likelihood,
**kwargs,
)
def predict_probs(self, xgrid):
fmean, fvar = self.predict(xgrid)
return self.calculate_probs(fmean, fvar)
def calculate_probs(self, fmean, fvar):
fsd = torch.sqrt(1 + fvar)
probs = torch.zeros(*fmean.size(), self.likelihood.n_levels)
probs[..., 0] = self.likelihood.link(
(self.likelihood.cutpoints[0] - fmean) / fsd
)
for i in range(1, self.likelihood.n_levels - 1):
probs[..., i] = self.likelihood.link(
(self.likelihood.cutpoints[i] - fmean) / fsd
) - self.likelihood.link((self.likelihood.cutpoints[i - 1] - fmean) / fsd)
probs[..., -1] = 1 - self.likelihood.link(
(self.likelihood.cutpoints[-1] - fmean) / fsd
)
return probs
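# --- Sanity check (hypothetical sketch, not part of the library) ---
# The telescoping construction in calculate_probs guarantees that the
# per-level probabilities sum to 1 for any link that is a CDF: interior terms
# cancel, leaving link(+inf) - link(-inf) = 1. Illustrated here with a probit
# link and arbitrary cutpoints; values are illustrative assumptions.
def _check_ordinal_probs_sum_to_one():
    from torch.distributions import Normal

    link = Normal(0.0, 1.0).cdf
    cutpoints = torch.tensor([-1.0, 0.0, 1.5])  # 4 ordinal levels
    fmean, fvar = torch.tensor([0.3]), torch.tensor([0.5])
    fsd = torch.sqrt(1 + fvar)
    cdfs = link((cutpoints.unsqueeze(-1) - fmean) / fsd)
    probs = torch.cat([cdfs[:1], cdfs[1:] - cdfs[:-1], 1 - cdfs[-1:]])
    assert torch.allclose(probs.sum(), torch.tensor(1.0))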
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
from typing import Dict, List, Optional, Sequence, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.acquisition.rejection_sampler import RejectionSampler
from aepsych.config import Config
from aepsych.factory.factory import monotonic_mean_covar_factory
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.models.base import AEPsychMixin
from aepsych.models.utils import select_inducing_points
from aepsych.utils import _process_bounds, promote_0d
from botorch.fit import fit_gpytorch_mll
from gpytorch.kernels import Kernel
from gpytorch.likelihoods import BernoulliLikelihood, Likelihood
from gpytorch.means import Mean
from gpytorch.mlls.variational_elbo import VariationalELBO
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
from scipy.stats import norm
from torch import Tensor
class MonotonicRejectionGP(AEPsychMixin, ApproximateGP):
"""A monotonic GP using rejection sampling.
This takes the same insight as in e.g. Riihimäki & Vehtari 2010 (that the derivative of a GP
is likewise a GP) but instead of approximately optimizing the likelihood of the model
using EP, we optimize an unconstrained model by VI and then draw monotonic samples
by rejection sampling.
References:
Riihimäki, J., & Vehtari, A. (2010). Gaussian processes with monotonicity information.
Journal of Machine Learning Research, 9, 645–652.
"""
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
monotonic_idxs: Sequence[int],
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[Mean] = None,
covar_module: Optional[Kernel] = None,
likelihood: Optional[Likelihood] = None,
fixed_prior_mean: Optional[float] = None,
num_induc: int = 25,
num_samples: int = 250,
num_rejection_samples: int = 5000,
inducing_point_method: str = "auto",
) -> None:
"""Initialize MonotonicRejectionGP.
        Args:
            monotonic_idxs (Sequence[int]): List of which columns of x should be given monotonicity
                constraints.
            lb (Union[np.ndarray, torch.Tensor]): Lower bounds of the parameters.
            ub (Union[np.ndarray, torch.Tensor]): Upper bounds of the parameters.
            dim (int, optional): The number of dimensions in the parameter space. If None, it is
                inferred from the size of lb and ub.
            mean_module (Mean, optional): Mean module to use (default: constant mean).
            covar_module (Kernel, optional): Covariance kernel to use (default: scaled RBF).
            likelihood (Likelihood, optional): Likelihood to use. Defaults to a Bernoulli likelihood.
            fixed_prior_mean (float, optional): Fixed prior mean. If classification, should be the prior
                classification probability (not the latent function value). Defaults to None.
            num_induc (int): Number of inducing points for the variational GP. Defaults to 25.
            num_samples (int): Number of samples for estimating the posterior on predict or
                acquisition function evaluation. Defaults to 250.
            num_rejection_samples (int): Number of samples used for rejection sampling. Defaults to 5000.
            inducing_point_method (str): Method for selecting inducing points. Defaults to "auto".
        """
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
if likelihood is None:
likelihood = BernoulliLikelihood()
self.inducing_size = num_induc
self.inducing_point_method = inducing_point_method
inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
bounds=self.bounds,
method="sobol",
)
inducing_points_aug = self._augment_with_deriv_index(inducing_points, 0)
variational_distribution = CholeskyVariationalDistribution(
inducing_points_aug.size(0)
)
variational_strategy = VariationalStrategy(
model=self,
inducing_points=inducing_points_aug,
variational_distribution=variational_distribution,
learn_inducing_locations=False,
)
if mean_module is None:
mean_module = ConstantMeanPartialObsGrad()
if fixed_prior_mean is not None:
if isinstance(likelihood, BernoulliLikelihood):
fixed_prior_mean = norm.ppf(fixed_prior_mean)
mean_module.constant.requires_grad_(False)
mean_module.constant.copy_(torch.tensor(fixed_prior_mean))
if covar_module is None:
ls_prior = gpytorch.priors.GammaPrior(
concentration=4.6, rate=1.0, transform=lambda x: 1 / x
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar_module = gpytorch.kernels.ScaleKernel(
RBFKernelPartialObsGrad(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
super().__init__(variational_strategy)
self.bounds_ = torch.stack([self.lb, self.ub])
self.mean_module = mean_module
self.covar_module = covar_module
self.likelihood = likelihood
self.num_induc = num_induc
self.monotonic_idxs = monotonic_idxs
self.num_samples = num_samples
self.num_rejection_samples = num_rejection_samples
self.fixed_prior_mean = fixed_prior_mean
self.inducing_points = inducing_points
def fit(self, train_x: Tensor, train_y: Tensor, **kwargs) -> None:
"""Fit the model
Args:
train_x (Tensor): Training x points
train_y (Tensor): Training y points. Should be (n x 1).
"""
self.set_train_data(train_x, train_y)
self.inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0],
bounds=self.bounds,
method=self.inducing_point_method,
)
self._set_model(train_x, train_y)
def _set_model(
self,
train_x: Tensor,
train_y: Tensor,
model_state_dict: Optional[Dict[str, Tensor]] = None,
likelihood_state_dict: Optional[Dict[str, Tensor]] = None,
) -> None:
train_x_aug = self._augment_with_deriv_index(train_x, 0)
self.set_train_data(train_x_aug, train_y)
# Set model parameters
if model_state_dict is not None:
self.load_state_dict(model_state_dict)
if likelihood_state_dict is not None:
self.likelihood.load_state_dict(likelihood_state_dict)
# Fit!
mll = VariationalELBO(
likelihood=self.likelihood, model=self, num_data=train_y.numel()
)
mll = fit_gpytorch_mll(mll)
def update(self, train_x: Tensor, train_y: Tensor, warmstart: bool = True) -> None:
"""
Update the model with new data.
Expects the full set of data, not the incremental new data.
Args:
train_x (Tensor): Train X.
train_y (Tensor): Train Y. Should be (n x 1).
warmstart (bool): If True, warm-start model fitting with current parameters.
"""
if warmstart:
model_state_dict = self.state_dict()
likelihood_state_dict = self.likelihood.state_dict()
else:
model_state_dict = None
likelihood_state_dict = None
self._set_model(
train_x=train_x,
train_y=train_y,
model_state_dict=model_state_dict,
likelihood_state_dict=likelihood_state_dict,
)
def sample(
self,
x: Tensor,
num_samples: Optional[int] = None,
num_rejection_samples: Optional[int] = None,
) -> torch.Tensor:
"""Sample from monotonic GP
Args:
x (Tensor): tensor of n points at which to sample
num_samples (int, optional): how many points to sample (default: self.num_samples)
Returns: a Tensor of shape [n_samp, n]
"""
if num_samples is None:
num_samples = self.num_samples
if num_rejection_samples is None:
num_rejection_samples = self.num_rejection_samples
rejection_ratio = 20
if num_samples * rejection_ratio > num_rejection_samples:
warnings.warn(
f"num_rejection_samples should be at least {rejection_ratio} times greater than num_samples."
)
n = x.shape[0]
# Augment with derivative index
x_aug = self._augment_with_deriv_index(x, 0)
# Add in monotonicity constraint points
deriv_cp = self._get_deriv_constraint_points()
x_aug = torch.cat((x_aug, deriv_cp), dim=0)
        assert (
            x_aug.shape[0]
            == x.shape[0] + len(self.monotonic_idxs) * self.inducing_points.shape[0]
        )
constrained_idx = torch.arange(n, x_aug.shape[0])
with torch.no_grad():
posterior = self.posterior(x_aug)
sampler = RejectionSampler(
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
constrained_idx=constrained_idx,
)
samples = sampler(posterior)
samples_f = samples[:, :n, 0].detach().cpu()
return samples_f
def predict(
self, x: Tensor, probability_space: bool = False
) -> Tuple[Tensor, Tensor]:
"""Predict
Args:
x: tensor of n points at which to predict.
Returns: tuple (f, var) where f is (n,) and var is (n,)
"""
samples_f = self.sample(x)
mean = torch.mean(samples_f, dim=0).squeeze()
variance = torch.var(samples_f, dim=0).clamp_min(0).squeeze()
if probability_space:
return (
torch.Tensor(promote_0d(norm.cdf(mean))),
torch.Tensor(promote_0d(norm.cdf(variance))),
)
return mean, variance
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict(x, probability_space=True)
def _augment_with_deriv_index(self, x: Tensor, indx):
return torch.cat(
(x, indx * torch.ones(x.shape[0], 1)),
dim=1,
)
def _get_deriv_constraint_points(self):
deriv_cp = torch.tensor([])
for i in self.monotonic_idxs:
induc_i = self._augment_with_deriv_index(self.inducing_points, i + 1)
deriv_cp = torch.cat((deriv_cp, induc_i), dim=0)
return deriv_cp
@classmethod
def from_config(cls, config: Config) -> MonotonicRejectionGP:
classname = cls.__name__
        num_induc = config.getint(classname, "num_induc", fallback=25)
        num_samples = config.getint(classname, "num_samples", fallback=250)
num_rejection_samples = config.getint(
classname, "num_rejection_samples", fallback=5000
)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=monotonic_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
monotonic_idxs: List[int] = config.getlist(
classname, "monotonic_idxs", fallback=[-1]
)
return cls(
monotonic_idxs=monotonic_idxs,
lb=lb,
ub=ub,
dim=dim,
num_induc=num_induc,
num_samples=num_samples,
num_rejection_samples=num_rejection_samples,
mean_module=mean,
covar_module=covar,
)
def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
"""Evaluate GP
Args:
x (torch.Tensor): Tensor of points at which GP should be evaluated.
Returns:
gpytorch.distributions.MultivariateNormal: Distribution object
holding mean and covariance at x.
"""
# final dim is deriv index, we only normalize the "real" dims
transformed_x = x.clone()
transformed_x[..., :-1] = self.normalize_inputs(transformed_x[..., :-1])
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
return latent_pred
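# A minimal standalone sketch (illustrative only, not part of the AEPsych API):
# mirrors how sample() above augments query points with derivative index 0 and
# appends one derivative constraint point per inducing point for each
# monotonic dimension. All names and shapes here are made up for exposition.
if __name__ == "__main__":
    x = torch.rand(4, 2)  # n=4 query points in d=2
    x_aug = torch.cat((x, torch.zeros(4, 1)), dim=1)  # deriv index 0: observe f(x)
    inducing_points = torch.rand(3, 2)
    monotonic_idxs = [1]  # enforce monotonicity in dimension 1
    for i in monotonic_idxs:
        # deriv index i+1 marks an observation of df/dx_i at the inducing points
        cp = torch.cat((inducing_points, (i + 1) * torch.ones(3, 1)), dim=1)
        x_aug = torch.cat((x_aug, cp), dim=0)
    assert x_aug.shape[0] == 4 + len(monotonic_idxs) * 3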
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Any, List, Optional, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.gp_classification import GPClassificationModel
from botorch.posteriors.gpytorch import GPyTorchPosterior
from gpytorch.likelihoods import Likelihood
from statsmodels.stats.moment_helpers import corr2cov, cov2corr
class MonotonicProjectionGP(GPClassificationModel):
"""A monotonic GP based on posterior projection
NOTE: This model does not currently support backprop and so cannot be used
with gradient optimization for active learning.
This model produces predictions that are monotonic in any number of
specified monotonic dimensions. It follows the intuition of the paper
Lin L, Dunson DB (2014) Bayesian monotone regression using Gaussian process
projection, Biometrika 101(2): 303-317.
but makes significant departures by using heuristics for a lot of what is
done in a more principled way in the paper. The reason for the move to
heuristics is to improve scaling, especially with multiple monotonic
dimensions.
The method in the paper applies PAVA projection at the sample level,
which requires a significant amount of costly GP posterior sampling. The
approach taken here applies rolling-max projection to quantiles of the
distribution, and so requires only marginal posterior evaluation. There is
also a significant departure in the way multiple monotonic dimensions are
handled, since in the paper computation scales exponentially with the
number of monotonic dimensions and the heuristic approach taken here scales
linearly in the number of dimensions.
    The cost of these changes is that the convergence guarantees proven in the
    paper no longer hold: the method implemented here is a heuristic that may
    nonetheless be useful in some problems.
The principle behind the method given here is that sample-level
monotonicity implies monotonicity in the quantiles. We enforce monotonicity
in several quantiles, and use that as an approximation for the true
projected posterior distribution.
The approach here also supports specifying a minimum value of f. That
minimum will be enforced on mu, but not necessarily on the lower bound
of the projected posterior since we keep the projected posterior normal.
The min f value will also be enforced on samples drawn from the model,
while monotonicity will not be enforced at the sample level.
The procedure for computing the monotonic projected posterior at x is:
1. Separately for each monotonic dimension, create a grid of s points that
differ only in that dimension, and sweep from the lower bound up to x.
2. Evaluate the marginal distribution, mu and sigma, on the full set of
points (x and the s grid points for each monotonic dimension).
3. Compute the mu +/- 2 * sigma quantiles.
4. Enforce monotonicity in the quantiles by taking mu_proj as the maximum
mu across the set, and lb_proj as the maximum of mu - 2 * sigma across the
    set. ub_proj is left as mu(x) + 2 * sigma(x), but is clamped from below at
    mu_proj in case the projection pushed mu_proj above the original ub.
5. Clamp mu and lb to the minimum value for f, if one was set.
6. Construct a new normal posterior given the projected quantiles by taking
mu_proj as the mean, and (ub - lb) / 4 as the standard deviation. Adjust
the covariance matrix to account for the change in the marginal variances.
The process above requires only marginal posterior evaluation on the grid
of points used for the posterior projection, and the size of that grid
scales linearly with the number of monotonic dimensions, not exponentially.
The args here are the same as for GPClassificationModel with the addition
of:
Args:
monotonic_dims: A list of the dimensions on which monotonicity should
be enforced.
monotonic_grid_size: The size of the grid, s, in 1. above.
min_f_val: If provided, maintains this minimum in the projection in 5.
"""
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
monotonic_dims: List[int],
monotonic_grid_size: int = 20,
min_f_val: Optional[float] = None,
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
assert len(monotonic_dims) > 0
self.monotonic_dims = monotonic_dims
self.mon_grid_size = monotonic_grid_size
self.min_f_val = min_f_val
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
def posterior(
self,
X: torch.Tensor,
observation_noise: Union[bool, torch.Tensor] = False,
**kwargs: Any,
) -> GPyTorchPosterior:
# Augment X with monotonicity grid points, for each monotonic dim
n, d = X.shape # Require no batch dimensions
m = len(self.monotonic_dims)
s = self.mon_grid_size
X_aug = X.repeat(s * m + 1, 1, 1)
for i, dim in enumerate(self.monotonic_dims):
# using numpy because torch doesn't support vectorized linspace,
# pytorch/issues/61292
grid: Union[np.ndarray, torch.Tensor] = np.linspace(
self.lb[dim],
X[:, dim].numpy(),
s + 1,
) # (s+1 x n)
grid = torch.tensor(grid[:-1, :], dtype=X.dtype) # Drop x; (s x n)
X_aug[(1 + i * s) : (1 + (i + 1) * s), :, dim] = grid
# X_aug[0, :, :] is X, and then subsequent indices are points in the grids
# Predict marginal distributions on X_aug
with torch.no_grad():
post_aug = super().posterior(X=X_aug)
mu_aug = post_aug.mean.squeeze() # (m*s+1 x n)
var_aug = post_aug.variance.squeeze() # (m*s+1 x n)
mu_proj = mu_aug.max(dim=0).values
lb_proj = (mu_aug - 2 * torch.sqrt(var_aug)).max(dim=0).values
if self.min_f_val is not None:
mu_proj = mu_proj.clamp(min=self.min_f_val)
lb_proj = lb_proj.clamp(min=self.min_f_val)
ub_proj = (mu_aug[0, :] + 2 * torch.sqrt(var_aug[0, :])).clamp(min=mu_proj)
sigma_proj = ((ub_proj - lb_proj) / 4).clamp(min=1e-4)
        # Adjust the whole covariance matrix to accommodate the projected marginals
with torch.no_grad():
post = super().posterior(X=X)
R = cov2corr(post.distribution.covariance_matrix.squeeze().numpy())
S_proj = torch.tensor(corr2cov(R, sigma_proj.numpy()), dtype=X.dtype)
mvn_proj = gpytorch.distributions.MultivariateNormal(
mu_proj.unsqueeze(0),
S_proj.unsqueeze(0),
)
return GPyTorchPosterior(mvn_proj)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
samps = super().sample(x=x, num_samples=num_samples)
if self.min_f_val is not None:
samps = samps.clamp(min=self.min_f_val)
return samps
@classmethod
def from_config(cls, config: Config) -> MonotonicProjectionGP:
"""Alternate constructor for MonotonicProjectionGP model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
MonotonicProjectionGP: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=10)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
monotonic_dims: List[int] = config.getlist(
classname, "monotonic_dims", fallback=[-1]
)
monotonic_grid_size = config.getint(
classname, "monotonic_grid_size", fallback=20
)
min_f_val = config.getfloat(classname, "min_f_val", fallback=None)
return cls(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
mean_module=mean,
covar_module=covar,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
likelihood=likelihood,
monotonic_dims=monotonic_dims,
monotonic_grid_size=monotonic_grid_size,
min_f_val=min_f_val,
)
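# A standalone sketch (made-up marginals, for exposition only) of the projection
# arithmetic in steps 3-6 of the class docstring. Rows of mu_aug correspond to x
# plus the grid points swept below it in a monotonic dimension; columns are queries.
if __name__ == "__main__":
    mu_aug = torch.tensor([[0.1, 0.3], [0.4, 0.2], [0.2, 0.1]])
    var_aug = torch.full_like(mu_aug, 0.04)
    mu_proj = mu_aug.max(dim=0).values  # step 4: max of mu across the sweep
    lb_proj = (mu_aug - 2 * var_aug.sqrt()).max(dim=0).values  # max lower quantile
    # ub stays at mu(x) + 2 * sigma(x), clamped from below at mu_proj
    ub_proj = (mu_aug[0, :] + 2 * var_aug[0, :].sqrt()).clamp(min=mu_proj)
    sigma_proj = ((ub_proj - lb_proj) / 4).clamp(min=1e-4)  # step 6
    print(mu_proj, sigma_proj)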
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .exact_gp import ContinuousRegressionGP, ExactGP
from .gp_classification import GPBetaRegressionModel, GPClassificationModel
from .gp_regression import GPRegressionModel
from .monotonic_projection_gp import MonotonicProjectionGP
from .monotonic_rejection_gp import MonotonicRejectionGP
from .multitask_regression import IndependentMultitaskGPRModel, MultitaskGPRModel
from .ordinal_gp import OrdinalGPModel
from .pairwise_probit import PairwiseProbitModel
from .semi_p import (
HadamardSemiPModel,
semi_p_posterior_transform,
SemiParametricGPModel,
)
from .variational_gp import BetaRegressionGP, BinaryClassificationGP, OrdinalGP, VariationalGP
__all__ = [
"GPClassificationModel",
"MonotonicRejectionGP",
"GPRegressionModel",
"PairwiseProbitModel",
"OrdinalGPModel",
"MonotonicProjectionGP",
"VariationalGP",
"BinaryClassificationGP",
"BetaRegressionGP",
"ExactGP",
"ContinuousRegressionGP",
"MultitaskGPRModel",
"IndependentMultitaskGPRModel",
"HadamardSemiPModel",
"SemiParametricGPModel",
"semi_p_posterior_transform",
"OrdinalGP",
"GPBetaRegressionModel",
]
Config.register_module(sys.modules[__name__])
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import ordinal_mean_covar_factory
from aepsych.likelihoods.ordinal import OrdinalLikelihood
from aepsych.models.base import AEPsychModel
from aepsych.models.ordinal_gp import OrdinalGPModel
from aepsych.models.utils import get_probability_space, select_inducing_points
from aepsych.utils import get_dim
from botorch.models import SingleTaskVariationalGP
from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood
from gpytorch.mlls import VariationalELBO
# TODO: Find a better way to do this on the Ax/Botorch side
class MyHackyVariationalELBO(VariationalELBO):
def __init__(self, likelihood, model, beta=1.0, combine_terms=True):
num_data = model.model.train_targets.shape[0]
super().__init__(likelihood, model.model, num_data, beta, combine_terms)
class VariationalGP(AEPsychModel, SingleTaskVariationalGP):
@classmethod
def get_mll_class(cls):
return MyHackyVariationalELBO
@classmethod
def construct_inputs(cls, training_data, **kwargs):
inputs = super().construct_inputs(training_data=training_data, **kwargs)
inducing_size = kwargs.get("inducing_size")
inducing_point_method = kwargs.get("inducing_point_method")
bounds = kwargs.get("bounds")
inducing_points = select_inducing_points(
inducing_size,
inputs["covar_module"],
inputs["train_X"],
bounds,
inducing_point_method,
)
inputs.update(
{
"inducing_points": inducing_points,
}
)
return inputs
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
classname = cls.__name__
options = super().get_config_options(config, classname)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
inducing_size = config.getint(classname, "inducing_size", fallback=100)
learn_inducing_points = config.getboolean(
classname, "learn_inducing_points", fallback=False
)
options.update(
{
"inducing_size": inducing_size,
"inducing_point_method": inducing_point_method,
"learn_inducing_points": learn_inducing_points,
}
)
return options
class BinaryClassificationGP(VariationalGP):
stimuli_per_trial = 1
outcome_type = "binary"
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at queries points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean, fvar = get_probability_space(
likelihood=self.likelihood, posterior=post
)
return fmean, fvar
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = BernoulliLikelihood()
return options
class BetaRegressionGP(VariationalGP):
outcome_type = "percentage"
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = BetaLikelihood()
return options
class OrdinalGP(VariationalGP):
"""
Convenience class for using a VariationalGP with an OrdinalLikelihood.
"""
outcome_type = "ordinal"
def predict_probability(self, x: Union[torch.Tensor, np.ndarray]):
fmean, fvar = super().predict(x)
return OrdinalGPModel.calculate_probs(self, fmean, fvar)
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None):
options = super().get_config_options(config)
if options["likelihood"] is None:
options["likelihood"] = OrdinalLikelihood(n_levels=5)
dim = get_dim(config)
if config.getobj(cls.__name__, "mean_covar_factory", fallback=None) is None:
mean, covar = ordinal_mean_covar_factory(config)
options["mean_covar_factory"] = (mean, covar)
ls_prior = gpytorch.priors.GammaPrior(concentration=1.5, rate=3.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
ls_constraint = gpytorch.constraints.Positive(
transform=None, initial_value=ls_prior_mode
)
# no outputscale due to shift identifiability in d.
covar_module = gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
)
options["covar_module"] = covar_module
assert options["inducing_size"] >= 1, "Inducing size must be non-zero."
return options
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.models.utils import select_inducing_points
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from gpytorch.likelihoods import BernoulliLikelihood, BetaLikelihood, Likelihood
from gpytorch.models import ApproximateGP
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
from scipy.special import owens_t
from scipy.stats import norm
from torch.distributions import Normal
logger = getLogger()
class GPClassificationModel(AEPsychMixin, ApproximateGP):
"""Probit-GP model with variational inference.
From a conventional ML perspective this is a GP Classification model,
though in the psychophysics context it can also be thought of as a
nonlinear generalization of the standard linear model for 1AFC or
yes/no trials.
For more on variational inference, see e.g.
https://docs.gpytorch.ai/en/v1.1.1/examples/04_Variational_and_Approximate_GPs/
"""
_batch_size = 1
_num_outputs = 1
stimuli_per_trial = 1
outcome_type = "binary"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
"""Initialize the GP Classification model
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
            likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
                Bernoulli likelihood.
inducing_size (int): Number of inducing points. Defaults to 100.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
inducing_point_method (string): The method to use to select the inducing points. Defaults to "auto".
If "sobol", a number of Sobol points equal to inducing_size will be selected.
If "pivoted_chol", selects points based on the pivoted Cholesky heuristic.
If "kmeans++", selects points by performing kmeans++ clustering on the training data.
If "auto", tries to determine the best method automatically.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
self.inducing_size = inducing_size
if likelihood is None:
likelihood = BernoulliLikelihood()
self.inducing_point_method = inducing_point_method
# initialize to sobol before we have data
inducing_points = select_inducing_points(
inducing_size=self.inducing_size, bounds=self.bounds, method="sobol"
)
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0), batch_shape=torch.Size([self._batch_size])
)
variational_strategy = VariationalStrategy(
self,
inducing_points,
variational_distribution,
learn_inducing_locations=False,
)
super().__init__(variational_strategy)
if mean_module is None or covar_module is None:
default_mean, default_covar = default_mean_covar_factory(dim=self.dim)
self.mean_module = mean_module or default_mean
self.covar_module = covar_module or default_covar
self.likelihood = likelihood
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
@classmethod
def from_config(cls, config: Config) -> GPClassificationModel:
"""Alternate constructor for GPClassification model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
GPClassificationModel: Configured class instance.
"""
classname = cls.__name__
inducing_size = config.getint(classname, "inducing_size", fallback=10)
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
inducing_point_method = config.get(
classname, "inducing_point_method", fallback="auto"
)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
return cls(
lb=lb,
ub=ub,
dim=dim,
inducing_size=inducing_size,
mean_module=mean,
covar_module=covar,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
likelihood=likelihood,
)
def _reset_hyperparameters(self):
# warmstart_hyperparams affects hyperparams but not the variational strat,
# so we keep the old variational strat (which is only refreshed
# if warmstart_induc=False).
vsd = self.variational_strategy.state_dict() # type: ignore
vsd_hack = {f"variational_strategy.{k}": v for k, v in vsd.items()}
state_dict = deepcopy(self._fresh_state_dict)
state_dict.update(vsd_hack)
self.load_state_dict(state_dict)
self.likelihood.load_state_dict(self._fresh_likelihood_dict)
def _reset_variational_strategy(self):
inducing_points = select_inducing_points(
inducing_size=self.inducing_size,
covar_module=self.covar_module,
X=self.train_inputs[0],
bounds=self.bounds,
method=self.inducing_point_method,
)
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0), batch_shape=torch.Size([self._batch_size])
)
self.variational_strategy = VariationalStrategy(
self,
inducing_points,
variational_distribution,
learn_inducing_locations=False,
)
def fit(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
warmstart_hyperparams: bool = False,
warmstart_induc: bool = False,
**kwargs,
) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.LongTensor): Responses.
warmstart_hyperparams (bool): Whether to reuse the previous hyperparameters (True) or fit from scratch
(False). Defaults to False.
warmstart_induc (bool): Whether to reuse the previous inducing points or fit from scratch (False).
Defaults to False.
"""
self.set_train_data(train_x, train_y)
# by default we reuse the model state and likelihood. If we
# want a fresh fit (no warm start), copy the state from class initialization.
if not warmstart_hyperparams:
self._reset_hyperparameters()
if not warmstart_induc:
self._reset_variational_strategy()
n = train_y.shape[0]
mll = gpytorch.mlls.VariationalELBO(self.likelihood, self, n)
self._fit_mll(mll, **kwargs)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
"""Sample from underlying model.
Args:
x (torch.Tensor): Points at which to sample.
num_samples (int, optional): Number of samples to return. Defaults to None.
kwargs are ignored
Returns:
torch.Tensor: Posterior samples [num_samples x dim]
"""
return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()
def predict(
self, x: Union[torch.Tensor, np.ndarray], probability_space: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
        Returns:
            Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at the queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
if probability_space:
if isinstance(self.likelihood, BernoulliLikelihood):
# Probability-space mean and variance for Bernoulli-probit models is
# available in closed form, Proposition 1 in Letham et al. 2022 (AISTATS).
a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(
owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar.numpy())),
dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()
return promote_0d(pmean), promote_0d(pvar)
else:
fsamps = post.sample(torch.Size([10000]))
if hasattr(self.likelihood, "objective"):
psamps = self.likelihood.objective(fsamps)
else:
psamps = norm.cdf(fsamps)
pmean, pvar = psamps.mean(0), psamps.var(0)
return promote_0d(pmean), promote_0d(pvar)
else:
return promote_0d(fmean), promote_0d(fvar)
def predict_probability(
self, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
return self.predict(x, probability_space=True)
def update(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs):
"""Perform a warm-start update of the model from previous fit."""
return self.fit(
train_x, train_y, warmstart_hyperparams=True, warmstart_induc=True, **kwargs
)
class GPBetaRegressionModel(GPClassificationModel):
outcome_type = "percentage"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
inducing_size: int = 100,
max_fit_time: Optional[float] = None,
inducing_point_method: str = "auto",
):
if likelihood is None:
likelihood = BetaLikelihood()
super().__init__(
lb=lb,
ub=ub,
dim=dim,
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
inducing_size=inducing_size,
max_fit_time=max_fit_time,
inducing_point_method=inducing_point_method,
)
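# A minimal end-to-end usage sketch on synthetic data (an illustration assumed
# here, not taken from AEPsych documentation): fit a 1D probit-GP classifier
# and query probability-space predictions.
if __name__ == "__main__":
    torch.manual_seed(0)
    train_x = torch.rand(30, 1)
    train_y = (torch.rand(30) < train_x.squeeze()).double()  # P(y=1) rises with x
    model = GPClassificationModel(lb=torch.zeros(1), ub=torch.ones(1))
    model.fit(train_x, train_y)
    pmean, pvar = model.predict_probability(torch.linspace(0, 1, 5).unsqueeze(-1))
    print(pmean, pvar)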
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from copy import deepcopy
from typing import Dict, Optional, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds, promote_0d
from aepsych.utils_logging import getLogger
from gpytorch.likelihoods import GaussianLikelihood, Likelihood
from gpytorch.models import ExactGP
logger = getLogger()
class GPRegressionModel(AEPsychMixin, ExactGP):
"""GP Regression model for continuous outcomes, using exact inference."""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[Likelihood] = None,
max_fit_time: Optional[float] = None,
):
"""Initialize the GP regression model
Args:
lb (Union[numpy.ndarray, torch.Tensor]): Lower bounds of the parameters.
ub (Union[numpy.ndarray, torch.Tensor]): Upper bounds of the parameters.
dim (int, optional): The number of dimensions in the parameter space. If None, it is inferred from the size
of lb and ub.
mean_module (gpytorch.means.Mean, optional): GP mean class. Defaults to a constant with a normal prior.
covar_module (gpytorch.kernels.Kernel, optional): GP covariance kernel class. Defaults to scaled RBF with a
gamma prior.
likelihood (gpytorch.likelihood.Likelihood, optional): The likelihood function to use. If None defaults to
Gaussian likelihood.
max_fit_time (float, optional): The maximum amount of time, in seconds, to spend fitting the model. If None,
there is no limit to the fitting time.
"""
if likelihood is None:
likelihood = GaussianLikelihood()
super().__init__(None, None, likelihood)
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.max_fit_time = max_fit_time
if mean_module is None or covar_module is None:
default_mean, default_covar = default_mean_covar_factory(dim=self.dim)
self.mean_module = mean_module or default_mean
self.covar_module = covar_module or default_covar
self._fresh_state_dict = deepcopy(self.state_dict())
self._fresh_likelihood_dict = deepcopy(self.likelihood.state_dict())
@classmethod
def construct_inputs(cls, config: Config) -> Dict:
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
mean_covar_factory = config.getobj(
classname, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
likelihood_cls = config.getobj(classname, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
max_fit_time = config.getfloat(classname, "max_fit_time", fallback=None)
return {
"lb": lb,
"ub": ub,
"dim": dim,
"mean_module": mean,
"covar_module": covar,
"likelihood": likelihood,
"max_fit_time": max_fit_time,
}
@classmethod
def from_config(cls, config: Config) -> GPRegressionModel:
"""Alternate constructor for GP regression model.
This is used when we recursively build a full sampling strategy
from a configuration. TODO: document how this works in some tutorial.
Args:
config (Config): A configuration containing keys/values matching this class
Returns:
GPRegressionModel: Configured class instance.
"""
args = cls.construct_inputs(config)
return cls(**args)
def fit(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs) -> None:
"""Fit underlying model.
Args:
train_x (torch.Tensor): Inputs.
train_y (torch.LongTensor): Responses.
"""
self.set_train_data(train_x, train_y)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(self.likelihood, self)
return self._fit_mll(mll, **kwargs)
def sample(
self, x: Union[torch.Tensor, np.ndarray], num_samples: int
) -> torch.Tensor:
"""Sample from underlying model.
Args:
x (torch.Tensor): Points at which to sample.
num_samples (int, optional): Number of samples to return. Defaults to None.
kwargs are ignored
Returns:
torch.Tensor: Posterior samples [num_samples x dim]
"""
return self.posterior(x).rsample(torch.Size([num_samples])).detach().squeeze()
def update(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs):
"""Perform a warm-start update of the model from previous fit."""
return self.fit(train_x, train_y, **kwargs)
def predict(
self, x: Union[torch.Tensor, np.ndarray], **kwargs
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (torch.Tensor): Points at which to predict from the model.
probability_space (bool, optional): Return outputs in units of
response probability instead of latent function value. Defaults to False.
Returns:
Tuple[np.ndarray, np.ndarray]: Posterior mean and variance at queries points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return promote_0d(fmean), promote_0d(fvar)
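# A minimal usage sketch on synthetic data (an illustration, not from AEPsych
# documentation): exact-GP regression on a noisy 1D function.
if __name__ == "__main__":
    torch.manual_seed(0)
    train_x = torch.rand(20, 1)
    train_y = torch.sin(6 * train_x).squeeze() + 0.1 * torch.randn(20)
    model = GPRegressionModel(lb=torch.zeros(1), ub=torch.ones(1))
    model.fit(train_x, train_y)
    fmean, fvar = model.predict(torch.linspace(0, 1, 5).unsqueeze(-1))
    print(fmean, fvar)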
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional, Union
import gpytorch
import torch
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from botorch.models.gpytorch import GPyTorchModel
from gpytorch.distributions import MultivariateNormal
from gpytorch.kernels import Kernel
from gpytorch.kernels.scale_kernel import ScaleKernel
from gpytorch.means import Mean
from gpytorch.priors.torch_priors import GammaPrior
from gpytorch.variational import CholeskyVariationalDistribution, VariationalStrategy
class MixedDerivativeVariationalGP(gpytorch.models.ApproximateGP, GPyTorchModel):
"""A variational GP with mixed derivative observations.
For more on GPs with derivative observations, see e.g. Riihimaki & Vehtari 2010.
References:
Riihimäki, J., & Vehtari, A. (2010). Gaussian processes with
monotonicity information. Journal of Machine Learning Research, 9, 645–652.
"""
def __init__(
self,
train_x: torch.Tensor,
train_y: torch.Tensor,
inducing_points: torch.Tensor,
scales: Union[torch.Tensor, float] = 1.0,
mean_module: Optional[Mean] = None,
covar_module: Optional[Kernel] = None,
fixed_prior_mean: Optional[float] = None,
) -> None:
"""Initialize MixedDerivativeVariationalGP
Args:
train_x (torch.Tensor): Training x points. The last column of x is the derivative
                indicator: 0 if it is an observation of f(x), and i if it
is an observation of df/dx_i.
train_y (torch.Tensor): Training y points
inducing_points (torch.Tensor): Inducing points to use
scales (Union[torch.Tensor, float], optional): Typical scale of each dimension
of input space (this is used to set the lengthscale prior).
Defaults to 1.0.
mean_module (Mean, optional): A mean class that supports derivative
indexes as the final dim. Defaults to a constant mean.
covar_module (Kernel, optional): A covariance kernel class that
supports derivative indexes as the final dim. Defaults to RBF kernel.
fixed_prior_mean (float, optional): A prior mean value to use with the
constant mean. Often setting this to the target threshold speeds
up experiments. Defaults to None, in which case the mean will be inferred.
"""
variational_distribution = CholeskyVariationalDistribution(
inducing_points.size(0)
)
variational_distribution.to(train_x)
variational_strategy = VariationalStrategy(
model=self,
inducing_points=inducing_points,
variational_distribution=variational_distribution,
learn_inducing_locations=False,
)
super(MixedDerivativeVariationalGP, self).__init__(variational_strategy)
        # Set the mean module, using a default constant mean if not specified
if mean_module is None:
self.mean_module = ConstantMeanPartialObsGrad()
else:
self.mean_module = mean_module
if fixed_prior_mean is not None:
self.mean_module.constant.requires_grad_(False)
self.mean_module.constant.copy_(
torch.tensor(fixed_prior_mean, dtype=train_x.dtype)
)
if covar_module is None:
self.base_kernel = RBFKernelPartialObsGrad(
ard_num_dims=train_x.shape[-1] - 1,
lengthscale_prior=GammaPrior(3.0, 6.0 / scales),
)
self.covar_module = ScaleKernel(
self.base_kernel, outputscale_prior=GammaPrior(2.0, 0.15)
)
else:
self.covar_module = covar_module
self._num_outputs = 1
self.train_inputs = (train_x,)
self.train_targets = train_y
self(train_x) # Necessary for CholeskyVariationalDistribution
def forward(self, x: torch.Tensor) -> MultivariateNormal:
"""Evaluate the model
Args:
x (torch.Tensor): Points at which to evaluate.
Returns:
            MultivariateNormal: Object containing mean and covariance
of GP at these points.
"""
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultivariateNormal(mean_x, covar_x)
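# An illustrative construction sketch (shapes assumed for exposition): build
# training inputs with the derivative-indicator column described in the
# docstring, then construct the model.
if __name__ == "__main__":
    x = torch.rand(10, 2)
    deriv_idx = torch.zeros(10, 1)  # 0 = observation of f(x)
    train_x = torch.cat((x, deriv_idx), dim=1)
    train_y = torch.randn(10)
    inducing = torch.cat((torch.rand(5, 2), torch.zeros(5, 1)), dim=1)
    model = MixedDerivativeVariationalGP(train_x, train_y, inducing)
    print(model(train_x).mean.shape)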
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Optional
import gpytorch
import torch
from aepsych.models import GPRegressionModel
class MultitaskGPRModel(GPRegressionModel):
"""
Multitask (multi-output) GP regression, using a kronecker-separable model
    where [a] each output is observed at each input, and [b] the kernel between
    two outputs at two points is given by k_x(x, x') * k_t[i, j], where k_x(x, x')
    is the usual GP kernel over inputs and k_t[i, j] indexes into a freeform task
    covariance of potentially low rank.
This essentially implements / wraps the GPyTorch multitask GPR tutorial
in https://docs.gpytorch.ai/en/stable/examples/03_Multitask_Exact_GPs/Multitask_GP_Regression.html
with AEPsych API and convenience fitting / prediction methods.
"""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
num_outputs: int = 2,
rank: int = 1,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[gpytorch.likelihoods.Likelihood] = None,
*args,
**kwargs,
):
"""Initialize multitask GPR model.
Args:
num_outputs (int, optional): Number of tasks (outputs). Defaults to 2.
rank (int, optional): Rank of cross-task covariance. Lower rank is a simpler model.
Should be less than or equal to num_outputs. Defaults to 1.
mean_module (Optional[gpytorch.means.Mean], optional): GP mean. Defaults to a constant mean.
covar_module (Optional[gpytorch.kernels.Kernel], optional): GP kernel module.
Defaults to scaled RBF kernel.
likelihood (Optional[gpytorch.likelihoods.Likelihood], optional): Likelihood
(should be a multitask-compatible likelihood). Defaults to multitask Gaussian likelihood.
"""
self._num_outputs = num_outputs
self.rank = rank
likelihood = likelihood or gpytorch.likelihoods.MultitaskGaussianLikelihood(
num_tasks=self._num_outputs
)
super().__init__(
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
*args,
**kwargs,
) # type: ignore # mypy issue 4335
self.mean_module = gpytorch.means.MultitaskMean(
self.mean_module, num_tasks=num_outputs
)
self.covar_module = gpytorch.kernels.MultitaskKernel(
self.covar_module, num_tasks=num_outputs, rank=rank
)
def forward(self, x):
transformed_x = self.normalize_inputs(x)
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
return gpytorch.distributions.MultitaskMultivariateNormal(mean_x, covar_x)
@classmethod
def construct_inputs(cls, config):
classname = cls.__name__
args = super().construct_inputs(config)
args["num_outputs"] = config.getint(classname, "num_outputs", 2)
args["rank"] = config.getint(classname, "rank", 1)
return args
class IndependentMultitaskGPRModel(GPRegressionModel):
"""Independent multitask GP regression. This is a convenience wrapper for
fitting a batch of independent GPRegression models. It wraps the GPyTorch tutorial here
https://docs.gpytorch.ai/en/stable/examples/03_Multitask_Exact_GPs/Batch_Independent_Multioutput_GP.html
with AEPsych API and convenience fitting / prediction methods.
"""
_num_outputs = 1
_batch_size = 1
stimuli_per_trial = 1
outcome_type = "continuous"
def __init__(
self,
num_outputs: int = 2,
mean_module: Optional[gpytorch.means.Mean] = None,
covar_module: Optional[gpytorch.kernels.Kernel] = None,
likelihood: Optional[gpytorch.likelihoods.Likelihood] = None,
*args,
**kwargs,
):
"""Initialize independent multitask GPR model.
Args:
num_outputs (int, optional): Number of tasks (outputs). Defaults to 2.
mean_module (Optional[gpytorch.means.Mean], optional): GP mean. Defaults to a constant mean.
covar_module (Optional[gpytorch.kernels.Kernel], optional): GP kernel module.
Defaults to scaled RBF kernel.
likelihood (Optional[gpytorch.likelihoods.Likelihood], optional): Likelihood
(should be a multitask-compatible likelihood). Defaults to multitask Gaussian likelihood.
"""
self._num_outputs = num_outputs
self._batch_size = num_outputs
self._batch_shape = torch.Size([num_outputs])
mean_module = mean_module or gpytorch.means.ConstantMean(
batch_shape=self._batch_shape
)
covar_module = covar_module or gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(batch_shape=self._batch_shape),
batch_shape=self._batch_shape,
)
likelihood = likelihood or gpytorch.likelihoods.MultitaskGaussianLikelihood(
num_tasks=self._batch_shape[0]
)
super().__init__(
mean_module=mean_module,
covar_module=covar_module,
likelihood=likelihood,
*args,
**kwargs,
) # type: ignore # mypy issue 4335
def forward(self, x):
base_mvn = super().forward(x) # do transforms
return gpytorch.distributions.MultitaskMultivariateNormal.from_batch_mvn(
base_mvn
)
@classmethod
def get_config_args(cls, config):
classname = cls.__name__
args = super().get_config_args(config)
args["num_outputs"] = config.getint(classname, "num_outputs", 2)
return args
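# A minimal usage sketch on synthetic data (an illustration, not from AEPsych
# documentation): fit a two-output Kronecker multitask GP on a shared 1D input.
if __name__ == "__main__":
    torch.manual_seed(0)
    train_x = torch.rand(20, 1)
    train_y = torch.cat((torch.sin(6 * train_x), torch.cos(6 * train_x)), dim=1)
    model = MultitaskGPRModel(num_outputs=2, rank=1, lb=torch.zeros(1), ub=torch.ones(1))
    model.fit(train_x, train_y)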
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import List, Mapping, Optional, Tuple, Union
import numpy as np
import torch
from botorch.acquisition import PosteriorMean
from botorch.models.model import Model
from botorch.models.utils.inducing_point_allocators import GreedyVarianceReduction
from botorch.optim import optimize_acqf
from botorch.utils.sampling import draw_sobol_samples
from gpytorch.kernels import Kernel
from gpytorch.likelihoods import BernoulliLikelihood
from scipy.cluster.vq import kmeans2
from scipy.special import owens_t
from scipy.stats import norm
from torch.distributions import Normal
def compute_p_quantile(
f_mean: torch.Tensor, f_std: torch.Tensor, alpha: Union[torch.Tensor, float]
) -> torch.Tensor:
"""Compute quantile of p in probit model
For f ~ N(mu_f, sigma_f^2) and p = Phi(f), computes the alpha quantile of p
using the formula
x = Phi(mu_f + Phi^-1(alpha) * sigma_f),
which solves for x such that P(p <= x) = alpha.
A 95% CI for p can be computed as
p_l = compute_p_quantile(f_mean, f_std, 0.025)
p_u = compute_p_quantile(f_mean, f_std, 0.975)
"""
norm = torch.distributions.Normal(0, 1)
alpha = torch.tensor(alpha, dtype=f_mean.dtype)
return norm.cdf(f_mean + norm.icdf(alpha) * f_std)
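# A small illustrative helper (not part of the module's API): computes the 95%
# credible interval for p exactly as described in the docstring above.
def _example_p_quantile_ci() -> None:
    f_mean = torch.tensor([0.0, 0.5, 1.0])
    f_std = torch.tensor([0.1, 0.2, 0.3])
    p_l = compute_p_quantile(f_mean, f_std, 0.025)
    p_u = compute_p_quantile(f_mean, f_std, 0.975)
    print(f"95% CI for p: [{p_l}, {p_u}]")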
def select_inducing_points(
inducing_size: int,
    covar_module: Optional[Kernel] = None,
X: Optional[torch.Tensor] = None,
bounds: Optional[Union[torch.Tensor, np.ndarray]] = None,
method: str = "auto",
):
with torch.no_grad():
assert method in (
"pivoted_chol",
"kmeans++",
"auto",
"sobol",
), f"Inducing point method should be one of pivoted_chol, kmeans++, sobol, or auto; got {method}"
if method == "sobol":
assert bounds is not None, "Must pass bounds for sobol inducing points!"
inducing_points = draw_sobol_samples(
bounds=bounds, n=inducing_size, q=1
).squeeze()
if len(inducing_points.shape) == 1:
inducing_points = inducing_points.reshape(-1, 1)
return inducing_points
assert X is not None, "Must pass X for non-sobol inducing point selection!"
# remove dupes from X, which is both wasteful for inducing points
# and would break kmeans++
unique_X = torch.unique(X, dim=0)
if method == "auto":
if unique_X.shape[0] <= inducing_size:
return unique_X
else:
method = "kmeans++"
if method == "pivoted_chol":
inducing_point_allocator = GreedyVarianceReduction()
inducing_points = inducing_point_allocator.allocate_inducing_points(
inputs=X,
covar_module=covar_module,
num_inducing=inducing_size,
input_batch_shape=torch.Size([]),
)
elif method == "kmeans++":
# initialize using kmeans
inducing_points = torch.tensor(
kmeans2(unique_X.numpy(), inducing_size, minit="++")[0],
dtype=X.dtype,
)
return inducing_points
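# An illustrative helper (not part of the module's API): Sobol selection needs
# only bounds, while kmeans++ selection needs training inputs X.
def _example_select_inducing_points() -> None:
    bounds = torch.tensor([[0.0, 0.0], [1.0, 1.0]])  # rows are lower, upper
    sobol_pts = select_inducing_points(10, bounds=bounds, method="sobol")
    X = torch.rand(50, 2)
    kmeans_pts = select_inducing_points(10, X=X, method="kmeans++")
    print(sobol_pts.shape, kmeans_pts.shape)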
def get_probability_space(likelihood, posterior):
fmean = posterior.mean.squeeze()
fvar = posterior.variance.squeeze()
if isinstance(likelihood, BernoulliLikelihood):
# Probability-space mean and variance for Bernoulli-probit models is
# available in closed form, Proposition 1 in Letham et al. 2022 (AISTATS).
a_star = fmean / torch.sqrt(1 + fvar)
pmean = Normal(0, 1).cdf(a_star)
t_term = torch.tensor(
owens_t(a_star.numpy(), 1 / np.sqrt(1 + 2 * fvar.numpy())),
dtype=a_star.dtype,
)
pvar = pmean - 2 * t_term - pmean.square()
else:
fsamps = posterior.sample(torch.Size([10000]))
if hasattr(likelihood, "objective"):
psamps = likelihood.objective(fsamps)
else:
psamps = norm.cdf(fsamps)
pmean, pvar = psamps.mean(0), psamps.var(0)
return pmean, pvar
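# An illustrative helper (not part of the module's API): wrap a toy latent
# posterior and map it to probability space under a Bernoulli-probit model.
def _example_probability_space() -> None:
    from botorch.posteriors.gpytorch import GPyTorchPosterior
    from gpytorch.distributions import MultivariateNormal
    mvn = MultivariateNormal(torch.zeros(3), torch.eye(3))
    pmean, pvar = get_probability_space(BernoulliLikelihood(), GPyTorchPosterior(mvn))
    print(pmean, pvar)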
def get_extremum(
model: Model,
extremum_type: str,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]],
n_samples: int,
) -> Tuple[float, np.ndarray]:
"""Return the extremum (min or max) of the modeled function
Args:
extremum_type (str): type of extremum (currently 'min' or 'max'
n_samples int: number of coarse grid points to sample for optimization estimate.
Returns:
Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
"""
locked_dims = locked_dims or {}
acqf = PosteriorMean(model=model, maximize=(extremum_type == "max"))
best_point, best_val = optimize_acqf(
acq_function=acqf,
bounds=bounds,
q=1,
num_restarts=10,
raw_samples=n_samples,
fixed_features=locked_dims,
)
# PosteriorMean flips the sign on minimize, we flip it back
if extremum_type == "min":
best_val = -best_val
return best_val, best_point.squeeze(0)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import dataclasses
import time
from typing import Dict, List, Optional
from aepsych.utils_logging import getLogger
from ax.core.search_space import SearchSpaceDigest
from ax.core.types import TCandidateMetadata
from ax.models.torch.botorch_modular.surrogate import Surrogate
from botorch.fit import fit_gpytorch_mll
from botorch.utils.datasets import SupervisedDataset
from torch import Tensor
logger = getLogger()
class AEPsychSurrogate(Surrogate):
def __init__(self, max_fit_time: Optional[float] = None, **kwargs) -> None:
self.max_fit_time = max_fit_time
super().__init__(**kwargs)
def fit(
self,
datasets: List[SupervisedDataset],
metric_names: List[str],
search_space_digest: SearchSpaceDigest,
candidate_metadata: Optional[List[List[TCandidateMetadata]]] = None,
state_dict: Optional[Dict[str, Tensor]] = None,
refit: bool = True,
**kwargs,
) -> None:
self.construct(
datasets=datasets,
metric_names=metric_names,
**dataclasses.asdict(search_space_digest),
)
self._outcomes = metric_names
if state_dict:
self.model.load_state_dict(state_dict)
if state_dict is None or refit:
mll = self.mll_class(self.model.likelihood, self.model, **self.mll_options)
optimizer_kwargs = {}
if self.max_fit_time is not None:
            # figure out how long evaluating a single sample takes
starttime = time.time()
_ = mll(self.model(datasets[0].X()), datasets[0].Y().squeeze())
single_eval_time = time.time() - starttime
n_eval = int(self.max_fit_time / single_eval_time)
logger.info(f"fit maxfun is {n_eval}")
optimizer_kwargs["options"] = {"maxfun": n_eval}
logger.info("Starting fit...")
starttime = time.time()
fit_gpytorch_mll(
mll, optimizer_kwargs=optimizer_kwargs
) # TODO: Support flexible optimizers
logger.info(f"Fit done, time={time.time()-starttime}")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from aepsych.models.base import AEPsychModel
from botorch.models import SingleTaskGP
from gpytorch.mlls import ExactMarginalLogLikelihood
class ExactGP(AEPsychModel, SingleTaskGP):
@classmethod
def get_mll_class(cls):
return ExactMarginalLogLikelihood
class ContinuousRegressionGP(ExactGP):
"""GP Regression model for single continuous outcomes, using exact inference."""
stimuli_per_trial = 1
outcome_type = "continuous"
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import time
from typing import Any, Dict, List, Mapping, Optional, Protocol, Tuple, Union
import gpytorch
import numpy as np
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.factory.factory import default_mean_covar_factory
from aepsych.models.utils import get_extremum
from aepsych.utils import dim_grid, get_jnd_multid, make_scaled_sobol, promote_0d
from aepsych.utils_logging import getLogger
from botorch.fit import fit_gpytorch_mll, fit_gpytorch_mll_scipy
from botorch.models.gpytorch import GPyTorchModel
from botorch.posteriors import GPyTorchPosterior
from gpytorch.likelihoods import Likelihood
from gpytorch.mlls import MarginalLogLikelihood
from scipy.optimize import minimize
from scipy.stats import norm
logger = getLogger()
torch.set_default_dtype(torch.double) # TODO: find a better way to prevent type errors
class ModelProtocol(Protocol):
@property
def _num_outputs(self) -> int:
pass
@property
def outcome_type(self) -> str:
pass
@property
def extremum_solver(self) -> str:
pass
@property
def train_inputs(self) -> torch.Tensor:
pass
@property
def lb(self) -> torch.Tensor:
pass
@property
def ub(self) -> torch.Tensor:
pass
@property
def bounds(self) -> torch.Tensor:
pass
@property
def dim(self) -> int:
pass
def posterior(self, x: torch.Tensor) -> GPyTorchPosterior:
pass
def predict(self, x: torch.Tensor, **kwargs) -> torch.Tensor:
pass
@property
def stimuli_per_trial(self) -> int:
pass
@property
def likelihood(self) -> Likelihood:
pass
def sample(self, x: torch.Tensor, num_samples: int) -> torch.Tensor:
pass
def _get_extremum(
self,
extremum_type: str,
locked_dims: Optional[Mapping[int, List[float]]],
n_samples=1000,
) -> Tuple[float, np.ndarray]:
pass
def dim_grid(self, gridsize: int = 30) -> torch.Tensor:
pass
def fit(self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs: Any) -> None:
pass
def update(
self, train_x: torch.Tensor, train_y: torch.Tensor, **kwargs: Any
) -> None:
pass
def p_below_threshold(self, x, f_thresh) -> np.ndarray:
pass
class AEPsychMixin(GPyTorchModel):
"""Mixin class that provides AEPsych-specific utility methods."""
extremum_solver = "Nelder-Mead"
outcome_types: List[str] = []
@property
def bounds(self):
return torch.stack((self.lb, self.ub))
def get_max(
self: ModelProtocol,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the maximum of the modeled function, subject to constraints
Returns:
Tuple[float, np.ndarray]: Tuple containing the max and its location (argmax).
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
n_samples int: number of coarse grid points to sample for optimization estimate.
"""
locked_dims = locked_dims or {}
return get_extremum(self, "max", self.bounds, locked_dims, n_samples)
def get_min(
self: ModelProtocol,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the minimum of the modeled function, subject to constraints
Returns:
Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
n_samples int: number of coarse grid points to sample for optimization estimate.
"""
locked_dims = locked_dims or {}
return get_extremum(self, "min", self.bounds, locked_dims, n_samples)
def inv_query(
self: ModelProtocol,
y: float,
locked_dims: Optional[Mapping[int, List[float]]] = None,
probability_space: bool = False,
n_samples: int = 1000,
) -> Tuple[float, torch.Tensor]:
"""Query the model inverse.
Return nearest x such that f(x) = queried y, and also return the
value of f at that point.
Args:
y (float): Points at which to find the inverse.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
probability_space (bool, optional): Is y (and therefore the
returned nearest_y) in probability space instead of latent
function space? Defaults to False.
Returns:
            Tuple[float, torch.Tensor]: Tuple containing the value of f
nearest to queried y and the x position of this value.
"""
if probability_space:
assert (
self.outcome_type == "binary"
), f"Cannot get probability space for outcome_type '{self.outcome_type}'"
locked_dims = locked_dims or {}
def model_distance(x, pt, probability_space):
return np.abs(
self.predict(torch.tensor([x]), probability_space=probability_space)[0]
.detach()
.numpy()
- pt
)
        # Look for the point with value closest to y, subject to the dict of locked dims
query_lb = self.lb.clone()
query_ub = self.ub.clone()
for locked_dim in locked_dims.keys():
dim_values = locked_dims[locked_dim]
if len(dim_values) == 1:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[0]
else:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[1]
d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)
bounds = zip(query_lb.numpy(), query_ub.numpy())
fmean, _ = self.predict(d, probability_space=probability_space)
f = torch.abs(fmean - y)
estimate = d[torch.where(f == torch.min(f))[0][0]].numpy()
a = minimize(
model_distance,
estimate,
args=(y, probability_space),
method=self.extremum_solver,
bounds=bounds,
)
val = self.predict(torch.tensor([a.x]), probability_space=probability_space)[
0
].item()
return val, torch.Tensor(a.x)
def get_jnd(
self: ModelProtocol,
grid: Optional[Union[np.ndarray, torch.Tensor]] = None,
cred_level: Optional[float] = None,
intensity_dim: int = -1,
confsamps: int = 500,
method: str = "step",
) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""Calculate the JND.
Note that JND can have multiple plausible definitions
outside of the linear case, so we provide options for how to compute it.
For method="step", we report how far one needs to go over in stimulus
space to move 1 unit up in latent space (this is a lot of people's
conventional understanding of the JND).
For method="taylor", we report the local derivative, which also maps to a
1st-order Taylor expansion of the latent function. This is a formal
generalization of JND as defined in Weber's law.
Both definitions are equivalent for linear psychometric functions.
Args:
grid (Optional[np.ndarray], optional): Mesh grid over which to find the JND.
Defaults to a square grid of size as determined by aepsych.utils.dim_grid
cred_level (float, optional): Credible level for computing an interval.
Defaults to None, computing no interval.
intensity_dim (int, optional): Dimension over which to compute the JND.
Defaults to -1.
confsamps (int, optional): Number of posterior samples to use for
computing the credible interval. Defaults to 500.
method (str, optional): "taylor" or "step" method (see docstring).
Defaults to "step".
Raises:
RuntimeError: for passing an unknown method.
Returns:
Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]: either the
mean JND, or a median, lower, upper tuple of the JND posterior.
"""
if grid is None:
grid = self.dim_grid()
else:
grid = torch.tensor(grid)
        # this is super awkward: back out the intensity-dim grid, assuming a square grid
gridsize = int(grid.shape[0] ** (1 / grid.shape[1]))
coords = torch.linspace(
self.lb[intensity_dim].item(), self.ub[intensity_dim].item(), gridsize
)
if cred_level is None:
fmean, _ = self.predict(grid)
fmean = fmean.reshape(*[gridsize for i in range(self.dim)])
if method == "taylor":
return torch.tensor(1 / np.gradient(fmean, coords, axis=intensity_dim))
elif method == "step":
return torch.clip(
torch.tensor(
get_jnd_multid(
fmean.detach().numpy(),
coords.detach().numpy(),
mono_dim=intensity_dim,
)
),
0,
np.inf,
)
alpha = 1 - cred_level # type: ignore
qlower = alpha / 2
qupper = 1 - alpha / 2
fsamps = self.sample(grid, confsamps)
if method == "taylor":
jnds = torch.tensor(
1
/ np.gradient(
fsamps.reshape(confsamps, *[gridsize for i in range(self.dim)]),
coords,
axis=intensity_dim,
)
)
elif method == "step":
samps = [s.reshape((gridsize,) * self.dim) for s in fsamps]
jnds = torch.stack(
[get_jnd_multid(s, coords, mono_dim=intensity_dim) for s in samps]
)
else:
raise RuntimeError(f"Unknown method {method}!")
upper = torch.clip(torch.quantile(jnds, qupper, axis=0), 0, np.inf) # type: ignore
lower = torch.clip(torch.quantile(jnds, qlower, axis=0), 0, np.inf) # type: ignore
median = torch.clip(torch.quantile(jnds, 0.5, axis=0), 0, np.inf) # type: ignore
return median, lower, upper
def dim_grid(
self: ModelProtocol,
gridsize: int = 30,
slice_dims: Optional[Mapping[int, float]] = None,
) -> torch.Tensor:
return dim_grid(self.lb, self.ub, self.dim, gridsize, slice_dims)
def set_train_data(self, inputs=None, targets=None, strict=False):
"""
:param torch.Tensor inputs: The new training inputs.
:param torch.Tensor targets: The new training targets.
:param bool strict: (default False, ignored). Here for compatibility with
input transformers. TODO: actually use this arg or change input transforms
to not require it.
"""
if inputs is not None:
self.train_inputs = (inputs,)
if targets is not None:
self.train_targets = targets
def normalize_inputs(self, x):
scale = self.ub - self.lb
return (x - self.lb) / scale
def forward(self, x: torch.Tensor) -> gpytorch.distributions.MultivariateNormal:
"""Evaluate GP
Args:
x (torch.Tensor): Tensor of points at which GP should be evaluated.
Returns:
gpytorch.distributions.MultivariateNormal: Distribution object
holding mean and covariance at x.
"""
transformed_x = self.normalize_inputs(x)
mean_x = self.mean_module(transformed_x)
covar_x = self.covar_module(transformed_x)
pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
return pred
def _fit_mll(
self,
mll: MarginalLogLikelihood,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
optimizer=fit_gpytorch_mll_scipy,
**kwargs,
) -> MarginalLogLikelihood:
self.train()
train_x, train_y = mll.model.train_inputs[0], mll.model.train_targets
optimizer_kwargs = {} if optimizer_kwargs is None else optimizer_kwargs.copy()
max_fit_time = kwargs.pop("max_fit_time", self.max_fit_time)
if max_fit_time is not None:
# figure out how long evaluating a single sample takes
starttime = time.time()
_ = mll(self(train_x), train_y)
single_eval_time = time.time() - starttime
n_eval = int(max_fit_time / single_eval_time)
optimizer_kwargs["options"] = {"maxfun": n_eval}
logger.info(f"fit maxfun is {n_eval}")
starttime = time.time()
res = fit_gpytorch_mll(
mll, optimizer=optimizer, optimizer_kwargs=optimizer_kwargs, **kwargs
)
return res
def p_below_threshold(self, x, f_thresh) -> np.ndarray:
f, var = self.predict(x)
return norm.cdf((f_thresh - f.detach().numpy()) / var.sqrt().detach().numpy())
class AEPsychModel(ConfigurableMixin, abc.ABC):
extremum_solver = "Nelder-Mead"
outcome_type: Optional[str] = None
def predict(
self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray]
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Query the model for posterior mean and variance.
Args:
x (Union[torch.Tensor, np.ndarray]): Points at which to predict from the model.
Returns:
Tuple[torch.Tensor, torch.Tensor]: Posterior mean and variance at queried points.
"""
with torch.no_grad():
post = self.posterior(x)
fmean = post.mean.squeeze()
fvar = post.variance.squeeze()
return promote_0d(fmean), promote_0d(fvar)
def predict_probability(self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray]):
raise NotImplementedError
def sample(
self: GPyTorchModel, x: Union[torch.Tensor, np.ndarray], n: int
) -> torch.Tensor:
"""Sample the model posterior at the given points.
Args:
x (Union[torch.Tensor, np.ndarray]): Points at which to sample from the model.
n (int): Number of samples to take at each point.
Returns:
torch.Tensor: Posterior samples at queried points. Shape is n x len(x) x number of outcomes.
"""
return self.posterior(x).sample(torch.Size([n]))
@classmethod
def get_config_options(cls, config: Config, name: Optional[str] = None) -> Dict:
if name is None:
name = cls.__name__
mean_covar_factory = config.getobj(
name, "mean_covar_factory", fallback=default_mean_covar_factory
)
mean, covar = mean_covar_factory(config)
likelihood_cls = config.getobj(name, "likelihood", fallback=None)
if likelihood_cls is not None:
if hasattr(likelihood_cls, "from_config"):
likelihood = likelihood_cls.from_config(config)
else:
likelihood = likelihood_cls()
else:
likelihood = None # fall back to __init__ default
max_fit_time = config.getfloat(name, "max_fit_time", fallback=None)
options = {
"likelihood": likelihood,
"covar_module": covar,
"mean_module": mean,
"max_fit_time": max_fit_time,
}
return options
@classmethod
def construct_inputs(cls, training_data, **kwargs):
train_X = training_data.X()
train_Y = training_data.Y()
likelihood = kwargs.get("likelihood")
covar_module = kwargs.get("covar_module")
mean_module = kwargs.get("mean_module")
inputs = {
"train_X": train_X,
"train_Y": train_Y,
"likelihood": likelihood,
"covar_module": covar_module,
"mean_module": mean_module,
}
return inputs
def get_max(
self,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the maximum of the modeled function, subject to constraints
Args:
bounds (torch.Tensor): The lower and upper bounds in the parameter space to search for the maximum,
formatted as a 2xd tensor, where d is the number of parameters.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
n_samples int: number of coarse grid points to sample for optimization estimate.
Returns:
Tuple[float, np.ndarray]: Tuple containing the max and its location (argmax).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "max", bounds, locked_dims, n_samples)
def get_min(
self,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
n_samples: int = 1000,
) -> Tuple[float, np.ndarray]:
"""Return the minimum of the modeled function, subject to constraints
Args:
bounds (torch.Tensor): The lower and upper bounds in the parameter space to search for the minimum,
formatted as a 2xd tensor, where d is the number of parameters.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
Returns:
Tuple[float, np.ndarray]: Tuple containing the min and its location (argmin).
"""
locked_dims = locked_dims or {}
return get_extremum(self, "min", bounds, locked_dims, n_samples)
def inv_query(
self,
y: float,
bounds: torch.Tensor,
locked_dims: Optional[Mapping[int, List[float]]] = None,
probability_space: bool = False,
n_samples: int = 1000,
) -> Tuple[float, torch.Tensor]:
"""Query the model inverse.
Return nearest x such that f(x) = queried y, and also return the
value of f at that point.
Args:
y (float): Value at which to find the inverse.
locked_dims (Mapping[int, List[float]]): Dimensions to fix, so that the
inverse is along a slice of the full surface.
probability_space (bool): Is y (and therefore the
returned nearest_y) in probability space instead of latent
function space? Defaults to False.
Returns:
Tuple[float, torch.Tensor]: Tuple containing the value of f
nearest to the queried y and the x position of this value.
"""
if probability_space:
assert (
self.outcome_type == "binary" or self.outcome_type is None
), f"Cannot get probability space for outcome_type '{self.outcome_type}'"
pred_function = self.predict_probability
else:
pred_function = self.predict
locked_dims = locked_dims or {}
def model_distance(x, pt, probability_space):
return np.abs(pred_function(torch.tensor([x]))[0].detach().numpy() - pt)
# Look for point with value closest to y, subject to the dict of locked dims
query_lb = bounds[0]
query_ub = bounds[-1]
for locked_dim in locked_dims.keys():
dim_values = locked_dims[locked_dim]
if len(dim_values) == 1:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[0]
else:
query_lb[locked_dim] = dim_values[0]
query_ub[locked_dim] = dim_values[1]
d = make_scaled_sobol(query_lb, query_ub, n_samples, seed=0)
opt_bounds = zip(query_lb.numpy(), query_ub.numpy())
fmean, _ = pred_function(d)
f = torch.abs(fmean - y)
estimate = d[torch.where(f == torch.min(f))[0][0]].numpy()
a = minimize(
model_distance,
estimate,
args=(y, probability_space),
method=self.extremum_solver,
bounds=opt_bounds,
)
val = pred_function(torch.tensor([a.x]))[0].item()
return val, torch.Tensor(a.x)
@abc.abstractmethod
def get_mll_class(self):
raise NotImplementedError
def fit(self):
mll_class = self.get_mll_class()
mll = mll_class(self.likelihood, self)
fit_gpytorch_mll(mll)
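# Hedged usage sketch for the query API above (`ConcreteModel` stands in for
# any non-abstract AEPsychModel subclass and is hypothetical):
#   model = ConcreteModel(...)
#   model.fit()
#   fmean, fvar = model.predict(x)        # posterior mean/variance at x
#   samps = model.sample(x, n=100)        # 100 posterior draws at x
#   fmax, argmax = model.get_max(bounds)  # constrained max of the surface
#   val, loc = model.inv_query(0.75, bounds, probability_space=True)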
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
from aepsych.acquisition.objective import AEPsychObjective, FloorProbitObjective
from aepsych.config import Config
from gpytorch.likelihoods import _OneDimensionalLikelihood
class LinearBernoulliLikelihood(_OneDimensionalLikelihood):
"""
A likelihood of the form Bernoulli(sigma(k(x-c))), where k and c are
GPs and sigma is a flexible link function.
"""
def __init__(self, objective: Optional[AEPsychObjective] = None):
"""Initializes the linear bernoulli likelihood.
Args:
objective (Callable, optional): Link function to use (sigma in the notation above).
Defaults to probit with no floor.
"""
super().__init__()
self.objective = objective or FloorProbitObjective(floor=0.0)
def f(self, function_samples: torch.Tensor, Xi: torch.Tensor) -> torch.Tensor:
"""Return the latent function value, k(x-c).
Args:
function_samples (torch.Tensor): Samples from a batched GP
Xi (torch.Tensor): Intensity values.
Returns:
torch.Tensor: latent function value.
"""
# function_samples is of shape nsamp x (b) x 2 x n
# If (b) is present,
if function_samples.ndim > 3:
assert function_samples.ndim == 4
assert function_samples.shape[2] == 2
# In this case, Xi will be of size b x n
# Offset and slope should be num_samps x b x n
offset = function_samples[:, :, 0, :]
slope = function_samples[:, :, 1, :]
fsamps = slope * (Xi - offset)
# Expand from (nsamp x b x n) to (nsamp x b x n x 1)
fsamps = fsamps.unsqueeze(-1)
else:
assert function_samples.ndim == 3
assert function_samples.shape[1] == 2
# Shape is num_samps x 2 x n
# Offset and slope should be num_samps x n
# Xi will be of size n
offset = function_samples[:, 0, :]
slope = function_samples[:, 1, :]
fsamps = slope * (Xi - offset)
# Expand from (nsamp x n) to (nsamp x 1 x n x 1)
fsamps = fsamps.unsqueeze(1).unsqueeze(-1)
return fsamps
def p(self, function_samples: torch.Tensor, Xi: torch.Tensor) -> torch.Tensor:
"""Returns the response probability sigma(k(x+c)).
Args:
function_samples (torch.Tensor): Samples from the batched GP (see documentation for self.f)
Xi (torch.Tensor): Intensity Values.
Returns:
torch.Tensor: Response probabilities.
"""
fsamps = self.f(function_samples, Xi)
return self.objective(fsamps)
def forward(
self, function_samples: torch.Tensor, Xi: torch.Tensor, **kwargs
) -> torch.distributions.Bernoulli:
"""Forward pass for the likelihood
Args:
function_samples (torch.Tensor): Samples from a batched GP of batch size 2.
Xi (torch.Tensor): Intensity values.
Returns:
torch.distributions.Bernoulli: Outcome likelihood.
"""
output_probs = self.p(function_samples, Xi)
return torch.distributions.Bernoulli(probs=output_probs)
def expected_log_prob(self, observations, function_dist, *args, **kwargs):
"""This has to be overridden to fix a bug in gpytorch where the kwargs
aren't being passed along to self.forward.
"""
# modified, TODO fixme upstream (cc @bletham)
def log_prob_lambda(function_samples):
return self.forward(function_samples, **kwargs).log_prob(observations)
log_prob = self.quadrature(log_prob_lambda, function_dist)
return log_prob
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
objective = config.getobj(classname, "objective")
if hasattr(objective, "from_config"):
objective = objective.from_config(config)
return cls(objective=objective)
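if __name__ == "__main__":
    # Shape-check sketch (illustrative only): in the unbatched layout,
    # function_samples is nsamp x 2 x n (offset/slope) and Xi has length n;
    # p() should return probabilities of shape nsamp x 1 x n x 1.
    lik = LinearBernoulliLikelihood()
    fsamps = torch.randn(10, 2, 5)
    Xi = torch.linspace(0, 1, 5)
    print(lik.p(fsamps, Xi).shape)  # torch.Size([10, 1, 5, 1])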
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable, Optional
import gpytorch
import torch
from gpytorch.likelihoods import Likelihood
from torch.distributions import Categorical, Normal
class OrdinalLikelihood(Likelihood):
"""
Ordinal likelihood, suitable for rating models (e.g. likert scales). Formally,
.. math:: z_k(x\\mid f) := p(d_k < f(x) \\le d_{k+1}) = \\sigma(d_{k+1}-f(x)) - \\sigma(d_{k}-f(x)),
where :math:`\\sigma()` is the link function (equivalent to the perceptual noise
distribution in psychophysics terms), :math:`f(x)` is the latent GP evaluated at x,
and :math:`d_k` is a learned cutpoint parameter for each level.
"""
def __init__(self, n_levels: int, link: Optional[Callable] = None):
super().__init__()
self.n_levels = n_levels
self.register_parameter(
name="raw_cutpoint_deltas",
parameter=torch.nn.Parameter(torch.abs(torch.randn(n_levels - 2))),
)
self.register_constraint("raw_cutpoint_deltas", gpytorch.constraints.Positive())
self.link = link or Normal(0, 1).cdf
@property
def cutpoints(self):
cutpoint_deltas = self.raw_cutpoint_deltas_constraint.transform(
self.raw_cutpoint_deltas
)
# for identification, the first cutpoint is 0
return torch.cat((torch.tensor([0]), torch.cumsum(cutpoint_deltas, 0)))
def forward(self, function_samples, *params, **kwargs):
# this whole thing can probably be some clever batched thing, meh
probs = torch.zeros(*function_samples.size(), self.n_levels)
probs[..., 0] = self.link(self.cutpoints[0] - function_samples)
for i in range(1, self.n_levels - 1):
probs[..., i] = self.link(self.cutpoints[i] - function_samples) - self.link(
self.cutpoints[i - 1] - function_samples
)
probs[..., -1] = 1 - self.link(self.cutpoints[-1] - function_samples)
res = Categorical(probs=probs)
return res
@classmethod
def from_config(cls, config):
classname = cls.__name__
n_levels = config.getint(classname, "n_levels")
link = config.getobj(classname, "link", fallback=None)
return cls(n_levels=n_levels, link=link)
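if __name__ == "__main__":
    # Sanity-check sketch (illustrative only): the level probabilities should
    # form a valid categorical distribution at every sampled latent value.
    lik = OrdinalLikelihood(n_levels=5)
    dist = lik.forward(torch.randn(7))
    assert dist.probs.shape == (7, 5)
    assert torch.allclose(dist.probs.sum(-1), torch.ones(7))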
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .bernoulli import BernoulliObjectiveLikelihood
from .ordinal import OrdinalLikelihood
from .semi_p import LinearBernoulliLikelihood
__all__ = [
"BernoulliObjectiveLikelihood",
"OrdinalLikelihood",
"LinearBernoulliLikelihood",
]
Config.register_module(sys.modules[__name__])
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Callable
import torch
from aepsych.config import Config
from gpytorch.likelihoods import _OneDimensionalLikelihood
class BernoulliObjectiveLikelihood(_OneDimensionalLikelihood):
"""
Bernoulli likelihood with a flexible link (objective) defined
by a callable (which can be a botorch objective)
"""
def __init__(self, objective: Callable):
super().__init__()
self.objective = objective
def forward(self, function_samples, **kwargs):
output_probs = self.objective(function_samples)
return torch.distributions.Bernoulli(probs=output_probs)
@classmethod
def from_config(cls, config: Config):
objective_cls = config.getobj(cls.__name__, "objective")
objective = objective_cls.from_config(config)
return cls(objective=objective)
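if __name__ == "__main__":
    # Minimal sketch (illustrative only): any callable mapping latent values
    # into [0, 1] can serve as the objective; torch.sigmoid stands in here.
    lik = BernoulliObjectiveLikelihood(objective=torch.sigmoid)
    dist = lik.forward(torch.randn(10))
    print(dist.probs.shape)  # torch.Size([10])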
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from typing import Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
class ManualGenerator(AEPsychGenerator):
"""Generator that generates points from the Sobol Sequence."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
points: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
shuffle: bool = True,
):
"""Iniatialize SobolGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
points (Union[np.ndarray, torch.Tensor]): The points that will be generated.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
shuffle (bool): Whether or not to shuffle the order of the points. True by default.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.points = points
if shuffle:
np.random.shuffle(points)
self.finished = False
self._idx = 0
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility
):
"""Query next point(s) to run by quasi-randomly sampling the parameter space.
Args:
num_points (int): Number of points to query.
Returns:
np.ndarray: Next set of point(s) to evaluate, [num_points x dim].
"""
if num_points > (len(self.points) - self._idx):
warnings.warn(
"Asked for more points than are left in the generator! Giving everthing it has!",
RuntimeWarning,
)
points = self.points[self._idx : self._idx + num_points]
self._idx += num_points
if self._idx >= len(self.points):
self.finished = True
return points
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
points = config.getarray(classname, "points")
shuffle = config.getboolean(classname, "shuffle", fallback=True)
return cls(lb=lb, ub=ub, dim=dim, points=points, shuffle=shuffle)
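if __name__ == "__main__":
    # Usage sketch (illustrative only): points are handed back in order
    # (shuffled unless shuffle=False) until the generator is exhausted.
    pts = np.array([[0.1], [0.5], [0.9]])
    gen = ManualGenerator(lb=np.array([0.0]), ub=np.array([1.0]), points=pts, shuffle=False)
    print(gen.gen(2))  # first two points
    print(gen.gen(1), gen.finished)  # last point; finished is now True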
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator
class PairwiseOptimizeAcqfGenerator(OptimizeAcqfGenerator):
"""Deprecated. Use OptimizeAcqfGenerator instead."""
stimuli_per_trial = 2
@classmethod
def from_config(cls, config: Config):
warnings.warn(
"PairwiseOptimizeAcqfGenerator is deprecated. Use OptimizeAcqfGenerator instead.",
DeprecationWarning,
)
return super().from_config(config)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict, Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
from ax.modelbridge import Models
from torch.quasirandom import SobolEngine
class SobolGenerator(AEPsychGenerator):
"""Generator that generates points from the Sobol Sequence."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
seed: Optional[int] = None,
stimuli_per_trial: int = 1,
):
"""Iniatialize SobolGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
seed (int, optional): Random seed.
stimuli_per_trial (int): Number of stimuli per trial; bounds are tiled accordingly. Defaults to 1.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.lb = self.lb.repeat(stimuli_per_trial)
self.ub = self.ub.repeat(stimuli_per_trial)
self.stimuli_per_trial = stimuli_per_trial
self.seed = seed
self.engine = SobolEngine(
dimension=self.dim * stimuli_per_trial, scramble=True, seed=self.seed
)
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility
):
"""Query next point(s) to run by quasi-randomly sampling the parameter space.
Args:
num_points (int, optional): Number of points to query.
Returns:
torch.Tensor: Next set of point(s) to evaluate, [num_points x dim] (or [num_points x dim x stimuli_per_trial] when stimuli_per_trial > 1).
"""
grid = self.engine.draw(num_points)
grid = self.lb + (self.ub - self.lb) * grid
if self.stimuli_per_trial == 1:
return grid
return torch.tensor(
np.moveaxis(
grid.reshape(num_points, self.stimuli_per_trial, -1).numpy(),
-1,
-self.stimuli_per_trial,
)
)
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
seed = config.getint(classname, "seed", fallback=None)
stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
return cls(
lb=lb, ub=ub, dim=dim, seed=seed, stimuli_per_trial=stimuli_per_trial
)
@classmethod
def get_config_options(cls, config: Config, name: str):
return AxSobolGenerator.get_config_options(config, name)
class AxSobolGenerator(AEPsychGenerationStep):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
classname = "SobolGenerator"
seed = config.getint(classname, "seed", fallback=None)
scramble = config.getboolean(classname, "scramble", fallback=True)
opts = {
"model": Models.SOBOL,
"model_kwargs": {"seed": seed, "scramble": scramble},
}
opts.update(super().get_config_options(config, name))
return opts
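if __name__ == "__main__":
    # Shape sketch (illustrative only): with stimuli_per_trial=2 the output is
    # [num_points x dim x stimuli_per_trial] rather than [num_points x dim].
    g = SobolGenerator(lb=np.zeros(2), ub=np.ones(2), stimuli_per_trial=2, seed=1)
    print(g.gen(3).shape)  # torch.Size([3, 2, 2])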
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
from aepsych.config import Config
from .sobol_generator import SobolGenerator
class PairwiseSobolGenerator(SobolGenerator):
"""Deprecated. Use SobolGenerator instead."""
stimuli_per_trial = 2
@classmethod
def from_config(cls, config: Config):
warnings.warn(
"PairwiseSobolGenerator is deprecated. Use SobolGenerator instead.",
DeprecationWarning,
)
return super().from_config(config)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import torch
from aepsych.acquisition.objective.semi_p import SemiPThresholdObjective
from aepsych.generators import OptimizeAcqfGenerator
from aepsych.models.semi_p import SemiParametricGPModel
class IntensityAwareSemiPGenerator(OptimizeAcqfGenerator):
"""Generator for SemiP. With botorch machinery, in order to optimize acquisition
separately over context and intensity, we need two ingredients.
1. An objective that samples from some posterior w.r.t. the context. From the
paper, this is ThresholdBALV and needs the threshold posterior.
`SemiPThresholdObjective` implements this for ThresholdBALV but theoretically
this can be any subclass of `SemiPObjectiveBase`.
2. A way to do acquisition over context and intensity separately, which is
provided by this class. We optimize the acquisition function over context
dimensions, then conditioned on the optimum we evaluate the intensity
at the objective to obtain the intensity value.
ThresholdBALV is the only SemiP-specific acquisition we developed and tested with
this generator, but other similar acquisition functions should also work.
"""
def gen( # type: ignore[override]
self,
num_points: int,
model: SemiParametricGPModel, # type: ignore[override]
context_objective: Type = SemiPThresholdObjective,
) -> torch.Tensor:
fixed_features = {model.stim_dim: 0}
next_x = super().gen(
num_points=num_points, model=model, fixed_features=fixed_features
)
# to compute intensity, we need the point where f is at the
# threshold as a function of context. self.acqf_kwargs should contain
# remaining objective args (like threshold target value)
thresh_objective = context_objective(
likelihood=model.likelihood, stim_dim=model.stim_dim, **self.acqf_kwargs
)
kc_mean_at_best_context = model(torch.Tensor(next_x)).mean
thresh_at_best_context = thresh_objective(kc_mean_at_best_context)
thresh_at_best_context = torch.clamp(
thresh_at_best_context,
min=model.lb[model.stim_dim],
max=model.ub[model.stim_dim],
)
next_x[..., model.stim_dim] = thresh_at_best_context.detach()
return next_x
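# Hedged usage sketch (`model` is a fitted SemiParametricGPModel and `acqf` a
# compatible acquisition class; both are assumed here, not constructed):
#   gen = IntensityAwareSemiPGenerator(acqf=acqf, acqf_kwargs={...})
#   next_x = gen.gen(1, model)  # context dims from the acqf optimum,
#                               # intensity dim set to the threshold there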
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional, Sequence
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCAcquisition
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from botorch.logging import logger
from botorch.optim.initializers import gen_batch_initial_conditions
from botorch.optim.utils import columnwise_clamp, fix_features
def default_loss_constraint_fun(
loss: torch.Tensor, candidates: torch.Tensor
) -> torch.Tensor:
"""Identity transform for constrained optimization.
This simply returns loss as-is. Write your own versions of this
for constrained optimization by e.g. interior point method.
Args:
loss (torch.Tensor): Value of loss at candidate points.
candidates (torch.Tensor): Location of candidate points.
Returns:
torch.Tensor: New loss (unchanged)
"""
return loss
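def example_quadratic_penalty_loss(
    loss: torch.Tensor, candidates: torch.Tensor
) -> torch.Tensor:
    """Sketch of a custom loss-constraint transform (illustrative only; not
    used anywhere by default): softly penalize candidates whose first
    coordinate exceeds 0.8, in the spirit of an exterior penalty method.
    Args:
        loss (torch.Tensor): Value of loss at candidate points.
        candidates (torch.Tensor): Location of candidate points.
    Returns:
        torch.Tensor: Penalized loss.
    """
    violation = torch.clamp(candidates[..., 0] - 0.8, min=0.0)
    return loss + 100.0 * violation.pow(2).sum()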
class MonotonicRejectionGenerator(AEPsychGenerator[MonotonicRejectionGP]):
"""Generator specifically to be used with MonotonicRejectionGP, which generates new points to sample by minimizing
an acquisition function through stochastic gradient descent."""
def __init__(
self,
acqf: MonotonicMCAcquisition,
acqf_kwargs: Optional[Dict[str, Any]] = None,
model_gen_options: Optional[Dict[str, Any]] = None,
explore_features: Optional[Sequence[int]] = None,
) -> None:
"""Initialize MonotonicRejectionGenerator.
Args:
acqf (AcquisitionFunction): Acquisition function to use.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to no arguments.
model_gen_options: Dictionary with options for generating candidates, such as
SGD parameters. See code for all options and their defaults.
explore_features: List of features that will be selected randomly and then
fixed for acquisition fn optimization.
"""
if acqf_kwargs is None:
acqf_kwargs = {}
self.acqf = acqf
self.acqf_kwargs = acqf_kwargs
self.model_gen_options = model_gen_options
self.explore_features = explore_features
def _instantiate_acquisition_fn(self, model: MonotonicRejectionGP):
return self.acqf(
model=model,
deriv_constraint_points=model._get_deriv_constraint_points(),
**self.acqf_kwargs,
)
def gen(
self,
num_points: int, # Current implementation only generates 1 point at a time
model: MonotonicRejectionGP,
):
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int): Number of points to query (the current implementation generates 1 point at a time).
model (MonotonicRejectionGP): Fitted model of the data.
Returns:
torch.Tensor: Next set of point(s) to evaluate, [num_points x dim].
"""
options = self.model_gen_options or {}
num_restarts = options.get("num_restarts", 10)
raw_samples = options.get("raw_samples", 1000)
verbosity_freq = options.get("verbosity_freq", -1)
lr = options.get("lr", 0.01)
momentum = options.get("momentum", 0.9)
nesterov = options.get("nesterov", True)
epochs = options.get("epochs", 50)
milestones = options.get("milestones", [25, 40])
gamma = options.get("gamma", 0.1)
loss_constraint_fun = options.get(
"loss_constraint_fun", default_loss_constraint_fun
)
# Augment bounds with deriv indicator
bounds = torch.cat((model.bounds_, torch.zeros(2, 1)), dim=1)
# Fix deriv indicator to 0 during optimization
fixed_features = {(bounds.shape[1] - 1): 0.0}
# Fix explore features to random values
if self.explore_features is not None:
for idx in self.explore_features:
val = (
bounds[0, idx]
+ torch.rand(1, dtype=bounds.dtype)
* (bounds[1, idx] - bounds[0, idx])
).item()
fixed_features[idx] = val
bounds[0, idx] = val
bounds[1, idx] = val
acqf = self._instantiate_acquisition_fn(model)
# Initialize
batch_initial_conditions = gen_batch_initial_conditions(
acq_function=acqf,
bounds=bounds,
q=1,
num_restarts=num_restarts,
raw_samples=raw_samples,
)
clamped_candidates = columnwise_clamp(
X=batch_initial_conditions, lower=bounds[0], upper=bounds[1]
).requires_grad_(True)
candidates = fix_features(clamped_candidates, fixed_features)
optimizer = torch.optim.SGD(
params=[clamped_candidates], lr=lr, momentum=momentum, nesterov=nesterov
)
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=milestones, gamma=gamma
)
# Optimize
for epoch in range(epochs):
loss = -acqf(candidates).sum()
# adjust loss based on constraints on candidates
loss = loss_constraint_fun(loss, candidates)
if verbosity_freq > 0 and epoch % verbosity_freq == 0:
logger.info("Iter: {} - Value: {:.3f}".format(epoch, -(loss.item())))
def closure():
optimizer.zero_grad()
loss.backward(
retain_graph=True
) # Variational model requires retain_graph
return loss
optimizer.step(closure)
clamped_candidates.data = columnwise_clamp(
X=clamped_candidates, lower=bounds[0], upper=bounds[1]
)
candidates = fix_features(clamped_candidates, fixed_features)
lr_scheduler.step()
# Extract best point
with torch.no_grad():
batch_acquisition = acqf(candidates)
best = torch.argmax(batch_acquisition.view(-1), dim=0)
Xopt = candidates[best][:, :-1].detach()
return Xopt
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
acqf = config.getobj("common", "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
options = {}
options["num_restarts"] = config.getint(classname, "restarts", fallback=10)
options["raw_samples"] = config.getint(classname, "samps", fallback=1000)
options["verbosity_freq"] = config.getint(
classname, "verbosity_freq", fallback=-1
)
options["lr"] = config.getfloat(classname, "lr", fallback=0.01) # type: ignore
options["momentum"] = config.getfloat(classname, "momentum", fallback=0.9) # type: ignore
options["nesterov"] = config.getboolean(classname, "nesterov", fallback=True)
options["epochs"] = config.getint(classname, "epochs", fallback=50)
options["milestones"] = config.getlist(
classname, "milestones", fallback=[25, 40] # type: ignore
)
options["gamma"] = config.getfloat(classname, "gamma", fallback=0.1) # type: ignore
options["loss_constraint_fun"] = config.getobj(
classname, "loss_constraint_fun", fallback=default_loss_constraint_fun
)
explore_features = config.getlist(classname, "explore_idxs", fallback=None) # type: ignore
return cls(
acqf=acqf,
acqf_kwargs=extra_acqf_args,
model_gen_options=options,
explore_features=explore_features,
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .epsilon_greedy_generator import EpsilonGreedyGenerator
from .manual_generator import ManualGenerator
from .monotonic_rejection_generator import MonotonicRejectionGenerator
from .monotonic_thompson_sampler_generator import MonotonicThompsonSamplerGenerator
from .multi_outcome_generator import MultiOutcomeOptimizationGenerator
from .optimize_acqf_generator import AxOptimizeAcqfGenerator, OptimizeAcqfGenerator
from .pairwise_optimize_acqf_generator import PairwiseOptimizeAcqfGenerator
from .pairwise_sobol_generator import PairwiseSobolGenerator
from .random_generator import AxRandomGenerator, RandomGenerator
from .semi_p import IntensityAwareSemiPGenerator
from .sobol_generator import AxSobolGenerator, SobolGenerator
__all__ = [
"OptimizeAcqfGenerator",
"MonotonicRejectionGenerator",
"MonotonicThompsonSamplerGenerator",
"RandomGenerator",
"SobolGenerator",
"EpsilonGreedyGenerator",
"ManualGenerator",
"PairwiseOptimizeAcqfGenerator",
"PairwiseSobolGenerator",
"AxOptimizeAcqfGenerator",
"AxSobolGenerator",
"IntensityAwareSemiPGenerator",
"MultiOutcomeOptimizationGenerator",
"AxRandomGenerator",
]
Config.register_module(sys.modules[__name__])
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import time
from inspect import signature
from typing import Any, cast, Dict, Optional
import numpy as np
import torch
from aepsych.acquisition.acquisition import AEPsychAcquisition
from aepsych.config import Config, ConfigurableMixin
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import ModelProtocol
from aepsych.models.surrogate import AEPsychSurrogate
from aepsych.utils_logging import getLogger
from ax.modelbridge import Models
from ax.modelbridge.registry import Cont_X_trans
from botorch.acquisition import AcquisitionFunction
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from botorch.optim import optimize_acqf
from botorch.utils import draw_sobol_samples
logger = getLogger()
class OptimizeAcqfGenerator(AEPsychGenerator):
"""Generator that chooses points by minimizing an acquisition function."""
def __init__(
self,
acqf: AcquisitionFunction,
acqf_kwargs: Optional[Dict[str, Any]] = None,
restarts: int = 10,
samps: int = 1000,
max_gen_time: Optional[float] = None,
stimuli_per_trial: int = 1,
) -> None:
"""Initialize OptimizeAcqfGenerator.
Args:
acqf (AcquisitionFunction): Acquisition function to use.
acqf_kwargs (Dict[str, object], optional): Extra arguments to
pass to acquisition function. Defaults to no arguments.
restarts (int): Number of restarts for acquisition function optimization.
samps (int): Number of samples for quasi-random initialization of the acquisition function optimizer.
max_gen_time (float, optional): Maximum time (in seconds) to spend optimizing the acquisition function.
scipy's optimizer only loosely respects this limit, so consider using a number about 1/3 or
less of your true upper bound.
stimuli_per_trial (int): Number of stimuli per trial; 2 yields pairwise candidates. Defaults to 1.
"""
if acqf_kwargs is None:
acqf_kwargs = {}
self.acqf = acqf
self.acqf_kwargs = acqf_kwargs
self.restarts = restarts
self.samps = samps
self.max_gen_time = max_gen_time
self.stimuli_per_trial = stimuli_per_trial
def _instantiate_acquisition_fn(self, model: ModelProtocol):
if self.acqf == AnalyticExpectedUtilityOfBestOption:
return self.acqf(pref_model=model)
if self.acqf in self.baseline_requiring_acqfs:
return self.acqf(
model=model, X_baseline=model.train_inputs[0], **self.acqf_kwargs
)
else:
return self.acqf(model=model, **self.acqf_kwargs)
def gen(self, num_points: int, model: ModelProtocol, **gen_options) -> torch.Tensor:
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int): Number of points to query.
model (ModelProtocol): Fitted model of the data.
Returns:
torch.Tensor: Next set of point(s) to evaluate, [num_points x dim] (or [num_points x dim x 2] when stimuli_per_trial == 2).
"""
if self.stimuli_per_trial == 2:
qbatch_points = self._gen(
num_points=num_points * 2, model=model, **gen_options
)
# _gen returns (num_points * 2, dim) but the contract is (num_points, dim, 2),
# so we split q into points and pairs and then move the pair dim to the end
return qbatch_points.reshape(num_points, 2, -1).swapaxes(-1, -2)
else:
return self._gen(num_points=num_points, model=model, **gen_options)
def _gen(
self, num_points: int, model: ModelProtocol, **gen_options
) -> torch.Tensor:
# eval should be inherited from superclass
model.eval() # type: ignore
train_x = model.train_inputs[0]
acqf = self._instantiate_acquisition_fn(model)
logger.info("Starting gen...")
starttime = time.time()
if self.max_gen_time is None:
new_candidate, _ = optimize_acqf(
acq_function=acqf,
bounds=torch.tensor(np.c_[model.lb, model.ub]).T.to(train_x),
q=num_points,
num_restarts=self.restarts,
raw_samples=self.samps,
**gen_options,
)
else:
# figure out how long evaluating a single sample takes
starttime = time.time()
_ = acqf(train_x[0:num_points, :])
single_eval_time = time.time() - starttime
# only a heuristic for total num evals since everything is stochastic,
# but the reasoning is: we initialize with self.samps samps, subsample
# self.restarts from them in proportion to the value of the acqf, and
# run that many optimization. So:
# total_time = single_eval_time * n_eval * restarts + single_eval_time * samps
# and we solve for n_eval
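# worked example (illustrative numbers): with max_gen_time=2.0s,
# single_eval_time=1e-4s, samps=1000, and restarts=10,
# n_eval = (2.0 - 1e-4 * 1000) / (1e-4 * 10) = 1900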
n_eval = int(
(self.max_gen_time - single_eval_time * self.samps)
/ (single_eval_time * self.restarts)
)
if n_eval > 10:
# heuristic: if we can afford at least 10 evals per restart, run the optimizer;
# otherwise fall back to quasi-random search below
options = {"maxfun": n_eval}
logger.info(f"gen maxfun is {n_eval}")
new_candidate, _ = optimize_acqf(
acq_function=acqf,
bounds=torch.tensor(np.c_[model.lb, model.ub]).T.to(train_x),
q=num_points,
num_restarts=self.restarts,
raw_samples=self.samps,
options=options,
)
else:
logger.info(f"gen maxfun is {n_eval}, falling back to random search...")
nsamp = max(int(self.max_gen_time / single_eval_time), 10)
# Generate the points at which to sample
bounds = torch.stack((model.lb, model.ub))
X = draw_sobol_samples(bounds=bounds, n=nsamp, q=num_points)
acqvals = acqf(X)
best_indx = torch.argmax(acqvals, dim=0)
new_candidate = X[best_indx]
logger.info(f"Gen done, time={time.time()-starttime}")
return new_candidate
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
acqf = config.getobj(classname, "acqf", fallback=None)
extra_acqf_args = cls._get_acqf_options(acqf, config)
stimuli_per_trial = config.getint(classname, "stimuli_per_trial")
restarts = config.getint(classname, "restarts", fallback=10)
samps = config.getint(classname, "samps", fallback=1000)
max_gen_time = config.getfloat(classname, "max_gen_time", fallback=None)
return cls(
acqf=acqf,
acqf_kwargs=extra_acqf_args,
restarts=restarts,
samps=samps,
max_gen_time=max_gen_time,
stimuli_per_trial=stimuli_per_trial,
)
@classmethod
def get_config_options(cls, config: Config, name: str):
return AxOptimizeAcqfGenerator.get_config_options(config, name)
class AxOptimizeAcqfGenerator(AEPsychGenerationStep, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
classname = "OptimizeAcqfGenerator"
model_class = config.getobj(name, "model", fallback=None)
model_options = model_class.get_config_options(config)
acqf_cls = config.getobj(name, "acqf", fallback=None)
if acqf_cls is None:
acqf_cls = config.getobj(classname, "acqf")
acqf_options = cls._get_acqf_options(acqf_cls, config)
gen_options = cls._get_gen_options(config)
max_fit_time = model_options["max_fit_time"]
model_kwargs = {
"surrogate": AEPsychSurrogate(
botorch_model_class=model_class,
mll_class=model_class.get_mll_class(),
model_options=model_options,
max_fit_time=max_fit_time,
),
"acquisition_class": AEPsychAcquisition,
"botorch_acqf_class": acqf_cls,
"acquisition_options": acqf_options,
# The Y transforms are removed because they are incompatible with our thresholding-finding acqfs
# The target value doesn't get transformed, so it searches for the target in the wrong space.
"transforms": Cont_X_trans, # TODO: Make LSE acqfs compatible with Y transforms
}
opts = {
"model": Models.BOTORCH_MODULAR,
"model_kwargs": model_kwargs,
"model_gen_kwargs": gen_options,
}
opts.update(super().get_config_options(config, name))
return opts
@classmethod
def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
class MissingValue:
pass
if acqf is not None:
acqf_name = acqf.__name__
acqf_args_expected = signature(acqf).parameters.keys()
acqf_args = {
k: config.getobj(
acqf_name,
k,
fallback_type=float,
fallback=MissingValue(),
warn=False,
)
for k in acqf_args_expected
}
acqf_args = {
k: v for k, v in acqf_args.items() if not isinstance(v, MissingValue)
}
for k, v in acqf_args.items():
if hasattr(v, "from_config"): # configure if needed
acqf_args[k] = cast(Any, v).from_config(config)
elif isinstance(v, type):  # instantiate a class if needed
acqf_args[k] = v()
else:
acqf_args = {}
return acqf_args
@classmethod
def _get_gen_options(cls, config: Config):
classname = "OptimizeAcqfGenerator"
restarts = config.getint(classname, "restarts", fallback=10)
samps = config.getint(classname, "samps", fallback=1000)
return {"restarts": restarts, "samps": samps}
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Optional, Type
import numpy as np
import torch
from aepsych.acquisition.objective import ProbitObjective
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerator
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from botorch.acquisition.objective import MCAcquisitionObjective
from botorch.utils.sampling import draw_sobol_samples
class MonotonicThompsonSamplerGenerator(AEPsychGenerator[MonotonicRejectionGP]):
"""A generator specifically to be used with MonotonicRejectionGP that uses a Thompson-sampling-style
approach for gen, rather than using an acquisition function. We draw a posterior sample at a large number
of points, and then choose the point that is closest to the target value.
"""
def __init__(
self,
n_samples: int,
n_rejection_samples: int,
num_ts_points: int,
target_value: float,
objective: Type[MCAcquisitionObjective],
explore_features: Optional[List[int]] = None,
) -> None:
"""Initialize MonotonicMCAcquisition
Args:
n_samples (int): Number of samples to select point from.
num_rejection_samples (int): Number of rejection samples to draw.
num_ts_points (int): Number of points at which to sample.
target_value (float): target value that is being looked for
objective (Optional[MCAcquisitionObjective], optional): Objective transform of the GP output
before evaluating the acquisition. Defaults to identity transform.
explore_features (Sequence[int], optional)
"""
self.n_samples = n_samples
self.n_rejection_samples = n_rejection_samples
self.num_ts_points = num_ts_points
self.target_value = target_value
self.objective = objective()
self.explore_features = explore_features
def gen(
self,
num_points: int, # Current implementation only generates 1 point at a time
model: MonotonicRejectionGP,
) -> torch.Tensor:
"""Query next point(s) to run by optimizing the acquisition function.
Args:
num_points (int): Number of points to query (the current implementation generates 1 point at a time).
model (MonotonicRejectionGP): Fitted model of the data.
Returns:
torch.Tensor: Next set of point(s) to evaluate, [num_points x dim].
"""
# Generate the points at which to sample
X = draw_sobol_samples(bounds=model.bounds_, n=self.num_ts_points, q=1).squeeze(
1
)
# Fix any explore features
if self.explore_features is not None:
for idx in self.explore_features:
val = (
model.bounds_[0, idx]
+ torch.rand(1) * (model.bounds_[1, idx] - model.bounds_[0, idx])
).item()
X[:, idx] = val
# Draw n samples
f_samp = model.sample(
X,
num_samples=self.n_samples,
num_rejection_samples=self.n_rejection_samples,
)
# Find the point closest to target
dist = torch.abs(self.objective(f_samp) - self.target_value)
best_indx = torch.argmin(dist, dim=1)
return torch.Tensor(X[best_indx])
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
n_samples = config.getint(classname, "num_samples", fallback=1)
n_rejection_samples = config.getint(
classname, "num_rejection_samples", fallback=500
)
num_ts_points = config.getint(classname, "num_ts_points", fallback=1000)
target = config.getfloat(classname, "target", fallback=0.75)
objective = config.getobj(classname, "objective", fallback=ProbitObjective)
explore_features = config.getlist(classname, "explore_idxs", element_type=int, fallback=None) # type: ignore
return cls(
n_samples=n_samples,
n_rejection_samples=n_rejection_samples,
num_ts_points=num_ts_points,
target_value=target,
objective=objective,
explore_features=explore_features, # type: ignore
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from aepsych.config import Config
from ..models.base import ModelProtocol
from .base import AEPsychGenerator
from .optimize_acqf_generator import OptimizeAcqfGenerator
class EpsilonGreedyGenerator(AEPsychGenerator):
def __init__(self, subgenerator: AEPsychGenerator, epsilon: float = 0.1):
self.subgenerator = subgenerator
self.epsilon = epsilon
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
subgen_cls = config.getobj(
classname, "subgenerator", fallback=OptimizeAcqfGenerator
)
subgen = subgen_cls.from_config(config)
epsilon = config.getfloat(classname, "epsilon", fallback=0.1)
return cls(subgenerator=subgen, epsilon=epsilon)
def gen(self, num_points: int, model: ModelProtocol):
if num_points > 1:
raise NotImplementedError("Epsilon-greedy batched gen is not implemented!")
if np.random.uniform() < self.epsilon:
sample = np.random.uniform(low=model.lb, high=model.ub)
return torch.tensor(sample).reshape(1, -1)
else:
return self.subgenerator.gen(num_points, model)
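# Hedged usage sketch (`model` is a fitted ModelProtocol instance and
# `acqf_gen` a configured OptimizeAcqfGenerator; both are assumed):
#   gen = EpsilonGreedyGenerator(subgenerator=acqf_gen, epsilon=0.2)
#   x = gen.gen(1, model)  # 20% chance: uniform random draw; else subgenerator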
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from typing import Dict
from ax.modelbridge import Models
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep
class MultiOutcomeOptimizationGenerator(AEPsychGenerationStep):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
# classname = cls.__name__
opts = {
"model": Models.MOO,
}
opts.update(super().get_config_options(config, name))
return opts
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Optional, Union
import numpy as np
import torch
from aepsych.config import Config
from aepsych.generators.base import AEPsychGenerationStep, AEPsychGenerator
from aepsych.models.base import AEPsychMixin
from aepsych.utils import _process_bounds
from ax.modelbridge import Models
class RandomGenerator(AEPsychGenerator):
"""Generator that generates points randomly without an acquisition function."""
_requires_model = False
def __init__(
self,
lb: Union[np.ndarray, torch.Tensor],
ub: Union[np.ndarray, torch.Tensor],
dim: Optional[int] = None,
):
"""Iniatialize RandomGenerator.
Args:
lb (Union[np.ndarray, torch.Tensor]): Lower bounds of each parameter.
ub (Union[np.ndarray, torch.Tensor]): Upper bounds of each parameter.
dim (int, optional): Dimensionality of the parameter space. If None, it is inferred from lb and ub.
"""
self.lb, self.ub, self.dim = _process_bounds(lb, ub, dim)
self.bounds_ = torch.stack([self.lb, self.ub])
def gen(
self,
num_points: int = 1,
model: Optional[AEPsychMixin] = None, # included for API compatibility.
) -> torch.Tensor:
"""Query next point(s) to run by randomly sampling the parameter space.
Args:
num_points (int): Number of points to query. Defaults to 1.
Returns:
torch.Tensor: Next set of point(s) to evaluate, [num_points x dim].
"""
X = self.bounds_[0] + torch.rand((num_points, self.bounds_.shape[1])) * (
self.bounds_[1] - self.bounds_[0]
)
return X
@classmethod
def from_config(cls, config: Config):
classname = cls.__name__
lb = config.gettensor(classname, "lb")
ub = config.gettensor(classname, "ub")
dim = config.getint(classname, "dim", fallback=None)
return cls(lb=lb, ub=ub, dim=dim)
class AxRandomGenerator(AEPsychGenerationStep):
classname = "RandomGenerator"
model = Models.UNIFORM
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
seed = config.getint(cls.classname, "seed", fallback=None)
deduplicate = config.getboolean(cls.classname, "deduplicate", fallback=True)
opts = {
"model": cls.model,
"model_kwargs": {"seed": seed, "deduplicate": deduplicate},
}
opts.update(super().get_config_options(config, name))
return opts
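if __name__ == "__main__":
    # Smoke-test sketch (illustrative only): all draws land inside [lb, ub].
    g = RandomGenerator(lb=np.array([0.0, -1.0]), ub=np.array([1.0, 1.0]))
    X = g.gen(5)
    assert X.shape == (5, 2)
    assert torch.all((X >= g.lb) & (X <= g.ub))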
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import abc
from inspect import signature
from typing import Any, Dict, Generic, Protocol, runtime_checkable, TypeVar
import torch
from aepsych.config import Config, ConfigurableMixin
from aepsych.models.base import AEPsychMixin
from ax.core.experiment import Experiment
from ax.modelbridge.generation_node import GenerationStep
from botorch.acquisition import (
AcquisitionFunction,
NoisyExpectedImprovement,
qNoisyExpectedImprovement,
)
from .completion_criterion import completion_criteria
AEPsychModelType = TypeVar("AEPsychModelType", bound=AEPsychMixin)
@runtime_checkable
class AcqArgProtocol(Protocol):
@classmethod
def from_config(cls, config: Config) -> Any:
pass
class AEPsychGenerator(abc.ABC, Generic[AEPsychModelType]):
"""Abstract base class for generators, which are responsible for generating new points to sample."""
_requires_model = True
baseline_requiring_acqfs = [qNoisyExpectedImprovement, NoisyExpectedImprovement]
stimuli_per_trial = 1
def __init__(
self,
) -> None:
pass
@abc.abstractmethod
def gen(self, num_points: int, model: AEPsychModelType) -> torch.Tensor:
pass
@classmethod
@abc.abstractmethod
def from_config(cls, config: Config):
pass
@classmethod
def _get_acqf_options(cls, acqf: AcquisitionFunction, config: Config):
if acqf is not None:
acqf_name = acqf.__name__
# model is not an extra arg, it's a default arg
acqf_args_expected = [
i for i in list(signature(acqf).parameters.keys()) if i != "model"
]
# this is still very ugly
extra_acqf_args = {}
if acqf_name in config:
full_section = config[acqf_name]
for k in acqf_args_expected:
# if this thing is configured
if k in full_section.keys():
# if it's an object make it an object
if full_section[k] in Config.registered_names.keys():
extra_acqf_args[k] = config.getobj(acqf_name, k)
else:
# otherwise try a float
try:
extra_acqf_args[k] = config.getfloat(acqf_name, k)
# finally just return a string
except ValueError:
extra_acqf_args[k] = config.get(acqf_name, k)
# next, do more processing
for k, v in extra_acqf_args.items():
if hasattr(v, "from_config"): # configure if needed
assert isinstance(v, AcqArgProtocol) # make mypy happy
extra_acqf_args[k] = v.from_config(config)
elif isinstance(v, type):  # instantiate a class if needed
extra_acqf_args[k] = v()
else:
extra_acqf_args = {}
return extra_acqf_args
class AEPsychGenerationStep(GenerationStep, ConfigurableMixin, abc.ABC):
def __init__(self, name, **kwargs):
super().__init__(num_trials=-1, **kwargs)
self.name = name
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict:
criteria = []
for crit in completion_criteria:
# TODO: Figure out how to convince mypy that CompletionCriterion have `from_config`
criterion = crit.from_config(config, name) # type: ignore
criteria.append(criterion)
options = {"completion_criteria": criteria, "name": name}
return options
def finished(self, experiment: Experiment):
finished = all(
[criterion.is_met(experiment) for criterion in self.completion_criteria]
)
return finished
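# Hedged illustration of how _get_acqf_options maps a config section to
# acquisition kwargs (the section and option names below are purely
# illustrative; ProbitObjective is a registered aepsych object):
#   [MyAcqf]
#   beta = 3.84                 -> extra_acqf_args["beta"] == 3.84 (a float)
#   objective = ProbitObjective -> resolved via Config.registered_names, then
#                                  built with .from_config(config) (or simply
#                                  instantiated, if it is a plain class)
#   label = some_string         -> kept as the string "some_string"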
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from ax.core.experiment import Experiment
from ax.modelbridge.completion_criterion import CompletionCriterion
from aepsych.config import Config, ConfigurableMixin
class MinAsks(CompletionCriterion, ConfigurableMixin):
def __init__(self, threshold: int) -> None:
self.threshold = threshold
def is_met(self, experiment: Experiment) -> bool:
return experiment.num_asks >= self.threshold
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
min_asks = config.getint(name, "min_asks", fallback=1)
options = {"threshold": min_asks}
return options
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.core import Experiment
from ax.modelbridge.completion_criterion import CompletionCriterion
class RunIndefinitely(CompletionCriterion, ConfigurableMixin):
def __init__(self, run_indefinitely: bool) -> None:
self.run_indefinitely = run_indefinitely
def is_met(self, experiment: Experiment) -> bool:
return not self.run_indefinitely
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
run_indefinitely = config.getboolean(name, "run_indefinitely", fallback=False)
options = {"run_indefinitely": run_indefinitely}
return options
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from aepsych.config import Config
from .min_asks import MinAsks
from .min_total_outcome_occurrences import MinTotalOutcomeOccurrences
from .min_total_tells import MinTotalTells
from .run_indefinitely import RunIndefinitely
completion_criteria = [
MinTotalTells,
MinAsks,
MinTotalOutcomeOccurrences,
RunIndefinitely,
]
__all__ = [
"completion_criteria",
"MinTotalTells",
"MinAsks",
"MinTotalOutcomeOccurrences",
"RunIndefinitely",
]
Config.register_module(sys.modules[__name__])
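# Hedged config sketch: each criterion reads its option from the strategy's
# own section ("opt_strat" and all values below are illustrative):
#   [opt_strat]
#   min_asks = 10
#   min_total_tells = 10
#   min_total_outcome_occurrences = 1
#   run_indefinitely = False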
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.modelbridge.completion_criterion import MinimumPreferenceOccurances
class MinTotalOutcomeOccurrences(MinimumPreferenceOccurances, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
outcome_types = config.getlist(name, "outcome_types", element_type=str)
outcome_names = config.getlist(
name, "outcome_names", element_type=str, fallback=None
)
# The completion criterion needs to get the name of the first outcome.
# TODO: Make it so that the criterion can be configured to which outcome
# it cares about instead of defaulting to the first one.
if outcome_names is None:
outcome_name = "outcome_1"
else:
outcome_name = str(outcome_names[0])
min_total_outcome_occurrences = config.getint(
name,
"min_total_outcome_occurrences",
fallback=1 if "binary" in outcome_types else 0,
)
options = {
"metric_name": outcome_name,
"threshold": min_total_outcome_occurrences,
}
return options
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict
from aepsych.config import Config, ConfigurableMixin
from ax.core.base_trial import TrialStatus
from ax.modelbridge.completion_criterion import MinimumTrialsInStatus
class MinTotalTells(MinimumTrialsInStatus, ConfigurableMixin):
@classmethod
def get_config_options(cls, config: Config, name: str) -> Dict[str, Any]:
min_total_tells = config.getint(name, "min_total_tells", fallback=1)
options = {"status": TrialStatus.COMPLETED, "threshold": min_total_tells}
return options
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
from ..config import Config
from .factory import (
default_mean_covar_factory,
monotonic_mean_covar_factory,
ordinal_mean_covar_factory,
song_mean_covar_factory,
)
__all__ = [
"default_mean_covar_factory",
"ordinal_mean_covar_factory",
"monotonic_mean_covar_factory",
"song_mean_covar_factory",
]
Config.register_module(sys.modules[__name__])
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from configparser import NoOptionError
from typing import Optional, Tuple
import gpytorch
import torch
from aepsych.config import Config
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.utils import get_dim
from scipy.stats import norm
"""AEPsych factory functions.
These functions generate a gpytorch Mean and Kernel objects from
aepsych.config.Config configurations, including setting lengthscale
priors and so on. They are primarily used for programmatically
constructing modular AEPsych models from configs.
TODO write a modular AEPsych tutorial.
"""
# AEPsych assumes input dimensions are transformed to [0,1] and we want
# a lengthscale prior that excludes lengthscales that are larger than the
# range of inputs (i.e. >1) or much smaller (i.e. <0.1). This inverse
# gamma prior puts about 99% of the prior probability mass on such values,
# with a preference for small values to prevent oversmoothing. The idea
# is taken from https://betanalpha.github.io/assets/case_studies/gaussian_processes.html#323_Informative_Prior_Model
__default_invgamma_concentration = 4.6
__default_invgamma_rate = 1.0
def default_mean_covar_factory(
config: Optional[Config] = None, dim: Optional[int] = None
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
"""Default factory for generic GP models
Args:
config (Config, optional): Object containing bounds (and potentially other
config details).
dim (int, optional): Dimensionality of the parameter space. Must be provided
if config is None.
Returns:
Tuple[gpytorch.means.Mean, gpytorch.kernels.Kernel]: Instantiated
ConstantMean and ScaleKernel with priors based on bounds.
"""
assert (config is not None) or (
dim is not None
), "Either config or dim must be provided!"
fixed_mean = False
lengthscale_prior = "gamma"
outputscale_prior = "box"
kernel = gpytorch.kernels.RBFKernel
mean = gpytorch.means.ConstantMean()
if config is not None:
fixed_mean = config.getboolean(
"default_mean_covar_factory", "fixed_mean", fallback=fixed_mean
)
lengthscale_prior = config.get(
"default_mean_covar_factory",
"lengthscale_prior",
fallback=lengthscale_prior,
)
outputscale_prior = config.get(
"default_mean_covar_factory",
"outputscale_prior",
fallback=outputscale_prior,
)
kernel = config.getobj("default_mean_covar_factory", "kernel", fallback=kernel)
if fixed_mean:
try:
target = config.getfloat("default_mean_covar_factory", "target")
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
except NoOptionError:
raise RuntimeError("Config got fixed_mean=True but no target included!")
if config.getboolean("common", "use_ax", fallback=False):
config_dim = get_dim(config)
else:
lb = config.gettensor("default_mean_covar_factory", "lb")
ub = config.gettensor("default_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
config_dim = lb.shape[0]
if dim is not None:
assert dim == config_dim, "Provided config does not match provided dim!"
else:
dim = config_dim
if lengthscale_prior == "invgamma":
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
elif lengthscale_prior == "gamma":
ls_prior = gpytorch.priors.GammaPrior(concentration=3.0, rate=6.0)
ls_prior_mode = (ls_prior.concentration - 1) / ls_prior.rate
else:
raise RuntimeError(
f"Lengthscale_prior should be invgamma or gamma, got {lengthscale_prior}"
)
if outputscale_prior == "gamma":
os_prior = gpytorch.priors.GammaPrior(concentration=2.0, rate=0.15)
elif outputscale_prior == "box":
os_prior = gpytorch.priors.SmoothedBoxPrior(a=1, b=4)
else:
raise RuntimeError(
f"Outputscale_prior should be gamma or box, got {outputscale_prior}"
)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar = gpytorch.kernels.ScaleKernel(
kernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=os_prior,
)
return mean, covar
def monotonic_mean_covar_factory(
config: Config,
) -> Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]:
"""Default factory for monotonic GP models based on derivative observations.
Args:
config (Config): Config containing (at least) bounds, and optionally LSE target.
Returns:
Tuple[ConstantMeanPartialObsGrad, gpytorch.kernels.ScaleKernel]: Instantiated mean and
scaled RBF kernels with partial derivative observations.
"""
lb = config.gettensor("monotonic_mean_covar_factory", "lb")
ub = config.gettensor("monotonic_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
dim = lb.shape[0]
fixed_mean = config.getboolean(
"monotonic_mean_covar_factory", "fixed_mean", fallback=False
)
mean = ConstantMeanPartialObsGrad()
if fixed_mean:
try:
target = config.getfloat("monotonic_mean_covar_factory", "target")
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
except NoOptionError:
raise RuntimeError("Config got fixed_mean=True but no target included!")
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
covar = gpytorch.kernels.ScaleKernel(
RBFKernelPartialObsGrad(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, covar
def song_mean_covar_factory(
config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]:
"""
Factory that makes kernels like Song et al. 2018:
Linear in intensity dimension (assumed to be the last
dimension), RBF in context dimensions, summed.
Args:
config (Config): Config object containing (at least) bounds and optionally
LSE target.
Returns:
Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.AdditiveKernel]: Instantiated
constant mean object and additive kernel object.
"""
if config.getboolean("common", "use_ax", fallback=False):
dim = get_dim(config)
else:
lb = config.gettensor("song_mean_covar_factory", "lb")
ub = config.gettensor("song_mean_covar_factory", "ub")
assert lb.shape[0] == ub.shape[0], "bounds shape mismatch!"
dim = lb.shape[0]
mean = gpytorch.means.ConstantMean()
try:
target = config.getfloat("song_mean_covar_factory", "target")
except NoOptionError:
target = 0.75
mean.constant.requires_grad_(False)
mean.constant.copy_(torch.tensor(norm.ppf(target)))
ls_prior = gpytorch.priors.GammaPrior(
concentration=__default_invgamma_concentration,
rate=__default_invgamma_rate,
transform=lambda x: 1 / x,
)
ls_prior_mode = ls_prior.rate / (ls_prior.concentration + 1)
ls_constraint = gpytorch.constraints.GreaterThan(
lower_bound=1e-4, transform=None, initial_value=ls_prior_mode
)
stim_dim = config.getint("song_mean_covar_factory", "stim_dim", fallback=-1)
context_dims = list(range(dim))
# if intensity RBF is true, the intensity dimension
# will have both the RBF and linear kernels
intensity_RBF = config.getboolean(
"song_mean_covar_factory", "intensity_RBF", fallback=False
)
if not intensity_RBF:
intensity_dim = 1
stim_dim = context_dims.pop(stim_dim) # support relative stim dims
else:
intensity_dim = 0
stim_dim = context_dims[stim_dim]
# create the LinearKernel
intensity_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.LinearKernel(active_dims=stim_dim, ard_num_dims=1),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
if dim == 1:
# this can just be LinearKernel but for consistency of interface
# we make it additive with one module
if not intensity_RBF:
return (
mean,
gpytorch.kernels.AdditiveKernel(intensity_covar),
)
else:
context_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim,
active_dims=context_dims,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, context_covar + intensity_covar
else:
context_covar = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.RBFKernel(
lengthscale_prior=ls_prior,
lengthscale_constraint=ls_constraint,
ard_num_dims=dim - intensity_dim,
active_dims=context_dims,
),
outputscale_prior=gpytorch.priors.SmoothedBoxPrior(a=1, b=4),
)
return mean, context_covar + intensity_covar
def ordinal_mean_covar_factory(
config: Config,
) -> Tuple[gpytorch.means.ConstantMean, gpytorch.kernels.ScaleKernel]:
try:
base_factory = config.getobj("ordinal_mean_covar_factory", "base_factory")
except NoOptionError:
base_factory = default_mean_covar_factory
_, base_covar = base_factory(config)
mean = gpytorch.means.ZeroMean() # wlog since ordinal is shift-invariant
if isinstance(base_covar, gpytorch.kernels.ScaleKernel):
covar = base_covar.base_kernel
else:
covar = base_covar
return mean, covar
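# Hypothetical smoke demo (ours, not part of the original module): build the
# default mean/covariance pair for a 2-parameter space without a Config object.
if __name__ == "__main__":
    demo_mean, demo_covar = default_mean_covar_factory(dim=2)
    print(type(demo_mean).__name__)   # ConstantMean
    print(type(demo_covar).__name__)  # ScaleKernel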
|
#!/usr/bin/env python3
# coding: utf-8
# ### Semi-parametric psychophysical model tutorial
#
# This tutorial will demonstrate how to fit the semiparametric psychophysical models described in [Keeley et al., 2023](https://arxiv.org/abs/2302.01187).
#
# The semi-parametric model uses a conventional parametric form for the monotonic intensity or stimulus dimension (e.g. contrast, amplitude, sound pressure level, etc.), of the form $f(x) = k(x + c)$ for a stimulus intensity level $x$, intercept or threshold $c$, and slope $k$. It then puts flexible Gaussian Process (GP) priors on both the slope and intercept parameters as functions of the other stimulus dimensions. Informally, it can be thought of as a flexible nonlinear generalization of the model in QUEST+ ([Watson, 2017](https://jov.arvojournals.org/article.aspx?articleid=2611972)): it uses the same model for the intensity dimension but replaces the fixed parametric model that QUEST+ uses for the context dimensions with a GP. AEPsych's ```BernoulliMCMutualInformation``` acquisition function is equivalent to the infomax sampling policy in QUEST+ (also known as BALD in the ML literature).
#
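# As a quick illustration of that parametric form (a hand-rolled sketch for intuition, not code from the SemiP implementation): under a probit link, the detection probability at intensity $x$ with slope $k$ and threshold $c$ is $\Phi(k(x + c))$. In the full model, $k$ and $c$ additionally vary with the context dimensions via GPs.
# In[ ]:
from scipy.stats import norm
def semip_detection_prob(x, k, c):
    # probit link applied to the linear intensity model
    return norm.cdf(k * (x + c))
print(semip_detection_prob(0.5, k=2.0, c=0.0))  # ~0.84, one s.d. above threshold
#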
# The SemiP paper provides both the "full" semi-parametric model (with independent GP posteriors estimated for the slope and intercept) and an MVN-approximate semiparametric model, which derives a single approximate multivariate normal (MVN) posterior for the SemiP model. The full SemiP model tends to be more accurate, but it is slower and less compatible with state-of-the-art lookahead sampling policies that rely on an MVN posterior.
#
# This notebook will demonstrate fitting of both models on the novel discrimination and detection test functions developed in [Owen et al., 2021](https://arxiv.org/abs/2104.09549). These test functions include a nontrivial interaction between context and intensity dimensions devised to be challenging for traditional methods that only assume additive threshold shifts by context.
# In[1]:
# imports and seeds
import matplotlib.pyplot as plt
import numpy as np
import torch
from aepsych.benchmark.test_functions import novel_detection_testfun, novel_discrimination_testfun
from aepsych.utils import dim_grid, make_scaled_sobol
from scipy.stats import norm
from torch.distributions import Bernoulli, Normal
np.random.seed(1)
torch.manual_seed(1)
# Below we import the SemiP-specific packages. Note that we are importing three link functions here: ```FloorGumbelObjective, FloorLogitObjective, FloorProbitObjective```. In this tutorial we will only use ```FloorProbitObjective```, to match how we generate synthetic data. From a theoretical perspective, the link function is the CDF of the noise in the perceptual system (i.e. a probit link implies Gaussian noise, a logit link implies logistic noise, and a Gumbel link implies Weibull noise), and empirically different links may perform better on different datasets, so we recommend that you select the appropriate link for your application based on cross-validated performance or a priori theoretical motivation.
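# To make the floor concrete (a hand-rolled sketch for intuition, assuming the parameterization the tutorial describes): a floored link with floor $\gamma$ maps a latent value $f$ to $p = \gamma + (1 - \gamma)F(f)$, where $F$ is the base CDF (the probit $\Phi$ here). The floor is the chance rate: 0 for detection, 0.5 for two-alternative discrimination.
# In[ ]:
from scipy.stats import norm
def floor_probit(f, floor):
    # assumed parameterization: floor + (1 - floor) * base CDF
    return floor + (1 - floor) * norm.cdf(f)
print(floor_probit(0.0, floor=0.0), floor_probit(0.0, floor=0.5))  # 0.5 0.75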
# In[2]:
### SemiP imports
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
)
from aepsych.likelihoods.semi_p import LinearBernoulliLikelihood
from aepsych.models import HadamardSemiPModel, SemiParametricGPModel
# #### Test function
# Here we will import the novel 2D detection test function, which provides detection probabilities ranging from 0 to 1 in a two-dimensional space. The slope and intercept of the parameterized sigmoidal function vary as a fourth-order polynomial in the context dimension, which makes this a challenging problem for fully parametric models, which tend not to include such higher-order terms.
#
# The lower and upper bounds ```lb``` and ```ub``` specify the boundaries of the function, and should be vectors of length equal to the dimensionality of the dataset you are looking to fit. The function is plotted below.
# In[3]:
# Detection test function
lb = [-1, -1]
ub = [1, 1]
xgrid = dim_grid(lower=lb, upper=ub, dim=2)
fgrid = novel_detection_testfun(xgrid)
plt.imshow(norm.cdf(fgrid).reshape(30, 30).T, aspect="auto", origin="lower")
cbar = plt.colorbar()
plt.xlabel("context dimension")
plt.ylabel("intensity dimension")
cbar.set_label("detection probability", rotation=270, labelpad=15)
plt.title("2D detection test function")
# In[4]:
# now generate some data from the Bernoulli distribution defined over a set of training locations
xtrain = make_scaled_sobol(lb=lb, ub=ub, size=100)  # quasi-random Sobol sampling for data generation
ftrain = novel_detection_testfun(xtrain)
ytrain = Bernoulli(torch.Tensor(norm.cdf(ftrain))).sample()
# #### Fitting the model
# Here we fit both the semiparametric ```SemiParametricGPModel``` and the MVN-approximate SemiP model ```HadamardSemiPModel``` using the synthetic data generated in the cell above, then visualize each model's estimate of the detection surface. Note the argument ```stim_dim``` in the constructor call: it specifies which dimension of your dataset is the monotonically increasing (sigmoidal) dimension. This could be, for example, volume in an auditory task or contrast in a visual task. Correctly identifying the monotonic dimension is crucial for good SemiP performance. Here, the second dimension is the monotonic dimension of our 2D test function.
# In[5]:
### fit SemiP models
semip_model = SemiParametricGPModel(lb=lb, ub=ub, dim=2, stim_dim=1,
likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0)))
approx_model = HadamardSemiPModel(lb=lb, ub=ub, dim=2, stim_dim=1,
likelihood=BernoulliObjectiveLikelihood(objective=FloorProbitObjective(floor=0)))
semip_model.fit(xtrain, ytrain)
approx_model.fit(xtrain, ytrain)
# In[6]:
# make predictions at locations on a 2D grid and plot
semiP_pred_mu, _ = semip_model.predict(torch.Tensor(xgrid), probability_space=True)
MVN_pred_mu, _ = approx_model.predict(torch.Tensor(xgrid), probability_space=True)
fig, axs = plt.subplots(1, 2, figsize=(7, 3))
axs[0].set_ylabel("intensity dimension")
im1 = axs[0].imshow(semiP_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[0].set_title('SemiP inference')
axs[0].set_xlabel("context dimension")
axs[0].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[0].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
axs[1].imshow(MVN_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[1].set_title('MVN-approx SemiP inference')
axs[1].set_xlabel("context dimension")
axs[1].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[1].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
cb_ax = fig.add_axes([.92, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im1, cax=cb_ax)
cbar.set_label("detection probability", rotation=270, labelpad=15)
# It is apparent that in this case quasi-random Sobol sampling over-explores parts of the psychometric field where the detection probability approaches 1.0. In a real experiment we would instead use active sampling to improve sample efficiency.
# ### Discrimination function evaluation
#
# The semiparametric model can be adapted to different psychophysical tasks by adjusting the ```floor``` value when you instantiate the model. Below, we show an example using the 2D discrimination function, which has a minimum value of p = 0.5, corresponding to an inability to discriminate two stimuli. The logic follows the example above; we simply adjust the ```floor``` value and the test function being evaluated.
#
# As above, the inefficiency of quasi-random sampling means we require a fairly large number of samples to achieve good-looking plots.
# In[7]:
lb = [-1, -1]
ub = [1, 1]
xgrid = dim_grid(lower=lb, upper=ub, dim=2, gridsize=30)
fgrid = novel_discrimination_testfun(xgrid)
# now generate some data from the Bernoulli distribution defined over a set of training locations
xtrain = make_scaled_sobol(lb=lb, ub=ub, size=300)  # quasi-random Sobol sampling for data generation
ftrain = novel_discrimination_testfun(xtrain)
ytrain = Bernoulli(torch.Tensor(norm.cdf(ftrain))).sample()
### fit SemiP models
# note: you can adjust the slope_mean value to bias the fit toward a steeper
# rise with intensity. The default value is 2; here we set it to 6.
semip_model = SemiParametricGPModel(lb=lb, ub=ub, dim=2, stim_dim=1, slope_mean=6,
                likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0.5)))
approx_model = HadamardSemiPModel(lb=lb, ub=ub, dim=2, stim_dim=1, slope_mean=6,
                likelihood=BernoulliObjectiveLikelihood(objective=FloorProbitObjective(floor=0.5)))
semip_model.fit(xtrain, ytrain)
approx_model.fit(xtrain, ytrain)
# make predictions at locations on a 2D grid and plot
semiP_pred_mu, _ = semip_model.predict(torch.Tensor(xgrid), probability_space=True)
approx_pred_mu, _ = approx_model.predict(torch.Tensor(xgrid), probability_space=True)
fig, axs = plt.subplots(1, 3, figsize=(9, 3))
axs[0].imshow(norm.cdf(fgrid).reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
              extent=[lb[0], ub[0], lb[1], ub[1]])
axs[0].set_title('True Discrimination')
axs[0].set_xlabel("context dimension")
axs[0].set_ylabel("intensity dimension")
im1 = axs[1].imshow(semiP_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[1].set_title('SemiP inference')
axs[1].set_xlabel("context dimension")
axs[1].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[1].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
axs[2].imshow(approx_pred_mu.reshape(30, 30).T, aspect="auto", origin="lower", vmin=0.5, vmax=1,
extent=[lb[0], ub[0], lb[1], ub[1]])
axs[2].set_title('MVN-SemiP inference')
axs[2].set_xlabel("context dimension")
axs[2].plot(xtrain[ytrain==0,0], xtrain[ytrain==0,1], 'rx')
axs[2].plot(xtrain[ytrain==1,0], xtrain[ytrain==1,1], 'g+')
cb_ax = fig.add_axes([.92, 0.1, 0.02, 0.8])
cbar = fig.colorbar(im1, cax=cb_ax)
cbar.set_label("discrimination probability", rotation=270, labelpad=15)
# # Active learning
#
# Finally, we provide an example active learning experiment using the semi-parametric models; a minimal sketch follows below. For more on how active learning in AEPsych works, see the [introductory documentation](https://aepsych.org/docs/gp_intro).
#
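# The sketch below is ours and is for illustration only: it uses simple posterior-variance sampling rather than AEPsych's acquisition functions. We start from a handful of Sobol trials on the discrimination function, then repeatedly query the grid point where the posterior predictive variance is largest, refitting after each simulated trial.
# In[ ]:
xactive = make_scaled_sobol(lb=lb, ub=ub, size=20)
yactive = Bernoulli(torch.Tensor(norm.cdf(novel_discrimination_testfun(xactive)))).sample()
active_model = SemiParametricGPModel(lb=lb, ub=ub, dim=2, stim_dim=1,
                likelihood=LinearBernoulliLikelihood(objective=FloorProbitObjective(floor=0.5)))
for _ in range(10):  # 10 actively chosen trials
    active_model.fit(torch.Tensor(xactive), yactive)
    _, pvar = active_model.predict(torch.Tensor(xgrid), probability_space=True)
    next_x = torch.Tensor(xgrid)[torch.argmax(pvar)].unsqueeze(0)  # most uncertain point
    next_y = Bernoulli(torch.Tensor(norm.cdf(novel_discrimination_testfun(next_x)))).sample()
    xactive = torch.cat([torch.Tensor(xactive), next_x])
    yactive = torch.cat([yactive, next_y])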
|
#!/usr/bin/env python3
# coding: utf-8
# # Data Collection and Analysis Using AEPsych
#
# This tutorial serves as a complete example on how to collect and analyze data from perceptual experiments using AEPsych. For more information on AEPsych, refer to the documentation in the [GitHub repository](https://github.com/facebookresearch/aepsych).
#
# This tutorial demonstrates how to create an experiment to measure one's detection threshold for orientation. On each trial of the experiment, the participant is shown two Gabor-patch stimuli, one oriented vertically (the foil) and one oriented at an angle (the target). The goal of the experiment is to find the smallest angle at which the participant can reliably identify the target. You can run the code blocks below interactively to participate in the experiment yourself, or you can simply view data collected from an example participant.
# ## Experiment Overview
# Below we define the functions we will need to conduct our experiment. Note that the code here is mainly for demonstration purposes and should not be used for serious experiments. If you would like to run psychophysics experiments in Python, consider using [Psychopy](https://www.psychopy.org/).
# In[1]:
import math
from IPython import get_ipython
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
import time
from IPython.display import clear_output
import random
fig_size = 15
# Show a fixation cross with a blank background
def show_fixation_cross():
_, ax = plt.subplots(1, figsize=(fig_size, fig_size/2))
for spine in ax.spines.values():
spine.set_visible(False)
ax.tick_params(bottom=False, labelbottom=False,
left=False, labelleft=False)
ax.text(0.5, 0.5, '+', size=100,
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes)
ax.axis('off')
ax.grid(b=None)
plt.show()
# Evaluate the Gabor filter function at an (x, y) position
def gabor(x, y, theta):
    f = 0.05    # spatial frequency of the sinusoidal carrier
    sigma = 25  # width of the Gaussian envelope
    # rotate the coordinates by theta
    x_ = x * math.cos(theta) + y * math.sin(theta)
    y_ = y * math.cos(theta) - x * math.sin(theta)
    num = x_ ** 2 + y_ ** 2
    return math.exp(num / (-2 * sigma**2)) * math.cos(2 * math.pi * f * x_)
# Show a gabor patch
def show_gabor(theta, ax):
# The size of the gabor patch
radius = 60
# Convert from degrees to radians
theta = math.radians(theta)
M = np.array([[gabor(i, j, theta) for j in range(-radius, radius)] for i in range(-radius, radius)])
M = ((M - M.min())) / (M.max() - M.min())
ax.axis('off')
ax.grid(b=None)
ax.imshow(M.T, cmap=cm.Greys_r)
def run_trial(angle, trial_number):
fixation_duration = 1
trial_duration = 0.5
# Show the fixation cross
show_fixation_cross()
time.sleep(fixation_duration)
clear_output(wait=True)
# randomly select if target should go on left or right side
right_side = random.choice([0, 1])
# randomly select if target angle should be clockwise or counterclockwise
direction = random.choice([-1, 1])
angle *= direction
# Show the foil and target
_, axs = plt.subplots(1, 2, figsize=(fig_size, fig_size))
show_gabor(angle, axs[right_side])
show_gabor(0, axs[int(not right_side)])
# Ask for the participant's response
ans = None
ans_key = {'j': 1, 'J': 1, 'f': 0, 'F': 0}
while ans not in ans_key:
plt.show()
time.sleep(trial_duration)
clear_output()
ans = input(f"Trial #{trial_number}: Which side was angled? (Enter 'F' for left or 'J' for right)")
# Check if the response was correct
is_correct = int(ans_key[ans] == right_side)
target_side = "right" if right_side else "left"
return is_correct, target_side
# On each trial of the experiment, the participant will first see a white box with a fixation cross for 1 second. The box looks like this:
# In[2]:
show_fixation_cross()
# After 1 second, the fixation cross will disappear, and two Gabor patches will appear side-by-side. One patch will be the foil with a vertical orientation, and one will be the target with an angled orientation. The position of the target and whether the angle is measured clockwise or counterclockwise are randomized each trial. An example foil and target are shown below:
# In[3]:
_, axs = plt.subplots(1, 2, figsize=(fig_size, fig_size))
show_gabor(0, axs[0])
show_gabor(5, axs[1])
# After 0.5 seconds, the patches will disappear, and the participant will be prompted to report which one was the target by typing "F" for left or "J" for right, and then hitting enter. Try running the code block below to experience a trial for yourself. The `run_trial` function takes an angle and a trial number as input and returns whether or not you were correct (1 for correct, 0 for incorrect), as well as the side which the target was actually on.
# In[4]:
run_trial(5, 0)
# ## Starting the AEPsych Server
# The code block below starts an AEPsych server that will run in the background (you can also start the server by running the second line in a command prompt). We can contact the server at IP address 0.0.0.0, port 5555, and the data will be saved in a database named "data_collection_analysis_tutorial.db". In this tutorial, we will run the server on the same computer as the experiment, but it is also possible to run the server remotely.
# In[5]:
get_ipython().run_cell_magic('bash', '--bg', '\naepsych_server --ip 0.0.0.0 --port 5555 database --db data_collection_analysis_tutorial.db\n')
# In[6]:
from aepsych_client import AEPsychClient
client = AEPsychClient(ip="0.0.0.0", port=5555)
# We tell the server what kind of experiment we are running by sending it a configure message (see the [configs folder](https://github.com/facebookresearch/aepsych/tree/main/configs) for some examples). The gist of the config here is that it tells the server that our experiment will have one parameter ("theta", the angle of the target), which will range from 0.1 to 5 degrees. (If you run this experiment on yourself and find that this range of angles makes the experiment too easy or too hard, you can adjust the `lb` and `ub` values in the string below.) This experiment will last for 50 trials. The parameter values for the first 10 trials will be drawn from the [Sobol sequence](https://en.wikipedia.org/wiki/Sobol_sequence) to provide some initial data to initialize AEPsych's model; the following 40 trials will be drawn from that model. In this case, the model will be a classification [Gaussian Process](https://en.wikipedia.org/wiki/Gaussian_process) (GP).
#
# GPs can be thought of as generalizations of traditional psychophysics models that can handle multiple dimensions and allow the response function to be nonlinear (for further discussion see the [AEPsych preprint](https://arxiv.org/abs/2104.09549)). Furthermore, GPs can be used in conjunction with acquisition functions to perform [active learning](https://en.wikipedia.org/wiki/Active_learning_(machine_learning))--that is, the model can determine which points in the parameter space should be sampled next to achieve some goal. In this case we use [level set estimation](https://www.ijcai.org/Proceedings/13/Papers/202.pdf) to find the angle at which the participant will correctly identify the target 75% of the time.
#
# GPs are defined by a mean function and a covariance function. Because we don't specify these functions in the config, they revert to their defaults: a constant mean function and a [radial basis covariance function](https://en.wikipedia.org/wiki/Radial_basis_function). These are fine for the parameter space we want to explore here, but if we wanted to search across a larger range of angles, we would probably want a periodic covariance function to account for the fact that angles wrap around every 360 degrees.
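# As a hedged aside (not used in this tutorial), such a periodic covariance function could be built directly in gpytorch, fixing its period to 360 degrees:
# In[ ]:
import gpytorch
# RBF-like similarity that wraps around the circle of angles
periodic_covar = gpytorch.kernels.ScaleKernel(gpytorch.kernels.PeriodicKernel())
periodic_covar.base_kernel.period_length = 360.0  # angles repeat every 360 degrees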
# In[7]:
config_str = """
[common]
parnames = [theta] # names of the parameters
lb = [0.1] # lower bound of the parameter
ub = [5] # upper bound of parameter
stimuli_per_trial = 1 # the number of stimuli shown in each trial; 1 for single, or 2 for pairwise experiments
outcome_types = [binary] # the type of response given by the participant; can be [binary] or [continuous]
target = 0.75 # desired threshold, for threshold estimation.
strategy_names = [init_strat, opt_strat] # The strategies that will be used, corresponding to the named sections below
# Configuration for the initialization strategy, which we use to gather initial points
# before we start doing model-based acquisition
[init_strat]
min_total_tells = 10 # number of sobol trials to run
generator = SobolGenerator # The generator class used to generate new parameter values
# Configuration for the optimization strategy, our model-based acquisition
[opt_strat]
min_total_tells = 50 # total number of trials to run
refit_every = 5 # how often to refit the model from scratch
generator = OptimizeAcqfGenerator # The generator class used to generate new parameter values
acqf = MCLevelSetEstimation # The acquisition function; MCLevelSetEstimation is used for threshold finding
model = GPClassificationModel # The model class
"""
client.configure(config_str=config_str, config_name="1d_gabor_config")
# Now that we have set up our client and configured our server, we can start collecting data. The basic loop of the experiment is as follows:
#
# 1. Ask AEPsych what value of our parameter, angle, to try next.
# 2. Run a trial using this suggested value.
# 3. Tell AEPsych the participant's response so that it can update its model.
# 4. Repeat for the specified number of trials.
#
# We ask AEPsych for parameters by calling client.ask(). This returns a dictionary with two entries. The first, `'config'`, contains another dictionary whose keys are the names of your parameters, and whose values are lists of parameter values to try. The second, `'is_finished'`, is a bool indicating whether the number of trials specified in the config have been completed.
# In[8]:
client.ask()
# We tell AEPsych about the parameter values we have tried by calling client.tell(). This method has two required arguments. The first, config, is a dictionary representing the set of parameter values you would like to tell AEPsych about, and takes the same format as the 'config' entry from client.ask(). The second argument is the binary outcome of a trial, indicated by 0 (the participant did not identify the target) or 1 (the participant did identify the target). This method also optionally takes other keyword arguments that will be stored as metadata in AEPsych's database. For our experiment, we will record which side the target was on.
# In[9]:
client.tell(config={'theta':[.1]}, outcome=0, target_side='right')
# The code below asks AEPsych for parameter values and runs trials until the experiment is completed:
# In[10]:
finished = False
trial_number = 1
while not finished:
response = client.ask()
theta = response["config"]["theta"][0]
outcome, target_side = run_trial(theta, trial_number)
client.tell(config={"theta": [theta]}, outcome=outcome, target_side=target_side)
finished = response["is_finished"]
trial_number += 1
# Note that even after the number of trials specified in the config have completed, you can still ask for more parameter values and conduct more trials:
# In[11]:
client.ask()
# You are also not restricted to only using the parameter values that AEPsych suggests. You can tell it the outcome of any parameter values that you would like:
# In[12]:
client.tell(config={'theta':[5]}, outcome=1, target_side='left')
# Once you are done collecting data, you can close the server by calling `client.finalize()`.
# In[13]:
client.finalize()
# ## Replaying the Experiment and Analyzing Data
# To analyze the data, we open the database with an `AEPsychServer` object. This server runs here in the notebook rather than in the background like the server we used to collect data.
# In[14]:
from aepsych.server import AEPsychServer
serv = AEPsychServer(database_path="data_collection_analysis_tutorial.db")
# The database is made up of a set of experiments, each with a unique experiment ID (a UUID). Every time the server is started (e.g. from the command line), a new experiment ID is generated. To list all experiment IDs:
# In[15]:
exp_ids = [rec.experiment_id for rec in serv.db.get_master_records()]
print(exp_ids)
# The above indicates that there is only 1 experiment_id in this database.
#
# Note that the above commands do not actually load any of the experiment data from the database. The data is only loaded when you call `serv.replay` to replay all of the setup, ask, and tell messages that are recorded in the database. We will pass `skip_computations=True` to this method to skip all of the model-fitting computations and make the replay finish faster.
# In[16]:
serv.replay(exp_ids[-1], skip_computations=True)
# The data has been loaded into the server's list of strategies, which we can access through `serv._strats`. Per our config string, we have two strategies: the first is the model-less initialization strategy, and the second is the model-based threshold-finding strategy. We can see the model-based strategy's data using its `x` and `y` properties:
# In[17]:
strat = serv._strats[-1]
print(strat.x)
print(strat.y)
# Since we passed `skip_computations=True` into the replay method before, we will have to manually refit the strategy's model:
# In[18]:
strat.model.fit(strat.x, strat.y)
# We can now plot the posterior of the fitted model:
# In[20]:
from aepsych.plotting import plot_strat
plt.rcParams["figure.figsize"] = (15,15)
plt.rcParams['figure.facecolor'] = 'white'
plot_strat(strat, xlabel='angle (degrees)', ylabel='Probability of Selecting Target', target_level=.75)
# In this plot, the blue and red ticks at the bottom represent angles at which the participant did and did not successfully identify the target, respectively. The dark blue curve represents the model's posterior probability that the participant would select the target, with 95% of the posterior mass lying in the shaded region. The orange horizontal line represents the participant's detection threshold, which once again is defined as the smallest angle at which the participant would select the target 75% of the time. If you are viewing the data from the example participant, you will see that their threshold is somewhere between about 0.5 and 1.5 degrees (note, however, that threshold estimation for non-monotonic models may not always be accurate; we are working on better algorithms for this). More data could be collected to reduce this uncertainty. If you collected your own data, your plot may look different; there are often large individual differences in psychophysics tasks. In any case you should see that most of the sampled points are near the estimated threshold; the level set estimation algorithm intelligently selects points so that time is not wasted collecting data far away from the threshold, allowing for a more accurate threshold estimate in fewer trials than traditional methods.
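# As a rough numerical read-out of that threshold (a sketch of ours, not the estimate drawn by `plot_strat`), we can evaluate the posterior in probability space on a dense grid and take the smallest angle whose predicted detection probability reaches 75%:
# In[ ]:
from aepsych.utils import dim_grid
grid = dim_grid(lower=strat.lb, upper=strat.ub, dim=1, gridsize=200)
pmean, _ = strat.model.predict(grid, probability_space=True)
crossings = grid[pmean >= 0.75]
print(crossings[0] if len(crossings) > 0 else "threshold not reached on this grid")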
#
# ## Conclusion
#
# This tutorial has shown a complete example of how to conduct an AEPsych experiment and analyze the data. You can easily adapt this code for your own needs by changing the config string and the code that runs trials. If you need any help debugging or setting up your experiment, you can [open a GitHub issue](https://github.com/facebookresearch/aepsych/issues). You can also try conducting AEPsych experiments without writing any code by running [this notebook](https://github.com/facebookresearch/aepsych/blob/main/examples/Interactive_AEPsych.ipynb).
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from copy import deepcopy
import numpy as np
import torch
from aepsych.config import Config
from aepsych.models import GPClassificationModel
from aepsych.utils import _process_bounds, get_dim, get_parameters, make_scaled_sobol
class UtilsTestCase(unittest.TestCase):
def test_scaled_sobol_asserts(self):
lb = np.r_[0, 0, 1]
ub = np.r_[1]
with self.assertRaises(AssertionError):
make_scaled_sobol(lb, ub, 10)
def test_scaled_sobol_sizes(self):
lb = np.r_[0, 1]
ub = np.r_[1, 30]
grid = make_scaled_sobol(lb, ub, 100)
self.assertEqual(grid.shape, (100, 2))
def test_dim_grid_model_size(self):
lb = -4.0
ub = 4.0
dim = 1
gridsize = 10
mb = GPClassificationModel(lb=lb, ub=ub, dim=dim)
grid = GPClassificationModel.dim_grid(mb, gridsize=gridsize)
self.assertEqual(grid.shape, torch.Size([10, 1]))
def test_process_bounds(self):
lb, ub, dim = _process_bounds(np.r_[0, 1], np.r_[2, 3], None)
self.assertTrue(torch.all(lb == torch.tensor([0.0, 1.0])))
self.assertTrue(torch.all(ub == torch.tensor([2.0, 3.0])))
self.assertEqual(dim, 2)
# Wrong dim
with self.assertRaises(AssertionError):
_process_bounds(np.r_[0, 0], np.r_[1, 1], 3)
# ub < lb
with self.assertRaises(AssertionError):
_process_bounds(np.r_[1], np.r_[0], None)
class ParameterUtilsTestCase(unittest.TestCase):
def setUp(self) -> None:
config_str = """
[common]
parnames = [par1, par2, par3]
lb = [0, 0, 0]
ub = [1, 1000, 10]
choice_parnames = [par4, par5, par6, par9]
fixed_parnames = [par7, par8]
[par2]
log_scale = True
[par3]
value_type = int
[par4]
choices = [a, b]
[par5]
choices = [x]
[par6]
choices = [x, y, z]
is_ordered = True
[par7]
value = 123
[par8]
value = foo
[par9]
choices = [x, y, z]
"""
self.config = Config(config_str=config_str)
def test_get_ax_parameters(self):
params = get_parameters(self.config)
correct_range_params = [
{
"name": "par1",
"type": "range",
"value_type": "float",
"log_scale": False,
"bounds": [0.0, 1.0],
},
{
"name": "par2",
"type": "range",
"value_type": "float",
"log_scale": True,
"bounds": [0.0, 1000.0],
},
{
"name": "par3",
"type": "range",
"value_type": "int",
"log_scale": False,
"bounds": [0.0, 10.0],
},
]
correct_choice_params = [
{
"name": "par4",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["a", "b"],
},
{
"name": "par5",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["x"],
},
{
"name": "par6",
"type": "choice",
"value_type": "str",
"is_ordered": True,
"values": ["x", "y", "z"],
},
{
"name": "par9",
"type": "choice",
"value_type": "str",
"is_ordered": False,
"values": ["x", "y", "z"],
},
]
correct_fixed_params = [
{
"name": "par7",
"type": "fixed",
"value": 123.0,
},
{
"name": "par8",
"type": "fixed",
"value": "foo",
},
]
self.assertEqual(
params, correct_range_params + correct_choice_params + correct_fixed_params
)
def test_get_dim(self):
dim = get_dim(self.config)
# 3 dims from par1, par2, par3
# 1 binary dim from par4
# 0 dim from par5 (effectively a fixed dim)
# 1 dim from par6 (is_ordered makes it one continuous dim)
# 0 dim from par7 & par8 (fixed dims aren't modeled)
# 3 dim from par9 (one-hot vector with 3 elements)
# 8 total dims
self.assertEqual(8, dim)
# Count only choice dims
copied_config = deepcopy(self.config)
del copied_config["common"]["parnames"]
del copied_config["common"]["lb"]
del copied_config["common"]["ub"]
dim = get_dim(copied_config)
self.assertEqual(5, dim)
# Removing par5 does nothing
copied_config["common"]["choice_parnames"] = "[par4, par6, par9]"
dim = get_dim(copied_config)
self.assertEqual(5, dim)
# Removing par6 leaves us with 1 binary dimension (par4) and 3 one-hot dimensions (par9)
copied_config["common"]["choice_parnames"] = "[par4, par9]"
dim = get_dim(copied_config)
self.assertEqual(4, dim)
# Removing par9 leaves us with 1 binary dimension
copied_config["common"]["choice_parnames"] = "[par4]"
dim = get_dim(copied_config)
self.assertEqual(1, dim)
# Removing par7 & par8 does nothing
del copied_config["common"]["fixed_parnames"]
dim = get_dim(copied_config)
self.assertEqual(1, dim)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
import numpy as np
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.config import Config
from aepsych.generators import MonotonicRejectionGenerator, SobolGenerator
from aepsych.models.gp_classification import GPClassificationModel
from aepsych.models.monotonic_rejection_gp import MonotonicRejectionGP
from aepsych.strategy import AEPsychStrategy, SequentialStrategy, Strategy
class TestSequenceGenerators(unittest.TestCase):
def setUp(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
extra_acqf_args = {"target": 0.75, "beta": 1.96}
self.strat = Strategy(
model=MonotonicRejectionGP(
lb=lb,
ub=ub,
dim=2,
monotonic_idxs=[1],
),
generator=MonotonicRejectionGenerator(
acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args
),
min_asks=50,
lb=lb,
ub=ub,
min_post_range=0.3,
stimuli_per_trial=1,
outcome_types=["binary"],
)
self.strat.model.fit = MagicMock()
self.strat.model.update = MagicMock()
self.strat.generator.gen = MagicMock()
def test_opt_strategy_single(self):
lbs = [[-1], [-10]]
ubs = [[1], [-8]]
n = [3, 5]
strat_list = []
for lb, ub, n in zip(lbs, ubs, n):
gen = SobolGenerator(lb, ub)
strat = Strategy(
min_asks=n,
generator=gen,
lb=lb,
ub=ub,
min_total_outcome_occurrences=0,
stimuli_per_trial=1,
outcome_types=["binary"],
)
strat_list.append(strat)
strat = SequentialStrategy(strat_list)
out = np.zeros(8)
for i in range(8):
next_x = strat.gen()
strat.add_data(next_x, [1])
out[i] = next_x
gen1 = out[:3]
gen2 = out[3:]
self.assertTrue(np.min(gen1) >= -1)
self.assertTrue(np.min(gen2) >= -10)
self.assertTrue(np.max(gen1) <= 1)
self.assertTrue(np.max(gen2) <= -8)
def test_warmstart(self):
self.strat.refit_every = 10
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertEqual(
self.strat.model.fit.call_count, 4
) # first fit gets skipped because there is no data
self.assertEqual(self.strat.model.update.call_count, 45)
def test_no_warmstart(self):
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertEqual(
self.strat.model.fit.call_count, 49
) # first fit gets skipped because there is no data
self.assertEqual(self.strat.model.update.call_count, 0)
def test_finish_criteria(self):
for _ in range(49):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished)
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished) # not enough "no" trials
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [0])
self.assertFalse(
self.strat.finished
) # not enough difference between posterior min/max
for _ in range(50):
self.strat.gen()
self.strat.add_data(np.r_[0.0, 0.0], [0])
self.assertTrue(self.strat.finished)
def test_max_asks(self):
self.strat.max_asks = 50
for _ in range(49):
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertFalse(self.strat.finished)
self.strat.gen()
self.strat.add_data(np.r_[1.0, 1.0], [1])
self.assertTrue(self.strat.finished)
def test_keep_most_recent(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
self.strat = Strategy(
model=GPClassificationModel(
lb=lb,
ub=ub,
),
generator=SobolGenerator(lb=lb, ub=ub),
min_asks=50,
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
)
self.strat.keep_most_recent = 2
data = torch.rand(4, 2)
for i, d in enumerate(data):
self.strat.add_data(d, [0])
self.strat.update()
lb = max(0, i - self.strat.keep_most_recent + 1)
self.assertTrue(
torch.equal(self.strat.model.train_inputs[0], data[lb : i + 1])
)
def test_run_indefinitely(self):
lb = [-1, -1]
ub = [1, 1]
with self.assertWarns(UserWarning):
self.strat = Strategy(
model=GPClassificationModel(
lb=lb,
ub=ub,
),
generator=SobolGenerator(lb=lb, ub=ub),
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
min_asks=1, # should be ignored
run_indefinitely=True,
)
self.strat.gen()
self.assertFalse(self.strat.finished)
self.strat.finish()
self.assertTrue(self.strat.finished)
def test_n_trials_deprecation(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
self.strat = Strategy(
generator=SobolGenerator(lb=lb, ub=ub),
min_asks=50,
lb=lb,
ub=ub,
stimuli_per_trial=1,
outcome_types=["binary"],
)
with self.assertWarns(DeprecationWarning):
self.assertEqual(self.strat.n_trials, 50)
def test_batchsobol_pairwise(self):
lb = [1, 2, 3]
ub = [2, 3, 4]
min_asks = 10
mod = Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=12345, stimuli_per_trial=2),
min_asks=min_asks,
stimuli_per_trial=2,
outcome_types=["binary"],
)
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3, 2))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3, 2))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3, 2))
def test_opt_strategy_pairwise(self):
strat_list = [
Strategy(
lb=[-1],
ub=[1],
min_asks=3,
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
stimuli_per_trial=2,
outcome_types=["binary"],
min_total_outcome_occurrences=0,
),
Strategy(
lb=[-10],
ub=[-8],
min_asks=5,
generator=SobolGenerator(lb=[-10], ub=[-8], stimuli_per_trial=2),
stimuli_per_trial=2,
outcome_types=["binary"],
min_total_outcome_occurrences=0,
),
]
strat = SequentialStrategy(strat_list)
out = np.zeros((8, 2))
for i in range(8):
next_x = strat.gen()
strat.add_data(next_x, [1])
out[i] = next_x
gen1 = out[:3]
gen2 = out[3:]
self.assertTrue(np.min(gen2) >= -10)
self.assertTrue(np.min(gen1) >= -1)
self.assertTrue(np.max(gen1) <= 1)
self.assertTrue(np.max(gen2) <= -8)
def test_strategy_asserts(self):
class MockModel(object):
_num_outputs = 1
_batch_shape = 2
stimuli_per_trial = 1
outcome_type = "binary"
# assert if model and strategy disagree on stimuli_per_trial
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=2,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
outcome_types=["binary"],
)
# assert if model and strategy disagree on outcome_type
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["notbinary"],
)
# assert if model and strategy disagree on num outcomes
with self.assertRaises(AssertionError):
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["binary", "extra"],
)
try:
# no assert on 1 stim per trial
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=MockModel(),
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=1),
outcome_types=["binary"],
)
# no assert on 2 stim per trial
model = MockModel()
model._num_outputs = 2
model.outcome_type = ["binary", "extra"]
_ = Strategy(
lb=[-1],
ub=[1],
min_asks=5,
stimuli_per_trial=1,
model=model,
generator=SobolGenerator(lb=[-1], ub=[1], stimuli_per_trial=2),
outcome_types=["binary", "extra"],
)
except AssertionError:
self.fail("Strategy raised unexpected AssertionError on __init__!")
class GenerationStrategyTestCase(unittest.TestCase):
def test_finish(self):
config_str = """
[common]
use_ax = True
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [x]
lb = [0]
ub = [1]
strategy_names = [test_strat]
[test_strat]
generator = SobolGenerator
run_indefinitely = True
"""
config = Config(config_str=config_str)
strat = AEPsychStrategy.from_config(config)
self.assertFalse(strat.finished)
strat.finish()
self.assertTrue(strat.finished)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from aepsych.benchmark.test_functions import make_songetal_testfun
from aepsych.utils import dim_grid
class BenchmarkTestCase(unittest.TestCase):
def test_songetal_funs_smoke(self):
valid_phenotypes = ["Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"]
grid = dim_grid(lower=[-3, -20], upper=[4, 120], dim=2, gridsize=30)
try:
for phenotype in valid_phenotypes:
testfun = make_songetal_testfun(phenotype=phenotype)
f = testfun(grid)
self.assertTrue(f.shape == torch.Size([900]))
except Exception:
self.fail()
with self.assertRaises(AssertionError):
_ = make_songetal_testfun(phenotype="not_a_real_phenotype")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import shutil
import unittest
import uuid
from configparser import DuplicateOptionError
from pathlib import Path
import aepsych.config as configuration
import aepsych.database.db as db
import aepsych.database.tables as tables
import sqlalchemy
class DBTestCase(unittest.TestCase):
def setUp(self):
# random database path name without dashes
self._dbname = "./{}.db".format(str(uuid.uuid4().hex))
self._database = db.Database(db_path=self._dbname)
def tearDown(self):
self._database.delete_db()
def test_db_create(self):
engine = self._database.get_engine()
self.assertIsNotNone(engine)
self.assertIsNotNone(self._database._engine)
def test_record_setup_basic(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
result = self._database.get_replay_for(master_table.experiment_id)
self.assertNotEqual(None, result)
self.assertEqual(len(result), 1)
self._database.record_message(
master_table=master_table,
type="test_type",
request={"test": "this is a follow on request"},
)
result = self._database.get_replay_for(master_table.experiment_id)
self.assertNotEqual(None, result)
self.assertEqual(len(result), 2)
def test_record_setup_doublesetup_goodid(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
id=master_table.experiment_id,
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 2)
def test_record_setup_doublesetup_badid(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
self.assertRaises(
RuntimeError,
self._database.record_setup,
description="test description",
name="test name",
request={"test": "this is a test request"},
id=1,
)
def test_record_setup_master_children(self):
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
self.assertIsNotNone(master_table)
self.assertEqual(len(master_table.children_replay), 1)
self._database.record_message(
master_table, "test", request={"test": "this is a test request"}
)
self.assertEqual(len(master_table.children_replay), 2)
def test_extra_info(self):
extra_info_setup = {"test": "this is extra_info"}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request", "extra_info": extra_info_setup},
)
extra_info_record = {"test": "This is another extra_info"}
self._database.record_message(
master_table,
"test",
request={"test": "this is a test request", "extra_info": extra_info_record},
)
new_master = self._database.get_master_record(master_table.experiment_id)
self.assertEqual(new_master.children_replay[0].extra_info, extra_info_setup)
self.assertEqual(new_master.children_replay[1].extra_info, extra_info_record)
def test_update_db(self):
current_path = Path(os.path.abspath(__file__)).parent
db_path = current_path
db_path = db_path.joinpath("test_databases/test_original_schema.db")
# copy the db to a new file
dst_db_path = Path(self._dbname)
shutil.copy(str(db_path), str(dst_db_path))
self.assertTrue(dst_db_path.is_file())
# open the new db
test_database = db.Database(db_path=dst_db_path.as_posix())
self.assertFalse(tables.DbReplayTable._has_extra_info(test_database._engine))
self.assertTrue(test_database.is_update_required())
# make sure we raise the exception on newer columns
self.assertRaises(
sqlalchemy.exc.OperationalError,
test_database.record_setup,
description="test description",
name="test name",
request={"test": "this is a test request"},
)
test_database._session.rollback()
test_database.perform_updates()
# retry adding rows
master_table = test_database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
test_database.record_message(
master_table, "test", request={"test": "this is a test request"}
)
# make sure the new column exists
self.assertTrue(tables.DbReplayTable._has_extra_info(test_database._engine))
test_database.delete_db()
def test_update_db_with_raw_data_tables(self):
current_path = Path(os.path.abspath(__file__)).parent
db_path = current_path
db_path = db_path.joinpath("test_databases/multi_stimuli.db")
# copy the db to a new file
dst_db_path = Path(self._dbname)
shutil.copy(str(db_path), str(dst_db_path))
self.assertTrue(dst_db_path.is_file())
# open the new db
test_database = db.Database(db_path=dst_db_path.as_posix())
# Make sure that update is required
self.assertTrue(test_database.is_update_required())
# Update the database
test_database.perform_updates()
# Check that the update was successful
# Known expected data
par1 = [[0.1, 0.2], [0.3, 1], [2, 3], [4, 0.1], [0.2, 2], [1, 0.3], [0.3, 0.1]]
par2 = [[4, 0.1], [3, 0.2], [2, 1], [0.3, 0.2], [2, 0.3], [1, 0.1], [0.3, 4]]
outcomes = [[1, 0], [-1, 0], [0.1, 0], [0, 0], [-0.1, 0], [0, 0], [0, 0]]
param_dict_expected = {x: {} for x in range(1, 8)}
for i in range(1, 8):
param_dict_expected[i]["par1_stimuli0"] = par1[i - 1][0]
param_dict_expected[i]["par1_stimuli1"] = par1[i - 1][1]
param_dict_expected[i]["par2_stimuli0"] = par2[i - 1][0]
param_dict_expected[i]["par2_stimuli1"] = par2[i - 1][1]
outcome_dict_expected = {x: {} for x in range(1, 8)}
for i in range(1, 8):
outcome_dict_expected[i]["outcome_0"] = outcomes[i - 1][0]
outcome_dict_expected[i]["outcome_1"] = outcomes[i - 1][1]
# Check that the number of entries in each table is correct
n_iterations = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM raw_data")
.fetchone()[0]
)
self.assertEqual(n_iterations, 7)
n_params = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM param_data")
.fetchone()[0]
)
self.assertEqual(n_params, 28)
n_outcomes = (
test_database.get_engine()
.execute("SELECT COUNT(*) FROM outcome_data")
.fetchone()[0]
)
self.assertEqual(n_outcomes, 14)
# Check that the data is correct
param_data = (
test_database.get_engine().execute("SELECT * FROM param_data").fetchall()
)
param_dict = {x: {} for x in range(1, 8)}
for param in param_data:
param_dict[param.iteration_id][param.param_name] = float(param.param_value)
self.assertEqual(param_dict, param_dict_expected)
outcome_data = (
test_database.get_engine().execute("SELECT * FROM outcome_data").fetchall()
)
outcome_dict = {x: {} for x in range(1, 8)}
for outcome in outcome_data:
outcome_dict[outcome.iteration_id][
outcome.outcome_name
] = outcome.outcome_value
self.assertEqual(outcome_dict, outcome_dict_expected)
# Make sure that update is no longer required
self.assertFalse(test_database.is_update_required())
test_database.delete_db()
def test_update_configs(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.84
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
dbname = "./{}.db".format(str(uuid.uuid4().hex))
database = db.Database(dbname)
database.record_setup(
description="default description",
name="default name",
request=request,
)
self.assertTrue(database.is_update_required())
database.perform_updates()
self.assertFalse(database.is_update_required())
database.delete_db()
def test_strat_table(self):
test_strat = {"strat": "this is nothing like a strat"}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
# record a strat
self._database.record_strat(master_table, strat=test_strat)
experiment_id = master_table.experiment_id
strat = self._database.get_strat_for(experiment_id)
self.assertEqual(test_strat, strat)
def test_config_table(self):
test_config = {"config": "this is nothing like a config but it works."}
master_table = self._database.record_setup(
description="test description",
name="test name",
request={"test": "this is a test request"},
)
# record a strat
self._database.record_config(master_table, config=test_config)
experiment_id = master_table.experiment_id
config = self._database.get_config_for(experiment_id)
self.assertEqual(test_config, config)
def test_raw_table(self):
model_data = True
master_table = self._database.record_setup(
description="test raw table",
name="test",
request={"test": "this a test request"},
)
# Record a raw data entry
self._database.record_raw(master_table, model_data=model_data)
experiment_id = master_table.experiment_id
raw_data = self._database.get_raw_for(experiment_id)
self.assertEqual(len(raw_data), 1)
self.assertEqual(raw_data[0].model_data, model_data)
def test_param_table(self):
param_name = "test_param"
param_value = 1.123
master_table = self._database.record_setup(
description="test param table",
name="test",
request={"test": "this a test request"},
)
raw_table = self._database.record_raw(master_table, model_data=True)
# Record a param data entry
self._database.record_param(raw_table, param_name, param_value)
experiment_id = master_table.experiment_id
iteration_id = raw_table.unique_id
param_data = self._database.get_param_for(experiment_id, iteration_id)
self.assertEqual(len(param_data), 1)
self.assertEqual(param_data[0].param_name, param_name)
self.assertEqual(float(param_data[0].param_value), param_value)
def test_outcome_table(self):
outcome_value = 1.123
outcome_name = "test_outcome"
master_table = self._database.record_setup(
description="test outcome table",
name="test",
request={"test": "this a test request"},
)
raw_table = self._database.record_raw(master_table, model_data=True)
# Record an outcome data entry
self._database.record_outcome(raw_table, outcome_name, outcome_value)
experiment_id = master_table.experiment_id
iteration_id = raw_table.unique_id
outcome_data = self._database.get_outcome_for(experiment_id, iteration_id)
self.assertEqual(len(outcome_data), 1)
self.assertEqual(outcome_data[0].outcome_name, outcome_name)
self.assertEqual(outcome_data[0].outcome_value, outcome_value)
    # Test the metadata flow end to end.
def test_metadata(self):
# Run tests using the native config_str functionality.
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
experiment_name = Lucas
experiment_description = Test
metadata1 = one
metadata2 = two
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
        # Generate a config so we can run .jsonifyMetadata() on it later.
generated_config = configuration.Config(**request["message"])
master_table = self._database.record_setup(
description=generated_config["metadata"]["experiment_description"],
name=generated_config["metadata"]["experiment_name"],
request=request,
extra_metadata=generated_config.jsonifyMetadata(),
)
self.assertEqual(
generated_config.jsonifyMetadata(),
master_table.extra_metadata, # Test in JSON form
)
        # Next, deserialize into a dictionary and make sure each element matches 1-to-1.
        ## Note: generated_config will have extra fields added by configparser defaults,
        ## so run the comparison as json.loads result -> generated_config, NOT the other way around.
deserializedjson = json.loads(
master_table.extra_metadata
) # Directly from master table entry.
        ## Check each value in the JSON deserialized from the DB against the expected
        ## values and against the config prior to insertion, to confirm individual values are retained.
self.assertEqual(deserializedjson["metadata1"], "one")
self.assertEqual(deserializedjson["metadata2"], "two")
self.assertEqual(deserializedjson["experiment_name"], "Lucas")
self.assertEqual(deserializedjson["experiment_description"], "Test")
self.assertEqual(
deserializedjson["experiment_name"], master_table.experiment_name
)
self.assertEqual(
deserializedjson["experiment_description"],
master_table.experiment_description,
)
def test_broken_metadata(self):
        # Test broken metadata (duplicate and empty options) to make sure it is handled gracefully.
config_strdupe = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
experiment_name = Lucas
experiment_description = Test
metadata1 =
metadata2 = two
metadata2 = three
"""
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.98
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
[metadata]
metadata1 =
metadata2 = three
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_strdupe},
}
request2 = {
"type": "setup",
"version": "0.01",
"message": {"config_str": config_str},
}
        # Generate a config so we can run .jsonifyMetadata() on it later.
with self.assertRaises(DuplicateOptionError):
configuration.Config(**request["message"])
generated_config = configuration.Config(**request2["message"])
master_table = self._database.record_setup(
            description=(
                generated_config["metadata"]["experiment_description"]
                if "experiment_description" in generated_config["metadata"]
                else "default description"
            ),
            name=(
                generated_config["metadata"]["experiment_name"]
                if "experiment_name" in generated_config["metadata"]
                else "default name"
            ),
request=request,
extra_metadata=generated_config.jsonifyMetadata(),
)
deserializedjson = json.loads(
master_table.extra_metadata
        )  # The initial process is the same as above; now exercise the edge cases.
self.assertEqual(deserializedjson["metadata2"], "three") # test normal value
self.assertEqual(deserializedjson["metadata1"], "") # test an empty value
self.assertEqual(
master_table.experiment_name, "default name"
) # test default name value
self.assertEqual(
master_table.experiment_description, "default description"
) # test default description value
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from itertools import product
import numpy as np
import torch
from aepsych.acquisition import (
ApproxGlobalSUR,
EAVC,
GlobalMI,
GlobalSUR,
LocalMI,
LocalSUR,
)
from aepsych.acquisition.bvn import bvn_cdf
from aepsych.acquisition.lookahead_utils import posterior_at_xstar_xq
from botorch.utils.testing import MockModel, MockPosterior
from gpytorch.distributions import MultivariateNormal
from scipy.stats import multivariate_normal
class BvNCDFTestCase(unittest.TestCase):
def test_bvncdf(self):
rhos = np.linspace(0.3, 0.9, 7)
xus = [0.3, 0.5, 0.7]
yus = [0.3, 0.5, 0.7]
params = product(rhos, xus, yus)
for par in params:
            with self.subTest(par=par):
rho, xu, yu = par
var = np.r_[1, rho, rho, 1].reshape(2, 2)
x = np.r_[xu, yu]
scipy_answer = multivariate_normal(cov=var).cdf(x)
torch_answer = bvn_cdf(
torch.tensor(xu), torch.tensor(yu), torch.tensor(rho)
)
self.assertTrue(np.isclose(scipy_answer, torch_answer))
class LookaheadPosteriorTestCase(unittest.TestCase):
def setUp(self):
torch.manual_seed(1)
np.random.seed(1)
self.xstar = torch.zeros(1, 1, 1)
self.xq = torch.randn(1, 2, 1)
f = torch.rand(3)
a = torch.rand(3, 3)
covar = a @ a.T
flat_diag = torch.rand(3)
covar = covar + torch.diag_embed(flat_diag)
mvn = MultivariateNormal(mean=f, covariance_matrix=covar)
model = MockModel(
MockPosterior(mean=f[:, None], variance=torch.diag(covar)[None, :, None])
)
model._posterior.distribution = mvn
self.model, self.f, self.covar = model, f, covar
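        # The mock posterior stacks xstar (index 0) with xq (indices 1:), so
        # posterior_at_xstar_xq should recover f[0] / covar[0, 0] for xstar and
        # f[1:] / diag(covar)[1:] for xq, as the assertions below verify.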
def test_posterior_extraction(self):
mu_s, s2_s, mu_q, s2_q, cov_q = posterior_at_xstar_xq(
self.model, self.xstar, self.xq
)
# mean extraction correct
self.assertTrue(mu_s == self.f[0])
self.assertTrue((mu_q == self.f[1:]).all())
# var extraction correct
self.assertTrue(s2_s == self.covar[0, 0])
self.assertTrue((s2_q == torch.diag(self.covar)[1:]).all())
# covar extraction correct
self.assertTrue((cov_q == self.covar[0, 1:]).all())
self.assertTrue((cov_q == self.covar[1:, 0]).all())
def mi_smoketest(self):
# with the mock posterior, local and global MI should be identical
local_mi = LocalMI(model=self.model, target=0.75)
global_mi = GlobalMI(model=self.model, target=0.75, Xq=self.xq[0])
self.assertTrue(global_mi(self.xstar) == local_mi(self.xstar))
def sur_smoketest(self):
# with the mock posterior, local and global SUR should be identical
local_sur = LocalSUR(model=self.model, target=0.75)
global_sur = GlobalSUR(model=self.model, target=0.75, Xq=self.xq[0])
self.assertTrue(global_sur(self.xstar) == local_sur(self.xstar))
def global_lookahead_smoketest(self):
for global_lookahead_acq in [
GlobalMI,
GlobalSUR,
ApproxGlobalSUR,
EAVC,
]:
acq = global_lookahead_acq(model=self.model, target=0.75, Xq=self.xq[0])
acqval = acq(self.xstar)
self.assertTrue(acqval.shape == torch.Size([]))
self.assertTrue(np.isfinite(acqval.numpy()))
def local_lookahead_smoketest(self):
for local_lookahead_acq in [
LocalMI,
LocalSUR,
]:
acq = local_lookahead_acq(model=self.model, target=0.75)
acqval = acq(self.xstar)
self.assertTrue(acqval.shape == torch.Size([]))
self.assertTrue(np.isfinite(acqval.numpy()))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import gpytorch
import numpy as np
from aepsych.config import Config
from aepsych.factory import (
default_mean_covar_factory,
monotonic_mean_covar_factory,
song_mean_covar_factory,
)
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from scipy.stats import norm
class TestFactories(unittest.TestCase):
def _test_mean_covar(self, meanfun, covarfun):
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(meanfun.constant.requires_grad)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(
covarfun.base_kernel._priors["lengthscale_prior"][0],
gpytorch.priors.GammaPrior,
)
)
self.assertTrue(
isinstance(
covarfun._priors["outputscale_prior"][0],
gpytorch.priors.SmoothedBoxPrior,
)
)
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.RBFKernel))
def test_default_factory_1d_config(self):
config = Config(
config_dict={"default_mean_covar_factory": {"lb": [0], "ub": [1]}}
)
meanfun, covarfun = default_mean_covar_factory(config=config)
self._test_mean_covar(meanfun, covarfun)
def test_default_factory_1d_dim(self):
meanfun, covarfun = default_mean_covar_factory(dim=1)
self._test_mean_covar(meanfun, covarfun)
def test_default_factory_args_1d(self):
conf = {
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"fixed_mean": True,
"lengthscale_prior": "gamma",
"outputscale_prior": "gamma",
"target": 0.5,
"kernel": "MaternKernel",
}
}
config = Config(config_dict=conf)
meanfun, covarfun = default_mean_covar_factory(config)
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(
covarfun.base_kernel._priors["lengthscale_prior"][0],
gpytorch.priors.GammaPrior,
)
)
self.assertTrue(
isinstance(
covarfun._priors["outputscale_prior"][0], gpytorch.priors.GammaPrior
)
)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0].concentration == 3.0
)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0].rate == 6.0
)
self.assertTrue(covarfun._priors["outputscale_prior"][0].concentration == 2.0)
self.assertTrue(covarfun._priors["outputscale_prior"][0].rate == 0.15)
self.assertTrue(
covarfun.base_kernel._priors["lengthscale_prior"][0]._transform is None
)
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.MaternKernel))
def test_default_factory_raises(self):
bad_confs = [
{
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"lengthscale_prior": "box",
}
},
{
"default_mean_covar_factory": {
"lb": [0],
"ub": [1],
"outputscale_prior": "normal",
}
},
{"default_mean_covar_factory": {"lb": [0], "ub": [1], "fixed_mean": True}},
]
for conf in bad_confs:
with self.assertRaises(RuntimeError):
config = Config(conf)
_, __ = default_mean_covar_factory(config)
with self.assertRaises(AssertionError):
default_mean_covar_factory()
config = Config(
config_dict={"default_mean_covar_factory": {"lb": [0], "ub": [1]}}
)
with self.assertRaises(AssertionError):
default_mean_covar_factory(config=config, dim=2)
def test_default_factory_2d(self):
conf = {"default_mean_covar_factory": {"lb": [-2, 3], "ub": [1, 10]}}
config = Config(config_dict=conf)
meanfun, covarfun = default_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 2)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, gpytorch.kernels.RBFKernel))
def test_monotonic_factory_1d(self):
conf = {"monotonic_mean_covar_factory": {"lb": [0], "ub": [1]}}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertTrue(meanfun.constant.requires_grad)
def test_monotonic_factory_args_1d(self):
conf = {
"monotonic_mean_covar_factory": {
"lb": [0],
"ub": [1],
"fixed_mean": True,
"target": 0.88,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(np.allclose(meanfun.constant, norm.ppf(0.88)))
def test_monotonic_factory_2d(self):
conf = {
"monotonic_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"fixed_mean": True,
"target": 0.89,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = monotonic_mean_covar_factory(config)
self.assertTrue(covarfun.base_kernel.ard_num_dims == 2)
self.assertTrue(isinstance(meanfun, ConstantMeanPartialObsGrad))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.base_kernel, RBFKernelPartialObsGrad))
self.assertFalse(meanfun.constant.requires_grad)
self.assertTrue(np.allclose(meanfun.constant, norm.ppf(0.89)))
def test_song_factory_1d(self):
conf = {"song_mean_covar_factory": {"lb": [0], "ub": [1]}}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.LinearKernel)
)
def test_song_factory_1d_intensity_RBF(self):
conf = {
"song_mean_covar_factory": {"lb": [0], "ub": [1], "intensity_RBF": True}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
def test_song_factory_2d(self):
conf = {
"song_mean_covar_factory": {"lb": [0, 1], "ub": [1, 70], "target": 0.75}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 1)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 0)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 1)
# flip the stim dim
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"stim_dim": 0,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 0)
self.assertTrue(covarfun.kernels[0].base_kernel.active_dims == 1)
def test_song_factory_2d_intensity_RBF(self):
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"intensity_RBF": True,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[0].base_kernel.ard_num_dims == 2)
self.assertTrue(covarfun.kernels[1].base_kernel.ard_num_dims == 1)
self.assertTrue(isinstance(meanfun, gpytorch.means.ConstantMean))
self.assertTrue(isinstance(covarfun, gpytorch.kernels.AdditiveKernel))
self.assertTrue(isinstance(covarfun.kernels[0], gpytorch.kernels.ScaleKernel))
self.assertTrue(isinstance(covarfun.kernels[1], gpytorch.kernels.ScaleKernel))
self.assertTrue(
isinstance(covarfun.kernels[0].base_kernel, gpytorch.kernels.RBFKernel)
)
self.assertTrue(
np.allclose(covarfun.kernels[0].base_kernel.active_dims, [0, 1])
)
self.assertTrue(
isinstance(covarfun.kernels[1].base_kernel, gpytorch.kernels.LinearKernel)
)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 1)
# flip the stim dim
conf = {
"song_mean_covar_factory": {
"lb": [0, 1],
"ub": [1, 70],
"target": 0.75,
"stim_dim": 0,
"intensity_RBF": True,
}
}
config = Config(config_dict=conf)
meanfun, covarfun = song_mean_covar_factory(config)
self.assertTrue(covarfun.kernels[1].base_kernel.active_dims == 0)
self.assertTrue(
np.allclose(covarfun.kernels[0].base_kernel.active_dims, [0, 1])
)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from scipy.stats import norm
def f_1d(x, mu=0):
"""
latent is just a gaussian bump at mu
"""
return np.exp(-((x - mu) ** 2))
def f_2d(x):
"""
    an exponential bump centered at the origin (exp of the negative norm, not squared)
"""
return np.exp(-np.linalg.norm(x, axis=-1))
def new_novel_det_params(freq, scale_factor=1.0):
    """Get the loc and scale params for the 2D synthetic novel_det(frequency) function
    Keyword arguments:
    freq -- 1D array of frequencies whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    """
locs = 0.66 * np.power(0.8 * freq * (0.2 * freq - 1), 2) + 0.05
scale = 2 * locs / (3 * scale_factor)
loc = -1 + 2 * locs
return loc, scale
def target_new_novel_det(freq, scale_factor=1.0, target=0.75):
    """Get the target (i.e. threshold) for the 2D synthetic novel_det(frequency) function
    Keyword arguments:
    freq -- 1D array of frequencies whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    target -- target threshold
    """
locs, scale = new_novel_det_params(freq, scale_factor)
return norm.ppf(target, loc=locs, scale=scale)
def new_novel_det(x, scale_factor=1.0):
    """Get the z-score (standardized intensity) for the 2D synthetic novel_det(frequency) function
    Keyword arguments:
    x -- array of shape (n,2) of locations to sample;
        x[...,0] is frequency from -1 to 1; x[...,1] is intensity from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    """
freq = x[..., 0]
locs, scale = new_novel_det_params(freq, scale_factor)
return (x[..., 1] - locs) / scale
def cdf_new_novel_det(x, scale_factor=1.0):
    """Get the cdf for the 2D synthetic novel_det(frequency) function
    Keyword arguments:
    x -- array of shape (n,2) of locations to sample;
        x[...,0] is frequency from -1 to 1; x[...,1] is intensity from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    """
return norm.cdf(new_novel_det(x, scale_factor))
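# Hypothetical usage sketch (an illustrative addition; the _demo_* helpers in
# this module are not called by any test): evaluate the 2D novel_det surface
# on a small grid of (frequency, intensity) points and get per-frequency thresholds.
def _demo_new_novel_det():
    freq = np.linspace(-1, 1, 3)
    intensity = np.linspace(-1, 1, 3)
    # all (frequency, intensity) combinations, shape (9, 2)
    grid = np.stack(np.meshgrid(freq, intensity), axis=-1).reshape(-1, 2)
    probs = cdf_new_novel_det(grid)  # detection probability at each grid point
    thresholds = target_new_novel_det(freq)  # default 0.75 threshold per frequency
    return probs, thresholds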
def new_novel_det_channels_params(channel, scale_factor=1.0, wave_freq=1, target=0.75):
    """Get the loc and scale params for the 2D synthetic novel_det(channel) function
    Keyword arguments:
    channel -- 1D array of channel locations whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    wave_freq -- frequency of location waveform on [-1,1]
    target -- target threshold (unused in this function)
    """
locs = -0.3 * np.sin(5 * wave_freq * (channel - 1 / 6) / np.pi) ** 2 - 0.5
scale = (
1 / (10 * scale_factor) * (0.75 + 0.25 * np.cos(10 * (0.3 + channel) / np.pi))
)
return locs, scale
def target_new_novel_det_channels(channel, scale_factor=1.0, wave_freq=1, target=0.75):
    """Get the target (i.e. threshold) for the 2D synthetic novel_det(channel) function
    Keyword arguments:
    channel -- 1D array of channel locations whose thresholds to return
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    wave_freq -- frequency of location waveform on [-1,1]
    target -- target threshold
    """
locs, scale = new_novel_det_channels_params(
channel, scale_factor, wave_freq, target
)
return norm.ppf(target, loc=locs, scale=scale)
def new_novel_det_channels(x, channel, scale_factor=1.0, wave_freq=1, target=0.75):
    """Get the z-score for the 2D synthetic novel_det(channel) function
    Keyword arguments:
    x -- 1D array of intensities from -1 to 1
    channel -- 1D array of channel locations from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    wave_freq -- frequency of location waveform on [-1,1]
    """
locs, scale = new_novel_det_channels_params(
channel, scale_factor, wave_freq, target
)
return (x - locs) / scale
def cdf_new_novel_det_channels(x, channel, scale_factor=1.0, wave_freq=1, target=0.75):
    """Get the cdf for the 2D synthetic novel_det(channel) function
    Keyword arguments:
    x -- 1D array of intensities from -1 to 1
    channel -- 1D array of channel locations from -1 to 1
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    wave_freq -- frequency of location waveform on [-1,1]
    """
    return norm.cdf(new_novel_det_channels(x, channel, scale_factor, wave_freq, target))
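# Hypothetical usage sketch (illustration only, not called by any test):
# detection probabilities across channels at a fixed intensity, using the
# corrected (x, channel) signature above.
def _demo_novel_det_channels():
    channel = np.linspace(-1, 1, 5)
    intensity = np.zeros_like(channel)  # evaluate every channel at intensity 0
    probs = cdf_new_novel_det_channels(intensity, channel)
    thresholds = target_new_novel_det_channels(channel)  # default 0.75 threshold
    return probs, thresholds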
def new_novel_det_3D_params(x, scale_factor=1.0):
freq = x[..., 0]
chan = x[..., 1]
locs_freq = -0.32 + 2 * (0.66 * np.power(0.8 * freq * (0.2 * freq - 1), 2) + 0.05)
locs = (
0.7 * ((-0.35 * np.sin(5 * (chan - 1 / 6) / np.pi) ** 2) - 0.5)
+ 0.9 * locs_freq
)
scale = 0.3 * locs / (3 * scale_factor) * 1 / (10 * scale_factor) + 0.15 * (
0.75 + 0.25 * np.cos(10 * (0.6 + chan) / np.pi)
)
return locs, scale
def new_novel_det_3D(x, scale_factor=1.0):
"""
Get the synthetic 3D novel_det
function over freqs,channels and amplitudes
"""
locs, scale = new_novel_det_3D_params(x, scale_factor)
return (x[..., 2] - locs) / scale
def cdf_new_novel_det_3D(x, scale_factor=1.0):
    """
    Get the cdf for the 3D synthetic novel_det function
    x -- array of shape (n,3) of locations to sample
    x[...,0] is frequency, x[...,1] is channel, x[...,2] is intensity
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    """
return norm.cdf(new_novel_det_3D(x, scale_factor))
def target_new_novel_det_3D(x, scale_factor=1.0, target=0.75):
    """
    Get the target for the 3D synthetic novel_det function at location x
    x -- array of shape (n,2) of locations to sample;
    x[...,0] is frequency, x[...,1] is channel
    scale_factor -- scale for the novel_det function, where higher is steeper/lower SD
    target -- target threshold
    """
locs, scale = new_novel_det_3D_params(x, scale_factor)
return norm.ppf(target, loc=locs, scale=scale)
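# Hypothetical usage sketch (illustration only): the 3D variant takes points of
# shape (n, 3) with columns (frequency, channel, intensity).
def _demo_novel_det_3D():
    rng = np.random.default_rng(0)
    x = rng.uniform(-1, 1, size=(4, 3))
    return cdf_new_novel_det_3D(x)  # detection probability per point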
def f_pairwise(f, x, noise_scale=1):
return norm.cdf((f(x[..., 1]) - f(x[..., 0])) / (noise_scale * np.sqrt(2)))
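# Hypothetical usage sketch (illustration only): f_pairwise converts the latent
# values at a stimulus pair into the probability of preferring the second
# stimulus under a probit model with Gaussian comparison noise.
def _demo_f_pairwise():
    pairs = np.array([[0.0, 0.5], [0.5, 0.0]])  # each row is (stimulus_0, stimulus_1)
    # > 0.5 exactly where f_1d(stimulus_1) > f_1d(stimulus_0)
    return f_pairwise(f_1d, pairs)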
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import random
import time
import unittest
import numpy as np
import torch
from aepsych.benchmark import (
Benchmark,
DerivedValue,
LSEProblem,
PathosBenchmark,
Problem,
)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
def f(x, delay=False):
if delay:
time.sleep(0.1 * random.random())
if len(x.shape) == 1:
return x
else:
return x.sum(axis=-1)
class TestProblem(Problem):
name = "test problem"
bounds = np.c_[0, 1].T
threshold = 0.75
def f(self, x):
return f(x)
class TestSlowProblem(TestProblem):
name = "test slow problem"
def f(self, x):
return f(x, delay=True)
class LSETestProblem(LSEProblem):
name = "test lse problem"
bounds = np.c_[[-1, -1], [1, 1]].T
threshold = 0.75
def f(self, x):
return f(x)
class BenchmarkTestCase(unittest.TestCase):
def setUp(self):
# run this single-threaded since we parallelize using pathos
self.oldenv = os.environ.copy()
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["NUMEXPR_MAX_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["KMP_AFFINITY"] = "granularity=fine,compact,1,0"
os.environ["KMP_BLOCKTIME"] = "1"
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
self.bench_config = {
"common": {
"invalid_config": DerivedValue(
[("init_strat", "min_asks")],
lambda min_asks: True if min_asks > 2 else False,
),
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
},
"experiment": {
"acqf": "MCLevelSetEstimation",
"model": "GPClassificationModel",
},
"init_strat": {
"min_asks": [2, 4],
"generator": "SobolGenerator",
"min_total_outcome_occurrences": 0,
},
"opt_strat": {
"min_asks": [
DerivedValue(
[("problem", "name")], lambda x: 1 + int(x == "test problem")
),
DerivedValue(
[("problem", "name")], lambda x: 2 + int(x == "test problem")
),
],
"generator": "OptimizeAcqfGenerator",
"min_total_outcome_occurrences": 0,
},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
},
"GPClassificationModel": {
"inducing_size": 10,
"mean_covar_factory": "default_mean_covar_factory",
"refit_every": 100,
"max_fit_time": 0.1,
},
"OptimizeAcqfGenerator": {
"restarts": 1,
"samps": 20,
"max_gen_time": 0.1,
},
}
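        # A note on the config above (behavior exercised by the assertions in
        # test_bench_smoke below): list-valued options such as the two
        # init_strat min_asks values are expanded into the cross-product of
        # benchmark conditions, and DerivedValue entries are computed from the
        # named options at expansion time. Here invalid_config flags conditions
        # with min_asks > 2 as invalid, so half of the generated benchmarks are
        # skipped.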
def tearDown(self):
os.environ.clear()
os.environ.update(self.oldenv)
def test_bench_smoke(self):
problem1 = TestProblem()
problem2 = LSETestProblem()
bench = Benchmark(
problems=[problem1, problem2],
configs=self.bench_config,
n_reps=2,
log_every=2,
)
bench.run_benchmarks()
out = bench.pandas()
# assert problem metadata was correctly saved
self.assertEqual(
sorted(out["problem_name"].unique()), ["test lse problem", "test problem"]
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"problem_threshold"
].unique()
),
["0.75"],
)
# assert derived values work correctly
self.assertEqual(
sorted(
out[out["problem_name"] == "test problem"][
"opt_strat_min_asks"
].unique()
),
["2", "3"],
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"opt_strat_min_asks"
].unique()
),
["1", "2"],
)
# have as many final results as we expect. Because of invalid trials,
# only half of benchmarks are valid
self.assertTrue(len(out[out.final]) == bench.num_benchmarks // 2)
# have as many repetitions as we expect
self.assertTrue(len(out.rep.unique()) == bench.n_reps)
# reporting intervals are correct
self.assertTrue((out[~out.final].trial_id % 2 == 0).all())
# we don't run extra trials
total_trials = out.init_strat_min_asks.astype(
int
) + out.opt_strat_min_asks.astype(int)
self.assertTrue((out.trial_id <= total_trials).all())
# ensure each simulation has a unique random seed
self.assertTrue(out[out["final"]]["seed"].is_unique)
def test_bench_pathossmoke(self):
problem1 = TestProblem()
problem2 = LSETestProblem()
bench = PathosBenchmark(
problems=[problem1, problem2], configs=self.bench_config, n_reps=2, nproc=2
)
bench.run_benchmarks()
out = bench.pandas()
# assert problem metadata was correctly saved
self.assertEqual(
sorted(out["problem_name"].unique()), ["test lse problem", "test problem"]
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"problem_threshold"
].unique()
),
["0.75"],
)
# assert derived values work correctly
self.assertEqual(
sorted(
out[out["problem_name"] == "test problem"][
"opt_strat_min_asks"
].unique()
),
["2", "3"],
)
self.assertEqual(
sorted(
out[out["problem_name"] == "test lse problem"][
"opt_strat_min_asks"
].unique()
),
["1", "2"],
)
# have as many final results as we expect (half of configs are invalid)
self.assertTrue(len(out[out.final]) == bench.num_benchmarks // 2)
# have as many repetitions as we expect
self.assertTrue(len(out.rep.unique()) == bench.n_reps)
# reporting intervals are correct
self.assertTrue((out[~out.final].trial_id % 2 == 0).all())
# we don't run extra trials
total_trials = out.init_strat_min_asks.astype(
int
) + out.opt_strat_min_asks.astype(int)
self.assertTrue((out.trial_id <= total_trials).all())
# ensure each simulation has a unique random seed
self.assertTrue(out[out["final"]]["seed"].is_unique)
def test_bench_pathos_partial(self):
"""
test that we can launch async and get partial results
"""
problem = TestSlowProblem()
bench = PathosBenchmark(
problems=[problem], configs=self.bench_config, n_reps=1, log_every=2
)
bench.start_benchmarks()
# wait for something to finish
while len(bench._log) == 0:
time.sleep(0.1)
bench.collate_benchmarks(wait=False)
out = bench.pandas() # this should only be a partial result
# have fewer than all the results (which is half of all benchmarks
# since half are invalid)
self.assertTrue(len(out[out.final]) < (bench.num_benchmarks // 2))
bench.collate_benchmarks(wait=True) # wait for everything to finish
out = bench.pandas() # complete results
# now we should have everything (valid = half of all benchmarks)
self.assertTrue(len(out[out.final]) == (bench.num_benchmarks // 2))
class BenchProblemTestCase(unittest.TestCase):
def setUp(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
def test_nonmonotonic_single_lse_eval(self):
config = {
"common": {
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
"acqf": "MCLevelSetEstimation",
"model": "GPClassificationModel",
},
"init_strat": {"generator": "SobolGenerator", "min_asks": 50},
"opt_strat": {"generator": "OptimizeAcqfGenerator", "min_asks": 1},
"MCLevelSetEstimation": {
"target": 0.75,
"beta": 3.84,
},
"GPClassificationModel": {
"inducing_size": 10,
"mean_covar_factory": "default_mean_covar_factory",
},
"OptimizeAcqfGenerator": {
"restarts": 10,
"samps": 1000,
},
}
problem = LSETestProblem()
bench = Benchmark(problems=[problem], configs=config, log_every=100)
_, strat = bench.run_experiment(problem, bench.combinations[0], 0, 0)
e = problem.evaluate(strat)
self.assertTrue(e["mean_square_err_p"] < 0.05)
def test_monotonic_single_lse_eval(self):
config = {
"common": {
"stimuli_per_trial": 1,
"outcome_types": ["binary"],
"strategy_names": "[init_strat, opt_strat]",
"acqf": "MonotonicMCLSE",
"model": "MonotonicRejectionGP",
},
"init_strat": {"generator": "SobolGenerator", "min_asks": 50},
"opt_strat": {"generator": "MonotonicRejectionGenerator", "min_asks": 1},
"SobolGenerator": {"seed": 1},
"MonotonicMCLSE": {
"target": 0.75,
"beta": 3.84,
},
"MonotonicRejectionGP": {
"inducing_size": 10,
"mean_covar_factory": "monotonic_mean_covar_factory",
"monotonic_idxs": "[1]",
},
"MonotonicRejectionGenerator": {
"model_gen_options": {
"num_restarts": 10,
"raw_samples": 1000,
}
},
}
problem = LSETestProblem()
bench = Benchmark(problems=[problem], configs=config, log_every=100)
_, strat = bench.run_experiment(problem, bench.combinations[0], 0, 0)
e = problem.evaluate(strat)
self.assertTrue(e["mean_square_err_p"] < 0.05)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import unittest
import uuid
import torch
from aepsych.acquisition import EAVC, MCLevelSetEstimation
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import FloorGumbelObjective, ProbitObjective
from aepsych.config import Config
from aepsych.generators import (
MonotonicRejectionGenerator,
OptimizeAcqfGenerator,
SobolGenerator,
)
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.models import (
GPClassificationModel,
HadamardSemiPModel,
MonotonicRejectionGP,
PairwiseProbitModel,
)
from aepsych.server import AEPsychServer
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.version import __version__
from botorch.acquisition import qNoisyExpectedImprovement
from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance
from aepsych.server.message_handlers.handle_setup import configure
class ConfigTestCase(unittest.TestCase):
def test_single_probit_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
model = GPClassificationModel
acqf = MCLevelSetEstimation
[init_strat]
generator = SobolGenerator
min_asks = 10
min_total_outcome_occurrences = 5
[opt_strat]
generator = OptimizeAcqfGenerator
min_asks = 20
min_post_range = 0.01
keep_most_recent = 10
[MCLevelSetEstimation]
target = 0.75
beta = 3.84
objective = ProbitObjective
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
config = Config()
config.update(config_str=config_str)
self.assertTrue(
config.get_section("MCLevelSetEstimation")
== {"beta": "3.84", "objective": "ProbitObjective", "target": "0.75"}
)
self.assertTrue(
config.get_section("OptimizeAcqfGenerator")
== {"restarts": "10", "samps": "1000"}
)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, GPClassificationModel))
self.assertTrue(strat.strat_list[1].generator.acqf is MCLevelSetEstimation)
        # since ProbitObjective() is instantiated as an object, check the keys first, then the values
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys())
== {"beta", "target", "objective"}
)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["target"] == 0.75)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["beta"] == 3.84)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
self.assertEqual(strat.strat_list[0].min_total_outcome_occurrences, 5)
self.assertEqual(strat.strat_list[0].min_post_range, None)
self.assertEqual(strat.strat_list[0].keep_most_recent, None)
self.assertEqual(strat.strat_list[1].min_total_outcome_occurrences, 1)
self.assertEqual(strat.strat_list[1].min_post_range, 0.01)
self.assertEqual(strat.strat_list[1].keep_most_recent, 10)
def test_missing_config_file(self):
config_file = "../configs/does_not_exist.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
with self.assertRaises(FileNotFoundError):
Config(config_fnames=[config_file])
with self.assertRaises(FileNotFoundError):
Config(config_fnames=[])
def test_single_probit_config_file(self):
config_file = "../configs/single_lse_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(strat.strat_list[1].generator.acqf is EAVC)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"target"}
)
self.assertTrue(strat.strat_list[1].generator.acqf_kwargs["target"] == 0.75)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_nonmonotonic_optimization_config_file(self):
config_file = "../configs/nonmonotonic_optimization_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(strat.strat_list[1].generator.acqf is qNoisyExpectedImprovement)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 1)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_name_conflict_warns(self):
class DummyMod:
pass
Config.register_object(DummyMod)
with self.assertWarns(Warning):
Config.register_object(DummyMod)
def test_multiple_models_and_strats(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat1, opt_strat2]
[init_strat]
generator = SobolGenerator
min_asks = 1
[opt_strat1]
generator = OptimizeAcqfGenerator
min_asks = 1
model = GPClassificationModel
acqf = MCLevelSetEstimation
[opt_strat2]
generator = MonotonicRejectionGenerator
min_asks = 1
model = MonotonicRejectionGP
acqf = MonotonicMCLSE
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, GPClassificationModel))
self.assertTrue(strat.strat_list[1].generator.acqf is MCLevelSetEstimation)
self.assertTrue(
isinstance(strat.strat_list[2].generator, MonotonicRejectionGenerator)
)
self.assertTrue(isinstance(strat.strat_list[2].model, MonotonicRejectionGP))
self.assertTrue(strat.strat_list[2].generator.acqf is MonotonicMCLSE)
def test_experiment_deprecation(self):
config_str = """
[experiment]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
"""
config = Config()
config.update(config_str=config_str)
self.assertTrue("acqf" in config["common"])
self.assertTrue("model" in config["common"])
def test_to_string(self):
in_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
model = GPClassificationModel
acqf = LevelSetEstimation
[init_strat]
generator = SobolGenerator
min_asks = 10
[opt_strat]
generator = OptimizeAcqfGenerator
min_asks = 20
[LevelSetEstimation]
beta = 3.84
objective = ProbitObjective
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000""".strip().replace(
" ", ""
)
config = Config(config_str=in_str)
out_str = str(config).strip().replace(" ", "")
self.assertEqual(in_str, out_str)
def test_conversion(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [0, 0]
ub = [1, 1]
outcome_type = single_probit
target = 0.75
[SobolStrategy]
n_trials = 10
[ModelWrapperStrategy]
n_trials = 20
refit_every = 5
[experiment]
acqf = MonotonicMCLSE
init_strat_cls = SobolStrategy
opt_strat_cls = ModelWrapperStrategy
modelbridge_cls = MonotonicSingleProbitModelbridge
model = MonotonicRejectionGP
[MonotonicMCLSE]
beta = 3.84
[MonotonicRejectionGP]
inducing_size = 100
mean_covar_factory = monotonic_mean_covar_factory
[MonotonicSingleProbitModelbridge]
restarts = 10
samps = 1000
"""
config = Config(config_str=config_str)
self.assertEqual(config.version, "0.0")
config.convert_to_latest()
self.assertEqual(config.version, __version__)
self.assertEqual(config["common"]["strategy_names"], "[init_strat, opt_strat]")
self.assertEqual(config["common"]["acqf"], "MonotonicMCLSE")
self.assertEqual(config["init_strat"]["min_asks"], "10")
self.assertEqual(config["init_strat"]["generator"], "SobolGenerator")
self.assertEqual(config["opt_strat"]["min_asks"], "20")
self.assertEqual(config["opt_strat"]["refit_every"], "5")
self.assertEqual(
config["opt_strat"]["generator"], "MonotonicRejectionGenerator"
)
self.assertEqual(config["opt_strat"]["model"], "MonotonicRejectionGP")
self.assertEqual(config["MonotonicRejectionGenerator"]["restarts"], "10")
self.assertEqual(config["MonotonicRejectionGenerator"]["samps"], "1000")
self.assertEqual(config["common"]["stimuli_per_trial"], "1")
self.assertEqual(config["common"]["outcome_types"], "[binary]")
def test_warn_about_refit(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat]
model = GPClassificationModel
[init_strat]
generator = SobolGenerator
min_asks = 10
refit_every = 5
"""
config = Config(config_str=config_str)
with self.assertWarns(UserWarning):
Strategy.from_config(config, "init_strat")
def test_pairwise_probit_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 2
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
[init_strat]
min_asks = 10
generator = SobolGenerator
[opt_strat]
min_asks = 20
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
[SobolGenerator]
n_points = 20
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
# because ProbitObjective() is an object, test keys then vals
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_pairwise_probit_config_file(self):
config_file = "../configs/pairwise_al_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
config = Config()
config.update(config_fnames=[config_file])
strat = SequentialStrategy.from_config(config)
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
        # because ProbitObjective() is an object, check the keys and values
        # separately rather than comparing dicts directly
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
def test_pairwise_al_config_file(self):
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
server = AEPsychServer(database_path=database_path)
config_file = "../configs/pairwise_al_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
configure(server, config_fnames=[config_file])
strat = server.strat
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(
isinstance(strat.strat_list[1].generator, OptimizeAcqfGenerator)
)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(
strat.strat_list[1].generator.acqf is PairwiseMCPosteriorVariance
)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
# cleanup the db
if server.db is not None:
server.db.delete_db()
def test_pairwise_opt_config(self):
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
server = AEPsychServer(database_path=database_path)
config_file = "../configs/pairwise_opt_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
configure(server, config_fnames=[config_file])
strat = server.strat
self.assertTrue(isinstance(strat.strat_list[0].generator, SobolGenerator))
self.assertTrue(strat.strat_list[0].model is None)
self.assertTrue(isinstance(strat.strat_list[1].model, PairwiseProbitModel))
self.assertTrue(strat.strat_list[1].generator.acqf is qNoisyExpectedImprovement)
self.assertTrue(
set(strat.strat_list[1].generator.acqf_kwargs.keys()) == {"objective"}
)
self.assertTrue(
isinstance(
strat.strat_list[1].generator.acqf_kwargs["objective"],
ProbitObjective,
)
)
self.assertTrue(strat.strat_list[1].generator.restarts == 10)
self.assertTrue(strat.strat_list[1].generator.samps == 1000)
self.assertTrue(strat.strat_list[0].min_asks == 10)
self.assertTrue(strat.strat_list[0].stimuli_per_trial == 2)
self.assertTrue(strat.strat_list[0].outcome_types == ["binary"])
self.assertTrue(strat.strat_list[1].min_asks == 20)
self.assertTrue(torch.all(strat.strat_list[0].lb == strat.strat_list[1].lb))
self.assertTrue(torch.all(strat.strat_list[1].model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(strat.strat_list[0].ub == strat.strat_list[1].ub))
self.assertTrue(torch.all(strat.strat_list[1].model.ub == torch.Tensor([1, 1])))
# cleanup the db
if server.db is not None:
server.db.delete_db()
def test_jsonify(self):
sample_configstr = """
[common]
lb = [0, 0]
ub = [1, 1]
outcome_type = pairwise_probit
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
model = PairwiseProbitModel
[init_strat]
min_asks = 10
generator = PairwiseSobolGenerator
[opt_strat]
min_asks = 20
generator = PairwiseOptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseOptimizeAcqfGenerator]
restarts = 10
samps = 1000
[PairwiseSobolGenerator]
n_points = 20
"""
request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": sample_configstr},
}
# Generate a configuration object.
temporaryconfig = Config(**request["message"])
configedjson = temporaryconfig.jsonifyAll()
referencejsonstr = """{
"common": {
"lb": "[0, 0]",
"ub": "[1, 1]",
"outcome_type": "pairwise_probit",
"parnames": "[par1, par2]",
"strategy_names": "[init_strat, opt_strat]",
"acqf": "PairwiseMCPosteriorVariance",
"model": "PairwiseProbitModel"
},
"init_strat": {
"min_asks": "10",
"generator": "PairwiseSobolGenerator"
},
"opt_strat": {
"min_asks": "20",
"generator": "PairwiseOptimizeAcqfGenerator"
},
"PairwiseProbitModel": {
"mean_covar_factory": "default_mean_covar_factory"
},
"PairwiseMCPosteriorVariance": {
"objective": "ProbitObjective"
},
"PairwiseOptimizeAcqfGenerator": {
"restarts": "10",
"samps": "1000"
},
"PairwiseSobolGenerator": {
"n_points": "20"
}
} """
        # Rather than comparing strings, parse both JSON strings and compare the resulting dicts
testconfig = json.loads(configedjson)
testsample = json.loads(referencejsonstr)
        # the deepest nesting is an option within a section, so dict equality suffices
self.assertEqual(testconfig, testsample)
def test_stimuli_compatibility(self):
config_str1 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config1 = Config()
config1.update(config_str=config_str1)
config_str2 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config2 = Config()
config2.update(config_str=config_str2)
config_str3 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = PairwiseProbitModel
"""
config3 = Config()
config3.update(config_str=config_str3)
# this should work
SequentialStrategy.from_config(config1)
# this should fail
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(config3)
# this should fail too
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(config3)
def test_outcome_compatibility(self):
config_str1 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config1 = Config()
config1.update(config_str=config_str1)
config_str2 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [continuous]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
config2 = Config()
config2.update(config_str=config_str2)
config_str3 = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat]
[init_strat]
generator = SobolGenerator
model = GPRegressionModel
"""
config3 = Config()
config3.update(config_str=config_str3)
# this should work
SequentialStrategy.from_config(config1)
# this should fail
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(config3)
        # this should fail too (continuous outcome with a classification model)
        with self.assertRaises(AssertionError):
            SequentialStrategy.from_config(config2)
def test_strat_names(self):
good_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
[opt_strat]
generator = OptimizeAcqfGenerator
model = GPClassificationModel
"""
bad_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, init_strat]
[init_strat]
generator = SobolGenerator
model = GPClassificationModel
"""
good_config = Config(config_str=good_str)
bad_config = Config(config_str=bad_str)
# this should work
SequentialStrategy.from_config(good_config)
# this should fail
with self.assertRaises(AssertionError):
SequentialStrategy.from_config(bad_config)
def test_semip_config(self):
config_str = """
[common]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [par1, par2]
strategy_names = [init_strat, opt_strat]
acqf = MCLevelSetEstimation
model = HadamardSemiPModel
[init_strat]
min_asks = 10
generator = SobolGenerator
refit_every = 10
[opt_strat]
min_asks = 20
generator = OptimizeAcqfGenerator
[HadamardSemiPModel]
stim_dim = 1
inducing_size = 10
inducing_point_method = sobol
likelihood = BernoulliObjectiveLikelihood
[BernoulliObjectiveLikelihood]
objective = FloorGumbelObjective
[FloorGumbelObjective]
floor = 0.123
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
config = Config()
config.update(config_str=config_str)
strat = SequentialStrategy.from_config(config)
opt_strat = strat.strat_list[1]
model = opt_strat.model
self.assertTrue(isinstance(model, HadamardSemiPModel))
self.assertTrue(torch.all(model.lb == torch.Tensor([0, 0])))
self.assertTrue(torch.all(model.ub == torch.Tensor([1, 1])))
self.assertTrue(model.dim == 2)
self.assertTrue(model.inducing_size == 10)
self.assertTrue(model.stim_dim == 1)
self.assertTrue(model.inducing_point_method == "sobol")
self.assertTrue(isinstance(model.likelihood, BernoulliObjectiveLikelihood))
self.assertTrue(isinstance(model.likelihood.objective, FloorGumbelObjective))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from aepsych.likelihoods import OrdinalLikelihood
from gpytorch.test.base_likelihood_test_case import BaseLikelihoodTestCase
empty_batch_shape = torch.Size([])
class TestOrdinalLikelihood(BaseLikelihoodTestCase, unittest.TestCase):
seed = 1
n_levels = 3
def _create_targets(self, batch_shape=empty_batch_shape):
res = torch.randint(low=0, high=self.n_levels, size=(*batch_shape, 5)).float()
return res
def create_likelihood(self):
return OrdinalLikelihood(n_levels=self.n_levels)
def _test_marginal(self, batch_shape=empty_batch_shape):
# disable this test, since Categorical.mean returns nan anyway
# and we're not overriding this method on base Likelihood
pass
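# Illustration (not part of the test suite): torch's Categorical has no
# defined mean, so its `.mean` property is NaN-filled, e.g.
#   torch.distributions.Categorical(probs=torch.ones(3) / 3).mean  # tensor(nan)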
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import uuid
import torch
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from ax.core.optimization_config import MultiObjectiveOptimizationConfig
from ax.modelbridge import Models
from botorch.test_functions.multi_objective import BraninCurrin
branin_currin = BraninCurrin(negate=True).to(
dtype=torch.double,
device=torch.device("cuda" if torch.cuda.is_available() else "cpu"),
)
def evaluate(parameters):
evaluation = branin_currin(
torch.tensor([parameters.get("x1"), parameters.get("x2")])
)
# In our case, standard error is 0, since we are computing a synthetic function.
# Set standard error to None if the noise level is unknown.
return {"out1": evaluation[0].item(), "out2": evaluation[1].item()}
class MultiOutcomeTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create a server object configured to run the multi-outcome example experiment
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
config_file = "../configs/multi_outcome_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
cls.client.configure(config_file)
cls.gs = cls.client.server.strat.ax_client.generation_strategy
cls.experiment = cls.client.server.strat.ax_client.experiment
def test_generation_strategy(self):
self.assertEqual(len(self.gs._steps), 2 + 1)
self.assertEqual(self.gs._steps[0].model, Models.SOBOL)
self.assertEqual(self.gs._steps[1].model, Models.MOO)
self.assertEqual(self.gs._steps[2].model, Models.MOO) # Extra final step
def test_experiment(self):
self.assertEqual(len(self.experiment.metrics), 2)
self.assertIn("out1", self.experiment.metrics)
self.assertIn("out2", self.experiment.metrics)
self.assertIsInstance(
self.experiment.optimization_config, MultiObjectiveOptimizationConfig
)
(
threshold1,
threshold2,
) = self.experiment.optimization_config.objective_thresholds
self.assertEqual(threshold1.bound, -18)
self.assertEqual(threshold2.bound, -6)
(
objective1,
objective2,
) = self.experiment.optimization_config.objective.objectives
self.assertFalse(objective1.minimize)
self.assertFalse(objective2.minimize)
# Smoke test just to make sure server can handle multioutcome messages
def test_ask_tell(self):
while not self.client.server.strat.finished:
trial_params = self.client.ask()
for trial in trial_params["config"]:
outcome = evaluate(trial_params["config"][trial])
self.client.tell_trial_by_index(trial, outcome)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import unittest
import uuid
import numpy as np
import torch
from aepsych.config import Config
from aepsych.server import AEPsychServer
from aepsych_client import AEPsychClient
from ax.service.utils.report_utils import exp_to_df
from parameterized import parameterized_class
@parameterized_class(
("config_file", "should_ignore"),
[
("../configs/ax_example.ini", False),
("../configs/ax_ordinal_exploration_example.ini", True),
],
)
class AxIntegrationTestCase(unittest.TestCase):
n_extra_asks = 3
@classmethod
def setUpClass(cls):
if cls.should_ignore:
raise unittest.SkipTest("Skipping because should_ignore is True.")
def sigmoid(x):
return 1 / (1 + math.exp(-x / 100))
# Simulate participant responses: squash the mean of the float-valued parameters through a sigmoid and round to an integer rating
def simulate_response(trial_params):
pars = [
trial_params[par]
for par in trial_params
if isinstance(trial_params[par], float)
]
response = round(sigmoid(np.array(pars).mean()) * 4)
return response
# Fix random seeds
np.random.seed(123)
torch.manual_seed(123)
# Create a server object configured to run the experiment defined by the config file
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
cls.config_file = os.path.join(os.path.dirname(__file__), cls.config_file)
cls.client.configure(cls.config_file)
cls.can_fit_at_start = cls.client.server.strat.can_fit
while not cls.client.server.strat.finished:
# Ask the server what the next parameter values to test should be.
trial_params = cls.client.ask()
# Simulate a participant response.
for trial in trial_params["config"]:
outcome = simulate_response(trial_params["config"][trial])
# Tell the server what happened so that it can update its model.
cls.client.tell_trial_by_index(trial, outcome)
# Make sure we can manually tell without asking first
cls.client.tell(trial_params["config"][trial], outcome)
# Add an extra ask to make sure we can generate trials endlessly
trial_params = cls.client.ask(cls.n_extra_asks)
cls.can_fit_at_end = cls.client.server.strat.can_fit
cls.df = exp_to_df(cls.client.server.strat.experiment)
cls.config = Config(config_fnames=[cls.config_file])
def tearDown(self):
if self.client.server.db is not None:
self.client.server.db.delete_db()
def test_random_seed(self):
self.assertEqual(self.client.server.strat.ax_client._random_seed, 123)
def test_bounds(self):
lb = self.config.getlist("common", "lb", element_type=float)
ub = self.config.getlist("common", "ub", element_type=float)
par4choices = self.config.getlist("par4", "choices", element_type=str)
par5choices = self.config.getlist("par5", "choices", element_type=str)
par6value = self.config.getfloat("par6", "value")
par7value = self.config.get("par7", "value")
self.assertTrue((self.df["par1"] >= lb[0]).all())
self.assertTrue((self.df["par1"] <= ub[0]).all())
self.assertTrue((self.df["par2"] >= lb[1]).all())
self.assertTrue((self.df["par2"] <= ub[1]).all())
self.assertTrue((self.df["par3"] >= lb[2]).all())
self.assertTrue((self.df["par3"] <= ub[2]).all())
self.assertTrue(self.df["par4"].isin(par4choices).all())
self.assertTrue(self.df["par5"].isin(par5choices).all())
self.assertTrue((self.df["par6"] == par6value).all())
self.assertTrue((self.df["par7"] == par7value).all())
@unittest.skip(
"This test is flaky due to non-determinism in asks after the experiment is finished. Skipping until this gets fixed."
)
def test_constraints(self):
constraints = self.config.getlist("common", "par_constraints", element_type=str)
for constraint in constraints:
self.assertEqual(len(self.df.query(constraint)), len(self.df))
self.assertEqual(self.df["par3"].dtype, "int64")
def test_n_trials(self):
n_tells = (self.df["trial_status"] == "COMPLETED").sum()
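# The +1 below accounts for the extra manual tell() issued in setUpClass,
# on top of the min_total_tells needed to finish opt_strat.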
correct_n_tells = self.config.getint("opt_strat", "min_total_tells") + 1
self.assertEqual(n_tells, correct_n_tells)
n_asks = self.client.server.strat.experiment.num_asks
correct_n_asks = (
self.config.getint("opt_strat", "min_total_tells") + self.n_extra_asks
)
self.assertEqual(n_asks, correct_n_asks)
def test_generation_method(self):
n_sobol = (self.df["generation_method"] == "Sobol").sum()
n_opt = (self.df["generation_method"] == "BoTorch").sum()
n_manual = (self.df["generation_method"] == "Manual").sum()
correct_n_sobol = self.config.getint("init_strat", "min_total_tells")
correct_n_opt = (
self.config.getint("opt_strat", "min_total_tells")
- correct_n_sobol
+ self.n_extra_asks
)
self.assertEqual(n_sobol, correct_n_sobol)
self.assertEqual(n_opt, correct_n_opt)
self.assertEqual(n_manual, 1)
def test_can_fit(self):
self.assertFalse(self.can_fit_at_start)
self.assertTrue(self.can_fit_at_end)
@unittest.skip("Base integration tests already cover most of these")
class AxBetaRegressionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Simulate participant responses; just returns the average normalized value of par1-3
def simulate_response(trial_params):
pars = [
(trial_params["par1"][0] - cls.lb[0]) / (cls.ub[0] - cls.lb[0]),
(trial_params["par2"][0] - cls.lb[1]) / (cls.ub[1] - cls.lb[1]),
(trial_params["par3"][0] - cls.lb[2]) / (cls.ub[2] - cls.lb[2]),
]
response = np.array(pars).mean()
return response
# Fix random seeds
np.random.seed(0)
torch.manual_seed(0)
# Create a server object configured to run the beta-regression example experiment
database_path = "./{}.db".format(str(uuid.uuid4().hex))
cls.client = AEPsychClient(server=AEPsychServer(database_path=database_path))
config_file = "../configs/ax_beta_regression_example.ini"
config_file = os.path.join(os.path.dirname(__file__), config_file)
cls.client.configure(config_file)
cls.config = Config(config_fnames=[config_file])
cls.lb = cls.config.getlist("common", "lb", element_type=float)
cls.ub = cls.config.getlist("common", "ub", element_type=float)
while True:
# Ask the server what the next parameter values to test should be.
response = cls.client.ask()
if response["is_finished"]:
break
# Simulate a participant response.
outcome = simulate_response(response["config"])
# Tell the server what happened so that it can update its model.
cls.client.tell(response["config"], outcome)
cls.df = exp_to_df(cls.client.server.strat.experiment)
def tearDown(self):
if self.client.server.db is not None:
self.client.server.db.delete_db()
def test_bounds(self):
par4choices = self.config.getlist("par4", "choices", element_type=str)
par5choices = self.config.getlist("par5", "choices", element_type=str)
par6value = self.config.getfloat("par6", "value")
par7value = self.config.get("par7", "value")
self.assertTrue((self.df["par1"] >= self.lb[0]).all())
self.assertTrue((self.df["par1"] <= self.ub[0]).all())
self.assertTrue((self.df["par2"] >= self.lb[1]).all())
self.assertTrue((self.df["par2"] <= self.ub[1]).all())
self.assertTrue((self.df["par3"] >= self.lb[2]).all())
self.assertTrue((self.df["par3"] <= self.ub[2]).all())
self.assertTrue(self.df["par4"].isin(par4choices).all())
self.assertTrue(self.df["par5"].isin(par5choices).all())
self.assertTrue((self.df["par6"] == par6value).all())
self.assertTrue((self.df["par7"] == par7value).all())
def test_constraints(self):
constraints = self.config.getlist("common", "par_constraints", element_type=str)
for constraint in constraints:
self.assertEqual(len(self.df.query(constraint)), len(self.df))
self.assertEqual(self.df["par3"].dtype, "int64")
def test_n_trials(self):
n_tells = (self.df["trial_status"] == "COMPLETED").sum()
correct_n_tells = self.config.getint("opt_strat", "min_total_tells")
self.assertEqual(n_tells, correct_n_tells)
def test_generation_method(self):
n_sobol = (self.df["generation_method"] == "Sobol").sum()
n_opt = (self.df["generation_method"] == "BoTorch").sum()
correct_n_sobol = self.config.getint("init_strat", "min_total_tells")
correct_n_opt = (
self.config.getint("opt_strat", "min_total_tells") - correct_n_sobol
)
self.assertEqual(n_sobol, correct_n_sobol)
self.assertEqual(n_opt, correct_n_opt)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
These tests check that the server can handle different experiments
(multi/single stimuli, multi/single outcome). They ensure that the
data is correctly stored in the database tables (raw, param, and outcome).
It also checks that the experiment table is correctly populated
(generate_experiment_table method).
"""
import logging
import unittest
import uuid
from itertools import product
import aepsych.server as server
import aepsych.utils_logging as utils_logging
from parameterized import parameterized
params = {
"singleStimuli": {
"x1": [0.1, 0.2, 0.3, 1, 2, 3, 4],
"x2": [4, 0.1, 3, 0.2, 2, 1, 0.3],
},
"multiStimuli": {
"x1": [[0.1, 0.2], [0.3, 1], [2, 3], [4, 0.1], [0.2, 2], [1, 0.3], [0.3, 0.1]],
"x2": [[4, 0.1], [3, 0.2], [2, 1], [0.3, 0.2], [2, 0.3], [1, 0.1], [0.3, 4]],
},
}
outcomes = {
"singleOutcome": [1, -1, 0.1, 0, -0.1, 0, 0],
"multiOutcome": [
[[1], [0]],
[[-1], [0]],
[[0.1], [0]],
[[0], [0]],
[[-0.1], [0]],
[[0], [0]],
[[0], [0]],
],
}
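# Shapes: each multiStimuli entry is a [stim0, stim1] pair per trial
# (stimuli_per_trial = 2), and each multiOutcome entry is a nested
# [[outcome_0], [outcome_1]] list per trial.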
multistim_config = """
# Configuration for multi-stimulus experiment integration test
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [x1, x2]
stimuli_per_trial = 2
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 3
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 4
generator = OptimizeAcqfGenerator
acqf = qNoisyExpectedImprovement
model = PairwiseProbitModel
min_total_outcome_occurrences = 0
[SobolGenerator]
n_points = 2
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseProbitModel]
inducing_size = 100
mean_covar_factory = default_mean_covar_factory
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
[qNoisyExpectedImprovement]
objective = ProbitObjective
"""
singlestim_config = """
[common]
lb = [0, 0]
ub = [1, 1]
parnames = [x1, x2]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 3
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 4
generator = OptimizeAcqfGenerator
acqf = MCPosteriorVariance
model = GPClassificationModel
min_total_outcome_occurrences = 0
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[SobolGenerator]
n_points = 2
"""
test_configs = {"singleStimuli": singlestim_config, "multiStimuli": multistim_config}
all_tests = list(product(params, outcomes))
class IntegrationTestCase(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
# random port
socket = server.sockets.PySocket(port=0)
# random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(socket=socket, database_path=database_path)
# Server messages
self.setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": None},
}
self.ask_request = {"type": "ask", "message": ""}
self.tell_request = {
"type": "tell",
"message": {"config": {}, "outcome": 0},
"extra_info": {},
}
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def get_tell(self, x1, x2, outcome):
self.tell_request["message"]["config"]["x1"] = x1
self.tell_request["message"]["config"]["x2"] = x2
self.tell_request["message"]["outcome"] = outcome
self.tell_request["extra_info"]["e1"] = 1
self.tell_request["extra_info"]["e2"] = 2
def check_params(self, param_type, x1, x2):
if param_type == "multiStimuli":
x1_stimuli0_saved = (
self.s.db.get_engine()
.execute("SELECT x1_stimuli0 FROM experiment_table")
.fetchall()
)
x1_stimuli1_saved = (
self.s.db.get_engine()
.execute("SELECT x1_stimuli1 FROM experiment_table")
.fetchall()
)
x1_stimuli0_saved = [
float(item) for sublist in x1_stimuli0_saved for item in sublist
]
x1_stimuli1_saved = [
float(item) for sublist in x1_stimuli1_saved for item in sublist
]
# Reshape
x1_saved = []
for i in range(len(x1_stimuli0_saved)):
x1_saved.append([x1_stimuli0_saved[i], x1_stimuli1_saved[i]])
self.assertEqual(x1_saved, x1)
x2_stimuli0_saved = (
self.s.db.get_engine()
.execute("SELECT x2_stimuli0 FROM experiment_table")
.fetchall()
)
x2_stimuli1_saved = (
self.s.db.get_engine()
.execute("SELECT x2_stimuli1 FROM experiment_table")
.fetchall()
)
x2_stimuli0_saved = [
float(item) for sublist in x2_stimuli0_saved for item in sublist
]
x2_stimuli1_saved = [
float(item) for sublist in x2_stimuli1_saved for item in sublist
]
# Reshape
x2_saved = []
for i in range(len(x2_stimuli0_saved)):
x2_saved.append([x2_stimuli0_saved[i], x2_stimuli1_saved[i]])
self.assertEqual(x2_saved, x2)
elif param_type == "singleStimuli":
x1_saved = (
self.s.db.get_engine()
.execute("SELECT x1 FROM experiment_table")
.fetchall()
)
x1_saved = [float(item) for sublist in x1_saved for item in sublist]
self.assertTrue(x1_saved == x1)
x2_saved = (
self.s.db.get_engine()
.execute("SELECT x2 FROM experiment_table")
.fetchall()
)
x2_saved = [float(item) for sublist in x2_saved for item in sublist]
self.assertTrue(x2_saved == x2)
def check_outcome(self, outcome_type, outcome):
if outcome_type == "multiOutcome":
outcome0_saved = (
self.s.db.get_engine()
.execute("SELECT outcome_0 FROM experiment_table")
.fetchall()
)
outcome1_saved = (
self.s.db.get_engine()
.execute("SELECT outcome_1 FROM experiment_table")
.fetchall()
)
outcome0_saved = [item for sublist in outcome0_saved for item in sublist]
outcome1_saved = [item for sublist in outcome1_saved for item in sublist]
outcome_saved = []
for i in range(len(outcome0_saved)):
outcome_saved.append([[outcome0_saved[i]], [outcome1_saved[i]]])
self.assertEqual(outcome_saved, outcome)
elif outcome_type == "singleOutcome":
outcome_saved = (
self.s.db.get_engine()
.execute("SELECT outcome FROM experiment_table")
.fetchall()
)
outcome_saved = [item for sublist in outcome_saved for item in sublist]
self.assertTrue(outcome_saved == outcome)
@parameterized.expand(all_tests)
def test_experiment(self, param_type, outcome_type):
x1 = params[param_type]["x1"]
x2 = params[param_type]["x2"]
outcome = outcomes[outcome_type]
dummy_config = test_configs[param_type]
self.setup_request["message"]["config_str"] = dummy_config
self.s.handle_request(self.setup_request)
i = 0
while not self.s.strat.finished:
self.s.handle_request(self.ask_request)
self.get_tell(x1[i], x2[i], outcome[i])
i = i + 1
self.s.handle_request(self.tell_request)
# Experiment id
exp_id = self.s.db.get_master_records()[0].experiment_id
# Create table with experiment data
self.s.generate_experiment_table(exp_id, return_df=True)
# Check that table exists
self.assertTrue("experiment_table" in self.s.db.get_engine().table_names())
# Check that parameter and outcomes values are correct
self.check_outcome(outcome_type, outcome)
self.check_params(param_type, x1, x2)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from aepsych.acquisition.lse import MCLevelSetEstimation
from aepsych.acquisition.objective import ProbitObjective
from botorch.utils.testing import MockModel, MockPosterior
from scipy.stats import norm
class TestLSE(unittest.TestCase):
def setUp(self):
f = torch.ones(1) * 1.7
var = torch.ones(1) * 2.3
samps = torch.ones(1, 1, 1) * 1.7
self.model = MockModel(MockPosterior(mean=f, variance=var, samples=samps))
def test_mclse(self):
mclse = MCLevelSetEstimation(
model=self.model, target=5.0, beta=3.84, objective=ProbitObjective()
)
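# A sketch of the straddle-style arithmetic being checked (assuming the
# sample variance is clamped at 1e-5 for these constant samples):
# sqrt(beta) * std(objective(f)) - |mean(objective(f)) - target|,
# where objective(1.7) = norm.cdf(1.7) under the probit objective.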
expected = np.sqrt(3.84) * np.sqrt(1e-5) - np.abs(norm.cdf(1.7) - 5)
self.assertAlmostEqual(mclse(torch.zeros(1, 1)), expected)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from aepsych.acquisition.mutual_information import (
BernoulliMCMutualInformation,
MonotonicBernoulliMCMutualInformation,
)
from aepsych.acquisition.objective import ProbitObjective
from aepsych.generators import (
MonotonicRejectionGenerator,
OptimizeAcqfGenerator,
SobolGenerator,
)
from aepsych.models import GPClassificationModel, MonotonicRejectionGP
from aepsych.strategy import SequentialStrategy, Strategy
from gpytorch.kernels import LinearKernel
from gpytorch.means import ConstantMean
from scipy.stats import bernoulli, multivariate_normal, norm, pearsonr
from ..common import f_1d
class SingleProbitMI(unittest.TestCase):
def test_1d_monotonic_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 15
n_opt = 1
lb = -4.0
ub = 4.0
acqf = MonotonicBernoulliMCMutualInformation
acqf_kwargs = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
min_asks=n_opt,
model=MonotonicRejectionGP(lb=lb, ub=ub, dim=1, monotonic_idxs=[0]),
generator=MonotonicRejectionGenerator(acqf, acqf_kwargs),
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100).reshape(-1, 1)
zhat, _ = strat.predict(x)
true = f_1d(x.detach().numpy())
est = zhat.detach().numpy()
# close enough!
self.assertTrue((((norm.cdf(est) - true) ** 2).mean()) < 0.25)
def test_1d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 15
n_opt = 20
lb = -4.0
ub = 4.0
acqf = BernoulliMCMutualInformation
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, dim=1, inducing_size=10),
generator=OptimizeAcqfGenerator(acqf, extra_acqf_args),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
true = f_1d(x.detach().numpy())
est = zhat.detach().numpy()
# close enough!
self.assertTrue((((norm.cdf(est) - true) ** 2).mean()) < 0.25)
def test_mi_acqf(self):
mean = ConstantMean().initialize(constant=1.2)
covar = LinearKernel().initialize(variance=1.0)
model = GPClassificationModel(
lb=torch.Tensor([0]),
ub=torch.Tensor([1]),
inducing_size=10,
mean_module=mean,
covar_module=covar,
)
x = torch.rand(size=(10, 1))
acqf = BernoulliMCMutualInformation(model=model, objective=ProbitObjective())
acq_pytorch = acqf(x)
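# Reference computation below: BALD-style mutual information,
# I(y; f) = H(E_f[p(y|f)]) - E_f[H(p(y|f))], estimated by pushing
# Monte Carlo samples of the latent GP through the probit link.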
samps_numpy = norm.cdf(
multivariate_normal.rvs(mean=np.ones(10) * 1.2, cov=x @ x.T, size=10000)
)
samp_entropies = bernoulli(samps_numpy).entropy()
mean_entropy = bernoulli(samps_numpy.mean(axis=0)).entropy()
acq_numpy = mean_entropy - samp_entropies.mean(axis=0)
# this assertion fails, not sure why, these should be equal to numerical
# precision
# self.assertTrue(np.allclose(acq_numpy, acq_pytorch.detach().numpy().flatten()))
# this one succeeds
self.assertTrue(
pearsonr(acq_numpy, acq_pytorch.detach().numpy().flatten())[0] > (1 - 1e-5)
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from itertools import product
import numpy as np
import torch
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
)
from parameterized import parameterized
from scipy.stats import gumbel_l, logistic, norm
objective_pairs = [
(FloorLogitObjective, logistic),
(FloorProbitObjective, norm),
(FloorGumbelObjective, gumbel_l),
]
floors = [0, 0.5, 0.33]
all_tests = list(product(objective_pairs, floors))
class FloorLinkTests(unittest.TestCase):
@parameterized.expand(all_tests)
def test_floor_links(self, objectives, floor):
our_objective, scipy_dist = objectives
x = np.linspace(-3, 3, 50)
scipy_answer = scipy_dist.cdf(x)
scipy_answer = scipy_answer * (1 - floor) + floor
our_link = our_objective(floor=floor)
our_answer = our_link(torch.Tensor(x).unsqueeze(-1))
self.assertTrue(np.allclose(scipy_answer, our_answer.numpy()))
our_inverse = our_link.inverse(our_answer)
self.assertTrue(np.allclose(x, our_inverse.numpy()))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase
class TestMonotonicAcq(BotorchTestCase):
def test_monotonic_acq(self):
# Init
train_X_aug = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 2.0, 0.0]])
deriv_constraint_points = torch.tensor(
[[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [2.0, 2.0, 1.0]]
)
train_Y = torch.tensor([[1.0], [2.0], [3.0]])
m = MixedDerivativeVariationalGP(
train_x=train_X_aug, train_y=train_Y, inducing_points=train_X_aug
)
acq = MonotonicMCLSE(
model=m,
deriv_constraint_points=deriv_constraint_points,
num_samples=5,
num_rejection_samples=8,
target=1.9,
)
self.assertTrue(isinstance(acq.objective, IdentityMCObjective))
acq = MonotonicMCLSE(
model=m,
deriv_constraint_points=deriv_constraint_points,
num_samples=5,
num_rejection_samples=8,
target=1.9,
objective=ProbitObjective(),
)
# forward
acq(train_X_aug)
Xfull = torch.cat((train_X_aug, acq.deriv_constraint_points), dim=0)
posterior = m.posterior(Xfull)
samples = acq.sampler(posterior)
self.assertEqual(samples.shape, torch.Size([5, 6, 1]))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.acquisition.rejection_sampler import RejectionSampler
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.utils.testing import BotorchTestCase
class TestRejectionSampling(BotorchTestCase):
def test_rejection_sampling(self):
train_X_aug = torch.tensor([[0.0, 0.0, 0.0], [1.0, 1.0, 0.0], [2.0, 2.0, 0.0]])
deriv_constraint_points = torch.tensor(
[[0.0, 0.0, 1.0], [1.0, 1.0, 1.0], [2.0, 2.0, 1.0]]
)
train_Y = torch.tensor([[1.0], [2.0], [3.0]])
m = MixedDerivativeVariationalGP(
train_x=train_X_aug, train_y=train_Y, inducing_points=train_X_aug
)
Xfull = torch.cat((train_X_aug, deriv_constraint_points), dim=0)
posterior = m.posterior(Xfull)
sampler = RejectionSampler(
num_samples=3,
num_rejection_samples=5000,
constrained_idx=torch.tensor([3, 4, 5]),
)
samples = sampler(posterior)
self.assertEqual(samples.shape, torch.Size([3, 6, 1]))
self.assertTrue(torch.all(samples.squeeze(-1)[:, 3:] > 0).item())
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import select
import unittest
import uuid
from unittest.mock import MagicMock, patch, PropertyMock
import aepsych.server as server
import aepsych.utils_logging as utils_logging
import torch
from aepsych.config import Config
from aepsych.server.message_handlers.handle_setup import configure
from aepsych.server.sockets import BAD_REQUEST
from aepsych.strategy import AEPsychStrategy
dummy_config = """
[common]
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 2
generator = SobolGenerator
min_total_outcome_occurrences = 0
[opt_strat]
min_asks = 2
generator = OptimizeAcqfGenerator
acqf = MCPosteriorVariance
model = GPClassificationModel
min_total_outcome_occurrences = 0
[GPClassificationModel]
inducing_size = 10
mean_covar_factory = default_mean_covar_factory
[SobolGenerator]
n_points = 2
"""
class BaseServerTestCase(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
# random port
socket = server.sockets.PySocket(port=0)
# random database path name without dashes
database_path = "./{}_test_server.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(socket=socket, database_path=database_path)
self.db_name = database_path.split("/")[1]
self.db_path = database_path
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def dummy_create_setup(self, server, request=None):
request = request or {"test": "test request"}
server._db_master_record = server.db.record_setup(
description="default description", name="default name", request=request
)
class ServerTestCase(BaseServerTestCase):
def test_final_strat_serialization(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
stored_strat = self.s.get_strat_from_replay(exp_id)
# just some spot checks that the strat's the same
# same data. We do this twice to make sure buffers are
# in a good state and we can load twice without crashing
for _ in range(2):
stored_strat = self.s.get_strat_from_replay(exp_id)
self.assertTrue((stored_strat.x == self.s.strat.x).all())
self.assertTrue((stored_strat.y == self.s.strat.y).all())
# same lengthscale and outputscale
self.assertEqual(
stored_strat.model.covar_module.lengthscale,
self.s.strat.model.covar_module.lengthscale,
)
self.assertEqual(
stored_strat.model.covar_module.outputscale,
self.s.strat.model.covar_module.outputscale,
)
def test_pandadf_dump_single(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
self.s.handle_request(setup_request)
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = [expected_x[i]]
tell_request["message"]["config"]["z"] = [expected_z[i]]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * 4).all())
self.assertTrue((out_df.e2 == [2] * 4).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_pandadf_dump_multistrat(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = [expected_x[i]]
tell_request["message"]["config"]["z"] = [expected_z[i]]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * len(expected_x)).all())
self.assertTrue((out_df.e2 == [2] * len(expected_x)).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_pandadf_dump_flat(self):
"""
This test handles the case where the config values are flat
scalars and not lists
"""
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
"extra_info": {},
}
self.s.handle_request(setup_request)
expected_x = [0, 1, 2, 3]
expected_z = list(reversed(expected_x))
expected_y = [x % 2 for x in expected_x]
i = 0
while not self.s.strat.finished:
self.s.handle_request(ask_request)
tell_request["message"]["config"]["x"] = expected_x[i]
tell_request["message"]["config"]["z"] = expected_z[i]
tell_request["message"]["outcome"] = expected_y[i]
tell_request["extra_info"]["e1"] = 1
tell_request["extra_info"]["e2"] = 2
i = i + 1
self.s.handle_request(tell_request)
exp_id = self.s.db.get_master_records()[-1].experiment_id
out_df = self.s.get_dataframe_from_replay(exp_id)
self.assertTrue((out_df.x == expected_x).all())
self.assertTrue((out_df.z == expected_z).all())
self.assertTrue((out_df.response == expected_y).all())
self.assertTrue((out_df.e1 == [1] * 4).all())
self.assertTrue((out_df.e2 == [2] * 4).all())
self.assertTrue("post_mean" in out_df.columns)
self.assertTrue("post_var" in out_df.columns)
def test_receive(self):
"""test_receive - verifies the receive is working when server receives unexpected messages"""
message1 = b"\x16\x03\x01\x00\xaf\x01\x00\x00\xab\x03\x03\xa9\x80\xcc" # invalid message
message2 = b"\xec\xec\x14M\xfb\xbd\xac\xe7jF\xbe\xf9\x9bM\x92\x15b\xb5" # invalid message
message3 = {"message": {"target": "test request"}} # valid message
message_list = [message1, message2, json.dumps(message3)]
self.s.socket.conn = MagicMock()
for i, message in enumerate(message_list):
select.select = MagicMock(return_value=[[self.s.socket.conn], [], []])
self.s.socket.conn.recv = MagicMock(return_value=message)
if i != 2:
self.assertEqual(self.s.socket.receive(False), BAD_REQUEST)
else:
self.assertEqual(self.s.socket.receive(False), message3)
def test_error_handling(self):
# double brace escapes, single brace to substitute, so we end up with 3 braces
request = f"{{{BAD_REQUEST}}}"
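# e.g. f"{{{'x'}}}" evaluates to "{x}", so request is BAD_REQUEST wrapped in braces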
expected_error = f"server_error, Request '{request}' raised error ''str' object has no attribute 'keys''!"
self.s.socket.accept_client = MagicMock()
self.s.socket.receive = MagicMock(return_value=request)
self.s.socket.send = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
self.s.socket.send.assert_called_once_with(expected_error)
def test_queue(self):
"""Test to see that the queue is being handled correctly"""
self.s.socket.accept_client = MagicMock()
ask_request = {"type": "ask", "message": ""}
self.s.socket.receive = MagicMock(return_value=ask_request)
self.s.socket.send = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
self.assertEqual(len(self.s.queue), 0)
def test_ax_functionality(self):
config_str = """
[common]
use_ax = True
lb = [0]
ub = [1]
parnames = [x]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat, opt_strat]
[init_strat]
generator = SobolGenerator
[opt_strat]
generator = OptimizeAcqfGenerator
model = ContinuousRegressionGP
acqf = qNoisyExpectedImprovement
"""
config = Config(config_str=config_str)
configure(self.s, config=config)
self.assertTrue(self.s.use_ax)
self.assertIsInstance(self.s.strat, AEPsychStrategy)
def test_config_to_tensor(self):
with patch(
"aepsych.server.AEPsychServer.parnames", new_callable=PropertyMock
) as mock_parnames:
mock_parnames.return_value = ["par1", "par2", "par3"]
# test single
config = {"par1": 0.0, "par2": 1.0, "par3": 2.0}
tensor = self.s._config_to_tensor(config)
self.assertTrue(torch.equal(tensor, torch.tensor([0.0, 1.0, 2.0])))
config = {"par1": [0.0], "par2": [1.0], "par3": [2.0]}
tensor = self.s._config_to_tensor(config)
self.assertTrue(torch.equal(tensor, torch.tensor([0.0, 1.0, 2.0])))
# test pairwise
config = {"par1": [0.0, 2.0], "par2": [1.0, 1.0], "par3": [2.0, 0.0]}
tensor = self.s._config_to_tensor(config)
self.assertTrue(
torch.equal(tensor, torch.tensor([[0.0, 2.0], [1.0, 1.0], [2.0, 0.0]]))
)
def test_handle_request_untyped(self):
"""test_handle_request_untyped"""
request = {}
# check untyped request
with self.assertRaises(RuntimeError):
self.s.handle_request(request)
def test_handle_request_type_invalid(self):
"""test_handle_request_type_invalid"""
request = {"type": "invalid"}
# make sure invalid types handle properly
with self.assertRaises(RuntimeError):
self.s.handle_request(request)
def test_serve_handle_request(self):
"""Tests that the full pipeline is working. Message should go from _receive_send to _handle_queue
to the version handler"""
request = {"version": 0}
self.s.socket.receive = MagicMock(return_value=request)
self.s.socket.accept_client = MagicMock()
self.s.handle_request = MagicMock()
self.s.exit_server_loop = True
with self.assertRaises(SystemExit):
self.s.serve()
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from aepsych.config import Config
from ..test_server import BaseServerTestCase, dummy_config
class GetConfigTestCase(BaseServerTestCase):
def test_get_config(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
get_config_request = {"type": "get_config", "message": {}}
self.s.handle_request(setup_request)
config_dict = self.s.handle_request(get_config_request)
true_config_dict = Config(config_str=dummy_config).to_dict(deduplicate=False)
self.assertEqual(config_dict, true_config_dict)
get_config_request["message"] = {
"section": "init_strat",
"property": "min_asks",
}
response = self.s.handle_request(get_config_request)
self.assertEqual(response, true_config_dict["init_strat"]["min_asks"])
get_config_request["message"] = {"section": "init_strat", "property": "lb"}
response = self.s.handle_request(get_config_request)
self.assertEqual(response, true_config_dict["init_strat"]["lb"])
get_config_request["message"] = {"property": "min_asks"}
with self.assertRaises(RuntimeError):
response = self.s.handle_request(get_config_request)
get_config_request["message"] = {"section": "init_strat"}
with self.assertRaises(RuntimeError):
response = self.s.handle_request(get_config_request)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
from ..test_server import BaseServerTestCase
class HandleExitTestCase(BaseServerTestCase):
def test_handle_exit(self):
request = {}
request["type"] = "exit"
self.s.socket.accept_client = MagicMock()
self.s.socket.receive = MagicMock(return_value=request)
self.s.dump = MagicMock()
with self.assertRaises(SystemExit) as cm:
self.s.serve()
self.assertEqual(cm.exception.code, 0)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class QueryHandlerTestCase(BaseServerTestCase):
def test_strat_query(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": [
{"config": {"x": [0.5]}, "outcome": 1},
{"config": {"x": [0.0]}, "outcome": 0},
{"config": {"x": [1]}, "outcome": 0},
],
}
self.s.handle_request(setup_request)
while not self.s.strat.finished:
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
query_max_req = {
"type": "query",
"message": {
"query_type": "max",
},
}
query_min_req = {
"type": "query",
"message": {
"query_type": "min",
},
}
query_pred_req = {
"type": "query",
"message": {
"query_type": "prediction",
"x": {"x": [0.0]},
},
}
query_inv_req = {
"type": "query",
"message": {
"query_type": "inverse",
"y": 5.0,
},
}
response_max = self.s.handle_request(query_max_req)
response_min = self.s.handle_request(query_min_req)
response_pred = self.s.handle_request(query_pred_req)
response_inv = self.s.handle_request(query_inv_req)
for response in [response_max, response_min, response_pred, response_inv]:
self.assertIsInstance(response["x"], dict)
self.assertEqual(len(response["x"]["x"]), 1)
self.assertIsInstance(response["y"], float)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class StratCanModelTestCase(BaseServerTestCase):
def test_strat_can_model(self):
setup_request = {
"type": "setup",
"version": "0.01",
"message": {"config_str": dummy_config},
}
ask_request = {"type": "ask", "message": ""}
tell_request = {
"type": "tell",
"message": [
{"config": {"x": [0.5]}, "outcome": 1},
],
}
can_model_request = {
"type": "can_model",
"message": {},
}
self.s.handle_request(setup_request)
# At the start there is no model, so can_model returns false
response = self.s.handle_request(can_model_request)
self.assertTrue(response["can_model"] == 0)
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
self.s.handle_request(ask_request)
self.s.handle_request(tell_request)
self.s.handle_request(ask_request)
# Dummy config has 2 init trials; so after third ask, can_model returns true
response = self.s.handle_request(can_model_request)
self.assertTrue(response["can_model"] == 1)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from ..test_server import BaseServerTestCase, dummy_config
class FinishStrategyTestCase(BaseServerTestCase):
def test_handle_finish_strategy(self):
setup_request = {
"type": "setup",
"message": {"config_str": dummy_config},
}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
ask_request = {"type": "ask", "message": ""}
strat_name_request = {"type": "info"}
finish_strat_request = {"type": "finish_strategy"}
self.s.handle_request(setup_request)
strat_name = self.s.handle_request(strat_name_request)["current_strat_name"]
self.assertEqual(strat_name, "init_strat")
# model-based strategies require data
self.s.handle_request(tell_request)
msg = self.s.handle_request(finish_strat_request)
self.assertEqual(msg, "finished strategy init_strat")
# need to gen another trial to move to next strategy
self.s.handle_request(ask_request)
strat_name = self.s.handle_request(strat_name_request)["current_strat_name"]
self.assertEqual(strat_name, "opt_strat")
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
from ..test_server import BaseServerTestCase, dummy_config
class MessageHandlerTellTests(BaseServerTestCase):
def test_tell(self):
setup_request = {
"type": "setup",
"message": {"config_str": dummy_config},
}
tell_request = {
"type": "tell",
"message": {"config": {"x": [0.5]}, "outcome": 1},
}
self.s.db.record_message = MagicMock()
self.s.handle_request(setup_request)
self.s.handle_request(tell_request)
self.assertEqual(self.s.db.record_message.call_count, 1)
self.assertEqual(len(self.s.strat.x), 1)
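# Tells sent with model_data = False should still be recorded in the db,
# but should not be added to the strategy's training data.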
tell_request["message"]["model_data"] = False
self.s.handle_request(tell_request)
self.assertEqual(self.s.db.record_message.call_count, 2)
self.assertEqual(len(self.s.strat.x), 1)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from sklearn.datasets import make_classification
from aepsych.models import GPClassificationModel
from aepsych.models.utils import select_inducing_points
class UtilsTestCase(unittest.TestCase):
def test_select_inducing_points(self):
"""Verify that when we have n_induc > data size, we use data as inducing,
and otherwise we correctly select inducing points."""
X, y = make_classification(
n_samples=100,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
X, y = torch.Tensor(X), torch.Tensor(y)
inducing_size = 20
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=inducing_size
)
model.set_train_data(X[:10, ...], y[:10])
# (inducing point selection sorts the inputs so we sort X to verify)
self.assertTrue(
np.allclose(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="auto",
),
X[:10].sort(0).values,
)
)
model.set_train_data(X, y)
self.assertTrue(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="auto",
)
)
<= 20
)
self.assertTrue(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="pivoted_chol",
)
)
<= 20
)
self.assertEqual(
len(
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="kmeans++",
)
),
20,
)
with self.assertRaises(AssertionError):
select_inducing_points(
inducing_size=inducing_size,
covar_module=model.covar_module,
X=model.train_inputs[0],
bounds=model.bounds,
method="12345",
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
import numpy as np
from aepsych.config import Config
from aepsych.models.monotonic_projection_gp import MonotonicProjectionGP
from sklearn.datasets import make_classification
class MonotonicProjectionGPtest(unittest.TestCase):
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=25,
n_features=3,
n_redundant=0,
n_informative=3,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y)
def test_posterior(self):
X, y = self.X, self.y
model = MonotonicProjectionGP(
lb=torch.Tensor([-4, -4, -4]),
ub=torch.Tensor([4, 4, 4]),
inducing_size=10,
monotonic_dims=[0, 1],
)
model.fit(X, y)
# Check that it is monotonic in both dims
for i in range(2):
Xtest = torch.zeros(3, 3)
Xtest[:, i] = torch.tensor([-1, 0, 1])
post = model.posterior(Xtest)
mu = post.mean.squeeze()
self.assertTrue(
torch.equal(
torch.tensor([0, 1, 2], dtype=torch.long),
torch.argsort(mu),
)
)
# Check that min_f_val is respected
model = MonotonicProjectionGP(
lb=torch.Tensor([-4]),
ub=torch.Tensor([4]),
inducing_size=10,
monotonic_dims=[0],
min_f_val=5.0,
)
model.fit(X, y)
post = model.posterior(Xtest)
mu = post.mean.squeeze()
self.assertTrue(mu.min().item() >= 4.9)
# And in samples
samps = model.sample(Xtest, num_samples=10)
self.assertTrue(samps.min().item() >= 4.9)
def test_from_config(self):
config_str = """
[common]
parnames = [x, y]
lb = [0, 0]
ub = [1, 1]
stimuli_per_trial = 1
outcome_types = [binary]
strategy_names = [init_strat]
[init_strat]
generator = OptimizeAcqfGenerator
model = MonotonicProjectionGP
[MonotonicProjectionGP]
monotonic_dims = [0]
monotonic_grid_size = 10
min_f_val = 0.1
"""
config = Config(config_str=config_str)
model = MonotonicProjectionGP.from_config(config)
self.assertEqual(model.monotonic_dims, [0])
self.assertEqual(model.mon_grid_size, 10)
self.assertEqual(model.min_f_val, 0.1)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import numpy as np
import torch
from aepsych.generators import SobolGenerator
from aepsych.models import IndependentMultitaskGPRModel, MultitaskGPRModel
from parameterized import parameterized
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
models = [
(MultitaskGPRModel(num_outputs=2, rank=2, lb=[-1], ub=[3]),),
(IndependentMultitaskGPRModel(num_outputs=2, lb=[-1], ub=[3]),),
]
class MultitaskGPRegressionTest(unittest.TestCase):
def setUp(self):
np.random.seed(0)
torch.manual_seed(0)
generator = SobolGenerator(lb=[-1], ub=[3], dim=1)
self.x = generator.gen(50)
f1 = self.x**3 - 4 * self.x**2 + np.random.normal() * 0.01
f2 = self.x**2 - 7 * self.x + np.random.normal() * 0.01
self.f = torch.cat((f1, f2), dim=-1)
self.xtest = generator.gen(10)
ytrue1 = self.xtest**3 - 4 * self.xtest**2
ytrue2 = self.xtest**2 - 7 * self.xtest
self.ytrue = torch.cat((ytrue1, ytrue2), dim=-1)
@parameterized.expand(models)
def test_mtgpr_smoke(self, model):
model.fit(self.x, self.f)
ypred, _ = model.predict(self.xtest)
self.assertTrue(np.allclose(self.ytrue, ypred, atol=0.1)) # loose smoke test
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import uuid
import numpy as np
import numpy.testing as npt
import torch
from aepsych.server import AEPsychServer
from gpytorch.likelihoods import GaussianLikelihood
from aepsych.server.message_handlers.handle_ask import ask
from aepsych.server.message_handlers.handle_tell import tell
from aepsych.server.message_handlers.handle_setup import configure
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
class GPRegressionTest(unittest.TestCase):
def f(self, x):
return x**3 - 4 * x**2 + np.random.normal() * 0.1
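# Ignoring noise, f(x) = x**3 - 4 * x**2 has f'(x) = 3x**2 - 8x, so on the
# domain [-1, 3] the max is f(0) = 0 and the min is f(8/3) = -256/27;
# test_extremum checks both against the fitted model.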
def simulate_response(self, trial_params):
x = trial_params["par1"][0]
response = self.f(x)
return response
def setUp(self):
np.random.seed(0)
torch.manual_seed(0)
dbname = "./{}.db".format(str(uuid.uuid4().hex))
config = """
[common]
parnames = [par1]
lb = [-1]
ub = [3]
stimuli_per_trial=1
outcome_types=[continuous]
strategy_names = [init_strat, opt_strat]
[init_strat]
min_asks = 10
generator = SobolGenerator
[opt_strat]
min_asks = 5
generator = OptimizeAcqfGenerator
model = GPRegressionModel
acqf = qNoisyExpectedImprovement
[GPRegressionModel]
likelihood = GaussianLikelihood
max_fit_time = 1
"""
self.server = AEPsychServer(database_path=dbname)
configure(self.server, config_str=config)
while not self.server.strat.finished:
trial_params = ask(self.server)
outcome = self.simulate_response(trial_params)
tell(self.server, outcome, trial_params)
def tearDown(self):
self.server.db.delete_db()
def test_extremum(self):
tol = 0.2 # don't need to be super precise because it's small data
fmax, argmax = self.server.strat.get_max()
npt.assert_allclose(fmax, 0, atol=tol)
npt.assert_allclose(argmax, 0, atol=tol)
fmin, argmin = self.server.strat.get_min()
npt.assert_allclose(fmin, -256 / 27, atol=tol)
npt.assert_allclose(argmin, 8 / 3, atol=tol)
val, arg = self.server.strat.inv_query(0)
npt.assert_allclose(val, 0, atol=tol)
npt.assert_allclose(arg, 0, atol=tol)
def test_from_config(self):
model = self.server.strat.model
npt.assert_allclose(model.lb, [-1.0])
npt.assert_allclose(model.ub, [3.0])
self.assertEqual(model.dim, 1)
self.assertIsInstance(model.likelihood, GaussianLikelihood)
self.assertEqual(model.max_fit_time, 1)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.acquisition import MCPosteriorVariance
from aepsych.acquisition.lookahead import GlobalMI
from aepsych.acquisition.objective import (
FloorGumbelObjective,
FloorLogitObjective,
FloorProbitObjective,
ProbitObjective,
)
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.likelihoods import BernoulliObjectiveLikelihood
from aepsych.strategy import SequentialStrategy, Strategy
from aepsych.acquisition.objective.semi_p import (
SemiPProbabilityObjective,
SemiPThresholdObjective,
)
from aepsych.likelihoods.semi_p import LinearBernoulliLikelihood
from aepsych.models import HadamardSemiPModel, SemiParametricGPModel
from aepsych.models.semi_p import (
_hadamard_mvn_approx,
semi_p_posterior_transform,
)
from gpytorch.distributions import MultivariateNormal
from parameterized import parameterized
def _hadamard_model_constructor(lb, ub, stim_dim, floor, objective=FloorLogitObjective):
return HadamardSemiPModel(
lb=lb,
ub=ub,
stim_dim=stim_dim,
likelihood=BernoulliObjectiveLikelihood(objective=objective(floor=floor)),
inducing_size=10,
inducing_point_method="auto",
max_fit_time=0.5,
)
def _semip_model_constructor(lb, ub, stim_dim, floor, objective=FloorLogitObjective):
return SemiParametricGPModel(
lb=lb,
ub=ub,
stim_dim=stim_dim,
likelihood=LinearBernoulliLikelihood(objective=objective(floor=floor)),
inducing_size=10,
inducing_point_method="auto",
)
links = [FloorLogitObjective, FloorProbitObjective, FloorGumbelObjective]
floors = [0, 0.3, 0.5]
constructors = [_semip_model_constructor, _hadamard_model_constructor]
test_configs = [[FloorLogitObjective, 0.3, _hadamard_model_constructor]]
# test_configs = list(product(links, floors, constructors)) # TODO too slow
class SemiPSmokeTests(unittest.TestCase):
def setUp(self):
self.seed = 1
self.stim_dim = 0
self.context_dim = 1
np.random.seed(1)
torch.manual_seed(1)
X = np.random.randn(100, 2) / 3
xcontext = X[..., self.context_dim]
xintensity = X[..., self.stim_dim]
# polynomial context
slope = (
xcontext - 0.7 * xcontext**2 + 0.3 * xcontext**3 - 0.1 * xcontext**4
)
intercept = (
xcontext + 0.03 * xcontext**5 - 0.2 * xcontext**3 - 0.7 * xcontext**4
)
# multiply by intensity
self.f = torch.Tensor(slope * (intercept + xintensity)).unsqueeze(-1)
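        # Semi-parametric form: f = k(context) * (intensity + c(context)), a
        # context-dependent slope and intercept modulating stimulus intensity.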
X[:, 0] = X[:, 0] * 100
X[:, 1] = X[:, 1] / 100
self.lb = [-100, -0.01]
self.ub = [100, 0.01]
self.X = torch.Tensor(X)
@parameterized.expand(
[(SemiPThresholdObjective(target=0.75),), (SemiPProbabilityObjective(),)]
)
def test_mc_generation(self, objective):
        # No objective on the likelihood here; the objective used for `gen`
        # (via the acquisition function) is not the same as the likelihood's
        # objective in this case.
model = SemiParametricGPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=LinearBernoulliLikelihood(),
inducing_size=10,
inducing_point_method="auto",
)
generator = OptimizeAcqfGenerator(
acqf=MCPosteriorVariance,
acqf_kwargs={"objective": objective},
max_gen_time=0.1,
)
y = torch.bernoulli(model.likelihood.objective(self.f))
model.set_train_data(
self.X[:10], y[:10]
) # no need to fit for checking gen shapes
next_x = generator.gen(num_points=1, model=model)
self.assertEqual(
next_x.shape,
(
1,
2,
),
)
def test_analytic_lookahead_generation(self):
floor = 0
objective = FloorProbitObjective
model = _semip_model_constructor(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
floor=floor,
objective=objective,
)
generator = OptimizeAcqfGenerator(
acqf=GlobalMI,
acqf_kwargs={
"posterior_transform": semi_p_posterior_transform,
"target": 0.75,
"query_set_size": 100,
},
max_gen_time=0.2,
)
link = objective(floor=floor)
y = torch.bernoulli(link(self.f))
model.set_train_data(
self.X[:10], y[:10]
) # no need to fit for checking gen shapes
next_x = generator.gen(num_points=1, model=model)
self.assertEqual(
next_x.shape,
(
1,
2,
),
)
@parameterized.expand(test_configs)
@unittest.skip("Slow smoke test, TODO speed me up")
def test_memorize_data(self, objective, floor, model_constructor):
"""
see approximate accuracy on easy logistic ps that only varies in 1d
(no slope and intercept)
accuracy determined by average performance on training data
"""
with self.subTest(
objective=objective.__name__,
floor=floor,
model_constructor=model_constructor,
):
link = objective(floor=floor)
y = torch.bernoulli(link(self.f))
model = model_constructor(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
floor=floor,
objective=objective,
)
model.fit(train_x=self.X[:50], train_y=y[:50])
pm, _ = model.predict(self.X[:50])
pred = (link(pm) > 0.5).numpy()
npt.assert_allclose(pred, y[:50].numpy(), atol=1) # mismatch at most one
model.update(self.X, y)
pm, _ = model.predict(self.X[50:])
pred = (link(pm) > 0.5).numpy()
npt.assert_allclose(pred, y[50:].numpy(), atol=1)
@parameterized.expand([(_semip_model_constructor,), (_hadamard_model_constructor,)])
def test_prediction_shapes(self, model_constructor):
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
with self.subTest(model_constructor=model_constructor):
model = model_constructor(lb=lb, ub=ub, stim_dim=self.stim_dim, floor=0)
strat_list = [
Strategy(
lb=lb,
ub=ub,
model=model,
generator=SobolGenerator(lb=lb, ub=ub, seed=self.seed),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
train_x = torch.tensor([[0.0, 0.0], [2.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([1.0, 1.0, 0.0])
model.fit(train_x=train_x, train_y=train_y)
f, var = model.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
p, pvar = model.predict(train_x, probability_space=True)
self.assertEqual(p.shape, torch.Size([3]))
self.assertEqual(pvar.shape, torch.Size([3]))
if isinstance(model, SemiParametricGPModel):
samps = model.sample(train_x, 11, probability_space=True)
self.assertEqual(samps.shape, torch.Size([11, 3]))
post = model.posterior(train_x)
self.assertEqual(post.mvn.mean.shape, torch.Size([2, 3]))
self.assertTrue(torch.equal(post.Xi, torch.tensor([0.0, 2.0, 2.0])))
samps = post.rsample(sample_shape=torch.Size([6]))
                # samps should be n_samp x 2 (slope, intercept) x 3 (xshape)
self.assertEqual(samps.shape, torch.Size([6, 2, 3]))
# now check posterior samp sizes. They have
# an extra dim (since it's 1d outcome), which
# model.sample squeezes, except for thresh sampling
# which is already squeezed by the threshold objective
# TODO be more consistent with how we use dims
post = model.posterior(train_x)
p_samps = post.sample_p(torch.Size([6]))
self.assertEqual(p_samps.shape, torch.Size([6, 1, 3]))
f_samps = post.sample_f(torch.Size([6]))
self.assertEqual(f_samps.shape, torch.Size([6, 1, 3]))
thresh_samps = post.sample_thresholds(
threshold_level=0.75, sample_shape=torch.Size([6])
)
self.assertEqual(thresh_samps.shape, torch.Size([6, 3]))
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
@parameterized.expand([(_semip_model_constructor,), (_hadamard_model_constructor,)])
def test_reset_variational_strategy(self, model_constructor):
lb = [-3, -3]
ub = [3, 3]
stim_dim = 0
with self.subTest(model_constructor=model_constructor):
model = model_constructor(lb=lb, ub=ub, stim_dim=stim_dim, floor=0)
link = FloorLogitObjective(floor=0)
y = torch.bernoulli(link(self.f))
variational_params_before = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_before = model.variational_strategy.inducing_points
model.fit(torch.Tensor(self.X[:15]), torch.Tensor(y[:15]))
variational_params_after = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_after = model.variational_strategy.inducing_points
model._reset_variational_strategy()
variational_params_reset = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_reset = model.variational_strategy.inducing_points
# before should be different from after and after should be different
# from reset
self.assertFalse(np.allclose(induc_before, induc_after))
self.assertFalse(np.allclose(induc_after, induc_reset))
for before, after in zip(
variational_params_before, variational_params_after
):
self.assertFalse(np.allclose(before, after))
for after, reset in zip(variational_params_after, variational_params_reset):
self.assertFalse(np.allclose(after, reset))
def test_slope_mean_setting(self):
for slope_mean in (2, 4):
model = SemiParametricGPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=LinearBernoulliLikelihood(),
inducing_size=10,
slope_mean=slope_mean,
inducing_point_method="auto",
)
with self.subTest(model=model, slope_mean=slope_mean):
self.assertEqual(model.mean_module.constant[-1], slope_mean)
model = HadamardSemiPModel(
lb=self.lb,
ub=self.ub,
stim_dim=self.stim_dim,
likelihood=BernoulliObjectiveLikelihood(objective=ProbitObjective()),
inducing_size=10,
slope_mean=slope_mean,
inducing_point_method="auto",
)
with self.subTest(model=model, slope_mean=slope_mean):
self.assertEqual(model.slope_mean_module.constant.item(), slope_mean)
class HadamardSemiPTest(unittest.TestCase):
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
stim_dim = 0
X = torch.randn(100, 2)
self.X = X
link = ProbitObjective()
self.y = torch.bernoulli(link(X[:, stim_dim]))
def test_reset_hyperparams(self):
model = HadamardSemiPModel(lb=[-3, -3], ub=[3, 3], inducing_size=20)
slope_os_before = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_before = (
model.offset_covar_module.outputscale.clone().detach().numpy()
)
slope_ls_before = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_before = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
model.fit(self.X[:15], self.y[:15])
slope_os_after = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_after = model.offset_covar_module.outputscale.clone().detach().numpy()
slope_ls_after = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_after = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
model._reset_hyperparameters()
slope_os_reset = model.slope_covar_module.outputscale.clone().detach().numpy()
offset_os_reset = model.offset_covar_module.outputscale.clone().detach().numpy()
slope_ls_reset = (
model.slope_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
offset_ls_reset = (
model.offset_covar_module.base_kernel.lengthscale.clone().detach().numpy()
)
# before should be different from after and after should be different
# from reset but before and reset should be same
self.assertFalse(np.allclose(slope_os_before, slope_os_after))
self.assertFalse(np.allclose(slope_os_after, slope_os_reset))
self.assertTrue(np.allclose(slope_os_before, slope_os_reset))
self.assertFalse(np.allclose(slope_ls_before, slope_ls_after))
self.assertFalse(np.allclose(slope_ls_after, slope_ls_reset))
self.assertTrue(np.allclose(slope_ls_before, slope_ls_reset))
self.assertFalse(np.allclose(offset_os_before, offset_os_after))
self.assertFalse(np.allclose(offset_os_after, offset_os_reset))
self.assertTrue(np.allclose(offset_os_before, offset_os_reset))
self.assertFalse(np.allclose(offset_ls_before, offset_ls_after))
self.assertFalse(np.allclose(offset_ls_after, offset_ls_reset))
self.assertTrue(np.allclose(offset_ls_before, offset_ls_reset))
def _make_psd_matrix(self, size):
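        # A Gram matrix L @ L.T is symmetric positive semi-definite by construction.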
L = torch.randn((size, size))
return L @ L.T
def test_normal_approx(self):
np.random.seed(123)
torch.manual_seed(123)
npoints = 10
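        # Background sketch: _hadamard_mvn_approx moment-matches the elementwise
        # product k * (X + c) with k ~ N(kmean, kcov) and c ~ N(cmean, ccov)
        # independent. The approximation should tighten as either covariance
        # shrinks to zero, which the two error sweeps below check empirically.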
def make_samp_and_approx_mvns(kcov_scale=1.0, ccov_scale=1.0):
X = torch.randn(npoints)
kmean = torch.randn(npoints)
cmean = torch.randn(npoints)
kcov = self._make_psd_matrix(npoints) * kcov_scale
ccov = self._make_psd_matrix(npoints) * ccov_scale
k_mvn = MultivariateNormal(kmean, kcov)
c_mvn = MultivariateNormal(cmean, ccov)
ksamps = k_mvn.sample(torch.Size([1000]))
csamps = c_mvn.sample(torch.Size([1000]))
samp_mean = (ksamps * (X + csamps)).mean(0)
samp_cov = (ksamps * (X + csamps)).T.cov()
approx_mean, approx_cov = _hadamard_mvn_approx(
X, slope_mean=kmean, slope_cov=kcov, offset_mean=cmean, offset_cov=ccov
)
return samp_mean, samp_cov, approx_mean, approx_cov
# verify that as kvar approaches 0, approx improves on average
mean_errs = []
cov_errs = []
for kcov_scale in [1e-5, 1e-2, 1]:
mean_err = 0
cov_err = 0
for _rep in range(100):
(
samp_mean,
samp_cov,
approx_mean,
approx_cov,
) = make_samp_and_approx_mvns(kcov_scale=kcov_scale)
mean_err += (samp_mean - approx_mean).abs().mean().item()
cov_err += (samp_cov - approx_cov).abs().mean().item()
mean_errs.append(mean_err / 100)
cov_errs.append(cov_err / 100)
npt.assert_equal(mean_errs, sorted(mean_errs))
npt.assert_equal(cov_errs, sorted(cov_errs))
# verify that as cvar approaches 0, approx improves on average
mean_errs = []
cov_errs = []
for ccov_scale in [1e-5, 1e-2, 1]:
mean_err = 0
cov_err = 0
for _rep in range(100):
(
samp_mean,
samp_cov,
approx_mean,
approx_cov,
) = make_samp_and_approx_mvns(ccov_scale=ccov_scale)
mean_err += (samp_mean - approx_mean).abs().mean().item()
cov_err += (samp_cov - approx_cov).abs().mean().item()
mean_errs.append(mean_err / 100)
cov_errs.append(cov_err / 100)
npt.assert_equal(mean_errs, sorted(mean_errs))
npt.assert_equal(cov_errs, sorted(cov_errs))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from aepsych.kernels.rbf_partial_grad import RBFKernelPartialObsGrad
from aepsych.means.constant_partial_grad import ConstantMeanPartialObsGrad
from aepsych.models.derivative_gp import MixedDerivativeVariationalGP
from botorch.fit import fit_gpytorch_mll
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.mlls.variational_elbo import VariationalELBO
class TestDerivativeGP(BotorchTestCase):
def testKernel(self):
K = RBFKernelPartialObsGrad(ard_num_dims=2)
x1 = torch.cat((torch.rand(5, 2), torch.zeros(5, 1)), dim=1)
x2 = torch.cat((torch.rand(3, 2), torch.ones(3, 1)), dim=1)
self.assertEqual(K.forward(x1, x2).shape, torch.Size([5, 3]))
def testMean(self):
mu = ConstantMeanPartialObsGrad()
mu.constant.requires_grad_(False)
mu.constant.copy_(torch.tensor(5.0))
mu.constant.requires_grad_(True)
x1 = torch.cat((torch.rand(5, 2), torch.zeros(5, 1)), dim=1)
x2 = torch.cat((torch.rand(3, 2), torch.ones(3, 1)), dim=1)
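        # The trailing indicator column marks derivative observations: function
        # observations (zeros) take the learned constant mean, while derivative
        # observations (ones) have mean 0, matching the assertion below.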
        inputs = torch.cat((x1, x2))
        z = mu(inputs)
self.assertTrue(
torch.equal(z, torch.tensor([5.0, 5.0, 5.0, 5.0, 5.0, 0.0, 0.0, 0.0]))
)
def testMixedDerivativeVariationalGP(self):
train_x = torch.cat(
(torch.tensor([1.0, 2.0, 3.0, 4.0]).unsqueeze(1), torch.zeros(4, 1)), dim=1
)
train_y = torch.tensor([1.0, 2.0, 3.0, 4.0])
m = MixedDerivativeVariationalGP(
train_x=train_x,
train_y=train_y,
inducing_points=train_x,
fixed_prior_mean=0.5,
)
self.assertEqual(m.mean_module.constant.item(), 0.5)
self.assertEqual(
m.covar_module.base_kernel.raw_lengthscale.shape, torch.Size([1, 1])
)
mll = VariationalELBO(
likelihood=BernoulliLikelihood(), model=m, num_data=train_y.numel()
)
mll = fit_gpytorch_mll(mll)
test_x = torch.tensor([[1.0, 0], [3.0, 1.0]])
m(test_x)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.likelihoods.ordinal import OrdinalLikelihood
from aepsych.models import BinaryClassificationGP
from aepsych.models.variational_gp import BetaRegressionGP, OrdinalGP
from botorch.fit import fit_gpytorch_mll
from gpytorch.likelihoods import BernoulliLikelihood
from gpytorch.mlls import VariationalELBO
from sklearn.datasets import make_classification, make_regression
class BinaryClassificationGPTestCase(unittest.TestCase):
"""
    Super basic smoke test to make sure we know if we broke the underlying
    single-probit ("1AFC") model
"""
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=10,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_1d_classification(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = BinaryClassificationGP(
train_X=X, train_Y=y, likelihood=BernoulliLikelihood(), inducing_points=10
)
mll = VariationalELBO(model.likelihood, model.model, len(y))
fit_gpytorch_mll(mll)
# pspace
pm, pv = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred.reshape(-1, 1), y)
npt.assert_array_less(pv, 1)
# fspace
pm, pv = model.predict(X)
pred = (pm > 0).numpy()
npt.assert_allclose(pred.reshape(-1, 1), y)
npt.assert_array_less(1, pv)
class AxBetaRegressionGPTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_regression(
n_samples=7,
n_features=3,
n_informative=1,
random_state=1,
)
        # Rescale the target values to [0, 1]: the Beta likelihood is only
        # supported on the unit interval.
y = (y - y.min()) / (y.max() - y.min())
cls.X, cls.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_1d_regression(self):
X, y = self.X, self.y
model = BetaRegressionGP(train_X=X, train_Y=y, inducing_points=10)
mll = VariationalELBO(model.likelihood, model.model, len(y))
fit_gpytorch_mll(mll)
pm, pv = model.predict(X)
npt.assert_allclose(pm.reshape(-1, 1), y, atol=0.1)
npt.assert_array_less(pv, 1)
class AxOrdinalGPTestCase(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
np.random.seed(1)
torch.manual_seed(1)
cls.n_levels = 5
X, y = make_classification(
n_samples=20,
n_features=5,
n_classes=cls.n_levels,
n_informative=3,
n_clusters_per_class=1,
)
cls.X, cls.y = torch.Tensor(X), torch.Tensor(y).reshape(-1, 1)
def test_ordinal_classification(self):
model = OrdinalGP(
train_X=self.X,
train_Y=self.y,
likelihood=OrdinalLikelihood(n_levels=self.n_levels),
inducing_points=2000,
)
mll = VariationalELBO(model.likelihood, model.model, len(self.y))
fit_gpytorch_mll(mll)
# pspace
probs = model.predict_probability(self.X)
pred = np.argmax(probs.detach().numpy(), axis=1).reshape(-1, 1)
clipped_pred = np.clip(pred, 0, self.n_levels)
npt.assert_allclose(clipped_pred, pred, atol=1, rtol=1)
npt.assert_allclose(pred, self.y, atol=1, rtol=1)
# fspace
pm, pv = model.predict(self.X)
pred = np.floor(self.n_levels * pm).reshape(-1, 1)
pred_var = (self.n_levels * pv).reshape(-1, 1)
clipped_pred = np.clip(pred, 0, self.n_levels)
npt.assert_allclose(clipped_pred, pred, atol=3, rtol=self.n_levels)
npt.assert_allclose(pred, self.y, atol=3, rtol=self.n_levels)
npt.assert_allclose(
pred_var, np.ones_like(pred_var), atol=self.n_levels, rtol=self.n_levels
)
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import unittest
import uuid
import numpy as np
import numpy.testing as npt
import torch
from aepsych import server, utils_logging
from aepsych.acquisition.objective import ProbitObjective
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import PairwiseProbitModel
from aepsych.server.message_handlers.handle_ask import ask
from aepsych.server.message_handlers.handle_setup import configure
from aepsych.server.message_handlers.handle_tell import tell
from aepsych.strategy import SequentialStrategy, Strategy
from botorch.acquisition import qUpperConfidenceBound
from botorch.acquisition.active_learning import PairwiseMCPosteriorVariance
from scipy.stats import bernoulli, norm, pearsonr
from ..common import f_1d, f_2d, f_pairwise, new_novel_det
class PairwiseProbitModelStrategyTest(unittest.TestCase):
def test_pairs_to_comparisons(self):
def ptc_numpy(x, y, dim):
"""
old numpy impl of pairs to comparisons
"""
# This needs to take a unique over the feature dim by flattening
# over pairs but not instances/batches. This is actually tensor
# matricization over the feature dimension but awkward in numpy
unique_coords = np.unique(np.moveaxis(x, 1, 0).reshape(dim, -1), axis=1)
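            # For x of shape (n_pairs, dim, 2), moveaxis + reshape flattens the
            # pair axis into a (dim, 2 * n_pairs) matrix whose unique columns
            # are the distinct stimuli; each comparison below is a pair of
            # indices into those columns, flipped according to the judgement.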
def _get_index_of_equal_row(arr, x, axis=0):
return np.argwhere(np.all(np.equal(arr, x[:, None]), axis=axis)).item()
comparisons = []
for pair, judgement in zip(x, y):
comparison = (
_get_index_of_equal_row(unique_coords, pair[..., 0]),
_get_index_of_equal_row(unique_coords, pair[..., 1]),
)
if judgement == 0:
comparisons.append(comparison)
else:
comparisons.append(comparison[::-1])
return torch.Tensor(unique_coords.T), torch.LongTensor(comparisons)
x = np.random.normal(size=(10, 1, 2))
y = np.random.choice((0, 1), size=10)
datapoints1, comparisons1 = ptc_numpy(x, y, 1)
pbo = PairwiseProbitModel(lb=[-10], ub=[10])
datapoints2, comparisons2 = pbo._pairs_to_comparisons(
torch.Tensor(x), torch.Tensor(y)
)
npt.assert_equal(datapoints1.numpy(), datapoints2.numpy())
npt.assert_equal(comparisons1.numpy(), comparisons2.numpy())
x = np.random.normal(size=(10, 2, 2))
y = np.random.choice((0, 1), size=10)
datapoints1, comparisons1 = ptc_numpy(x, y, 2)
pbo = PairwiseProbitModel(lb=[-10], ub=[10], dim=2)
datapoints2, comparisons2 = pbo._pairs_to_comparisons(
torch.Tensor(x), torch.Tensor(y)
)
npt.assert_equal(datapoints1.numpy(), datapoints2.numpy())
npt.assert_equal(comparisons1.numpy(), comparisons2.numpy())
def test_pairwise_probit_batched(self):
"""
test our 1d gaussian bump example
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = [-4.0, 1e-5]
ub = [-1e-5, 4.0]
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_pair = strat.gen(num_points=3)
# next_pair is batch x dim x pair,
# this checks that we have the reshapes
# right
self.assertTrue((next_pair[:, 0, :] < 0).all())
self.assertTrue((next_pair[:, 1, :] > 0).all())
strat.add_data(
next_pair,
bernoulli.rvs(
f_pairwise(f_1d, next_pair.sum(1), noise_scale=0.1).squeeze()
),
)
xgrid = strat.model.dim_grid(gridsize=10)
zhat, _ = strat.predict(xgrid)
# true max is 0, very loose test
self.assertTrue(xgrid[torch.argmax(zhat, 0)].sum().detach().numpy() < 0.5)
def test_pairwise_memorize(self):
"""
can we memorize a simple function
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1, -1]
ub = [1, 1]
gen = SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2)
x = torch.Tensor(gen.gen(num_points=20))
# "noiseless" new_novel_det (just take the mean instead of sampling)
y = torch.Tensor(f_pairwise(new_novel_det, x) > 0.5).int()
model = PairwiseProbitModel(lb=lb, ub=ub)
model.fit(x[:18], y[:18])
with torch.no_grad():
f0, _ = model.predict(x[18:, ..., 0])
f1, _ = model.predict(x[18:, ..., 1])
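            # Probit choice rule: P(stimulus 1 preferred over stimulus 0) is
            # Phi(f1 - f0), so the preferred stimulus is predicted by
            # thresholding that probability at 0.5.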
pred_diff = norm.cdf(f1 - f0)
pred = pred_diff > 0.5
npt.assert_allclose(pred, y[18:])
def test_pairwise_memorize_rescaled(self):
"""
can we memorize a simple function (with rescaled inputs)
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = [-1000, 0]
ub = [0, 1e-5]
gen = SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2)
x = torch.Tensor(gen.gen(num_points=20))
# "noiseless" new_novel_det (just take the mean instead of sampling)
xrescaled = x.clone()
xrescaled[:, 0, :] = xrescaled[:, 0, :] / 500 + 1
xrescaled[:, 1, :] = xrescaled[:, 1, :] / 5e-6 - 1
y = torch.Tensor(f_pairwise(new_novel_det, xrescaled) > 0.5).int()
model = PairwiseProbitModel(lb=lb, ub=ub)
model.fit(x[:18], y[:18])
with torch.no_grad():
f0, _ = model.predict(x[18:, ..., 0])
f1, _ = model.predict(x[18:, ..., 1])
pred_diff = norm.cdf(f1 - f0)
pred = pred_diff > 0.5
npt.assert_allclose(pred, y[18:])
def test_1d_pairwise_probit(self):
"""
test our 1d gaussian bump example
"""
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(f_1d, next_pair, noise_scale=0.1))]
)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_pairwise_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -2.0
ub = 2.0
acqf = PairwiseMCPosteriorVariance
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=acqf, acqf_kwargs=extra_acqf_args, stimuli_per_trial=2
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair,
[bernoulli.rvs(f_pairwise(lambda x: x, next_pair, noise_scale=0.1))],
)
test_gen = SobolGenerator(lb=lb, ub=ub, seed=seed + 1, stimuli_per_trial=2)
test_x = torch.Tensor(test_gen.gen(100))
ftrue_test = (test_x[..., 0] - test_x[..., 1]).squeeze()
with torch.no_grad():
fdiff_test = (
strat.model.predict(test_x[..., 0], rereference=None)[0]
- strat.model.predict(test_x[..., 1], rereference=None)[0]
)
self.assertTrue(pearsonr(fdiff_test, ftrue_test)[0] >= 0.9)
with torch.no_grad():
fdiff_test_reref = (
strat.model.predict(test_x[..., 0])[0]
- strat.model.predict(test_x[..., 1])[0]
)
self.assertTrue(pearsonr(fdiff_test_reref, ftrue_test)[0] >= 0.9)
def test_2d_pairwise_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = np.r_[-1, -1]
ub = np.r_[1, 1]
extra_acqf_args = {"beta": 3.84}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=qUpperConfidenceBound,
acqf_kwargs=extra_acqf_args,
stimuli_per_trial=2,
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(f_2d, next_pair, noise_scale=0.1))]
)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
# true min is at 0,0
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.2))
def test_2d_pairwise_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 20
n_opt = 1
lb = np.r_[-1, -1]
ub = np.r_[1, 1]
acqf = PairwiseMCPosteriorVariance
extra_acqf_args = {"objective": ProbitObjective()}
model_list = [
Strategy(
lb=lb,
ub=ub,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed, stimuli_per_trial=2),
min_asks=n_init,
stimuli_per_trial=2,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=PairwiseProbitModel(lb=lb, ub=ub),
generator=OptimizeAcqfGenerator(
acqf=acqf, acqf_kwargs=extra_acqf_args, stimuli_per_trial=2
),
min_asks=n_opt,
stimuli_per_trial=2,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_pair = strat.gen()
strat.add_data(
next_pair, [bernoulli.rvs(f_pairwise(new_novel_det, next_pair))]
)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
ztrue = new_novel_det(xy)
corr = pearsonr(zhat.detach().flatten(), ztrue.flatten())[0]
self.assertTrue(corr > 0.80)
def test_sobolmodel_pairwise(self):
# test that SobolModel correctly gets bounds
sobol_x = np.zeros((10, 3, 2))
mod = Strategy(
lb=[1, 2, 3],
ub=[2, 3, 4],
min_asks=10,
stimuli_per_trial=2,
outcome_types=["binary"],
generator=SobolGenerator(
lb=[1, 2, 3], ub=[2, 3, 4], seed=12345, stimuli_per_trial=2
),
)
for i in range(10):
sobol_x[i, ...] = mod.gen()
self.assertTrue(np.all(sobol_x[:, 0, :] > 1))
self.assertTrue(np.all(sobol_x[:, 1, :] > 2))
self.assertTrue(np.all(sobol_x[:, 2, :] > 3))
self.assertTrue(np.all(sobol_x[:, 0, :] < 2))
self.assertTrue(np.all(sobol_x[:, 1, :] < 3))
self.assertTrue(np.all(sobol_x[:, 2, :] < 4))
def test_hyperparam_consistency(self):
# verify that creating the model `from_config` or with `__init__` has the same hyperparams
m1 = PairwiseProbitModel(lb=[1, 2], ub=[3, 4])
m2 = PairwiseProbitModel.from_config(
config=Config(config_dict={"common": {"lb": "[1,2]", "ub": "[3,4]"}})
)
self.assertTrue(isinstance(m1.covar_module, type(m2.covar_module)))
self.assertTrue(
isinstance(m1.covar_module.base_kernel, type(m2.covar_module.base_kernel))
)
self.assertTrue(isinstance(m1.mean_module, type(m2.mean_module)))
m1priors = list(m1.covar_module.named_priors())
m2priors = list(m2.covar_module.named_priors())
for p1, p2 in zip(m1priors, m2priors):
name1, parent1, prior1, paramtransforms1, priortransforms1 = p1
name2, parent2, prior2, paramtransforms2, priortransforms2 = p2
self.assertTrue(name1 == name2)
self.assertTrue(isinstance(parent1, type(parent2)))
self.assertTrue(isinstance(prior1, type(prior2)))
# no obvious way to test paramtransform equivalence
class PairwiseProbitModelServerTest(unittest.TestCase):
def setUp(self):
# setup logger
server.logger = utils_logging.getLogger(logging.DEBUG, "logs")
        # random database path name without dashes
database_path = "./{}.db".format(str(uuid.uuid4().hex))
self.s = server.AEPsychServer(database_path=database_path)
def tearDown(self):
self.s.cleanup()
# cleanup the db
if self.s.db is not None:
self.s.db.delete_db()
def test_1d_pairwise_server(self):
seed = 123
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 2
config_str = f"""
[common]
lb = [-4]
ub = [4]
stimuli_per_trial = 2
        outcome_types = [binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(
server,
config_str=config_str,
)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_y = bernoulli.rvs(f_pairwise(f_1d, next_config["x"], noise_scale=0.1))
tell(server, config=next_config, outcome=next_y)
x = torch.linspace(-4, 4, 100)
zhat, _ = server.strat.predict(x)
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_2d_pairwise_server(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
config_str = f"""
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
min_asks = {n_opt}
model = PairwiseProbitModel
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(
server,
config_str=config_str,
)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_pair = np.c_[next_config["x"], next_config["y"]].T
next_y = bernoulli.rvs(f_pairwise(f_2d, next_pair, noise_scale=0.1))
tell(server, config=next_config, outcome=next_y)
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = server.strat.predict(torch.Tensor(xy))
# true min is at 0,0
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.2))
def test_serialization_1d(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
config_str = f"""
[common]
lb = [-4]
ub = [4]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_y = bernoulli.rvs(f_pairwise(f_1d, next_config["x"]))
tell(server, config=next_config, outcome=next_y)
import dill
# just make sure it works
try:
s = dill.dumps(server)
server2 = dill.loads(s)
self.assertEqual(len(server2._strats), len(server._strats))
for strat1, strat2 in zip(server._strats, server2._strats):
self.assertEqual(type(strat1), type(strat2))
self.assertEqual(type(strat1.model), type(strat2.model))
self.assertTrue(torch.equal(strat1.x, strat2.x))
self.assertTrue(torch.equal(strat1.y, strat2.y))
        except Exception as e:
            self.fail(f"serialization roundtrip failed: {e}")
def test_serialization_2d(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
config_str = f"""
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = {n_init}
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = {n_opt}
generator = PairwiseOptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[PairwiseOptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
for _i in range(n_init + n_opt):
next_config = ask(server)
next_pair = np.c_[next_config["x"], next_config["y"]].T
next_y = bernoulli.rvs(f_pairwise(f_2d, next_pair))
tell(server, config=next_config, outcome=next_y)
import dill
# just make sure it works
try:
s = dill.dumps(server)
server2 = dill.loads(s)
self.assertEqual(len(server2._strats), len(server._strats))
for strat1, strat2 in zip(server._strats, server2._strats):
self.assertEqual(type(strat1), type(strat2))
self.assertEqual(type(strat1.model), type(strat2.model))
self.assertTrue(torch.equal(strat1.x, strat2.x))
self.assertTrue(torch.equal(strat1.y, strat2.y))
        except Exception as e:
            self.fail(f"serialization roundtrip failed: {e}")
def test_config_to_tensor(self):
config_str = """
[common]
lb = [-1]
ub = [1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
server = self.s
configure(server, config_str=config_str)
conf = ask(server)
self.assertTrue(server._config_to_tensor(conf).shape == (1, 2))
config_str = """
[common]
lb = [-1, -1]
ub = [1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
configure(server, config_str=config_str)
conf = ask(server)
self.assertTrue(server._config_to_tensor(conf).shape == (2, 2))
config_str = """
[common]
lb = [-1, -1, -1]
ub = [1, 1, 1]
stimuli_per_trial=2
outcome_types=[binary]
parnames = [x, y, z]
strategy_names = [init_strat, opt_strat]
acqf = PairwiseMCPosteriorVariance
[init_strat]
min_asks = 1
generator = PairwiseSobolGenerator
[opt_strat]
model = PairwiseProbitModel
min_asks = 1
generator = OptimizeAcqfGenerator
[PairwiseProbitModel]
mean_covar_factory = default_mean_covar_factory
[PairwiseMCPosteriorVariance]
objective = ProbitObjective
[OptimizeAcqfGenerator]
restarts = 10
samps = 1000
"""
configure(server, config_str=config_str)
conf = ask(server)
self.assertTrue(server._config_to_tensor(conf).shape == (3, 2))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
from aepsych.acquisition.monotonic_rejection import MonotonicMCLSE
from aepsych.acquisition.objective import ProbitObjective
from aepsych.generators import MonotonicRejectionGenerator
from aepsych.models import MonotonicRejectionGP
from aepsych.strategy import Strategy
from botorch.acquisition.objective import IdentityMCObjective
from botorch.utils.testing import BotorchTestCase
from gpytorch.likelihoods import BernoulliLikelihood, GaussianLikelihood
from scipy.stats import norm
class MonotonicRejectionGPLSETest(BotorchTestCase):
def testRegression(self):
# Init
target = 1.5
model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
lb = torch.tensor([0, 0])
ub = torch.tensor([4, 4])
m = MonotonicRejectionGP(
lb=lb,
ub=ub,
likelihood=GaussianLikelihood(),
fixed_prior_mean=target,
monotonic_idxs=[1],
num_induc=2,
num_samples=3,
num_rejection_samples=4,
)
strat = Strategy(
lb=lb,
ub=ub,
model=m,
generator=MonotonicRejectionGenerator(
MonotonicMCLSE,
acqf_kwargs={"target": target},
model_gen_options=model_gen_options,
),
min_asks=1,
stimuli_per_trial=1,
outcome_types=["binary"],
)
# Fit
train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([[1.0], [2.0], [3.0]])
m.fit(train_x=train_x, train_y=train_y)
self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
self.assertEqual(m.mean_module.constant.item(), 1.5)
# Predict
f, var = m.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
# Gen
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
# Acquisition function
acq = strat.generator._instantiate_acquisition_fn(m)
self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
self.assertTrue(
torch.equal(acq.deriv_constraint_points[:, -1], 2 * torch.ones(2))
)
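        # The trailing column appears to encode the derivative index (0 for a
        # function observation, i + 1 for a derivative w.r.t. dim i), so
        # monotonicity constraints on dim 1 carry the value 2 checked above.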
self.assertEqual(acq.target, 1.5)
self.assertTrue(isinstance(acq.objective, IdentityMCObjective))
def testClassification(self):
# Init
target = 0.75
model_gen_options = {"num_restarts": 1, "raw_samples": 3, "epochs": 5}
lb = torch.tensor([0, 0])
ub = torch.tensor([4, 4])
m = MonotonicRejectionGP(
lb=lb,
ub=ub,
likelihood=BernoulliLikelihood(),
fixed_prior_mean=target,
monotonic_idxs=[1],
num_induc=2,
num_samples=3,
num_rejection_samples=4,
)
strat = Strategy(
lb=lb,
ub=ub,
model=m,
generator=MonotonicRejectionGenerator(
MonotonicMCLSE,
acqf_kwargs={"target": target, "objective": ProbitObjective()},
model_gen_options=model_gen_options,
),
min_asks=1,
stimuli_per_trial=1,
outcome_types=["binary"],
)
# Fit
train_x = torch.tensor([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
train_y = torch.tensor([1.0, 1.0, 0.0])
m.fit(train_x=train_x, train_y=train_y)
self.assertEqual(m.inducing_points.shape, torch.Size([2, 2]))
self.assertAlmostEqual(m.mean_module.constant.item(), norm.ppf(0.75))
# Predict
f, var = m.predict(train_x)
self.assertEqual(f.shape, torch.Size([3]))
self.assertEqual(var.shape, torch.Size([3]))
# Gen
strat.add_data(train_x, train_y)
Xopt = strat.gen()
self.assertEqual(Xopt.shape, torch.Size([1, 2]))
# Acquisition function
acq = strat.generator._instantiate_acquisition_fn(m)
self.assertEqual(acq.deriv_constraint_points.shape, torch.Size([2, 3]))
self.assertTrue(
torch.equal(acq.deriv_constraint_points[:, -1], 2 * torch.ones(2))
)
self.assertEqual(acq.target, 0.75)
self.assertTrue(isinstance(acq.objective, ProbitObjective))
# Update
m.update(train_x=train_x[:2, :2], train_y=train_y[:2], warmstart=True)
self.assertEqual(m.train_inputs[0].shape, torch.Size([2, 3]))
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from botorch.fit import fit_gpytorch_mll
from gpytorch.mlls import ExactMarginalLogLikelihood
from aepsych.models.exact_gp import ExactGP
# Fix random seeds
np.random.seed(0)
torch.manual_seed(0)
class TestModelQuery(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bounds = torch.tensor([[0.0], [1.0]])
x = torch.linspace(0.0, 1.0, 10).reshape(-1, 1)
y = torch.sin(6.28 * x).reshape(-1, 1)
cls.model = ExactGP(x, y)
mll = ExactMarginalLogLikelihood(cls.model.likelihood, cls.model)
fit_gpytorch_mll(mll)
def test_min(self):
mymin, my_argmin = self.model.get_min(self.bounds)
# Don't need to be precise since we're working with small data.
self.assertLess(mymin, -0.9)
self.assertTrue(0.7 < my_argmin < 0.8)
def test_max(self):
mymax, my_argmax = self.model.get_max(self.bounds)
# Don't need to be precise since we're working with small data.
self.assertGreater(mymax, 0.9)
self.assertTrue(0.2 < my_argmax < 0.3)
def test_inverse_query(self):
bounds = torch.tensor([[0.1], [0.9]])
val, arg = self.model.inv_query(0.0, bounds)
# Don't need to be precise since we're working with small data.
self.assertTrue(-0.01 < val < 0.01)
self.assertTrue(0.45 < arg < 0.55)
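def _query_demo(model, bounds):
    """Hedged usage sketch, not exercised by the tests above.

    Assumes `model` is a fitted aepsych model exposing get_min / get_max /
    inv_query with the (value, location) return convention used in this file;
    narrowing `bounds` restricts where each query searches.
    """
    fmin, argmin = model.get_min(bounds)
    fmax, argmax = model.get_max(bounds)
    # inv_query returns the achieved function value and the input at which the
    # posterior mean is closest to the requested level (here 0.0).
    val, arg = model.inv_query(0.0, bounds)
    return {"min": (fmin, argmin), "max": (fmax, argmax), "level0": (val, arg)}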
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import unittest
import torch
# run on single threads to keep us from deadlocking weirdly in CI
if "CI" in os.environ or "SANDCASTLE" in os.environ:
torch.set_num_threads(1)
from functools import partial
from unittest.mock import MagicMock
import numpy as np
import numpy.testing as npt
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.config import Config
from aepsych.generators import OptimizeAcqfGenerator, SobolGenerator
from aepsych.models import GPClassificationModel
from aepsych.strategy import SequentialStrategy, Strategy
from botorch.acquisition import qUpperConfidenceBound
from botorch.optim.fit import fit_gpytorch_mll_torch
from botorch.optim.stopping import ExpMAStoppingCriterion
from botorch.posteriors import GPyTorchPosterior
from gpytorch.distributions import MultivariateNormal
from scipy.stats import bernoulli, norm, pearsonr
from sklearn.datasets import make_classification
from torch.distributions import Normal
from torch.optim import Adam
from ..common import cdf_new_novel_det, f_1d, f_2d
class GPClassificationSmoketest(unittest.TestCase):
"""
    Super basic smoke test to make sure we know if we broke the underlying
    single-probit ("1AFC") model
"""
def setUp(self):
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_samples=100,
n_features=1,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
self.X, self.y = torch.Tensor(X), torch.Tensor(y)
def test_1d_classification(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(X[:50], y[:50])
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(X, y)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_1d_classification_pytorchopt(self):
"""
Just see if we memorize the training set
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(
X[:50],
y[:50],
optimizer=fit_gpytorch_mll_torch,
optimizer_kwargs={
"stopping_criterion": ExpMAStoppingCriterion(maxiter=30),
"optimizer": partial(Adam, lr=0.05),
},
)
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(
X,
y,
optimizer=fit_gpytorch_mll_torch,
optimizer_kwargs={"stopping_criterion": ExpMAStoppingCriterion(maxiter=30)},
)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_1d_classification_different_scales(self):
"""
Just see if we memorize the training set
"""
np.random.seed(1)
torch.manual_seed(1)
X, y = make_classification(
n_features=2,
n_redundant=0,
n_informative=1,
random_state=1,
n_clusters_per_class=1,
)
X, y = torch.Tensor(X), torch.Tensor(y)
X[:, 0] = X[:, 0] * 1000
X[:, 1] = X[:, 1] / 1000
lb = [-3000, -0.003]
ub = [3000, 0.003]
model = GPClassificationModel(lb=lb, ub=ub, inducing_size=20)
model.fit(X[:50], y[:50])
# pspace
pm, _ = model.predict_probability(X[:50])
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y[:50])
# fspace
pm, _ = model.predict(X[:50], probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y[:50])
# smoke test update
model.update(X, y)
# pspace
pm, _ = model.predict_probability(X)
pred = (pm > 0.5).numpy()
npt.assert_allclose(pred, y)
# fspace
pm, _ = model.predict(X, probability_space=False)
pred = (pm > 0).numpy()
npt.assert_allclose(pred, y)
def test_reset_hyperparams(self):
model = GPClassificationModel(lb=[-3], ub=[3], inducing_size=20)
os_before = model.covar_module.outputscale.clone().detach().numpy()
ls_before = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
model.fit(torch.Tensor(self.X), torch.Tensor(self.y))
os_after = model.covar_module.outputscale.clone().detach().numpy()
ls_after = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
model._reset_hyperparameters()
os_reset = model.covar_module.outputscale.clone().detach().numpy()
ls_reset = model.covar_module.base_kernel.lengthscale.clone().detach().numpy()
# before should be different from after and after should be different
# from reset but before and reset should be same
self.assertFalse(np.allclose(os_before, os_after))
self.assertFalse(np.allclose(os_after, os_reset))
self.assertTrue(np.allclose(os_before, os_reset))
self.assertFalse(np.allclose(ls_before, ls_after))
self.assertFalse(np.allclose(ls_after, ls_reset))
self.assertTrue(np.allclose(ls_before, ls_reset))
def test_reset_variational_strategy(self):
model = GPClassificationModel(lb=[-3], ub=[3], inducing_size=20)
variational_params_before = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_before = model.variational_strategy.inducing_points
model.fit(torch.Tensor(self.X), torch.Tensor(self.y))
variational_params_after = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_after = model.variational_strategy.inducing_points
model._reset_variational_strategy()
variational_params_reset = [
v.clone().detach().numpy() for v in model.variational_parameters()
]
induc_reset = model.variational_strategy.inducing_points
# before should be different from after and after should be different
# from reset
self.assertFalse(np.allclose(induc_before, induc_after))
self.assertFalse(np.allclose(induc_after, induc_reset))
for before, after in zip(variational_params_before, variational_params_after):
self.assertFalse(np.allclose(before, after))
for after, reset in zip(variational_params_after, variational_params_reset):
self.assertFalse(np.allclose(after, reset))
def test_predict_p(self):
"""
        Verify that the analytic p-space mean and variance are correct.
"""
X, y = self.X, self.y
model = GPClassificationModel(
torch.Tensor([-3]), torch.Tensor([3]), inducing_size=10
)
model.fit(X, y)
pmean_analytic, pvar_analytic = model.predict_probability(X)
fsamps = model.sample(X, 150000)
psamps = norm.cdf(fsamps)
pmean_samp = psamps.mean(0)
pvar_samp = psamps.var(0)
# TODO these tolerances are a bit loose, verify this is right.
self.assertTrue(np.allclose(pmean_analytic, pmean_samp, atol=0.001))
self.assertTrue(np.allclose(pvar_analytic, pvar_samp, atol=0.001))
class GPClassificationTest(unittest.TestCase):
def test_1d_single_probit_new_interface(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
self.assertTrue(strat.y.shape[0] == n_init + n_opt)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit_batched(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 2
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
while not strat.finished:
next_x = strat.gen(num_points=2)
strat.add_data(next_x, bernoulli.rvs(f_1d(next_x)).squeeze())
self.assertEqual(strat.y.shape[0], n_init + n_opt)
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_1d(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# true max is 0, very loose test
self.assertTrue(np.abs(x[np.argmax(zhat.detach().numpy())]) < 0.5)
def test_1d_single_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# f(x) = x so we're just looking at corr between cdf(zhat) and cdf(x)
self.assertTrue(
pearsonr(norm.cdf(zhat.detach().numpy()).flatten(), norm.cdf(x).flatten())[
0
]
> 0.95
)
def test_2d_single_probit_pure_exploration(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(cdf_new_novel_det(next_x))])
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
post_mean, _ = strat.predict(torch.Tensor(xy))
phi_post_mean = norm.cdf(post_mean.reshape(30, 30).detach().numpy())
phi_post_true = cdf_new_novel_det(xy)
self.assertTrue(
pearsonr(phi_post_mean.flatten(), phi_post_true.flatten())[0] > 0.9
)
def test_1d_single_targeting(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
target = 0.75
def obj(x):
return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# since target is 0.75, find the point at which f_est is 0.75
est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
# since true z is just x, the true max is where phi(x)=0.75,
self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
def test_1d_jnd(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 150
n_opt = 1
lb = -4.0
ub = 4.0
target = 0.5
def obj(x):
return -((Normal(0, 1).cdf(x[..., 0]) - target) ** 2)
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x / 1.5))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# we expect jnd close to the target to be close to the correct
# jnd (1.5), and since this is linear model this should be true
# for both definitions of JND
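        # Derivation: responses were generated as Bernoulli(Phi(x / 1.5)), so
        # the latent model is f(x) = x / 1.5 and a unit step in f corresponds
        # to a 1.5 step in x, which is the JND both estimators should recover.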
jnd_step = strat.get_jnd(grid=x[:, None], method="step")
est_jnd_step = jnd_step[50]
# looser test because step-jnd is hurt more by reverting to the mean
self.assertTrue(np.abs(est_jnd_step - 1.5) < 0.5)
jnd_taylor = strat.get_jnd(grid=x[:, None], method="taylor")
est_jnd_taylor = jnd_taylor[50]
self.assertTrue(np.abs(est_jnd_taylor - 1.5) < 0.25)
def test_1d_single_lse(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 50
n_opt = 1
lb = -4.0
ub = 4.0
# target is in z space not phi(z) space, maybe that's
# weird
extra_acqf_args = {"target": 0.75, "beta": 1.96}
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
min_asks=n_opt,
generator=OptimizeAcqfGenerator(
MCLevelSetEstimation, acqf_kwargs=extra_acqf_args
),
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(next_x))])
x = torch.linspace(-4, 4, 100)
zhat, _ = strat.predict(x)
# since target is 0.75, find the point at which f_est is 0.75
est_max = x[np.argmin((norm.cdf(zhat.detach().numpy()) - 0.75) ** 2)]
# since true z is just x, the true max is where phi(x)=0.75,
self.assertTrue(np.abs(est_max - norm.ppf(0.75)) < 0.5)
def test_2d_single_probit(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 150
n_opt = 1
lb = [-1, -1]
ub = [1, 1]
strat_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=20),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(strat_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(f_2d(next_x[None, :]))])
xy = np.mgrid[-1:1:30j, -1:1:30j].reshape(2, -1).T
zhat, _ = strat.predict(torch.Tensor(xy))
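        # f_2d (a module-level helper) is assumed to peak near the origin, so
        # the posterior-mean argmax should land within 0.5 of (0, 0)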
self.assertTrue(np.all(np.abs(xy[np.argmax(zhat.detach().numpy())]) < 0.5))
def test_extra_ask_warns(self):
# test that when we ask more times than we have models, we warn but keep going
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
n_init = 3
n_opt = 1
lb = -4.0
ub = 4.0
model_list = [
Strategy(
lb=lb,
ub=ub,
min_asks=n_init,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
stimuli_per_trial=1,
outcome_types=["binary"],
),
Strategy(
lb=lb,
ub=ub,
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=10),
generator=OptimizeAcqfGenerator(
qUpperConfidenceBound, acqf_kwargs={"beta": 1.96}
),
min_asks=n_opt,
stimuli_per_trial=1,
outcome_types=["binary"],
),
]
strat = SequentialStrategy(model_list)
for _i in range(n_init + n_opt):
next_x = strat.gen()
strat.add_data(next_x, [bernoulli.rvs(norm.cdf(f_1d(next_x)))])
with self.assertWarns(RuntimeWarning):
strat.gen()
def test_1d_query(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
lb = -4.0
ub = 4.0
strat = Strategy(
lb=lb,
ub=ub,
min_asks=1,
generator=SobolGenerator(lb=lb, ub=ub, seed=seed),
model=GPClassificationModel(lb=lb, ub=ub, inducing_size=50),
stimuli_per_trial=1,
outcome_types=["binary"],
)
        # mock the posterior call and stub out model calls that don't need
        # to happen for this test
def get_fake_posterior(X, posterior_transform=None):
fmean = torch.sin(torch.pi * X / 4).squeeze(-1)
fcov = torch.eye(fmean.shape[0])
fake_posterior = GPyTorchPosterior(
mvn=MultivariateNormal(mean=fmean, covariance_matrix=fcov)
)
return fake_posterior
strat.model.posterior = get_fake_posterior
strat.model.__call__ = MagicMock()
strat.model.fit = MagicMock()
x = strat.gen(1)
y = torch.Tensor([1])
strat.add_data(x, y)
strat.model.set_train_data(x, y)
        # The fake posterior mean is sin(pi * x / 4), so on [-4, 4] we expect
        # the global max f = 1 at x = 2 and the global min f = -1 at x = -2
fmax, argmax = strat.get_max()
self.assertTrue(np.allclose(fmax, 1))
self.assertTrue(np.allclose(argmax, 2))
fmin, argmin = strat.get_min()
self.assertTrue(np.allclose(fmin, -1))
self.assertTrue(np.allclose(argmin, -2, atol=0.2))
        # Inverse query at value 0.85 should return x of about 2.7, since
        # sin(pi * 2.7 / 4) is approximately 0.85
val, loc = strat.inv_query(0.85, constraints={})
self.assertTrue(np.allclose(val, 0.85))
self.assertTrue(np.allclose(loc.item(), 2.7, atol=1e-2))
def test_hyperparam_consistency(self):
        # verify that creating the model via `from_config` or `__init__` yields the same hyperparams
m1 = GPClassificationModel(lb=[1, 2], ub=[3, 4])
m2 = GPClassificationModel.from_config(
config=Config(config_dict={"common": {"lb": "[1,2]", "ub": "[3,4]"}})
)
self.assertTrue(isinstance(m1.covar_module, type(m2.covar_module)))
self.assertTrue(
isinstance(m1.covar_module.base_kernel, type(m2.covar_module.base_kernel))
)
self.assertTrue(isinstance(m1.mean_module, type(m2.mean_module)))
m1priors = list(m1.covar_module.named_priors())
m2priors = list(m2.covar_module.named_priors())
for p1, p2 in zip(m1priors, m2priors):
name1, parent1, prior1, paramtransforms1, priortransforms1 = p1
name2, parent2, prior2, paramtransforms2, priortransforms2 = p2
self.assertTrue(name1 == name2)
self.assertTrue(isinstance(parent1, type(parent2)))
self.assertTrue(isinstance(prior1, type(prior2)))
# no obvious way to test paramtransform equivalence
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import unittest
import numpy as np
import torch
from aepsych.acquisition import MCLevelSetEstimation
from aepsych.config import Config
from aepsych.generators import AxOptimizeAcqfGenerator, OptimizeAcqfGenerator
from aepsych.models import (
ContinuousRegressionGP,
GPClassificationModel,
PairwiseProbitModel,
)
from ax.modelbridge import Models
from botorch.acquisition.preference import AnalyticExpectedUtilityOfBestOption
from sklearn.datasets import make_classification
class TestOptimizeAcqfGenerator(unittest.TestCase):
def test_time_limits(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
X, y = make_classification(
n_samples=100,
n_features=8,
n_redundant=3,
n_informative=5,
random_state=1,
n_clusters_per_class=4,
)
X, y = torch.Tensor(X), torch.Tensor(y)
model = GPClassificationModel(
lb=-3 * torch.ones(8),
ub=3 * torch.ones(8),
max_fit_time=0.5,
inducing_size=10,
)
model.fit(X, y)
generator = OptimizeAcqfGenerator(
acqf=MCLevelSetEstimation, acqf_kwargs={"beta": 1.96, "target": 0.5}
)
start = time.time()
generator.gen(1, model)
end = time.time()
long = end - start
generator = OptimizeAcqfGenerator(
acqf=MCLevelSetEstimation,
acqf_kwargs={"beta": 1.96, "target": 0.5},
max_gen_time=0.1,
)
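        # max_gen_time (in seconds) caps the acquisition optimization, so this
        # second gen call should return faster than the unconstrained one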
start = time.time()
generator.gen(1, model)
end = time.time()
short = end - start
        # very loose test because generation time is only approximately limited
self.assertTrue(long > short)
def test_instantiate_eubo(self):
config = """
[OptimizeAcqfGenerator]
acqf = AnalyticExpectedUtilityOfBestOption
stimuli_per_trial = 2
"""
generator = OptimizeAcqfGenerator.from_config(Config(config_str=config))
self.assertTrue(generator.acqf == AnalyticExpectedUtilityOfBestOption)
# need a fitted model in order to instantiate the acqf successfully
model = PairwiseProbitModel(lb=[-1], ub=[1])
train_x = torch.Tensor([-0.5, 1, 0.5, -1]).reshape((2, 1, 2))
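        # pairwise probit data: two trials, each a pair of 1-d stimuli,
        # hence the (n, d, 2) = (2, 1, 2) shape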
train_y = torch.Tensor([0, 1])
model.fit(train_x, train_y)
acqf = generator._instantiate_acquisition_fn(model=model)
self.assertTrue(isinstance(acqf, AnalyticExpectedUtilityOfBestOption))
def test_axoptimizeacqf_config(self):
config_str = """
[common]
use_ax = True
parnames = [foo]
lb = [0]
ub = [1]
stimuli_per_trial = 1
outcome_types = [continuous]
strat_names = [opt]
[opt]
generator = OptimizeAcqfGenerator
model = ContinuousRegressionGP
[ContinuousRegressionGP]
max_fit_time = 1
[OptimizeAcqfGenerator]
acqf = MCLevelSetEstimation
max_gen_time = 1
restarts = 1
samps = 100
[MCLevelSetEstimation]
beta = 1
target = 0.5
"""
config = Config(config_str=config_str)
gen = AxOptimizeAcqfGenerator.from_config(config, "opt")
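        # the AEPsych config should translate into an Ax BOTORCH_MODULAR model
        # spec, with the surrogate class, gen kwargs, and acquisition options
        # threaded through to the corresponding Ax fields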
self.assertEqual(gen.model, Models.BOTORCH_MODULAR)
self.assertEqual(
gen.model_kwargs["surrogate"].botorch_model_class, ContinuousRegressionGP
)
self.assertEqual(gen.model_gen_kwargs["restarts"], 1)
self.assertEqual(gen.model_gen_kwargs["samps"], 100)
self.assertEqual(gen.model_kwargs["acquisition_options"]["target"], 0.5)
self.assertEqual(gen.model_kwargs["acquisition_options"]["beta"], 1.0)
# TODO: Implement max_gen_time
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
from aepsych.config import Config
from aepsych.generators import AxRandomGenerator, RandomGenerator
from ax.modelbridge import Models
class TestRandomGenerator(unittest.TestCase):
def test_randomgen_single(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
rand = np.zeros((n, 3))
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
for i in range(n):
rand[i, :] = mod.gen()
# check that bounds are right
self.assertTrue(np.all(rand[:, 0] > 1))
self.assertTrue(np.all(rand[:, 1] > 2))
self.assertTrue(np.all(rand[:, 2] > 3))
self.assertTrue(np.all(rand[:, 0] < 2))
self.assertTrue(np.all(rand[:, 1] < 3))
self.assertTrue(np.all(rand[:, 2] < 4))
def test_randomgen_batch(self):
# test that RandomGenerator doesn't mess with shapes
n = 100
mod = RandomGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3)
rand = mod.gen(n)
# check that bounds are right
self.assertTrue((rand[:, 0] > 1).all())
self.assertTrue((rand[:, 1] > 2).all())
self.assertTrue((rand[:, 2] > 3).all())
self.assertTrue((rand[:, 0] < 2).all())
self.assertTrue((rand[:, 1] < 3).all())
self.assertTrue((rand[:, 2] < 4).all())
def test_randomgen_config(self):
lb = [-1, 0]
ub = [1, 2]
config_str = f"""
[common]
lb = {lb}
ub = {ub}
"""
config = Config(config_str=config_str)
gen = RandomGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array(lb))
npt.assert_equal(gen.ub.numpy(), np.array(ub))
self.assertEqual(gen.dim, len(lb))
def test_axrandom_config(self):
config_str = """
[common]
parnames = [par1, par2]
lb = [-1, 0]
ub = [1, 2]
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = RandomGenerator
[RandomGenerator]
seed=231
deduplicate=True
"""
config = Config(config_str=config_str)
gen = AxRandomGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.UNIFORM)
self.assertEqual(gen.model_kwargs["seed"], 231)
self.assertTrue(gen.model_kwargs["deduplicate"])
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from aepsych.config import Config
from aepsych.generators.completion_criterion import (
MinAsks,
MinTotalOutcomeOccurrences,
MinTotalTells,
RunIndefinitely,
)
from aepsych.strategy import AEPsychStrategy
class CompletionCriteriaTestCase(unittest.TestCase):
def setUp(self):
config_str = """
[common]
use_ax = True
stimuli_per_trial = 1
outcome_types = [binary]
parnames = [x]
lb = [0]
ub = [1]
strategy_names = [test_strat]
[test_strat]
generator = SobolGenerator
"""
config = Config(config_str=config_str)
self.strat = AEPsychStrategy.from_config(config)
def test_min_asks(self):
config_str = """
[test_strat]
min_asks = 2
"""
config = Config(config_str=config_str)
criterion = MinAsks.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
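        # MinAsks counts generated trials (gen calls), so completing trials
        # alone should not satisfy the criterion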
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertTrue(criterion.is_met(self.strat.experiment))
def test_min_total_tells(self):
config_str = """
[test_strat]
min_total_tells = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalTells.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
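        # MinTotalTells counts completed trials (tells), so gen calls alone
        # should not satisfy the criterion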
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.gen()
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
    def test_min_total_outcome_occurrences(self):
config_str = """
[common]
outcome_types = [binary]
min_total_outcome_occurrences = 2
"""
config = Config(config_str=config_str)
criterion = MinTotalOutcomeOccurrences.from_config(config, "test_strat")
self.assertEqual(criterion.threshold, 2)
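        # each observed outcome value (0 and 1) must occur at least
        # `threshold` times before the criterion is met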
self.strat.complete_new_trial({"x": 0.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 0.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 0.0}, 1.0)
self.assertFalse(criterion.is_met(self.strat.experiment))
self.strat.complete_new_trial({"x": 1.0}, 1.0)
self.assertTrue(criterion.is_met(self.strat.experiment))
    def test_run_indefinitely(self):
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = False
"""
config = Config(config_str=config_str)
criterion = RunIndefinitely(**RunIndefinitely.from_config(config, "test_strat"))
self.assertTrue(criterion.is_met(self.strat.experiment))
config_str = """
[common]
outcome_types = [binary]
run_indefinitely = True
"""
config = Config(config_str=config_str)
criterion = RunIndefinitely(**RunIndefinitely.from_config(config, "test_strat"))
self.assertFalse(criterion.is_met(self.strat.experiment))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from unittest.mock import MagicMock
import numpy as np
import torch
from aepsych.acquisition import MonotonicMCLSE
from aepsych.config import Config
from aepsych.generators import EpsilonGreedyGenerator, MonotonicRejectionGenerator
class TestEpsilonGreedyGenerator(unittest.TestCase):
def test_epsilon_greedy(self):
seed = 1
torch.manual_seed(seed)
np.random.seed(seed)
total_trials = 2000
extra_acqf_args = {"target": 0.75, "beta": 1.96}
for epsilon in (0.1, 0.5):
gen = EpsilonGreedyGenerator(
subgenerator=MonotonicRejectionGenerator(
acqf=MonotonicMCLSE, acqf_kwargs=extra_acqf_args
),
epsilon=epsilon,
)
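            # epsilon-greedy: with probability epsilon a uniform random point
            # is drawn, otherwise the subgenerator is queried, so the
            # subgenerator's call fraction should be about (1 - epsilon)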
model = MagicMock()
gen.subgenerator.gen = MagicMock()
for _ in range(total_trials):
gen.gen(1, model)
self.assertTrue(
np.abs(gen.subgenerator.gen.call_count / total_trials - (1 - epsilon))
< 0.01
)
def test_greedyepsilon_config(self):
config_str = """
[common]
acqf = MonotonicMCLSE
[EpsilonGreedyGenerator]
subgenerator = MonotonicRejectionGenerator
epsilon = .5
"""
config = Config()
config.update(config_str=config_str)
gen = EpsilonGreedyGenerator.from_config(config)
self.assertIsInstance(gen.subgenerator, MonotonicRejectionGenerator)
self.assertEqual(gen.subgenerator.acqf, MonotonicMCLSE)
self.assertEqual(gen.epsilon, 0.5)
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import numpy.testing as npt
import torch
from aepsych.config import Config
from aepsych.generators import AxSobolGenerator, SobolGenerator
from aepsych.utils import make_scaled_sobol
from ax.modelbridge import Models
class TestSobolGenerator(unittest.TestCase):
def test_batchsobol(self):
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
acq1 = mod.gen(num_points=2)
self.assertEqual(acq1.shape, (2, 3))
acq2 = mod.gen(num_points=3)
self.assertEqual(acq2.shape, (3, 3))
acq3 = mod.gen()
self.assertEqual(acq3.shape, (1, 3))
def test_sobolgen_single(self):
# test that SobolGenerator doesn't mess with shapes
sobol1 = make_scaled_sobol(lb=[1, 2, 3], ub=[2, 3, 4], size=10, seed=12345)
sobol2 = torch.zeros((10, 3))
mod = SobolGenerator(lb=[1, 2, 3], ub=[2, 3, 4], dim=3, seed=12345)
for i in range(10):
sobol2[i, :] = mod.gen()
npt.assert_almost_equal(sobol1.numpy(), sobol2.numpy())
# check that bounds are also right
self.assertTrue(torch.all(sobol1[:, 0] > 1))
self.assertTrue(torch.all(sobol1[:, 1] > 2))
self.assertTrue(torch.all(sobol1[:, 2] > 3))
self.assertTrue(torch.all(sobol1[:, 0] < 2))
self.assertTrue(torch.all(sobol1[:, 1] < 3))
self.assertTrue(torch.all(sobol1[:, 2] < 4))
def test_sobol_config(self):
config_str = """
[common]
lb = [0]
ub = [1]
parnames = [par1]
stimuli_per_trial = 1
[SobolGenerator]
seed=12345
"""
config = Config()
config.update(config_str=config_str)
gen = SobolGenerator.from_config(config)
npt.assert_equal(gen.lb.numpy(), np.array([0]))
npt.assert_equal(gen.ub.numpy(), np.array([1]))
self.assertEqual(gen.seed, 12345)
self.assertEqual(gen.stimuli_per_trial, 1)
def test_pairwise_sobol_sizes(self):
for dim in np.arange(1, 4):
for nsamp in (3, 5, 7):
generator = SobolGenerator(
lb=np.arange(dim).tolist(),
ub=(1 + np.arange(dim)).tolist(),
stimuli_per_trial=2,
)
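                # with stimuli_per_trial=2, each trial stacks two stimuli
                # along a trailing dimension, giving shape (n, dim, 2)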
shape_out = (nsamp, dim, 2)
self.assertEqual(generator.gen(nsamp).shape, shape_out)
def test_axsobol_config(self):
config_str = """
[common]
parnames = [par1]
lb = [0]
ub = [1]
stimuli_per_trial = 1
outcome_types = [continuous]
strategy_names = [init]
[init]
generator = SobolGenerator
[SobolGenerator]
seed=12345
scramble=False
"""
config = Config(config_str=config_str)
gen = AxSobolGenerator.from_config(config, name="init")
self.assertEqual(gen.model, Models.SOBOL)
self.assertEqual(gen.model_kwargs["seed"], 12345)
self.assertFalse(gen.model_kwargs["scramble"])
if __name__ == "__main__":
unittest.main()
|