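# NOTE: The functions below are concatenated from several OSS Vizier modules.
# This import block is an added sketch of the shared dependencies they appear
# to assume; package-specific imports (pyglove as pg, vizier pyvizier as vz,
# tensorflow_probability bijectors/distributions, chex, jaxopt, jaxtyping as
# jt, and the Vizier service protos) are intentionally left out.
import base64
import functools
import hashlib
import json
import lzma
import math
import numbers
from typing import Any, Iterable, List, Literal, Optional, Sequence, Tuple, Type, Union

from absl import logging
import jax
import jax.numpy as jnp
import numpy as np
from scipy import stats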
def SIndex(dim: int, to_sz) -> np.ndarray:
"""Calculate the BBOB s_i.
Assumes i is 0-index based.
Args:
dim: dimension
to_sz: values whose signs decide the extra scaling (even indices with a
positive value get an additional factor of 10).
Returns:
Array of shape (dim,) containing the BBOB s values.
"""
s = np.zeros([
dim,
])
for i in range(dim):
if dim > 1:
s[i] = 10**(0.5 * (i / (dim - 1.0)))
else:
s[i] = 10**0.5
if i % 2 == 0 and to_sz[i] > 0:
s[i] *= 10
return s

def Fpen(vector: np.ndarray) -> float:
"""The BBOB Fpen function.
Args:
vector: ndarray.
Returns:
float representing Fpen(vector).
"""
return sum([max(0.0, (abs(x) - 5.0))**2 for x in vector.flat])
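# Illustrative sketch (added example, not part of the original module): only
# coordinates outside [-5, 5] contribute to Fpen, quadratically in the
# overshoot. Assumes numpy is imported as np, as elsewhere in this file.
def _example_fpen_usage() -> None:
  x = np.array([6.0, -7.0, 3.0])
  # Contributions: (6 - 5)**2 + (7 - 5)**2 + 0 = 1 + 4 = 5.
  assert Fpen(x) == 5.0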
def _IntSeeds(any_seeds: Sequence[Any], *, byte_length: int = 4) -> list[int]:
"""Array of integers that can be used as random state seed."""
int_seeds = []
for s in any_seeds:
# Hash into byte_length bytes' worth of hexadecimal characters.
hashed = hashlib.shake_128(str(s).encode("utf-8")).hexdigest(byte_length)
int_seeds.append(int(hashed, 16))
return int_seeds

def _ToFloat(a: int, b: np.ndarray) -> np.ndarray:
"""Convert a%b where b is an int into a float on [-0.5, 0.5]."""
return (np.int64(a) % b) / np.float64(b) - 0.5
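# Illustrative sketch (added example): _ToFloat maps an integer hash modulo b
# onto [-0.5, 0.5), e.g. 7 % 4 = 3 gives 3 / 4 - 0.5 = 0.25.
def _example_to_float_usage() -> None:
  out = _ToFloat(7, np.array([4, 8]))
  np.testing.assert_allclose(out, [0.25, 0.375])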
def _R(dim: int, seed: int, *moreseeds: Any) -> np.ndarray:
"""Returns an orthonormal rotation matrix.
Args:
dim: size of the resulting matrix.
seed: int seed. If set to 0, this function returns an identity matrix
regardless of *moreseeds.
*moreseeds: Additional parameters to include in the hash. Arguments are
converted to strings first.
Returns:
Array of shape (dim, dim), representing a rotation matrix.
"""
if seed == 0:
return np.identity(dim)
rng = np.random.default_rng(_IntSeeds(((seed, dim) + moreseeds)))
return stats.special_ortho_group.rvs(dim, random_state=rng)

def Sphere(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Sphere function."""
del seed
return float(np.sum(arr * arr))
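# Illustrative sketch (added example): every BBOB function in this file shares
# the same call convention -- a 1-D numpy array plus an integer seed that
# controls the random rotations (the seed is simply ignored by Sphere).
def _example_bbob_usage() -> None:
  x = np.array([1.0, 2.0])
  assert Sphere(x, seed=1) == 5.0  # 1**2 + 2**2.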
def Rastrigin(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Rastrigin function."""
dim = len(arr)
arr.shape = (dim, 1)
z = np.matmul(_R(dim, seed, b"R"), arr)
z = Tasy(ArrayMap(z, Tosz), 0.2)
z = np.matmul(_R(dim, seed, b"Q"), z)
z = np.matmul(LambdaAlpha(10.0, dim), z)
z = np.matmul(_R(dim, seed, b"R"), z)
return float(10 * (dim - np.sum(np.cos(2 * math.pi * z))) +
np.sum(z * z, axis=0))

def BuecheRastrigin(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB BuecheRastrigin function."""
del seed
dim = len(arr)
arr.shape = (dim, 1)
t = ArrayMap(arr, Tosz)
l = SIndex(dim, arr) * t.flat
term1 = 10 * (dim - np.sum(np.cos(2 * math.pi * l), axis=0))
term2 = np.sum(l * l, axis=0)
term3 = 100 * Fpen(arr)
return float(term1 + term2 + term3)

def LinearSlope(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB LinearSlope function."""
dim = len(arr)
arr.shape = (dim, 1)
r = _R(dim, seed, b"R")
z = np.matmul(r, arr)
result = 0.0
for i in range(dim):
s = 10**(i / float(dim - 1) if dim > 1 else 1)
z_opt = 5 * np.sum(np.abs(r[i, :]))
result += float(s * (z_opt - z[i]))
return result

def AttractiveSector(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Attractive Sector function."""
dim = len(arr)
arr.shape = (dim, 1)
x_opt = np.array([1 if i % 2 == 0 else -1 for i in range(dim)])
x_opt.shape = (dim, 1)
z_vec = np.matmul(_R(dim, seed, b"R"), arr - x_opt)
z_vec = np.matmul(LambdaAlpha(10.0, dim), z_vec)
z_vec = np.matmul(_R(dim, seed, b"Q"), z_vec)
result = 0.0
for i in range(dim):
z = z_vec[i, 0]
s = 100 if z * x_opt[i] > 0 else 1
result += (s * z)**2
return math.pow(Tosz(result), 0.9)

def StepEllipsoidal(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB StepEllipsoidal function."""
dim = len(arr)
arr.shape = (dim, 1)
z_hat = np.matmul(_R(dim, seed, b"R"), arr)
z_hat = np.matmul(LambdaAlpha(10.0, dim), z_hat)
z_tilde = np.array([
math.floor(0.5 + z) if (z > 0.5) else (math.floor(0.5 + 10 * z) / 10)
for z in z_hat.flat
])
z_tilde = np.matmul(_R(dim, seed, b"Q"), z_tilde)
s = 0.0
for i, val in enumerate(z_tilde):
exponent = 2.0 * float(i) / (dim - 1.0) if dim > 1.0 else 2.0
s += 10.0**exponent * val**2
value = max(abs(z_hat[0, 0]) / 1000, s)
return 0.1 * value + Fpen(arr)

def RosenbrockRotated(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB RosenbrockRotated function."""
dim = len(arr)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
z = max(1.0, (dim**0.5) / 8.0) * r_x + 0.5 * np.ones((dim,))
return float(
sum([
100.0 * (z[i]**2 - z[i + 1])**2 + (z[i] - 1)**2
for i in range(dim - 1)
]))

def Ellipsoidal(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Ellipsoidal function."""
del seed
dim = len(arr)
arr.shape = (dim, 1)
z_vec = ArrayMap(arr, Tosz)
s = 0.0
for i in range(dim):
exp = 6.0 * i / (dim - 1) if dim > 1 else 6.0
s += float(10**exp * z_vec[i] * z_vec[i])
return s

def Discus(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Discus function."""
dim = len(arr)
arr.shape = (dim, 1)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
z_vec = ArrayMap(r_x, Tosz)
return float(10**6 * z_vec[0] * z_vec[0]) + sum(
[z * z for z in z_vec[1:].flat])

def BentCigar(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB BentCigar function."""
dim = len(arr)
arr.shape = (dim, 1)
z_vec = np.matmul(_R(dim, seed, b"R"), arr)
z_vec = Tasy(z_vec, 0.5)
z_vec = np.matmul(_R(dim, seed, b"R"), z_vec)
return float(z_vec[0]**2) + 10**6 * np.sum(z_vec[1:]**2)

def SharpRidge(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB SharpRidge function."""
dim = len(arr)
arr.shape = (dim, 1)
z_vec = np.matmul(_R(dim, seed, b"R"), arr)
z_vec = np.matmul(LambdaAlpha(10, dim), z_vec)
z_vec = np.matmul(_R(dim, seed, b"Q"), z_vec)
return z_vec[0, 0]**2 + 100 * np.sum(z_vec[1:]**2)**0.5

def DifferentPowers(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB DifferentPowers function."""
dim = len(arr)
z = np.matmul(_R(dim, seed, b"R"), arr)
s = 0.0
for i in range(dim):
exp = 2 + 4 * i / (dim - 1) if dim > 1 else 6
s += abs(z[i])**exp
return s**0.5

def Weierstrass(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Weierstrass function."""
k_order = 12
dim = len(arr)
arr.shape = (dim, 1)
z = np.matmul(_R(dim, seed, b"R"), arr)
z = ArrayMap(z, Tosz)
z = np.matmul(_R(dim, seed, b"Q"), z)
z = np.matmul(LambdaAlpha(1.0 / 100.0, dim), z)
f0 = sum([0.5**k * math.cos(math.pi * 3**k) for k in range(k_order)])
s = 0.0
for i in range(dim):
for k in range(k_order):
s += 0.5**k * math.cos(2 * math.pi * (3**k) * (z[i] + 0.5))
return float(10 * (s / dim - f0)**3) + 10 * Fpen(arr) / dim

def SchaffersF7(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Weierstrass function."""
dim = len(arr)
arr.shape = (dim, 1)
if dim == 1:
return 0.0
z = np.matmul(_R(dim, seed, b"R"), arr)
z = Tasy(z, 0.5)
z = np.matmul(_R(dim, seed, b"Q"), z)
z = np.matmul(LambdaAlpha(10.0, dim), z)
s_arr = np.zeros(dim - 1)
for i in range(dim - 1):
s_arr[i] = float((z[i]**2 + z[i + 1]**2)**0.5)
s = 0.0
for i in range(dim - 1):
s += s_arr[i]**0.5 + (s_arr[i]**0.5) * math.sin(50 * s_arr[i]**0.2)**2
return (s / (dim - 1.0))**2 + 10 * Fpen(arr)

def SchaffersF7IllConditioned(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB SchaffersF7 Ill Conditioned."""
dim = len(arr)
arr.shape = (dim, 1)
if dim == 1:
return 0.0
z = np.matmul(_R(dim, seed, b"R"), arr)
z = Tasy(z, 0.5)
z = np.matmul(_R(dim, seed, b"Q"), z)
z = np.matmul(LambdaAlpha(1000.0, dim), z)
s_arr = np.zeros(dim - 1)
for i in range(dim - 1):
s_arr[i] = float((z[i]**2 + z[i + 1]**2)**0.5)
s = 0.0
for i in range(dim - 1):
s += s_arr[i]**0.5 + (s_arr[i]**0.5) * math.sin(50 * s_arr[i]**0.2)**2
return (s / (dim - 1.0))**2 + 10 * Fpen(arr)

def GriewankRosenbrock(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB GriewankRosenbrock function."""
dim = len(arr)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
# Slightly off BBOB documentation in order to center optima at origin.
# Should be: max(1.0, (dim**0.5) / 8.0) * r_x + 0.5 * np.ones((dim,)).
z_arr = max(1.0, (dim**0.5) / 8.0) * r_x + np.ones((dim,))
s_arr = np.zeros(dim)
for i in range(dim - 1):
s_arr[i] = 100.0 * (z_arr[i]**2 - z_arr[i + 1])**2 + (z_arr[i] - 1)**2
total = 0.0
for i in range(dim - 1):
total += (s_arr[i] / 4000.0 - math.cos(s_arr[i]))
return (10.0 * total) / (dim - 1) + 10

def Schwefel(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Schwefel function."""
del seed
dim = len(arr)
bernoulli_arr = np.array([pow(-1, i + 1) for i in range(dim)])
x_opt = 4.2096874633 / 2.0 * bernoulli_arr
x_hat = 2.0 * (bernoulli_arr * arr) # Element-wise multiplication
z_hat = np.zeros([dim, 1])
z_hat[0, 0] = x_hat[0]
for i in range(1, dim):
z_hat[i, 0] = x_hat[i] + 0.25 * (x_hat[i - 1] - 2 * abs(x_opt[i - 1]))
x_opt.shape = (dim, 1)
z_vec = 100 * (
np.matmul(LambdaAlpha(10, dim), z_hat - 2 * abs(x_opt)) + 2 * abs(x_opt))
total = sum([z * math.sin(abs(z)**0.5) for z in z_vec.flat])
return -(total / (100.0 * dim)) + 4.189828872724339 + 100 * Fpen(z_vec / 100)

def Katsuura(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Katsuura function."""
dim = len(arr)
arr.shape = (dim, 1)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
z_vec = np.matmul(LambdaAlpha(100.0, dim), r_x)
z_vec = np.matmul(_R(dim, seed, b"Q"), z_vec)
prod = 1.0
for i in range(dim):
s = 0.0
for j in range(1, 33):
s += abs(2**j * z_vec[i, 0] - round(2**j * z_vec[i, 0])) / 2**j
prod *= (1 + (i + 1) * s)**(10.0 / dim**1.2)
return (10.0 / dim**2) * prod - 10.0 / dim**2 + Fpen(arr)

def Lunacek(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Lunacek function."""
dim = len(arr)
arr.shape = (dim, 1)
mu0 = 2.5
s = 1.0 - 1.0 / (2.0 * (dim + 20.0)**0.5 - 8.2)
mu1 = -((mu0**2 - 1) / s)**0.5
x_opt = np.array([mu0 / 2] * dim)
x_hat = np.array([2 * arr[i, 0] * np.sign(x_opt[i]) for i in range(dim)])
x_vec = x_hat - mu0
x_vec.shape = (dim, 1)
x_vec = np.matmul(_R(dim, seed, b"R"), x_vec)
z_vec = np.matmul(LambdaAlpha(100, dim), x_vec)
z_vec = np.matmul(_R(dim, seed, b"Q"), z_vec)
s1 = sum([(val - mu0)**2 for val in x_hat])
s2 = sum([(val - mu1)**2 for val in x_hat])
s3 = sum([math.cos(2 * math.pi * z) for z in z_vec.flat])
return min(s1, dim + s * s2) + 10.0 * (dim - s3) + 10**4 * Fpen(arr)

def Gallagher101Me(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Gallagher101 function."""
dim = len(arr)
arr.shape = (dim, 1)
num_optima = 101
optima_list = [np.zeros([dim, 1])]
for i in range(num_optima - 1):
vec = np.zeros([dim, 1])
for j in range(dim):
alpha = (i * dim + j + 1.0) / (dim * num_optima + 2.0)
assert alpha > 0
assert alpha < 1
vec[j, 0] = -5 + 10 * alpha
optima_list.append(vec)
c_list = [LambdaAlpha(1000, dim)]
for i in range(num_optima - 1):
alpha = 1000.0**(2.0 * (i) / (num_optima - 2))
c_mat = LambdaAlpha(alpha, dim) / (alpha**0.25)
c_list.append(c_mat)
rotation = _R(dim, seed, b"R")
max_value = -1.0
for i in range(num_optima):
w = 10 if i == 0 else (1.1 + 8.0 * (i - 1.0) / (num_optima - 2.0))
diff = np.matmul(rotation, arr - optima_list[i])
e = np.matmul(diff.transpose(), np.matmul(c_list[i], diff))
max_value = max(max_value, w * math.exp(-float(e) / (2.0 * dim)))
return Tosz(10.0 - max_value)**2 + Fpen(arr)

def Gallagher21Me(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Gallagher21 function."""
dim = len(arr)
arr.shape = (dim, 1)
num_optima = 21
optima_list = [np.zeros([dim, 1])]
for i in range(num_optima - 1):
vec = np.zeros([dim, 1])
for j in range(dim):
alpha = (i * dim + j + 1.0) / (dim * num_optima + 2.0)
assert alpha > 0
assert alpha < 1
vec[j, 0] = -5 + 10 * alpha
optima_list.append(vec)
c_list = [LambdaAlpha(1000, dim)]
for i in range(num_optima - 1):
alpha = 1000.0**(2.0 * (i) / (num_optima - 2))
c_mat = LambdaAlpha(alpha, dim) / (alpha**0.25)
c_list.append(c_mat)
rotation = _R(dim, seed, b"R")
max_value = -1.0
for i in range(num_optima):
w = 10 if i == 0 else (1.1 + 8.0 * (i - 1.0) / (num_optima - 2.0))
diff = np.matmul(rotation, arr - optima_list[i])
e = np.matmul(diff.transpose(), np.matmul(c_list[i], diff))
max_value = max(max_value, w * math.exp(-float(e) / (2.0 * dim)))
return Tosz(10.0 - max_value)**2 + Fpen(arr)

def NegativeSphere(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB Sphere function."""
dim = len(arr)
arr.shape = (dim, 1)
z = np.matmul(_R(dim, seed, b"R"), arr)
return float(100 + np.sum(z * z) - 2 * (z[0]**2))

def NegativeMinDifference(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for NegativeMinDifference function."""
dim = len(arr)
arr.shape = (dim, 1)
z = np.matmul(_R(dim, seed, b"R"), arr)
min_difference = 10000
for i in range(len(z) - 1):
min_difference = min(min_difference, z[i + 1] - z[i])
return 10.0 - float(min_difference) + 1e-8 * float(sum(arr))

def FonsecaFleming(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for FonsecaFleming function."""
del seed
return 1.0 - float(np.exp(-np.sum(arr * arr)))

def _branin(x: np.ndarray) -> float:
"""Branin function.
This function can accept batch shapes, although it is typed to return floats
to conform to NumpyExperimenter API.
Args:
x: Shape (B*, 2) array.
Returns:
Shape (B*) array.
"""
a = 1
b = 5.1 / (4 * np.pi**2)
c = 5 / np.pi
r = 6
s = 10
t = 1 / (8 * np.pi)
x1 = x[..., 0]
x2 = x[..., 1]
y = a * (x2 - b * x1**2 + c * x1 - r) ** 2 + s * (1 - t) * np.cos(x1) + s
return y
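# Illustrative sketch (added example): the Branin function has a known global
# minimum of roughly 0.397887 at (pi, 2.275), which makes a convenient check.
def _example_branin_usage() -> None:
  x_star = np.array([np.pi, 2.275])
  np.testing.assert_allclose(_branin(x_star), 0.397887, atol=1e-5)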
def _float_term(x_list: list[float]) -> float:
"""Computes the float term on a list of values.
Args:
x_list: Elements in the list correspond to a different dimension/Parameter.
Returns:
The float term accounting for all the float Parameters.
"""
float_term = 0
for x in x_list:
float_term += min(
1.6,
(x > -0.8) * -((x - 1) ** 2)
+ (x <= -0.8) * (-(1.8**2) + 150 * (x + 0.8) ** 2),
)
return float_term

def _categorical_term(x: str, best_category: SimpleKDCategory) -> float:
"""Computes the categorical term."""
if x != best_category:
return 0
elif x == 'corner':
return 1
elif x == 'center':
return 1
elif x == 'mixed':
return 1.5
raise NotImplementedError(f'Unknown categorical parameter: {x}')

def _discrete_term(x_list: list[int]) -> float:
"""Computes the discrete term on a list of values."""
discrete_term = 0
for x in x_list:
discrete_term += [1.2, 0.0, 0.6, 0.8, 1.0][
_feasible_discrete_values.index(x)
]
return discrete_term

def _int_term(x_list: list[int]) -> float:
"""Computes the int term on a list of values."""
int_term = 0
for x in x_list:
int_term += np.power(x - 2.2, 2) / 2.0
return int_term
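# Illustrative sketch (added example): the int term is a quadratic centered at
# 2.2, so x = 2 contributes (2 - 2.2)**2 / 2 = 0.02 and x = 4 contributes 1.62.
def _example_int_term_usage() -> None:
  np.testing.assert_allclose(_int_term([2, 4]), 0.02 + 1.62)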
def assert_evaluates_random_suggestions(
test,
experimenter: experimenter_lib.Experimenter,
) -> None:
"""Asserts that random suggestions from the search space are valid."""
runner = benchmark_runner.BenchmarkRunner(
[benchmark_runner.GenerateAndEvaluate(10)], num_repeats=1
)
state = benchmark_state.BenchmarkState(
experimenter=experimenter,
algorithm=benchmark_state.PolicySuggester.from_designer_factory(
experimenter.problem_statement(), random.RandomDesigner.from_problem
),
)
runner.run(state)
test.assertLen(
state.algorithm.supporter.GetTrials(
status_matches=vz.TrialStatus.COMPLETED
),
10,
)

def stochastic_process_model_loss_fn(
params: types.ParameterDict,
model: sp.StochasticProcessModel,
data: types.ModelData,
normalize: bool = False,
):
"""Loss function for a stochastic process model."""
gp, mutables = model.apply(
{'params': params},
data.features,
mutable=['losses', 'predictive'],
)
labels = data.labels.padded_array
if len(gp.event_shape) == 1 and labels.shape[-1] == 1:
labels = jnp.squeeze(data.labels.padded_array, axis=-1)
loss = -gp.log_prob(
labels,
is_missing=data.labels.is_missing[0],
) + jax.tree_util.tree_reduce(jnp.add, mutables['losses'])
if normalize:
loss /= data.labels.original_shape[0]
return loss, dict()

def stochastic_process_model_setup(
key: jax.Array,
model: sp.StochasticProcessModel,
data: types.ModelData,
):
"""Setup function for a stochastic process model."""
return model.init(key, data.features)['params']

def _build_predictive_distribution(
xs: types.ModelInput,
model: sp.StochasticProcessModel,
state: types.GPState,
use_vmap: bool = True,
) -> tfd.Distribution:
"""Generates the predictive distribution on array function."""
def _predict_on_array_one_model(
model_state: types.ModelState, *, xs: types.ModelInput
) -> tfd.Distribution:
return model.apply(
model_state,
xs,
state.data.features,
state.data.labels,
method=model.posterior_predictive,
)
if not use_vmap:
return _predict_on_array_one_model(state.model_state, xs=xs)
def _predict_mean_and_stddev(state_: types.ModelState) -> tfd.Distribution:
dist = _predict_on_array_one_model(state_, xs=xs)
return {'mean': dist.mean(), 'stddev': dist.stddev()} # pytype: disable=attribute-error # numpy-scalars
# Returns a dictionary with mean and stddev, of shape [M, N].
# M is the size of the parameter ensemble and N is the number of points.
pp = jax.vmap(_predict_mean_and_stddev)(state.model_state)
batched_normal = tfd.Normal(pp['mean'].T, pp['stddev'].T) # pytype: disable=attribute-error # numpy-scalars
return tfd.MixtureSameFamily(
tfd.Categorical(logits=jnp.ones(batched_normal.batch_shape[1])),
batched_normal,
)

def predict_on_array(
xs: types.ModelInput,
model: sp.StochasticProcessModel,
state: types.GPState,
use_vmap: bool = True,
):
"""Prediction function on features array."""
dist = _build_predictive_distribution(xs, model, state, use_vmap)
return {'mean': dist.mean(), 'stddev': dist.stddev()}

def acquisition_on_array(
xs: types.ModelInput,
model: sp.StochasticProcessModel,
acquisition_fn: acquisitions_lib.AcquisitionFunction,
state: types.GPState,
trust_region: Optional[acquisitions_lib.TrustRegion] = None,
use_vmap: bool = True,
):
"""Acquisition function on features array."""
dist = _build_predictive_distribution(xs, model, state, use_vmap)
acquisition = acquisition_fn(dist)
if trust_region is not None:
distance = trust_region.min_linf_distance(xs)
# Due to output normalization, acquisition can't be nearly as
# low as -1e12.
# We use a bad value that decreases in the distance to trust region
# so that acquisition optimizer can follow the gradient and escape
# untrusted regions.
return jnp.where(
(trust_region.trust_radius >= 0.5)
| (
(distance <= trust_region.trust_radius)
& (trust_region.trust_radius < 0.5)
),
acquisition,
-1e12 - distance,
)
else:
return acquisition

def _squeeze_to_event_dims(
dist: tfd.Distribution, labels: jax.Array
) -> jax.Array:
"""Squeezes the singleton `metrics` dimension from `labels`, if applicable."""
if len(dist.event_shape) == 1 and labels.shape == (dist.event_shape[0], 1):
return jnp.squeeze(labels, axis=-1)
return labels

def get_constraints(
model: StochasticProcessModel, x: Optional[Any] = None
) -> Constraint:
"""Gets the parameter constraints from a StochasticProcessModel.
If the model contains trainable Flax variables besides those defined by the
coroutine (for example, if `mean_fn` is a Flax module), the non-coroutine
variables are assumed to be unconstrained (the bijector passes them through
unmodified, and their lower/upper bounds are `None`). In this case `x` must be
passed, so that the structure of the non-coroutine parameters dict(s) can be
generated. `Constraint` objects for models with constrained parameters aside
from those defined in the coroutine must be built manually.
This method runs the coroutine, collects the parameter constraints, and
returns a new Constraint object in which the lower/upper bounds are dicts
(corresponding to the parameters dict) and the bijector operates on dicts. The
object may be passed to a Vizier Optimizer to constrain the parameters for
optimization.
Example:
```python
def model_coroutine(inputs=None):
amplitude_constraint = Constraint(
bounds=(jnp.array(0.1), None))
length_scale_constraint = Constraint.create(
bounds=(jnp.array(0.0), jnp.array(10.0)),
bijector_fn=tfb.Sigmoid)
amplitude = yield ModelParameter.from_prior(
tfd.LogNormal(0.0, 1.0, name='amplitude'),
constraint=amplitude_constraint)
length_scale = yield ModelParameter(
init_fn=jax.random.exponential,
regularizer=lambda x: 1e-3 * x**2,
constraint=length_scale_constraint,
name='length_scale')
kernel = tfpk.ExponentiatedQuadratic(
amplitude=amplitude, length_scale=length_scale)
return tfd.GaussianProcess(kernel, index_points=inputs)
model = StochasticProcessModel(model_coroutine)
constraint = GetConstraints(model)
constraint.bijector
# => tfb.JointMap({'amplitude': tfb.Exp(),
'length_scale': tfb.Sigmoid(0.0, 10.0)})
constraint.bounds
# => ({'amplitude': jnp.array(0.1), 'length_scale': jnp.array(0.0)},
# {'amplitude': None, 'length_scale': jnp.array(10.0)})
```
Args:
model: A `StochasticProcessModel` instance.
x: An input that can be passed to `model.lazy_init`. `x` must be of the same
structure as the model inputs and may contain arrays or `ShapeDtypeStruct`
instances (see flax.linen.Module.lazy_init docs). If `model` contains Flax
variables aside from those defined by `model.coroutine` (e.g. in a
trainable `mean_fn`) then this arg is required.
Returns:
constraint: A `Constraint` instance expressing constraints on the parameters
specified by `coroutine`.
"""
# Run the coroutine to extract constraints for the model parameters defined in
# the coroutine.
gen = model.coroutine()
k = jax.random.PRNGKey(0)
lower = {}
upper = {}
bijectors = {}
try:
p = next(gen)
while True:
v = p.init_fn(k)
if p.constraint is None or p.constraint.bounds is None:
lower[p.name] = None
upper[p.name] = None
else:
lower[p.name] = p.constraint.bounds[0]
upper[p.name] = p.constraint.bounds[1]
if p.constraint is None or p.constraint.bijector is None:
bijectors[p.name] = tfb.Identity()
else:
bijectors[p.name] = p.constraint.bijector
p = gen.send(v)
except StopIteration:
pass
# `tfb.JointMap` applies a structure of bijectors to a parallel structure of
# inputs. Define a `JointMap` bijector that maps an unconstrained parameters
# dict to a constrained parameters dict with a dict of bijectors (all dicts
# are keyed by parameter names).
bijector = tfb.JointMap(bijectors=bijectors)
if x is not None:
# Get the parameters dict keys, if any, that do not come from the coroutine
# (e.g. `mean_fn` parameters).
params = model.lazy_init(jax.random.PRNGKey(0), x)['params']
non_coroutine_keys = set(params.keys()) - set(bijectors.keys())
# Define a bijector that ignores (applies an identity transformation to)
# non-coroutine parameters.
if non_coroutine_keys:
logging.info(
(
'Defining a constraint object that ignores the following '
'non-coroutine parameters: %s'
),
non_coroutine_keys,
)
def _wrap_bijector_method_to_ignore_non_coro(f):
"""Wrap bijector methods to pass non-coroutine params through."""
def _f(p):
p_ = p.copy()
non_coroutine_params = {k: p_.pop(k) for k in non_coroutine_keys}
y = f(p_)
y.update(non_coroutine_params)
return y
return _f
def _bijector_fldj_with_non_coro(p):
"""Non-coroutine params do not affect the FLDJ."""
p_ = {k: v for k, v in p.items() if k not in non_coroutine_keys}
return bijector.forward_log_det_jacobian(p_)
bijector_forward_min_event_ndims = bijector.forward_min_event_ndims.copy()
# Populate `lower` and `upper` bounds dicts with `None` values for entries
# corresponding to non-coroutine params.
for k in non_coroutine_keys:
lower[k] = tree.map_structure(lambda _: None, params[k])
upper[k] = tree.map_structure(lambda _: None, params[k])
bijector_forward_min_event_ndims[k] = tree.map_structure(
lambda _: 0, params[k]
)
bijector = tfb.Inline(
forward_fn=_wrap_bijector_method_to_ignore_non_coro(bijector.forward),
inverse_fn=_wrap_bijector_method_to_ignore_non_coro(bijector.inverse),
forward_log_det_jacobian_fn=_bijector_fldj_with_non_coro,
forward_min_event_ndims=bijector_forward_min_event_ndims,
)
else:
# If the model doesn't have params aside from those defined by the
# coroutine, its params should have the same structure as `bijectors`
# (this assertion failing indicates a bug).
try:
tree.assert_same_structure(params, bijectors)
except ValueError as exc:
raise ValueError(
'`params` and `bijectors` should have the same nested structure. '
f'Saw: `params={params}` and `bijectors={bijectors}`'
) from exc
return Constraint((lower, upper), bijector=bijector)

def _initialize_params(
coroutine: ModelCoroutine, rng: jax.Array
) -> chex.ArrayTree:
"""Randomly initializes a coroutine's parameters."""
gen = coroutine()
params = {}
try:
p: ModelParameter = next(gen)
while True:
# Declare a Flax variable with the name and initialization function from
# the `ModelParameter`.
rng, init_rng = jax.random.split(rng)
param = p.init_fn(init_rng)
params[p.name] = param
p: ModelParameter = gen.send(param)
except StopIteration:
return params

def _test_coroutine(
inputs: Optional[types.ModelInput] = None,
num_tasks=1,
dtype=np.float64,
):
"""A coroutine that follows the `ModelCoroutine` protocol."""
kernel = yield from _kernel_coroutine(dtype=dtype)
if inputs is not None:
kernel = mask_features.MaskFeatures(
kernel,
dimension_is_missing=tfpke.ContinuousAndCategoricalValues(
continuous=inputs.continuous.is_missing[1],
categorical=inputs.categorical.is_missing[1],
),
)
inputs = tfpke.ContinuousAndCategoricalValues(
inputs.continuous.padded_array, inputs.categorical.padded_array
)
if num_tasks == 1:
return tfd.StudentTProcess(
df=np.array(5.0).astype(dtype),
kernel=kernel,
index_points=inputs,
observation_noise_variance=np.ones([], dtype=dtype),
validate_args=True,
)
multi_task_kernel = tfpke.Independent(num_tasks=num_tasks, base_kernel=kernel)
return tfde.MultiTaskGaussianProcess(
kernel=multi_task_kernel,
index_points=inputs,
observation_noise_variance=np.ones([], dtype=dtype),
validate_args=True,
)

def _is_dominated(
y1: jt.Float[jt.Array, "M"],
y2: jt.Float[jt.Array, "M"],
strict: bool = True,
) -> jt.Bool[jt.Array, ""]:
"""True if y2 > y1 (or y2 >= y1 if strict is False) every coordinate."""
dominated_or_equal = jnp.all(y1 <= y2)
if strict:
return dominated_or_equal & jnp.any(y2 > y1)
else:
return dominated_or_equal
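# Illustrative sketch (added example): y2 dominates y1 when it is at least as
# good in every metric and, with strict=True, strictly better in at least one.
def _example_is_dominated_usage() -> None:
  y1 = jnp.array([1.0, 2.0])
  y2 = jnp.array([1.0, 3.0])
  assert bool(_is_dominated(y1, y2))
  assert not bool(_is_dominated(y2, y1))
  assert bool(_is_dominated(y1, y1, strict=False))  # Ties pass when not strict.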
def _is_pareto_optimal_against(
yy: jt.Float[jt.Array, "B1 M"],
baseline: jt.Float[jt.Array, "B2 M"],
*,
strict: bool,
) -> jt.Bool[jt.Array, "B1"]:
"""Computes if nothing in `baseline` dominates `yy`.
Args:
yy: array of shape [B1, M] where M is number of metrics.
baseline: array of shape [B2, M] where M is number of metrics.
strict: If true, strict dominance is used.
Returns:
Boolean array of shape [B1]
"""
jax_dominated_mv = jax.vmap(
functools.partial(_is_dominated, strict=strict), (None, 0), 0
) # ([b,a], [a]) -> [b]
jax_dominated_mm = jax.vmap(jax_dominated_mv, (0, None),
0) # ([b,a], [c,a]) -> [b,c]
return jnp.logical_not(jnp.any(jax_dominated_mm(yy, baseline),
axis=-1))

def is_frontier(
ys: jt.Float[jt.ArrayLike, "B M"],
*,
num_shards: int = 10,
verbose: bool = False,
) -> jt.Bool[jt.ArrayLike, "B"]:
"""Efficiently compute `_is_pareto_optimal_against(ys, ys, strict=True)`.
Divide `ys` into shards and gradually trim down the candidates.
Args:
ys: Array of shape [B, M] where M is number of metrics.
num_shards: Each sharding results in filtering, i.e. indexing the array with
boolean vector. This operation can be very expensive and dominate the cost
of computation. Use a moderate number sublinear in B, e.g. log(B).
verbose: If True, prints progress for each shard.
Returns:
Boolean numpy Array of shape [B].
"""
idx = np.linspace(0, ys.shape[0], num_shards).astype(np.int32)
idx = list(reversed(idx))
# Initialize candidates with all points
frontier = np.ones(ys.shape[0], dtype=np.bool_)
for begin, end in zip(idx[1:], idx[:-1]):
candidates = ys[frontier]
# Filter candidates by comparing against the slice.
if verbose:
print(f"Compare {len(candidates)} against {begin}:{end}.")
tt = _is_pareto_optimal_against(candidates, ys[begin:end], strict=True)
frontier[frontier] = tt
return frontier
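# Illustrative sketch (added example): recover the Pareto frontier of a small
# maximization problem; only rows that no other row dominates stay True.
def _example_is_frontier_usage() -> None:
  ys = np.array([[1.0, 3.0],
                 [2.0, 2.0],
                 [0.5, 0.5],  # Dominated by every other point.
                 [3.0, 1.0]])
  mask = is_frontier(ys, num_shards=2)
  np.testing.assert_array_equal(mask, [True, True, False, True])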
def get_frontier(
ys: jt.Float[jt.ArrayLike, "B M"],
*,
num_shards: int = 10,
verbose: bool = True,
) -> jnp.ndarray:
"""Efficiently compute `ys[_is_pareto_optimal_against(ys, ys, strict=True)]` using iterative filtering.
Divide `ys` into shards and gradually trim down the candidates.
`get_frontier` doesn't call `is_frontier`, because `get_frontier` runs faster
by not slicing the full `ys` every iteration.
Args:
ys: Array of shape [B, M] where M is number of metrics.
num_shards: Each sharding results in filtering, i.e. indexing the array with
boolean vector. This operation can be very expensive and dominate the cost
of computation. Use a moderate number sublinear in B, e.g. log(B).
verbose: If True, prints progress for each shard.
Returns:
Array of shape [B, M].
"""
idx = np.linspace(0, ys.shape[0], num_shards).astype(np.int32)
idx = list(reversed(idx))
# Initialize candidates with all points
candidates = jnp.asarray(ys)
for begin, end in zip(idx[1:], idx[:-1]):
# Filter candidates by comparing against the slice.
if verbose:
# Use print. This method won't run in production anyways.
print(f"Compare {len(candidates)} against {begin}:{end}.")
tt = _is_pareto_optimal_against(candidates, ys[begin:end], strict=True)
candidates = candidates[tt]
return candidates

def pareto_rank(ys: jt.Float[jt.ArrayLike, "B M"]) -> jt.Int[jt.ArrayLike, "B"]:
"""Returns the pareto rank."""
jax_dominated_mv = jax.vmap(
functools.partial(_is_dominated, strict=True), (None, 0), 0
) # ([b,a], [a]) -> [b]
jax_dominated_mm = jax.vmap(
jax_dominated_mv, (0, None), 0
) # ([b,a], [c,a]) -> [b,c]
domination_matrix = jax_dominated_mm(ys, ys)
return jnp.sum(domination_matrix, axis=1)

def _cum_hypervolume_origin(
points: jt.Float[jt.ArrayLike, "B M"], vector: jt.Float[jt.Array, "... M"]
) -> jt.Float[jt.Array, "B"]:
"""Returns a randomized approximation of the cumulative dominated hypervolume.
See Section 3, Lemma 5 of https://arxiv.org/pdf/2006.04655.pdf for a fuller
explanation of the technique. This assumes the reference point is the
origin.
NOTE: This returns an unnormalized hypervolume.
Args:
points: Any set of points with shape (num_points, dimension).
vector: A vector of length dimension.
Returns:
Approximated cumulative dominated hypervolume of points[:i]. Length is
num_points.
"""
ratios = points / vector
coordinate_min_ratio = jnp.min(ratios, axis=1)
return jax.lax.cummax(coordinate_min_ratio, axis=0)**len(vector)

def _log_uniform_init(
low: Union[float, np.floating],
high: Union[float, np.floating],
shape: tuple[int, ...] = tuple(),
) -> sp.InitFn:
r"""Take log-uniform sample in the constraint and map it back to \R.
Args:
low: Parameter lower bound.
high: Parameter upper bound.
shape: Returned array has this shape. Each entry in the returned array is an
i.i.d sample.
Returns:
Randomly sampled array.
"""
def sample(key: Any) -> jnp.ndarray:
unif = jax.random.uniform(key, shape, dtype=jnp.float64)
return jnp.exp(unif * jnp.log(high / low) + jnp.log(low))
return sample
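# Illustrative sketch (added example): draws land in [low, high] and are
# uniform in log space; handy as an InitFn for positive kernel parameters.
# Exact float64 output assumes jax x64 mode; otherwise JAX falls back to
# float32 with a warning.
def _example_log_uniform_init_usage() -> None:
  init_fn = _log_uniform_init(1e-3, 1e2, shape=(8,))
  samples = init_fn(jax.random.PRNGKey(0))
  assert samples.shape == (8,)
  assert bool(jnp.all((samples >= 1e-3) & (samples <= 1e2)))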
def get_best_params(
losses: jax.Array,
all_params: chex.ArrayTree,
*,
best_n: Optional[int] = None,
) -> chex.ArrayTree:
"""Returns the top `best_n` parameters that minimize the losses.
Args:
losses: Shape (N,) array
all_params: ArrayTree whose leaves have shape (N, ...)
best_n: Integer greater than or equal to 1. If None, squeezes the leading
dimension.
Returns:
Top `best_n` parameters.
"""
argsorted = jnp.argsort(losses)
if not best_n:
best_idx = argsorted[:1]
else:
best_idx = argsorted[:best_n]
logging.info('Best loss(es): %s at retry %s', losses[best_idx], best_idx)
optimal_params = jax.tree_util.tree_map(lambda p: p[best_idx], all_params)
if best_n is None:
optimal_params = jax.tree_util.tree_map(
functools.partial(jnp.squeeze, axis=0), optimal_params
)
return optimal_params
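# Illustrative sketch (added example): keep the parameters attaining the two
# smallest losses; with best_n=None the leading axis would also be squeezed.
def _example_get_best_params_usage() -> None:
  losses = jnp.array([3.0, 1.0, 2.0])
  params = {'w': jnp.array([10.0, 11.0, 12.0])}
  best = get_best_params(losses, params, best_n=2)
  np.testing.assert_allclose(best['w'], [11.0, 12.0])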
def _none_to_inf(b: float, inf: float, params: chex.ArrayTree):
"""Converts None bounds to inf or -inf to pass to the optimizer."""
if b is None:
b = inf
if _is_leaf(b):
# Broadcast scalars to the parameter structure.
return tree.map_structure(lambda x: b * jnp.ones_like(x), params)
else:
# For structured bounds, replace `None`s in the structure with `inf`.
return tree.map_structure(
lambda b_, x: jnp.ones_like(x) * (inf if b_ is None else b_), b, params
)

def _get_bounds(
params: core.Params,
constraints: Optional[sp.Constraint],
) -> Optional[tuple[chex.ArrayTree, chex.ArrayTree]]:
"""Returns (Lower, upper) ArrayTrees with the same shape as params."""
if constraints is None:
return None
else:
lb = _none_to_inf(constraints.bounds[0], -jnp.inf, params)
ub = _none_to_inf(constraints.bounds[1], jnp.inf, params)
logging.info(
'constraints\n: %s converted to bounds:\n %s', constraints, (lb, ub)
)
return (lb, ub)

def _run_parallel_lbfgs(
loss_fn: core.LossFunction[core.Params],
init_params_batch: core.Params,
*,
bounds: Optional[tuple[chex.ArrayTree, chex.ArrayTree]],
options: LbfgsBOptions,
) -> tuple[core.Params, Any]:
"""Called by JaxoptLbfgsB."""
def _run_one_lbfgs(
init_params: core.Params,
) -> tuple[core.Params, Any]:
lbfgsb = jaxopt.LBFGSB(
fun=loss_fn,
maxls=options.num_line_search_steps,
tol=options.tol,
maxiter=options.maxiter,
has_aux=True,
)
return lbfgsb.run(init_params=init_params, bounds=bounds)
# We chose map over vmap because some of the lbfgs runs may terminate early.
# pmap is also not fit for our use case, because we typically have a single
# processor.
return jax.lax.map(_run_one_lbfgs, init_params_batch)

def _to_json_str_compressed(value: Any) -> str:
"""Serialize (maybe) symbolic object to compressed JSON value."""
return base64.b64encode(
lzma.compress(json.dumps(
pg.to_json(value)).encode('utf-8'))).decode('ascii')

def _parameter_with_external_type(
val: vz.ParameterValueTypes,
external_type: vz.ExternalType) -> vz.ParameterValueTypes:
"""Converts a parameter value to proper external type."""
if external_type == vz.ExternalType.BOOLEAN:
# We output the strings 'True' or 'False', not booleans themselves,
# because BOOLEAN is internally CATEGORICAL.
return val
elif external_type == vz.ExternalType.INTEGER:
return int(val)
elif external_type == vz.ExternalType.FLOAT:
return float(val)
else:
return val

def _make_decision_point(
parameter_config: vz.ParameterConfig) -> pg.geno.DecisionPoint:
"""Make a decision point (DNASpec) out from a parameter config."""
# NOTE(daiyip): We set the name of each decision point instead of its
# location with parameter name.
#
# Why? For conditional space, the ID of a decision point is a
# path of locations from the root to the leaf node. For example, if there
# are two parameters - a parent with location 'a' and a child with location
# 'b', the ID for the child will be 'a.b'. However, for external (
# non-PyGlove created) study, the parameter name for the child does not
# follow this pattern. The solution is to use the ``name`` property of
# `DNASpec`, which allows the user to access hierarchical decision
# points by name, also DNA supports to_dict/from_dict based on the decision
# point names instead of their IDs. Therefore, we can minimize the
# difference between a PyGlove created study and an external study.
name = parameter_config.name
if parameter_config.type == vz.ParameterType.DOUBLE:
# Create `pg.geno.Float` which does not have child spaces.
min_value, max_value = parameter_config.bounds
return pg.geno.Float(min_value, max_value, name=name)
elif parameter_config.type in (vz.ParameterType.CATEGORICAL,
vz.ParameterType.DISCRETE,
vz.ParameterType.INTEGER):
# Create `pg.geno.Choices` with possible child spaces.
candidates = []
literal_values = []
for val in parameter_config.feasible_values:
child_decision_points = []
if parameter_config.child_parameter_configs:
for child_pc in parameter_config.child_parameter_configs:
if val in child_pc.matching_parent_values:
child_decision_points.append(_make_decision_point(child_pc))
candidates.append(pg.geno.Space(child_decision_points))
literal_values.append(
_parameter_with_external_type(val, parameter_config.external_type))
return pg.geno.Choices(
1, candidates, literal_values=literal_values, name=name)
else:
raise ValueError(
f'Parameter Config Type {parameter_config.type!r} is not supported.')

def _to_search_space(dna_spec: pg.DNASpec) -> vz.SearchSpace:
"""Converts a DNASpec to Vizier search space.
Args:
dna_spec:
Returns:
Vizier search space.
Raises:
NotImplementedError: If no part of the spec can be converted to a Vizier
parameter.
"""
def _parameter_name(path: pg.KeyPath) -> str:
# NOTE(daiyip): Vizier doesn't support empty name, thus we use a
# special parameter name for the hyper value at root.
return path.path if path else constants.PARAMETER_NAME_ROOT
def _categories(spec: pg.geno.Choices) -> List[str]:
return [spec.format_candidate(i) for i in range(len(spec.candidates))]
def _category_value(spec: pg.geno.Choices, index: int) -> str:
assert index < len(spec.candidates)
return spec.format_candidate(index)
def _add_dna_spec(root: vz.SearchSpaceSelector, path: pg.KeyPath,
spec: pg.DNASpec) -> None:
"""Convert a DNASpec node with parent choice to a list of parameters.
Args:
root: The DNA spec is added to this root.
path: Root path of current DNA spec.
spec: Current DNA spec.
"""
if isinstance(spec, pg.geno.Space):
for elem in spec.elements:
_add_dna_spec(root, path + elem.location, elem)
elif isinstance(spec, pg.geno.Choices):
is_discrete = all(
isinstance(v, numbers.Number) for v in spec.literal_values
) and len(set(spec.literal_values)) == len(spec.literal_values)
for choice_idx in range(spec.num_choices):
choice_path = path
if spec.num_choices > 1:
choice_path = choice_path + choice_idx
if is_discrete:
unique_feasible_points = sorted(set(spec.literal_values))
root.add_discrete_param(
name=_parameter_name(choice_path),
# We sort the literal values since Vizier requires the feasible
# points of a discrete parameter to be in increasing order.
# The sorting has no impact to the trial parameter -> DNA
# conversion since for numeric literal value, the conversion
# is value based.
feasible_values=unique_feasible_points,
)
if unique_feasible_points != spec.literal_values:
logging.warning(
'Candidates for parameter %r have been reordered/deduped from '
'%s to %s to meet the sorted/distinct requirement for discrete '
'parameter specification.',
_parameter_name(choice_path),
spec.literal_values,
unique_feasible_points)
else:
new_parameter: vz.SearchSpaceSelector = root.add_categorical_param(
name=_parameter_name(choice_path),
feasible_values=_categories(spec))
for candidate_idx, candidate in enumerate(spec.candidates):
candidate_path = choice_path + pg.geno.ConditionalKey(
candidate_idx, len(spec.candidates)
)
child: vz.SearchSpaceSelector = new_parameter.select_values(
[_category_value(spec, candidate_idx)])
_add_dna_spec(child, candidate_path, candidate)
elif isinstance(spec, pg.geno.Float):
root.add_float_param(
name=_parameter_name(path),
scale_type=get_scale_type(spec.scale),
min_value=spec.min_value,
max_value=spec.max_value)
elif isinstance(spec, pg.geno.CustomDecisionPoint):
# For CustomDecisionPoint, there is not a corresponding parameter type
# in Vizier since its value is a variable string. In such case the
# parameter value will be put into metadata.
logging.info(
'Encountered custom decision point %s, which will not be shown '
'in Vizier dashboard.',
_parameter_name(path),
)
else:
raise NotImplementedError(
f'Spec has unknown type. This Should never happen. Spec: {spec}')
search_space = vz.SearchSpace()
_add_dna_spec(search_space.root, pg.KeyPath(), dna_spec)
if not search_space.parameters:
raise NotImplementedError(
'No part of the dna spec could be represented as a Vizier parameter.')
return search_space

def get_scale_type(scale: Optional[str]) -> Optional[vz.ScaleType]:
"""Returns scale type based on scale string."""
if scale in [None, 'linear']:
return vz.ScaleType.LINEAR
elif scale == 'log':
return vz.ScaleType.LOG
elif scale == 'rlog':
return vz.ScaleType.REVERSE_LOG
else:
raise ValueError(f'Unsupported scale type: {scale!r}')

def get_pyglove_metadata(trial: vz.Trial) -> dict[str, Any]:
"""Extracts only the pyglove-related metadata into a simple dict."""
metadata = dict()
# NOTE(daiyip): This is to keep backward compatibility for Cloud NAS service,
# which might loads trials from studies created in the old NAS pipeline for
# transfer learning.
for key, value in trial.metadata.items():
if key in constants.TRIAL_METADATA_KEYS:
metadata[key] = pg.from_json_str(value)
for key, value in trial.metadata.ns(constants.METADATA_NAMESPACE).items():
if key not in constants.TRIAL_METADATA_KEYS and value is not None:
metadata[key] = pg.from_json_str(value)
return metadata

def get_pyglove_study_metadata(problem: vz.ProblemStatement) -> pg.Dict:
"""Extracts only the pyglove-related metadata into a simple dict."""
metadata = pg.Dict()
pg_metadata = problem.metadata.ns(constants.METADATA_NAMESPACE)
for key, value in pg_metadata.items():
if key not in constants.STUDY_METADATA_KEYS and value is not None:
metadata[key] = pg.from_json_str(value)
return metadata

def restore_dna_spec(json_str_compressed: str) -> pg.DNASpec:
"""Restores DNASpec from compressed JSON str."""
return pg.from_json(
json.loads(lzma.decompress(base64.b64decode(json_str_compressed)))
)

def _parse_namespace_from_key(
encoded_key: str, default_ns: vz.Namespace
) -> tuple[vz.Namespace, str]:
"""From ':ns:key' to (ns, key)."""
ns_and_key = tuple(vz.Namespace.decode(encoded_key))
if not ns_and_key:
raise ValueError(
f'String did not parse into namespace and key: {encoded_key}'
)
elif len(ns_and_key) == 1:
return (default_ns, ns_and_key[-1])
else:
return (vz.Namespace(ns_and_key[:-1]), ns_and_key[-1])

def init(
study_prefix: Optional[str] = None,
vizier_endpoint: Optional[str] = None,
pythia_port: Optional[int] = None,
) -> None:
"""Init OSS Vizier backend.
Args:
study_prefix: An optional string that will be used as the prefix for the
study names created by `pg.sample` throughout the application. This allows
users to change the study names across multiple runs of the same binary
through this single venue, instead of modifying the `name` argument of
every `pg.sample` invocation.
vizier_endpoint: An optional string in format of <hostname>:<port>, as the
Vizier service address to connect to. If None, an in-process Vizier
service will be created for local tuning scenarios.
pythia_port: An optional port used for hosting the Pythia service. If None,
the port will be automatically picked.
"""
_services.use_vizier_service(vizier_endpoint)
_services.set_pythia_port(pythia_port)
backend.VizierBackend.use_study_prefix(study_prefix)
pg.tuning.set_default_backend('oss_vizier')

def create_policy(
supporter: pythia.PolicySupporter,
problem_statement: vz.ProblemStatement,
algorithm: pg.geno.DNAGenerator,
early_stopping_policy: Optional[pg.tuning.EarlyStoppingPolicy] = None,
prior_trials: Optional[Sequence[vz.Trial]] = None,
) -> pythia.Policy:
"""Creates a Pythia policy that uses PyGlove algorithms."""
converter = converters.VizierConverter.from_problem(problem_statement)
# Bind the algorithm with the search space before usage.
algorithm.setup(converter.dna_spec)
# Warm up algorithm if prior trials are present.
if prior_trials:
def get_trial_history():
for trial in prior_trials:
tuner_trial = core.VizierTrial(converter, trial)
reward = tuner_trial.get_reward_for_feedback(
converter.metrics_to_optimize
)
yield (tuner_trial.dna, reward)
algorithm.recover(get_trial_history())
return TunerPolicy(supporter, converter, algorithm, early_stopping_policy)

def _cum_hypervolume_origin(points: np.ndarray,
vectors: np.ndarray) -> np.ndarray:
"""Returns a randomized approximation of the cumulative dominated hypervolume.
See Section 3, Lemma 5 of https://arxiv.org/pdf/2006.04655.pdf for a fuller
explanation of the technique. This assumes the reference point is the
origin.
NOTE: This returns an unnormalized hypervolume.
Args:
points: Any set of points with shape (num_points, dimension).
vectors: Set of vectors with shape (num_vectors, dimension).
Returns:
Approximated cumulative dominated hypervolume of points[:i].
Raises:
ValueError: Points and vectors do not match in dimension.
"""
if vectors.shape[1] != points.shape[1]:
raise ValueError(f'Vectors shape {vectors.shape} does not match the '
f'dimension (second entry) of points shape {points.shape}.')
num_points, dimension = points.shape
num_vectors = vectors.shape[0]
temp_points = np.broadcast_to(points[np.newaxis, :, :],
[num_vectors, num_points, dimension])
vectors = vectors.reshape([num_vectors, 1, dimension])
# Here, ratios[i][j][k] is the kth coordinate of the jth point / ith vector.
# Since points is (num_vectors, num_points, dimension) and vectors is
# (num_vectors, 1, dimension), note that ratios is
# (num_vectors, num_points, dimension).
ratios = temp_points / vectors
# These calculations are from Lemma 5 in above cited paper (dimension axis).
coordinate_min_ratio = np.min(ratios, axis=2)
# Maximizing across all points (num_points axis).
point_max_ratio = np.maximum.accumulate(coordinate_min_ratio, axis=1)
# Averaging across the vector axis.
return np.mean(point_max_ratio**dimension, axis=0)
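# Illustrative sketch (added example): with a single direction vector the
# estimate is just the running maximum over points of
# (min_k point_k / vector_k) ** dimension; here both prefixes evaluate to 1.0.
def _example_cum_hypervolume_usage() -> None:
  points = np.array([[2.0, 1.0], [1.0, 3.0]])
  vectors = np.array([[1.0, 1.0]])
  np.testing.assert_allclose(
      _cum_hypervolume_origin(points, vectors), [1.0, 1.0])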
def _assign_value(
metadatum: key_value_pb2.KeyValue, value: Union[str, any_pb2.Any, Message]
) -> None:
"""Assigns value to $metadatum."""
if isinstance(value, str):
metadatum.ClearField('proto')
metadatum.value = value
elif isinstance(value, any_pb2.Any):
metadatum.ClearField('value')
metadatum.proto.CopyFrom(value)
else:
metadatum.ClearField('value')
metadatum.proto.Pack(value)

def assign(
container: Union[study_pb2.StudySpec, study_pb2.Trial],
*,
key: str,
ns: str,
value: Union[str, any_pb2.Any, Message],
mode: Literal['insert_or_assign', 'insert_or_error', 'insert'] = 'insert',
) -> Tuple[key_value_pb2.KeyValue, bool]:
"""Insert and/or assign (key, value) to container.metadata.
Args:
container: container.metadata must be repeated KeyValue (protobuf) field.
key:
ns: A namespace for the key (defaults to '', which is the user's namespace).
value: Behavior depends on the type. `str` is copied to KeyValue.value
`any_pb2.Any` is copied to KeyValue.proto Other types are packed to
any_pb2.Any proto, which is then copied to KeyValue.proto.
mode: `insert_or_assign` overrides the value if (ns, key)-pair already
exists and `insert_or_error` raises ValueError if duplicate (ns, key)-pair
exists. `insert` blindly inserts. This is fastest and should be used if
the data source can be trusted.
Returns:
(proto, inserted) where
proto is the protobuf that was just inserted into the $container, and
inserted is True if the proto was newly inserted, False if it was replaced.
"""
inserted = True
# Find existing metadatum, unless in `insert` mode.
existing_metadatum = None
if mode in ('insert_or_assign', 'insert_or_error'):
for metadatum in container.metadata:
if metadatum.key == key and metadatum.ns == ns:
inserted = False
if mode == 'insert_or_error':
raise ValueError(
f'Duplicate (ns, key) pair: ({metadatum.ns}, {metadatum.key})'
)
existing_metadatum = metadatum
break
# If the metadatum does not exist, then add the (ns, key) pair.
metadatum = existing_metadatum or container.metadata.add(key=key, ns=ns)
_assign_value(metadatum, value)
return metadatum, inserted |
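A minimal usage sketch of assign() and its mode semantics, assuming the same proto imports as the surrounding code (keys and values are illustrative):
spec = study_pb2.StudySpec()
kv, inserted = assign(spec, key='owner', ns='', value='alice', mode='insert')
# inserted is True and kv.value == 'alice'.
kv, inserted = assign(spec, key='owner', ns='', value='bob', mode='insert_or_assign')
# inserted is False; the existing metadatum is overwritten and len(spec.metadata) == 1.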
Returns the metadata value associated with key, or None.
Args:
container: A Trial or a StudySpec in protobuf form.
key: The key of a KeyValue protobuf.
ns: A namespace for the key (defaults to '', which is the user's namespace). | def get(
container: Union[study_pb2.StudySpec, study_pb2.Trial], *, key: str, ns: str
) -> Optional[str]:
"""Returns the metadata value associated with key, or None.
Args:
container: A Trial or a StudySpec in protobuf form.
key: The key of a KeyValue protobuf.
ns: A namespace for the key (defaults to '', which is the user's namespace).
"""
for kv in container.metadata:
if kv.key == key and kv.ns == ns:
if not kv.HasField('proto'):
return kv.value
return None |
Unpacks the proto metadata into message.
Args:
container: (const) StudySpec or Trial to search the metadata from.
key: (const) Lookup key of the metadata.
ns: A namespace for the key (defaults to '', which is the user's namespace).
cls: Pass in a proto ***class***, not a proto object.
Returns:
Proto message, if the value associated with the key exists and
can be parsed into proto; None otherwise. | def get_proto(
container: Union[study_pb2.StudySpec, study_pb2.Trial],
*,
key: str,
ns: str,
cls: Type[T],
) -> Optional[T]:
"""Unpacks the proto metadata into message.
Args:
container: (const) StudySpec or Trial to search the metadata from.
key: (const) Lookup key of the metadata.
ns: A namespace for the key (defaults to '', which is the user's namespace).
cls: Pass in a proto ***class***, not a proto object.
Returns:
Proto message, if the value associated with the key exists and
can be parsed into proto; None otherwise.
"""
for kv in container.metadata:
if kv.key == key and kv.ns == ns:
if kv.HasField('proto'):
message = cls()
success = kv.proto.Unpack(message)
return message if success else None
return None |
Convert $metadata to a list of KeyValue protobufs. | def make_key_value_list(
metadata: common.Metadata,
) -> list[key_value_pb2.KeyValue]:
"""Convert $metadata to a list of KeyValue protobufs."""
result = []
for ns, k, v in metadata.all_items():
item = key_value_pb2.KeyValue(key=k, ns=ns.encode())
_assign_value(item, v)
result.append(item)
return result |
Converts a list of KeyValue protos into a Metadata object. | def from_key_value_list(
kv_s: Iterable[key_value_pb2.KeyValue],
) -> common.Metadata:
"""Converts a list of KeyValue protos into a Metadata object."""
metadata = common.Metadata()
for kv in kv_s:
metadata.abs_ns(common.Namespace.decode(kv.ns))[kv.key] = (
kv.proto if kv.HasField('proto') else kv.value
)
return metadata |
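A hedged roundtrip sketch using only the two converters above; it assumes the same imports as the surrounding code and that the empty namespace string decodes and re-encodes to itself:
kvs = [key_value_pb2.KeyValue(key='algorithm', ns='', value='random_search')]
md = from_key_value_list(kvs)
restored = make_key_value_list(md)
# restored[0].key == 'algorithm' and restored[0].value == 'random_search'.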
Convert a dictionary of Trial.id:Metadata to a list of UnitMetadataUpdate.
Args:
trial_metadata: Typically MetadataDelta.on_trials.
Returns:
a list of UnitMetadataUpdate objects. | def trial_metadata_to_update_list(
trial_metadata: dict[int, common.Metadata]
) -> list[vizier_service_pb2.UnitMetadataUpdate]:
"""Convert a dictionary of Trial.id:Metadata to a list of UnitMetadataUpdate.
Args:
trial_metadata: Typically MetadataDelta.on_trials.
Returns:
a list of UnitMetadataUpdate objects.
"""
result = []
for trial_id, md in trial_metadata.items():
for kv in make_key_value_list(md):
# TODO: Verify this implementation.
# Should str(trial_id) below be "resources.StudyResource.from_name(
# study_resource_name).trial_resource(trial_id=str(trial_id)).name"?
result.append(
vizier_service_pb2.UnitMetadataUpdate(
trial_id=str(trial_id), metadatum=kv
)
)
return result |
Convert `on_study` metadata to list of metadata update protos. | def study_metadata_to_update_list(
study_metadata: common.Metadata,
) -> list[vizier_service_pb2.UnitMetadataUpdate]:
"""Convert `on_study` metadata to list of metadata update protos."""
unit_metadata_updates = []
for ns, k, v in study_metadata.all_items():
unit_metadata_update = vizier_service_pb2.UnitMetadataUpdate()
metadatum = unit_metadata_update.metadatum
metadatum.key = k
metadatum.ns = ns.encode()
_assign_value(metadatum, v)
unit_metadata_updates.append(unit_metadata_update)
return unit_metadata_updates |
Create an UpdateMetadataRequest proto.
Args:
study_resource_name: Resource name of the Study to update.
delta: Metadata changes to apply to the Study and its Trials.
Returns:
An UpdateMetadataRequest proto carrying the metadata updates. | def to_request_proto(
study_resource_name: str, delta: trial.MetadataDelta
) -> vizier_service_pb2.UpdateMetadataRequest:
"""Create an UpdateMetadataRequest proto.
Args:
study_resource_name: Resource name of the Study to update.
delta: Metadata changes to apply to the Study and its Trials.
Returns:
An UpdateMetadataRequest proto carrying the metadata updates.
"""
request = vizier_service_pb2.UpdateMetadataRequest(name=study_resource_name)
# Study Metadata
request.delta.extend(study_metadata_to_update_list(delta.on_study))
# Trial metadata
request.delta.extend(trial_metadata_to_update_list(delta.on_trials))
return request |
Merges $new_metadata into a Study's existing metadata. | def merge_study_metadata(
study_spec: study_pb2.StudySpec,
new_metadata: Iterable[key_value_pb2.KeyValue],
) -> None:
"""Merges $new_metadata into a Study's existing metadata."""
metadata_dict: Dict[Tuple[str, str], key_value_pb2.KeyValue] = {}
for kv in study_spec.metadata:
metadata_dict[(kv.ns, kv.key)] = kv
for kv in new_metadata:
metadata_dict[(kv.ns, kv.key)] = kv
study_spec.ClearField('metadata')
study_spec.metadata.extend(
sorted(metadata_dict.values(), key=lambda kv: (kv.ns, kv.key))
) |
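A small sketch of the merge semantics: on an (ns, key) collision the new value wins, and the result is re-sorted by (ns, key). Keys and values are illustrative:
spec = study_pb2.StudySpec(
    metadata=[key_value_pb2.KeyValue(ns='', key='a', value='old')])
merge_study_metadata(spec, [
    key_value_pb2.KeyValue(ns='', key='a', value='new'),
    key_value_pb2.KeyValue(ns='', key='b', value='other'),
])
# spec.metadata now holds ('', 'a') -> 'new' and ('', 'b') -> 'other', sorted.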
Merges $new_metadata into a Trial's existing metadata.
Args:
trial_proto: A representation of a Trial; this will be modified.
new_metadata: Metadata that will add or update metadata in the Trial.
NOTE: the metadata updates in $new_metadata should have the same ID as
$trial_proto. | def merge_trial_metadata(
trial_proto: study_pb2.Trial,
new_metadata: Iterable[vizier_service_pb2.UnitMetadataUpdate],
) -> None:
"""Merges $new_metadata into a Trial's existing metadata.
Args:
trial_proto: A representation of a Trial; this will be modified.
new_metadata: Metadata that will add or update metadata in the Trial.
NOTE: the metadata updates in $new_metadata should have the same ID as
$trial_proto.
"""
metadata_dict: Dict[Tuple[str, str], key_value_pb2.KeyValue] = {}
for kv in trial_proto.metadata:
metadata_dict[(kv.ns, kv.key)] = kv
for md_update in new_metadata:
if md_update.trial_id == trial_proto.id:
metadata_dict[(md_update.metadatum.ns, md_update.metadatum.key)] = (
md_update.metadatum
)
else:
logging.warning(
'Metadata associated with wrong trial: %s instead of %s',
md_update.trial_id,
trial_proto.id,
)
trial_proto.ClearField('metadata')
trial_proto.metadata.extend(
sorted(metadata_dict.values(), key=lambda kv: (kv.ns, kv.key))
) |
from_proto conversion for Trial statuses. | def _to_pyvizier_trial_status(
proto_state: study_pb2.Trial.State,
) -> trial.TrialStatus:
"""from_proto conversion for Trial statuses."""
if proto_state == study_pb2.Trial.State.REQUESTED:
return trial.TrialStatus.REQUESTED
elif proto_state == study_pb2.Trial.State.ACTIVE:
return trial.TrialStatus.ACTIVE
elif proto_state == study_pb2.Trial.State.STOPPING:
return trial.TrialStatus.STOPPING
elif proto_state == study_pb2.Trial.State.SUCCEEDED:
return trial.TrialStatus.COMPLETED
elif proto_state == study_pb2.Trial.State.INFEASIBLE:
return trial.TrialStatus.COMPLETED
else:
return trial.TrialStatus.UNKNOWN |
to_proto conversion for Trial states. | def _from_pyvizier_trial_status(
status: trial.TrialStatus, infeasible: bool
) -> study_pb2.Trial.State:
"""to_proto conversion for Trial states."""
if status == trial.TrialStatus.REQUESTED:
return study_pb2.Trial.State.REQUESTED
elif status == trial.TrialStatus.ACTIVE:
return study_pb2.Trial.State.ACTIVE
elif status == trial.TrialStatus.STOPPING:
return study_pb2.Trial.State.STOPPING
elif status == trial.TrialStatus.COMPLETED:
if infeasible:
return study_pb2.Trial.State.INFEASIBLE
else:
return study_pb2.Trial.State.SUCCEEDED
else:
return study_pb2.Trial.State.STATE_UNSPECIFIED |
Parses an encoded namespace string into a namespace tuple. | def _parse(arg: str) -> Tuple[str, ...]:
"""Parses an encoded namespace string into a namespace tuple."""
# The tricky part here is that ''.split(':') has a length of 1 (it yields ['']),
# so splitting can never generate a zero-length tuple; we handle that corner
# case manually.
if not arg:
return ()
# And, then, once we've handled the case of _parse(''), we note that all the
# other encoded strings begin with a colon. It thus contains no information
# and we can remove it.
# TODO: Once we're on Python 3.9, use: arg = arg.removeprefix(':')
if arg.startswith(':'):
arg = arg[1:]
# The rest of the algorithm is that we split on all colons, both
# escaped and unescaped. Then, we walk through the list of fragments and
# join back together the colons that were preceded by an escape character,
# dropping the escape character as we go.
fragments = arg.split(':')
output = []
join = False
for frag in fragments:
if join and frag and frag[-1] == '\\':
output[-1] += ':' + frag[:-1]
join = True
elif join: # Doesn't end in an escape character.
output[-1] += ':' + frag
join = False
elif frag and frag[-1] == '\\': # Don't join to previous.
output.append(frag[:-1])
join = True
else: # Don't join to previous and doesn't end in an escape.
output.append(frag)
join = False
return tuple(output) |
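A few worked inputs under the escaping scheme implemented above (':' separates components; a backslash-escaped colon is a literal colon inside a component):
assert _parse('') == ()
assert _parse(':a:b') == ('a', 'b')
assert _parse(':a\\:b') == ('a:b',)
assert _parse(':a\\:b:c') == ('a:b', 'c')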
Validates the bounds. | def _validate_bounds(bounds: Union[Tuple[int, int], Tuple[float, float]]):
"""Validates the bounds."""
if len(bounds) != 2:
raise ValueError(f'Bounds must have length 2. Given: {bounds}')
lower = bounds[0]
upper = bounds[1]
if not all([math.isfinite(v) for v in (lower, upper)]):
raise ValueError(
f'Both "lower" and "upper" must be finite. Given: ({lower}, {upper})'
)
if lower > upper:
raise ValueError(
f'Lower cannot be greater than upper: given lower={lower} upper={upper}'
) |
Validates and converts feasible values to floats. | def _get_feasible_points_and_bounds(
feasible_values: Sequence[float],
) -> Tuple[List[float], Union[Tuple[int, int], Tuple[float, float]]]:
"""Validates and converts feasible values to floats."""
if not all([math.isfinite(p) for p in feasible_values]):
raise ValueError(
f'Feasible values must all be finite. Given: {feasible_values}'
)
feasible_points = list(sorted(feasible_values))
bounds = (feasible_points[0], feasible_points[-1])
return feasible_points, bounds |
Returns the categories. | def _get_categories(categories: Sequence[str]) -> List[str]:
"""Returns the categories."""
return sorted(list(categories)) |
Validates and converts the default_value to the right type. | def _get_default_value(
param_type: ParameterType, default_value: Union[float, int, str]
) -> Union[float, int, str]:
"""Validates and converts the default_value to the right type."""
if param_type in (ParameterType.DOUBLE, ParameterType.DISCRETE) and (
isinstance(default_value, float) or isinstance(default_value, int)
):
return float(default_value)
elif param_type == ParameterType.INTEGER and (
isinstance(default_value, float) or isinstance(default_value, int)
):
if isinstance(default_value, int):
return default_value
else:
# Check if the float rounds nicely.
default_int_value = round(default_value)
if not math.isclose(default_value, default_int_value):
raise ValueError(
'default_value for an INTEGER parameter should be an '
f'integer, got float: [{default_value}]'
)
return default_int_value
elif param_type == ParameterType.CATEGORICAL and isinstance(
default_value, str
):
return default_value
elif param_type == ParameterType.CUSTOM:
return default_value
raise ValueError(
'default_value has an incorrect type. '
f'ParameterType has type {param_type.name}, '
f'but default_value has type {type(default_value)}'
) |
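Examples of the coercions above, relying only on the logic shown:
assert _get_default_value(ParameterType.DOUBLE, 3) == 3.0
assert _get_default_value(ParameterType.INTEGER, 4.0) == 4
assert _get_default_value(ParameterType.CATEGORICAL, 'relu') == 'relu'
# _get_default_value(ParameterType.INTEGER, 4.2) raises ValueError.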
Converter for initializing timestamps in Trial class. | def _to_local_time(
dt: Optional[datetime.datetime]) -> Optional[datetime.datetime]:
"""Converter for initializing timestamps in Trial class."""
return dt.astimezone() if dt else None |
Distributes tuning via Ray datasets API for MapReduce purposes.
NOTE: No actual datasets are processed; the Ray Datasets API is simply used
to distribute the MapReduce-style work.
Args:
run_tune_args_list: List of Tuples that are to be passed into run_tune.
run_tune: Callable that accepts args from previous list.
Returns:
List of results. | def run_tune_distributed(
run_tune_args_list: List[Tuple[Any]],
run_tune: Callable[[Any], tune.result_grid.ResultGrid],
) -> List[tune.result_grid.ResultGrid]:
"""Distributes tuning via Ray datasets API for MapReduce purposes.
NOTE: No actual datasets are processed; the Ray Datasets API is simply used
to distribute the MapReduce-style work.
Args:
run_tune_args_list: List of Tuples that are to be passed into run_tune.
run_tune: Callable that accepts args from previous list.
Returns:
List of results.
"""
ds = data.from_items([{'args_tuple': args} for args in run_tune_args_list])
ds = ds.map(lambda x: {'result': run_tune(*x['args_tuple'])})
return ds.take_all() |
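A hedged sketch of fanning out several studies, using run_tune_bbob (defined below) as the per-item callable; the argument tuples are positional and purely illustrative, and a running Ray cluster is assumed:
args_list = [('Sphere', 4, None), ('Rastrigin', 4, None), ('Sphere', 8, None)]
all_results = run_tune_distributed(args_list, run_tune_bbob)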
Runs Ray Tuners for BBOB problems.
See https://docs.ray.io/en/latest/tune/key-concepts.html
For more information on Tune and Run configs, see
https://docs.ray.io/en/latest/ray-air/tuner.html
Args:
function_name: BBOB function name.
dimension: Dimension of BBOB function.
shift: Shift of BBOB function.
tune_config: Ray Tune Config.
run_config: Ray Run Config.
Returns:
A Ray Tune ResultGrid containing the tuning results. | def run_tune_bbob(
function_name: str,
dimension: int,
shift: Optional[np.ndarray] = None,
tune_config: Optional[tune.TuneConfig] = None,
run_config: Optional[air.RunConfig] = None,
) -> tune.result_grid.ResultGrid:
"""Runs Ray Tuners for BBOB problems.
See https://docs.ray.io/en/latest/tune/key-concepts.html
For more information on Tune and Run configs, see
https://docs.ray.io/en/latest/ray-air/tuner.html
Args:
function_name: BBOB function name.
dimension: Dimension of BBOB function.
shift: Shift of BBOB function.
tune_config: Ray Tune Config.
run_config: Ray Run Config.
Returns:
A Ray Tune ResultGrid containing the tuning results.
"""
experimenter_factory = experimenters.BBOBExperimenterFactory(
name=function_name, dim=dimension
)
if shift is not None:
experimenter_factory = experimenters.SingleObjectiveExperimenterFactory(
base_factory=experimenter_factory, shift=shift
)
return run_tune_from_factory(experimenter_factory, tune_config, run_config) |
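A minimal usage sketch, assuming a Ray runtime is available; the function name, dimension, shift, and sample count are arbitrary choices:
import numpy as np
from ray import tune

results = run_tune_bbob(
    function_name='Sphere',
    dimension=4,
    shift=np.full(4, 0.5),
    tune_config=tune.TuneConfig(num_samples=10),
)
best = results.get_best_result()  # standard Ray ResultGrid accessor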
Runs Ray Tuners from an Experimenter Factory.
See https://docs.ray.io/en/latest/tune/key-concepts.html
For more information on Tune and Run configs, see
https://docs.ray.io/en/latest/ray-air/tuner.html
Args:
experimenter_factory: Experimenter Factory.
tune_config: Ray Tune Config.
run_config: Ray Run Config.
Returns:
A Ray Tune ResultGrid containing the tuning results. | def run_tune_from_factory(
experimenter_factory: experimenters.ExperimenterFactory,
tune_config: Optional[tune.TuneConfig] = None,
run_config: Optional[air.RunConfig] = None,
) -> tune.result_grid.ResultGrid:
"""Runs Ray Tuners from an Experimenter Factory.
See https://docs.ray.io/en/latest/tune/key-concepts.html
For more information on Tune and Run configs, see
https://docs.ray.io/en/latest/ray-air/tuner.html
Args:
experimenter_factory: Experimenter Factory.
tune_config: Ray Tune Config.
run_config: Ray Run Config.
Returns:
A Ray Tune ResultGrid containing the tuning results.
"""
experimenter = experimenter_factory()
problem = experimenter.problem_statement()
param_space = converters.SearchSpaceConverter.to_dict(problem.search_space)
objective = converters.ExperimenterConverter.to_callable(experimenter)
metric_info = problem.metric_information.item()
if tune_config is None:
tune_config = tune.TuneConfig()
tune_config.metric = metric_info.name
if metric_info.goal == vz.ObjectiveMetricGoal.MINIMIZE:
tune_config.mode = 'min'
else:
tune_config.mode = 'max'
def objective_fn(config: vz.ParameterDict) -> None:
# Config maps parameter names to values and is auto-populated for each
# Trial. Evaluation is static for BBOB, so we simply loop.
for _ in range(tune_config.num_samples):
result_dict = objective(config)
session.report(result_dict)
tuner = tune.Tuner(
objective_fn,
param_space=param_space,
run_config=run_config,
tune_config=tune_config,
)
return tuner.fit() |
Converts custom exception into correct context error code.
The rules for gRPC are:
1) In the remote case (servicer wrapped into a server), the context is
automatically generated by gRPC. Calling `context.set_code()` will
automatically trigger an `_InactiveRpcError` on the client side, which can
collect the code and details from said RPC error.
2) In the local case (e.g. when using a `VizierServicer` only), contexts are
not used and set to None. In order to imitate the behavior of 1), we instead
use `LocalRpcError`, which acts both as a `ServicerContext` for setting codes
and details, AND as an `_InactiveRpcError` to be raised on the client side.
Args:
e: Exception to be wrapped.
context: For collecting error code and details. Set to None in the local
case.
Raises:
LocalRpcError: If in the local case (when context is None). | def handle_exception(
e: Exception, context: Optional[grpc.ServicerContext] = None
) -> None:
"""Converts custom exception into correct context error code.
The rules for gRPC are:
1) In the remote case (servicer wrapped into a server), the context is
automatically generated by gRPC. Calling `context.set_code()` will
automatically trigger an `_InactiveRpcError` on the client side, which can
collect the code and details from said RPC error.
2) In the local case (e.g. when using a `VizierServicer` only), contexts are
not used and set to None. In order to imitate the behavior of 1), we instead
use `LocalRpcError`, which acts both as a `ServicerContext` for setting codes
and details, AND as an `_InactiveRpcError` to be raised on the client side.
Args:
e: Exception to be wrapped.
context: For collecting error code and details. Set to None in the local
case.
Raises:
LocalRpcError: If in the local case (when context is None).
"""
if context is None:
context = LocalRpcError(e)
if isinstance(
e, (custom_errors.ImmutableStudyError, custom_errors.ImmutableTrialError)
):
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
elif isinstance(e, custom_errors.NotFoundError):
context.set_code(grpc.StatusCode.NOT_FOUND)
elif isinstance(e, custom_errors.AlreadyExistsError):
context.set_code(grpc.StatusCode.ALREADY_EXISTS)
else:
context.set_code(grpc.StatusCode.UNKNOWN)
context.set_details(str(e))
if isinstance(context, LocalRpcError):
raise context |
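A sketch of the local case described above: with context=None the helper raises the LocalRpcError itself, so a caller can catch it and inspect the mapped status code (assuming LocalRpcError exposes grpc.Call-style code()/details() accessors, as its description suggests):
try:
    handle_exception(custom_errors.NotFoundError('no such study'))
except Exception as e:  # LocalRpcError in the local case.
    assert e.code() == grpc.StatusCode.NOT_FOUND
    print(e.details())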
Creates GRPC channel. | def _create_channel(
endpoint: str, timeout: Optional[float] = None
) -> grpc.Channel:
"""Creates GRPC channel."""
logging.info('Creating insecure channel to %s.', endpoint)
channel = grpc.insecure_channel(endpoint)
grpc.channel_ready_future(channel).result(timeout=timeout)
logging.info('Created channel to %s.', endpoint)
return channel |
Creates the GRPC stub.
This method uses LRU cache so we create a single stub per endpoint (which is
effectively one per binary). Stub and channel are both thread-safe and can
take a while to create. The LRU cache makes binaries run faster, especially
for unit tests.
Args:
endpoint: Pythia server endpoint.
timeout: Timeout in seconds. If None, no timeout will be used.
Returns:
Pythia Server stub at endpoint. | def create_pythia_server_stub(
endpoint: str, timeout: Optional[float] = 10.0
) -> pythia_service_pb2_grpc.PythiaServiceStub:
"""Creates the GRPC stub.
This method uses LRU cache so we create a single stub per endpoint (which is
effectively one per binary). Stub and channel are both thread-safe and can
take a while to create. The LRU cache makes binaries run faster, especially
for unit tests.
Args:
endpoint: Pythia server endpoint.
timeout: Timeout in seconds. If None, no timeout will be used.
Returns:
Pythia Server stub at endpoint.
"""
return pythia_service_pb2_grpc.PythiaServiceStub(
_create_channel(endpoint, timeout)
) |
Creates the GRPC stub.
This method uses LRU cache so we create a single stub per endpoint (which is
effectively one per binary). Stub and channel are both thread-safe and can
take a while to create. The LRU cache makes binaries run faster, especially
for unit tests.
Args:
endpoint: Vizier server endpoint.
timeout: Timeout in seconds. If None, no timeout will be used.
Returns:
Vizier Server stub at endpoint. | def create_vizier_server_stub(
endpoint: str, timeout: Optional[float] = 10.0
) -> vizier_service_pb2_grpc.VizierServiceStub:
"""Creates the GRPC stub.
This method uses LRU cache so we create a single stub per endpoint (which is
effectively one per binary). Stub and channel are both thread-safe and can
take a while to create. The LRU cache makes binaries run faster, especially
for unit tests.
Args:
endpoint: Vizier server endpoint.
timeout: Timeout in seconds. If None, no timeout will be used.
Returns:
Vizier Server stub at endpoint.
"""
return vizier_service_pb2_grpc.VizierServiceStub(
_create_channel(endpoint, timeout)
) |
Factory method for creating or loading a VizierClient.
This will either create or load the specified study, given
(owner_id, study_id, study_config). It will create it if it doesn't
already exist, and load it if someone has already created it.
Note that once a study is created, you CANNOT modify it with this function.
This function is designed for use in a distributed system, where many jobs
initially call create_or_load_study() nearly simultaneously with the same
`study_config`. In that situation, all clients will end up pointing nicely
to the same study.
Args:
owner_id: An owner id.
client_id: ID for the VizierClient. See class for notes.
study_id: Each study is uniquely identified by the tuple (owner_id,
study_id).
study_config: Study configuration for Vizier service. If not supplied, it
will be assumed that the study with the given study_id already exists,
and will try to retrieve that study.
Returns:
A VizierClient object with the specified study created or loaded.
Raises:
RuntimeError: Indicates that study_config is supplied but CreateStudy
failed and GetStudy did not succeed after
constants.MAX_NUM_TRIES_FOR_STUDIES tries.
ValueError: Indicates that study_config is not supplied and the study
with the given study_id does not exist. | def create_or_load_study(
owner_id: str,
client_id: str,
study_id: str,
study_config: pyvizier.StudyConfig,
) -> VizierClient:
"""Factory method for creating or loading a VizierClient.
This will either create or load the specified study, given
(owner_id, study_id, study_config). It will create it if it doesn't
already exist, and load it if someone has already created it.
Note that once a study is created, you CANNOT modify it with this function.
This function is designed for use in a distributed system, where many jobs
initially call create_or_load_study() nearly simultaneously with the same
`study_config`. In that situation, all clients will end up pointing nicely
to the same study.
Args:
owner_id: An owner id.
client_id: ID for the VizierClient. See class for notes.
study_id: Each study is uniquely identified by the tuple (owner_id,
study_id).
study_config: Study configuration for Vizier service. If not supplied, it
will be assumed that the study with the given study_id already exists,
and will try to retrieve that study.
Returns:
A VizierClient object with the specified study created or loaded.
Raises:
RuntimeError: Indicates that study_config is supplied but CreateStudy
failed and GetStudy did not succeed after
constants.MAX_NUM_TRIES_FOR_STUDIES tries.
ValueError: Indicates that study_config is not supplied and the study
with the given study_id does not exist.
"""
vizier_stub = create_vizier_servicer_or_stub()
study = study_pb2.Study(
display_name=study_id, study_spec=study_config.to_proto()
)
request = vizier_service_pb2.CreateStudyRequest(
parent=resources.OwnerResource(owner_id).name, study=study
)
# The response study contains a service assigned `name`, and may have been
# created by this RPC or a previous RPC from another client.
study = vizier_stub.CreateStudy(request)
return VizierClient(study.name, client_id, vizier_stub) |
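A hedged usage sketch; the study configuration setup is elided and all identifiers are placeholders:
study_config = pyvizier.StudyConfig()  # Search space and metrics omitted for brevity.
client = create_or_load_study(
    owner_id='my_username',
    client_id='worker_0',
    study_id='example_study',
    study_config=study_config,
)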
Computes a delay to the next attempt to poll the Vizier service.
This does bounded exponential backoff, starting with $time_scale.
If $time_scale == 0, it starts with a small time interval, less than
1 second.
Args:
num_attempts: The number of times we have polled and found that the desired
result was not yet available.
time_scale: The shortest polling interval, in seconds, or zero. Zero is
treated as a small interval, less than 1 second.
Returns:
A recommended delay interval, in seconds. | def PollingDelay(num_attempts: int, time_scale: float) -> datetime.timedelta: # pylint:disable=invalid-name
"""Computes a delay to the next attempt to poll the Vizier service.
This does bounded exponential backoff, starting with $time_scale.
If $time_scale == 0, it starts with a small time interval, less than
1 second.
Args:
num_attempts: The number of times we have polled and found that the desired
result was not yet available.
time_scale: The shortest polling interval, in seconds, or zero. Zero is
treated as a small interval, less than 1 second.
Returns:
A recommended delay interval, in seconds.
"""
small_interval = 0.3 # Seconds
interval = max(time_scale, small_interval) * 1.41 ** min(num_attempts, 9)
return datetime.timedelta(seconds=interval) |
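For intuition, with time_scale=0 the schedule is roughly 0.30 s, 0.42 s, 0.60 s, ..., capping near 6.6 s after the ninth attempt (0.3 * 1.41**9). A typical polling loop looks like the sketch below; the readiness check is hypothetical:
import time

num_attempts = 0
while not operation_done():  # Hypothetical readiness check.
    time.sleep(PollingDelay(num_attempts, time_scale=0.0).total_seconds())
    num_attempts += 1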
Generates arbitrary trials. | def generate_trials(trial_id_list: Sequence[int],
owner_id: str = 'my_username',
study_id: str = '1234',
**trial_kwargs) -> List[study_pb2.Trial]:
"""Generates arbitrary trials."""
trials = []
for trial_id in trial_id_list:
trial = study_pb2.Trial(
name=resources.TrialResource(owner_id, study_id, trial_id).name,
id=str(trial_id),
**trial_kwargs)
trials.append(trial)
return trials |
Generates a trial for each possible trial state. | def generate_all_states_trials(start_trial_index: int,
owner_id: str = 'my_username',
study_id: str = '1234',
**trial_kwargs) -> List[study_pb2.Trial]:
"""Generates a trial for each possible trial state."""
trials = []
for i, state in enumerate(study_pb2.Trial.State.keys()):
trial_id = start_trial_index + i
trial = study_pb2.Trial(
name=resources.TrialResource(owner_id, study_id, trial_id).name,
id=str(trial_id),
state=state,
**trial_kwargs)
trials.append(trial)
return trials |
Generates arbitrary suggestion operations. | def generate_suggestion_operations(
operation_numbers: Sequence[int],
owner_id: str = 'my_username',
study_id: str = 'cifar10',
client_id: str = 'client0',
**operation_kwargs) -> List[operations_pb2.Operation]:
"""Generates arbitrary suggestion operations."""
operations = []
for operation_number in operation_numbers:
operation = operations_pb2.Operation(
name=resources.SuggestionOperationResource(owner_id, study_id,
client_id,
operation_number).name,
**operation_kwargs)
operations.append(operation)
return operations |
Generates arbitrary early stopping operations. | def generate_early_stopping_operations(
trial_id_list: Sequence[int],
owner_id: str = 'my_username',
study_id: str = '1234',
**operation_kwargs) -> List[vizier_oss_pb2.EarlyStoppingOperation]:
"""Generates arbitrary early stopping operations."""
operations = []
for trial_id in trial_id_list:
operation = vizier_oss_pb2.EarlyStoppingOperation(
name=resources.EarlyStoppingOperationResource(owner_id, study_id,
trial_id).name,
**operation_kwargs)
operations.append(operation)
return operations |
All possible primitive parameter specs for testing. | def generate_all_four_parameter_specs(**study_spec_kwargs
) -> study_pb2.StudySpec:
"""All possible primitive parameter specs for testing."""
double_value_spec = study_pb2.StudySpec.ParameterSpec.DoubleValueSpec(
min_value=-1.0, max_value=1.0)
double_parameter_spec = study_pb2.StudySpec.ParameterSpec(
parameter_id='learning_rate', double_value_spec=double_value_spec)
integer_value_spec = study_pb2.StudySpec.ParameterSpec.IntegerValueSpec(
min_value=1, max_value=10)
integer_parameter_spec = study_pb2.StudySpec.ParameterSpec(
parameter_id='num_layers', integer_value_spec=integer_value_spec)
categorical_value_spec = (
study_pb2.StudySpec.ParameterSpec.CategoricalValueSpec(
values=['relu', 'sigmoid']
)
)
categorical_parameter_spec = study_pb2.StudySpec.ParameterSpec(
parameter_id='nonlinearity',
categorical_value_spec=categorical_value_spec)
discrete_value_spec = study_pb2.StudySpec.ParameterSpec.DiscreteValueSpec(
values=[1.0, 1.5, 3.0, 4.5])
discrete_parameter_spec = study_pb2.StudySpec.ParameterSpec(
parameter_id='discrete_unnamed', discrete_value_spec=discrete_value_spec)
return study_pb2.StudySpec(
parameters=[
double_parameter_spec, integer_parameter_spec,
categorical_parameter_spec, discrete_parameter_spec
],
**study_spec_kwargs) |