
Commit e94a78a

esantorella authored and facebook-github-bot committed
Remove qMultiObjectiveMaxValueEntropy acquisition function (#2800)
Summary:
Pull Request resolved: #2800

Context: This acquisition function
* Probably doesn't perform well generally, as per the literature
* Is likely not as good as `qLowerBoundMultiObjectiveMaxValueEntropySearch`
* Cannot be used with outcome transforms
* Uses deprecated functionality that is overdue to be reaped

This PR:
* Replaces `qMultiObjectiveMaxValueEntropy.__init__` with a `NotImplementedError` that recommends using `qLowerBoundMultiObjectiveMaxValueEntropySearch`
* Adds a comment recommending this be removed in 0.15.0

Reviewed By: sdaulton, saitcakmak

Differential Revision: D72332368

fbshipit-source-id: 2446b718e24c7637124b6b582a16400f8d15719a
1 parent b2e87f2 commit e94a78a
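For anyone migrating, a minimal sketch of the recommended replacement, assuming the documented constructor of `qLowerBoundMultiObjectiveMaxValueEntropySearch` (a `model` plus `hypercell_bounds` built with `compute_sample_box_decomposition`); the random `train_X`, `train_Y`, and `pareto_fronts` tensors below are placeholders for illustration only, not taken from this commit.

import torch
from botorch.acquisition.multi_objective.max_value_entropy_search import (
    qLowerBoundMultiObjectiveMaxValueEntropySearch,
)
from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition
from botorch.models.gp_regression import SingleTaskGP

# Toy two-objective model; in practice the GP would be fit to real data.
train_X = torch.rand(8, 2)
train_Y = torch.rand(8, 2)
model = SingleTaskGP(train_X, train_Y)

# `num_pareto_samples x num_pareto_points x m` sampled Pareto fronts; random
# placeholders stand in here for an actual Pareto-front sampler.
pareto_fronts = torch.rand(3, 4, 2)
hypercell_bounds = compute_sample_box_decomposition(pareto_fronts)

acqf = qLowerBoundMultiObjectiveMaxValueEntropySearch(
    model=model,
    hypercell_bounds=hypercell_bounds,
    estimation_type="LB",
)
values = acqf(torch.rand(5, 1, 2))  # evaluate on a `batch_shape x q=1 x d` candidate set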

5 files changed, +36 -295 lines

botorch/acquisition/max_value_entropy_search.py

+3 -4
@@ -328,6 +328,8 @@ def set_X_pending(self, X_pending: Tensor | None = None) -> None:
             self.model = init_model
         super().set_X_pending(X_pending)
 
+    # NOTE: This may not work with m > 1, and currently the only supported use
+    # cases are with m=1.
     def _compute_information_gain(
         self, X: Tensor, mean_M: Tensor, variance_M: Tensor, covar_mM: Tensor
     ) -> Tensor:
@@ -440,10 +442,7 @@ def _compute_information_gain(
         beta = cov / (h0.var(dim=dim) * h1.var(dim=dim)).sqrt()
         H1_hat = H1_bar - beta * (H0_bar - H0)
         ig = H0 - H1_hat  # batch_shape x num_fantasies x (m)
-        if self.posterior_max_values.ndim == 2:
-            permute_idcs = [-1, *range(ig.ndim - 1)]
-        else:
-            permute_idcs = [-2, *range(ig.ndim - 2), -1]
+        permute_idcs = [-1, *range(ig.ndim - 1)]
         ig = ig.permute(*permute_idcs)  # num_fantasies x batch_shape x (m)
         return ig
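As a side note on the simplified branch above, a tiny standalone illustration (not part of the diff) of what `permute_idcs = [-1, *range(ig.ndim - 1)]` does: it moves the trailing dimension of `ig` to the front; the shapes below are hypothetical.

import torch

# Hypothetical `ig` with batch dims (5, 4) and a trailing dim of 16 samples.
ig = torch.rand(5, 4, 16)
permute_idcs = [-1, *range(ig.ndim - 1)]
print(permute_idcs)                     # [-1, 0, 1]
print(ig.permute(*permute_idcs).shape)  # torch.Size([16, 5, 4])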

botorch/acquisition/multi_objective/max_value_entropy_search.py

+9 -142
@@ -7,19 +7,10 @@
 r"""
 Acquisition functions for max-value entropy search for multi-objective
 Bayesian optimization (MESMO).
-
-References
-
-.. [Belakaria2019]
-    S. Belakaria, A. Deshwal, J. R. Doppa. Max-value Entropy Search
-    for Multi-Objective Bayesian Optimization. Advances in Neural
-    Information Processing Systems, 32. 2019.
-
 """
 
 from __future__ import annotations
 
-from collections.abc import Callable
 from math import pi
 
 import torch
@@ -28,153 +19,29 @@
 from botorch.acquisition.multi_objective.joint_entropy_search import (
     LowerBoundMultiObjectiveEntropySearch,
 )
-from botorch.models.converter import (
-    batched_multi_output_to_single_output,
-    model_list_to_batched,
-)
 from botorch.models.model import Model
-from botorch.models.model_list_gp_regression import ModelListGP
 from botorch.posteriors.gpytorch import GPyTorchPosterior
-from botorch.sampling.base import MCSampler
-from botorch.sampling.normal import SobolQMCNormalSampler
 from botorch.utils.transforms import concatenate_pending_points, t_batch_mode_transform
 from torch import Tensor
 
 
+# Can be removed in version 0.15.0, or potentially sooner because the code has
+# already been raising deprecation warnings for a long time
 class qMultiObjectiveMaxValueEntropy(
     qMaxValueEntropy, MultiObjectiveMCAcquisitionFunction
 ):
     r"""The acquisition function for MESMO.
 
-    This acquisition function computes the mutual information of
-    Pareto frontier and a candidate point. See [Belakaria2019]_ for
-    a detailed discussion.
-
-    q > 1 is supported through cyclic optimization and fantasies.
-
-    Noisy observations are support by computing information gain with
-    observation noise as in Appendix C in [Takeno2020mfmves]_.
-
-    Note: this only supports maximization.
-
-    Attributes:
-        _default_sample_shape: The `sample_shape` for the default sampler.
-
-    Example:
-        >>> model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
-        >>> MESMO = qMultiObjectiveMaxValueEntropy(model, sample_pfs)
-        >>> mesmo = MESMO(test_X)
+    This is no longer available. We recommend
+    `qLowerBoundMultiObjectiveMaxValueEntropySearch` as a replacement.
     """
 
-    _default_sample_shape = torch.Size([128])
-
-    def __init__(
-        self,
-        model: Model,
-        sample_pareto_frontiers: Callable[[Model], Tensor],
-        num_fantasies: int = 16,
-        X_pending: Tensor | None = None,
-        sampler: MCSampler | None = None,
-    ) -> None:
-        r"""Multi-objective max-value entropy search acquisition function.
-
-        Args:
-            model: A fitted multi-output model.
-            sample_pareto_frontiers: A callable that takes a model and returns a
-                `num_samples x n' x m`-dim tensor of outcomes to use for constructing
-                `num_samples` sampled Pareto frontiers.
-            num_fantasies: Number of fantasies to generate. The higher this
-                number the more accurate the model (at the expense of model
-                complexity, wall time and memory). Ignored if `X_pending` is `None`.
-            X_pending: A `m x d`-dim Tensor of `m` design points that have been
-                submitted for function evaluation, but have not yet been evaluated.
-        """
-        MultiObjectiveMCAcquisitionFunction.__init__(self, model=model, sampler=sampler)
-
-        # Batch GP models (e.g. fantasized models) are not currently supported
-        if isinstance(model, ModelListGP):
-            train_X = model.models[0].train_inputs[0]
-        else:
-            train_X = model.train_inputs[0]
-        if train_X.ndim > 3:
-            raise NotImplementedError(
-                "Batch GP models (e.g. fantasized models) "
-                "are not yet supported by qMultiObjectiveMaxValueEntropy"
-            )
-        # convert to batched MO model
-        batched_mo_model = (
-            model_list_to_batched(model) if isinstance(model, ModelListGP) else model
-        )
-        self._init_model = batched_mo_model
-        self.fantasies_sampler = SobolQMCNormalSampler(
-            sample_shape=torch.Size([num_fantasies])
+    def __init__(self, *args, **kwargs) -> None:
+        """Multi-objective max-value entropy search acquisition function."""
+        raise NotImplementedError(
+            "qMultiObjectiveMaxValueEntropy is no longer available. We suggest "
+            "qLowerBoundMultiObjectiveMaxValueEntropySearch as a replacement."
         )
-        self.num_fantasies = num_fantasies
-        # weight is used in _compute_information_gain
-        self.maximize = True
-        self.weight = 1.0
-        self.sample_pareto_frontiers = sample_pareto_frontiers
-        # Set X_pending, register converted model and sample max values.
-        self.set_X_pending(X_pending)
-        # This avoids attribute errors in qMaxValueEntropy code.
-        self.posterior_transform = None
-
-    def set_X_pending(self, X_pending: Tensor | None = None) -> None:
-        r"""Set pending points.
-
-        Informs the acquisition function about pending design points,
-        fantasizes the model on the pending points and draws max-value samples
-        from the fantasized model posterior.
-
-        Args:
-            X_pending: `m x d` Tensor with `m` `d`-dim design points that have
-                been submitted for evaluation but have not yet been evaluated.
-        """
-        MultiObjectiveMCAcquisitionFunction.set_X_pending(self, X_pending=X_pending)
-        if X_pending is not None:
-            # fantasize the model
-            fantasy_model = self._init_model.fantasize(
-                X=X_pending,
-                sampler=self.fantasies_sampler,
-            )
-            self.mo_model = fantasy_model
-        else:
-            # This is mainly for setting the model to the original model
-            # after the sequential optimization at q > 1
-            self.mo_model = self._init_model
-        # convert model to batched single outcome model.
-        self.model = batched_multi_output_to_single_output(batch_mo_model=self.mo_model)
-        self._sample_max_values()
-
-    def _sample_max_values(self) -> None:
-        """Sample max values for MC approximation of the expectation in MES.
-
-        Sets self.posterior_max_values."""
-        with torch.no_grad():
-            # num_samples x (num_fantasies) x n_pareto_points x m
-            sampled_pfs = self.sample_pareto_frontiers(self.mo_model)
-            if sampled_pfs.ndim == 3:
-                # add fantasy dim
-                sampled_pfs = sampled_pfs.unsqueeze(-3)
-            # take component-wise max value
-            self.posterior_max_values = sampled_pfs.max(dim=-2).values
-
-    @t_batch_mode_transform(expected_q=1)
-    def forward(self, X: Tensor) -> Tensor:
-        r"""Compute max-value entropy at the design points `X`.
-
-        Args:
-            X: A `batch_shape x 1 x d`-dim Tensor of `batch_shape` t-batches
-                with `1` `d`-dim design points each.
-
-        Returns:
-            A `batch_shape`-dim Tensor of MVE values at the given design points `X`.
-        """
-        # `m` dim tensor of information gains
-        # unsqueeze X to add a batch-dim for the batched model
-        igs = qMaxValueEntropy.forward(self, X=X.unsqueeze(-3))
-        # sum over objectives
-        return igs.sum(dim=-1)
 
 
 class qLowerBoundMultiObjectiveMaxValueEntropySearch(
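With the stub `__init__` above, any attempt to construct the class now fails immediately, regardless of arguments; a minimal sketch of what callers will see:

from botorch.acquisition.multi_objective.max_value_entropy_search import (
    qMultiObjectiveMaxValueEntropy,
)

try:
    qMultiObjectiveMaxValueEntropy()
except NotImplementedError as err:
    # "qMultiObjectiveMaxValueEntropy is no longer available. We suggest ..."
    print(err)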

botorch/optim/utils/acquisition_utils.py

+5 -9
@@ -114,16 +114,12 @@ def get_X_baseline(acq_function: AcquisitionFunction) -> Tensor | None:
             raise BotorchError
     except (BotorchError, AttributeError):
         try:
-            # for entropy MOO methods
-            model = acq_function.mo_model
+            # some acquisition functions do not have a model attribute
+            # e.g. FixedFeatureAcquisitionFunction
+            model = acq_function.model
         except AttributeError:
-            try:
-                # some acquisition functions do not have a model attribute
-                # e.g. FixedFeatureAcquisitionFunction
-                model = acq_function.model
-            except AttributeError:
-                warn("Failed to extract X_baseline.", BotorchWarning)
-                return
+            warn("Failed to extract X_baseline.", BotorchWarning)
+            return
     try:
         # Make sure we get the original train inputs.
         m = model.models[0] if isinstance(model, ModelListGPyTorchModel) else model
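For context, a small usage sketch of the fallback that remains: when an acquisition function has no `X_baseline` attribute, `get_X_baseline` recovers the training inputs from `acq_function.model`. The toy `SingleTaskGP` and `ExpectedImprovement` setup is illustrative, not taken from this commit.

import torch
from botorch.acquisition.analytic import ExpectedImprovement
from botorch.models.gp_regression import SingleTaskGP
from botorch.optim.utils.acquisition_utils import get_X_baseline

train_X = torch.rand(6, 2)
train_Y = torch.rand(6, 1)
model = SingleTaskGP(train_X, train_Y)
acqf = ExpectedImprovement(model=model, best_f=train_Y.max())

# `ExpectedImprovement` has no `X_baseline`, so the helper falls back to
# `acqf.model` and returns the original training inputs.
X_baseline = get_X_baseline(acqf)
print(X_baseline.shape)  # torch.Size([6, 2])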

test/acquisition/multi_objective/test_max_value_entropy_search.py

+4 -127
@@ -5,17 +5,13 @@
 # LICENSE file in the root directory of this source tree.
 
 from itertools import product
-from unittest import mock
 
 import torch
-from botorch.acquisition.max_value_entropy_search import qMaxValueEntropy
 from botorch.acquisition.multi_objective.max_value_entropy_search import (
     qLowerBoundMultiObjectiveMaxValueEntropySearch,
     qMultiObjectiveMaxValueEntropy,
 )
 from botorch.acquisition.multi_objective.utils import compute_sample_box_decomposition
-from botorch.exceptions.errors import UnsupportedError
-from botorch.models.gp_regression import SingleTaskGP
 from botorch.models.model_list_gp_regression import ModelListGP
 from botorch.sampling.normal import SobolQMCNormalSampler
 from botorch.utils.test_helpers import get_model
@@ -33,130 +29,11 @@ def dummy_sample_pareto_frontiers(model):
     )
 
 
+# TODO: remove all references
 class TestMultiObjectiveMaxValueEntropy(BotorchTestCase):
-    def test_multi_objective_max_value_entropy(self):
-        for dtype, m in product((torch.float, torch.double), (2, 3)):
-            torch.manual_seed(7)
-            # test batched model
-            train_X = torch.rand(1, 1, 2, dtype=dtype, device=self.device)
-            train_Y = torch.rand(1, 1, m, dtype=dtype, device=self.device)
-            model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
-            with self.assertRaises(NotImplementedError):
-                qMultiObjectiveMaxValueEntropy(
-                    model=model, sample_pareto_frontiers=dummy_sample_pareto_frontiers
-                )
-            # test initialization
-            train_X = torch.rand(4, 2, dtype=dtype, device=self.device)
-            train_Y = torch.rand(4, m, dtype=dtype, device=self.device)
-            # Models with outcome transforms aren't supported.
-            model = SingleTaskGP(train_X, train_Y)
-            with self.assertRaisesRegex(
-                UnsupportedError,
-                "Conversion of models with outcome transforms is unsupported. "
-                "To fix this error, explicitly pass `outcome_transform=None`.",
-            ):
-                qMultiObjectiveMaxValueEntropy(
-                    model=ModelListGP(model, model),
-                    sample_pareto_frontiers=dummy_sample_pareto_frontiers,
-                )
-            # test batched MO model
-            model = SingleTaskGP(train_X, train_Y, outcome_transform=None)
-            mesmo = qMultiObjectiveMaxValueEntropy(
-                model=model, sample_pareto_frontiers=dummy_sample_pareto_frontiers
-            )
-            self.assertEqual(mesmo.num_fantasies, 16)
-            # Initialize the sampler.
-            dummy_post = model.posterior(train_X[:1])
-            mesmo.get_posterior_samples(dummy_post)
-            self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler)
-            self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128]))
-            self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler)
-            self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m]))
-            # test conversion to single-output model
-            self.assertIs(mesmo.mo_model, model)
-            self.assertEqual(mesmo.mo_model.num_outputs, m)
-            self.assertIsInstance(mesmo.model, SingleTaskGP)
-            self.assertEqual(mesmo.model.num_outputs, 1)
-            self.assertEqual(
-                mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape
-            )
-            # test ModelListGP
-            model = ModelListGP(
-                *[
-                    SingleTaskGP(train_X, train_Y[:, i : i + 1], outcome_transform=None)
-                    for i in range(m)
-                ]
-            )
-            mock_sample_pfs = mock.Mock()
-            mock_sample_pfs.return_value = dummy_sample_pareto_frontiers(model=model)
-            mesmo = qMultiObjectiveMaxValueEntropy(
-                model=model, sample_pareto_frontiers=mock_sample_pfs
-            )
-            self.assertEqual(mesmo.num_fantasies, 16)
-            # Initialize the sampler.
-            dummy_post = model.posterior(train_X[:1])
-            mesmo.get_posterior_samples(dummy_post)
-            self.assertIsInstance(mesmo.sampler, SobolQMCNormalSampler)
-            self.assertEqual(mesmo.sampler.sample_shape, torch.Size([128]))
-            self.assertIsInstance(mesmo.fantasies_sampler, SobolQMCNormalSampler)
-            self.assertEqual(mesmo.posterior_max_values.shape, torch.Size([3, 1, m]))
-            # test conversion to batched MO model
-            self.assertIsInstance(mesmo.mo_model, SingleTaskGP)
-            self.assertEqual(mesmo.mo_model.num_outputs, m)
-            self.assertIs(mesmo.mo_model, mesmo._init_model)
-            # test conversion to single-output model
-            self.assertIsInstance(mesmo.model, SingleTaskGP)
-            self.assertEqual(mesmo.model.num_outputs, 1)
-            self.assertEqual(
-                mesmo.model._aug_batch_shape, mesmo.model._input_batch_shape
-            )
-            # test that we call sample_pareto_frontiers with the multi-output model
-            mock_sample_pfs.assert_called_once_with(mesmo.mo_model)
-            # test basic evaluation
-            X = torch.rand(1, 2, device=self.device, dtype=dtype)
-            with torch.no_grad():
-                vals = mesmo(X)
-                igs = qMaxValueEntropy.forward(mesmo, X=X.view(1, 1, 1, 2))
-            self.assertEqual(vals.shape, torch.Size([1]))
-            self.assertTrue(torch.equal(vals, igs.sum(dim=-1)))
-
-            # test batched evaluation
-            X = torch.rand(4, 1, 2, device=self.device, dtype=dtype)
-            with torch.no_grad():
-                vals = mesmo(X)
-                igs = qMaxValueEntropy.forward(mesmo, X=X.view(4, 1, 1, 2))
-            self.assertEqual(vals.shape, torch.Size([4]))
-            self.assertTrue(torch.equal(vals, igs.sum(dim=-1)))
-
-            # test set X pending to None
-            mesmo.set_X_pending(None)
-            self.assertIs(mesmo.mo_model, mesmo._init_model)
-            fant_X = torch.cat(
-                [
-                    train_X.expand(16, 4, 2),
-                    torch.rand(16, 1, 2, device=self.device, dtype=dtype),
-                ],
-                dim=1,
-            )
-            fant_Y = torch.cat(
-                [
-                    train_Y.expand(16, 4, m),
-                    torch.rand(16, 1, m, device=self.device, dtype=dtype),
-                ],
-                dim=1,
-            )
-            fantasy_model = SingleTaskGP(fant_X, fant_Y, outcome_transform=None)
-
-            # test with X_pending is not None
-            with mock.patch.object(
-                SingleTaskGP, "fantasize", return_value=fantasy_model
-            ) as mock_fantasize:
-                qMultiObjectiveMaxValueEntropy(
-                    model,
-                    dummy_sample_pareto_frontiers,
-                    X_pending=torch.rand(1, 2, device=self.device, dtype=dtype),
-                )
-                mock_fantasize.assert_called_once()
+    def test_multi_objective_max_value_entropy(self) -> None:
+        with self.assertRaisesRegex(NotImplementedError, "no longer available"):
+            qMultiObjectiveMaxValueEntropy()
 
 
 class TestQLowerBoundMultiObjectiveMaxValueEntropySearch(BotorchTestCase):
