From 283e8309ca180da2d995bdf2e7d83114ef2d4007 Mon Sep 17 00:00:00 2001 From: Karlson Pfannschmidt Date: Wed, 19 Jul 2017 11:10:40 +0200 Subject: [PATCH 1/2] Implement A-optimal selection in LCB acquisition --- skopt/acquisition.py | 8 +++++++- skopt/tests/test_acquisition.py | 8 ++++++++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/skopt/acquisition.py b/skopt/acquisition.py index 69c827e..7e52da6 100644 --- a/skopt/acquisition.py +++ b/skopt/acquisition.py @@ -105,10 +105,12 @@ def gaussian_lcb(X, model, kappa=1.96, return_grad=False): It should have a ``return_std`` parameter that returns the standard deviation. - * `kappa`: [float, default 1.96]: + * `kappa`: [float, default 1.96 or 'Aopt']: Controls how much of the variance in the predicted values should be taken into account. If set to be very high, then we are favouring exploration over exploitation and vice versa. + If set to 'Aopt', the acquisition function will only use the variance + which is useful in a pure exploration setting. Useless if ``method`` is set to "LCB". 
* `return_grad`: [boolean, optional]: @@ -132,10 +134,14 @@ def gaussian_lcb(X, model, kappa=1.96, return_grad=False): X, return_std=True, return_mean_grad=True, return_std_grad=True) + if kappa == "Aopt": + return -std, -std_grad return mu - kappa * std, mu_grad - kappa * std_grad else: mu, std = model.predict(X, return_std=True) + if kappa == "Aopt": + return -std return mu - kappa * std diff --git a/skopt/tests/test_acquisition.py b/skopt/tests/test_acquisition.py index 8df33d7..bc30689 100644 --- a/skopt/tests/test_acquisition.py +++ b/skopt/tests/test_acquisition.py @@ -71,6 +71,14 @@ def test_acquisition_pi_correctness(): @pytest.mark.fast_test +def test_acquisition_a_opt_correctness(): + # check that it works with a vector as well + X = 10 * np.ones((4, 2)) + aopt = gaussian_lcb(X, ConstSurrogate(), kappa='Aopt') + assert_array_almost_equal(aopt, [-1.0] * 4) + + +@pytest.mark.fast_test def test_acquisition_lcb_correctness(): # check that it works with a vector as well X = 10 * np.ones((4, 2)) From 3824ef0f9daf69a0b7d9106ddf4d63be10e2d0fe Mon Sep 17 00:00:00 2001 From: Karlson Pfannschmidt Date: Tue, 25 Jul 2017 13:58:29 +0200 Subject: [PATCH 2/2] Rename Aopt to inf Since in LCB the variable kappa is used to describe how much weight is given to the standard deviation, 'inf' is a more natural name for the limit of this weight. --- skopt/acquisition.py | 8 ++++---- skopt/tests/test_acquisition.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/skopt/acquisition.py b/skopt/acquisition.py index 7e52da6..8693fea 100644 --- a/skopt/acquisition.py +++ b/skopt/acquisition.py @@ -105,11 +105,11 @@ def gaussian_lcb(X, model, kappa=1.96, return_grad=False): It should have a ``return_std`` parameter that returns the standard deviation. - * `kappa`: [float, default 1.96 or 'Aopt']: + * `kappa`: [float, default 1.96 or 'inf']: Controls how much of the variance in the predicted values should be taken into account. 
If set to be very high, then we are favouring exploration over exploitation and vice versa. - If set to 'Aopt', the acquisition function will only use the variance + If set to 'inf', the acquisition function will only use the standard deviation which is useful in a pure exploration setting. Useless if ``method`` is set to "LCB". @@ -134,13 +134,13 @@ def gaussian_lcb(X, model, kappa=1.96, return_grad=False): X, return_std=True, return_mean_grad=True, return_std_grad=True) - if kappa == "Aopt": + if kappa == "inf": return -std, -std_grad return mu - kappa * std, mu_grad - kappa * std_grad else: mu, std = model.predict(X, return_std=True) - if kappa == "Aopt": + if kappa == "inf": return -std return mu - kappa * std diff --git a/skopt/tests/test_acquisition.py b/skopt/tests/test_acquisition.py index bc30689..c806799 100644 --- a/skopt/tests/test_acquisition.py +++ b/skopt/tests/test_acquisition.py @@ -71,11 +71,11 @@ def test_acquisition_pi_correctness(): @pytest.mark.fast_test -def test_acquisition_a_opt_correctness(): +def test_acquisition_variance_correctness(): # check that it works with a vector as well X = 10 * np.ones((4, 2)) - aopt = gaussian_lcb(X, ConstSurrogate(), kappa='Aopt') - assert_array_almost_equal(aopt, [-1.0] * 4) + var = gaussian_lcb(X, ConstSurrogate(), kappa='inf') + assert_array_almost_equal(var, [-1.0] * 4) @pytest.mark.fast_test