# util.py (forked from boringlee24/SC21_Ribbon)
import warnings
import numpy as np
from scipy.stats import norm
from scipy.optimize import minimize
import pdb


def acq_max(ac, gp, y_max, bounds, random_state, n_warmup=10000, n_iter=10):
    """
    A function to find the maximum of the acquisition function.

    It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
    optimization method: first by sampling `n_warmup` points at random,
    and then running L-BFGS-B from `n_iter` random starting points.

    Parameters
    ----------
    :param ac:
        The acquisition function object that returns its point-wise value.
    :param gp:
        A Gaussian process fitted to the relevant data.
    :param y_max:
        The current maximum known value of the target function.
    :param bounds:
        The variable bounds that limit the search for the acquisition maximum.
    :param random_state:
        Instance of np.random.RandomState random number generator.
    :param n_warmup:
        Number of points at which to randomly sample the acquisition function.
    :param n_iter:
        Number of times to run scipy.optimize.minimize.

    Returns
    -------
    :return: x_max, the argmax of the acquisition function.
    """
    # Warm up with random points
    x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_warmup, bounds.shape[0]))
    ys = ac(x_tries, gp=gp, y_max=y_max)
    x_max = x_tries[ys.argmax()]
    max_acq = ys.max()

    # Explore the parameter space more thoroughly
    x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
                                   size=(n_iter, bounds.shape[0]))
    for x_try in x_seeds:
        # Find the minimum of minus the acquisition function
        res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
                       x_try.reshape(1, -1),
                       bounds=bounds,
                       method="L-BFGS-B")

        # Skip this seed if the optimization did not succeed
        if not res.success:
            continue

        # Store it if better than the previous maximum.
        if max_acq is None or -res.fun[0] >= max_acq:
            x_max = res.x
            max_acq = -res.fun[0]

    # Clip output to make sure it lies within the bounds. Due to floating
    # point technicalities this is not always the case.
    return np.clip(x_max, bounds[:, 0], bounds[:, 1])
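

# Illustrative sketch, not part of the original module: one way acq_max could
# be driven with a scikit-learn Gaussian process and the UtilityFunction class
# defined below. The Matern kernel, the bounds, and the toy objective are
# assumptions for demonstration only; this helper is never called on import
# and is meant to be invoked after the whole module has been loaded.
def _example_acq_max():
    from sklearn.gaussian_process import GaussianProcessRegressor
    from sklearn.gaussian_process.kernels import Matern

    rng = ensure_rng(42)
    bounds = np.array([[0.0, 10.0], [0.0, 5.0]])

    # Fit the GP on a handful of random observations of a toy target.
    X = rng.uniform(bounds[:, 0], bounds[:, 1], size=(8, 2))
    y = -np.sum((X - 3.0) ** 2, axis=1)
    gp = GaussianProcessRegressor(kernel=Matern(nu=2.5), normalize_y=True)
    gp.fit(X, y)

    # Propose the next point by maximizing the UCB acquisition.
    utility = UtilityFunction(kind='ucb', kappa=2.576, xi=0.0)
    return acq_max(ac=utility.utility, gp=gp, y_max=y.max(),
                   bounds=bounds, random_state=rng)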


class UtilityFunction(object):
    """
    An object to compute the acquisition functions.
    """

    def __init__(self, kind, kappa, xi, kappa_decay=1, kappa_decay_delay=0, pruned=[]):
        self.kappa = kappa
        self._kappa_decay = kappa_decay
        self._kappa_decay_delay = kappa_decay_delay
        self.xi = xi
        self.pruned = pruned
        self._iters_counter = 0

        if kind not in ['ucb', 'ei', 'poi', 'ei_prune', 'poi_prune']:
            err = "The utility function " \
                  "{} has not been implemented, please choose one of " \
                  "ucb, ei, poi, ei_prune, or poi_prune.".format(kind)
            raise NotImplementedError(err)
        else:
            self.kind = kind

    def update_params(self):
        self._iters_counter += 1
        if self._kappa_decay < 1 and self._iters_counter > self._kappa_decay_delay:
            self.kappa *= self._kappa_decay

    def utility(self, x, gp, y_max):
        if self.kind == 'ucb':
            return self._ucb(x, gp, self.kappa)
        if self.kind == 'ei':
            return self._ei(x, gp, y_max, self.xi)
        if self.kind == 'poi':
            return self._poi(x, gp, y_max, self.xi)
        if self.kind == 'poi_prune':
            return self._poi_prune(x, gp, y_max, self.xi, self.pruned)
        if self.kind == 'ei_prune':
            return self._ei_prune(x, gp, y_max, self.xi, self.pruned)

    @staticmethod
    def _ucb(x, gp, kappa):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        return mean + kappa * std

    @staticmethod
    def _ei(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        a = (mean - y_max - xi)
        z = a / std
        return a * norm.cdf(z) + std * norm.pdf(z)

    @staticmethod
    def _ei_prune(x, gp, y_max, xi, pruned):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        a = (mean - y_max - xi)
        z = a / std
        inter = a * norm.cdf(z) + std * norm.pdf(z)
        # Zero out candidates whose rounded coordinates have been pruned.
        mask = []
        for val in np.round(x):
            if val.tolist() in pruned:
                mask.append(0)
            else:
                mask.append(1)
        mask = np.array(mask)
        return inter * mask

    @staticmethod
    def _poi(x, gp, y_max, xi):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        return norm.cdf(z)

    @staticmethod
    def _poi_prune(x, gp, y_max, xi, pruned):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            mean, std = gp.predict(x, return_std=True)

        z = (mean - y_max - xi) / std
        inter = norm.cdf(z)
        # Zero out candidates whose rounded coordinates have been pruned.
        mask = []
        for val in np.round(x):
            if val.tolist() in pruned:
                mask.append(0)
            else:
                mask.append(1)
        mask = np.array(mask)
        return inter * mask
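

# Illustrative sketch, not part of the original module: the *_prune variants
# zero out the acquisition value of any candidate whose rounded coordinates
# appear in `pruned`, so configurations that were already ruled out are never
# proposed again. The pruned points and candidates below are assumptions for
# demonstration; `gp` is expected to be an already-fitted Gaussian process.
def _example_pruned_utility(gp, y_max):
    pruned = [[1.0, 2.0], [3.0, 0.0]]  # hypothetical already-rejected configurations
    utility = UtilityFunction(kind='ei_prune', kappa=2.576, xi=0.0, pruned=pruned)
    candidates = np.array([[1.2, 1.8],   # rounds to [1.0, 2.0] -> masked to 0
                           [4.0, 4.0]])  # not pruned -> regular EI value
    return utility.utility(candidates, gp=gp, y_max=y_max)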


def load_logs(optimizer, logs):
    """Load previous ...
    """
    import json

    if isinstance(logs, str):
        logs = [logs]

    for log in logs:
        with open(log, "r") as j:
            while True:
                try:
                    iteration = next(j)
                except StopIteration:
                    break

                iteration = json.loads(iteration)
                try:
                    optimizer.register(
                        params=iteration["params"],
                        target=iteration["target"],
                    )
                except KeyError:
                    pass

    return optimizer
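

# Illustrative usage, an assumption rather than part of the original module:
# each log is expected to be a JSON-lines file where every line holds one
# observation with "params" and "target" keys (the format written by the
# bayes_opt JSONLogger, on which this utility appears to be based). Lines
# missing either key are skipped by the KeyError handler above.
#
#     optimizer = load_logs(optimizer, logs=["./logs.json"])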


def ensure_rng(random_state=None):
    """
    Creates a random number generator based on an optional seed. This can be
    an integer for a seeded rng, an existing np.random.RandomState instance
    (returned unchanged), or None for an unseeded rng.
    """
    if random_state is None:
        random_state = np.random.RandomState()
    elif isinstance(random_state, int):
        random_state = np.random.RandomState(random_state)
    else:
        assert isinstance(random_state, np.random.RandomState)
    return random_state
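

# Illustrative usage (not in the original module): the three accepted inputs.
# Passing the same integer seed always yields the same reproducible stream.
#
#     rng_a = ensure_rng()         # fresh, unseeded RandomState
#     rng_b = ensure_rng(123)      # RandomState seeded with 123
#     rng_c = ensure_rng(rng_b)    # an existing RandomState is passed through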


class Colours:
    """Print in nice colours."""

    BLUE = '\033[94m'
    BOLD = '\033[1m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    END = '\033[0m'
    GREEN = '\033[92m'
    PURPLE = '\033[95m'
    RED = '\033[91m'
    UNDERLINE = '\033[4m'
    YELLOW = '\033[93m'

    @classmethod
    def _wrap_colour(cls, s, colour):
        return colour + s + cls.END

    @classmethod
    def black(cls, s):
        """Wrap text in black."""
        return cls._wrap_colour(s, cls.END)

    @classmethod
    def blue(cls, s):
        """Wrap text in blue."""
        return cls._wrap_colour(s, cls.BLUE)

    @classmethod
    def bold(cls, s):
        """Wrap text in bold."""
        return cls._wrap_colour(s, cls.BOLD)

    @classmethod
    def cyan(cls, s):
        """Wrap text in cyan."""
        return cls._wrap_colour(s, cls.CYAN)

    @classmethod
    def darkcyan(cls, s):
        """Wrap text in darkcyan."""
        return cls._wrap_colour(s, cls.DARKCYAN)

    @classmethod
    def green(cls, s):
        """Wrap text in green."""
        return cls._wrap_colour(s, cls.GREEN)

    @classmethod
    def purple(cls, s):
        """Wrap text in purple."""
        return cls._wrap_colour(s, cls.PURPLE)

    @classmethod
    def red(cls, s):
        """Wrap text in red."""
        return cls._wrap_colour(s, cls.RED)

    @classmethod
    def underline(cls, s):
        """Wrap text in underline."""
        return cls._wrap_colour(s, cls.UNDERLINE)

    @classmethod
    def yellow(cls, s):
        """Wrap text in yellow."""
        return cls._wrap_colour(s, cls.YELLOW)
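

# Illustrative usage (not in the original module): wrap a string before
# printing; each helper prepends the ANSI escape code and appends Colours.END
# so the terminal colour resets after the wrapped text.
#
#     print(Colours.green("optimization finished"))
#     print(Colours.bold(Colours.red("budget exceeded")))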