MilesCranmer committed
Commit b4e0cde
1 Parent(s): 64b444d

Add crossover to hyperparam optimization

Files changed (1)
  1. benchmarks/hyperparamopt.py +18 -11
benchmarks/hyperparamopt.py CHANGED
@@ -11,7 +11,7 @@ from hyperopt.fmin import generate_trials_to_calculate
 ################################################################################
 TRIALS_FOLDER = "trials2"
 NUMBER_TRIALS_PER_RUN = 1
-timeout_in_minutes = 5
+timeout_in_minutes = 10
 
 # Test run to compile everything:
 binary_operators = ["*", "/", "+", "-"]
@@ -162,10 +162,12 @@ space = dict(
     weightRandomize=hp.loguniform("weightRandomize", np.log(0.0001), np.log(100)),
     # weightSimplify=0.002,
     weightSimplify=hp.choice("weightSimplify", [0.002]), # One of these is fixed.
+    # crossoverProbability=0.01,
+    crossoverProbability=hp.loguniform("crossoverProbability", np.log(0.00001), np.log(0.2)),
     # perturbationFactor=1.0,
     perturbationFactor=hp.loguniform("perturbationFactor", np.log(0.0001), np.log(100)),
     # maxsize=20,
-    maxsize=hp.choice("maxsize", [20]),
+    maxsize=hp.choice("maxsize", [30]),
     # warmupMaxsizeBy=0.0,
     warmupMaxsizeBy=hp.uniform("warmupMaxsizeBy", 0.0, 0.5),
     # useFrequency=True,
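
The new crossoverProbability dimension is searched on a log scale between 1e-5 and 0.2. A minimal standalone check (not part of hyperparamopt.py) of what hp.loguniform draws over that range:

    import numpy as np
    from hyperopt import hp
    from hyperopt.pyll import stochastic

    # hp.loguniform returns exp(U) with U ~ Uniform(log(lo), log(hi)), so samples are
    # spread roughly evenly across orders of magnitude between 1e-5 and 0.2.
    dim = hp.loguniform("crossoverProbability", np.log(0.00001), np.log(0.2))
    print([float(stochastic.sample(dim)) for _ in range(5)])
    # e.g. values like 2.7e-05, 0.0011, 0.094, ...
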
@@ -180,6 +182,8 @@ space = dict(
     tournament_selection_p=hp.uniform("tournament_selection_p", 0.0, 1.0),
 )
 
+rand_between = lambda lo, hi: (np.random.rand()*(hi - lo) + lo)
+
 init_vals = [
     dict(
         model_selection=0, # 0 means first choice
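
rand_between is a small helper used in the warm-start point below: each fresh run of the script seeds hyperopt with slightly different initial values rather than one fixed point. Its behaviour matches np.random.uniform; a quick sketch:

    import numpy as np

    # Equivalent to the rand_between lambda above: a uniform draw in [lo, hi).
    def rand_between(lo, hi):
        return np.random.rand() * (hi - lo) + lo

    print(rand_between(50, 150))  # e.g. 92.7; same distribution as np.random.uniform(50, 150)
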
@@ -187,15 +191,15 @@ init_vals = [
         unary_operators=0,
         populations=100.0,
         niterations=0,
-        ncyclesperiteration=100.0,
-        alpha=0.1,
+        ncyclesperiteration=rand_between(50, 150),
+        alpha=rand_between(0.05, 0.2),
         annealing=0,
         # fractionReplaced=0.01,
         fractionReplaced=0.01,
         # fractionReplacedHof=0.005,
         fractionReplacedHof=0.005,
         # npop=100,
-        npop=100.0,
+        npop=rand_between(50, 200),
         # parsimony=1e-4,
         parsimony=1e-4,
         # topn=10,
@@ -216,6 +220,8 @@ init_vals = [
         weightRandomize=1.0,
         # weightSimplify=0.002,
         weightSimplify=0, # One of these is fixed.
+        # crossoverProbability=0.01
+        crossoverProbability=0.01,
         # perturbationFactor=1.0,
         perturbationFactor=1.0,
         # maxsize=20,
@@ -231,7 +237,7 @@ init_vals = [
         # optimizer_iterations=10,
         optimizer_iterations=10.0,
         # tournament_selection_p=1.0,
-        tournament_selection_p=0.999,
+        tournament_selection_p=rand_between(0.9, 0.999),
     )
 ]
 
@@ -273,12 +279,9 @@ n_prior_trials = len(list(glob.glob(path)))
 
 loaded_fnames = []
 trials = generate_trials_to_calculate(init_vals)
-i = n_prior_trials
+i = 0
 n = NUMBER_TRIALS_PER_RUN
 
-if i > 0:
-    trials = None
-
 # Run new hyperparameter trials until killed
 while True:
     np.random.seed()
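
With i = 0, the warm-start trial built by generate_trials_to_calculate is now always kept, instead of being discarded whenever prior trial files exist. A minimal standalone sketch of that warm-start mechanism (toy space and objective, assumed purely for illustration; note that hp.choice entries are passed as indices, which is why init_vals above uses 0 for the fixed choices):

    from hyperopt import fmin, hp, tpe, space_eval
    from hyperopt.fmin import generate_trials_to_calculate

    # Toy search space (assumed for illustration, not the benchmark's real space):
    toy_space = dict(
        maxsize=hp.choice("maxsize", [30]),   # categorical -> warm start by index
        alpha=hp.uniform("alpha", 0.0, 1.0),  # continuous  -> warm start by value
    )

    # The first trial is forced to evaluate the given point (index 0 selects the single choice):
    toy_trials = generate_trials_to_calculate([dict(maxsize=0, alpha=0.1)])

    best = fmin(
        fn=lambda p: (p["alpha"] - 0.3) ** 2,  # dummy objective
        space=toy_space,
        algo=tpe.suggest,
        max_evals=5,
        trials=toy_trials,
    )
    print(space_eval(toy_space, best))  # e.g. {'alpha': 0.29..., 'maxsize': 30}
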
@@ -331,7 +334,11 @@ while True:
     hyperopt_trial = Trials()
 
     # Merge with empty trials dataset:
-    save_trials = merge_trials(hyperopt_trial, trials.trials[-n:])
+    if i == 0:
+        save_trials = merge_trials(hyperopt_trial, trials.trials)
+    else:
+        save_trials = merge_trials(hyperopt_trial, trials.trials[-n:])
+
     new_fname = TRIALS_FOLDER + "/" + str(np.random.randint(0, sys.maxsize)) + ".pkl"
     pkl.dump({"trials": save_trials, "n": n}, open(new_fname, "wb"))
     loaded_fnames.append(new_fname)
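
On the first pass (i == 0) the full warm-start trial set is written out; on later passes only the latest n trials are saved (assuming i is incremented further down the loop, outside this diff). merge_trials itself is defined earlier in hyperparamopt.py and is not shown here; a rough sketch of the usual pattern for merging trial documents across parallel hyperopt runs (details assumed, the real helper may differ):

    from hyperopt import Trials

    def merge_trials(trials1, new_trial_docs):
        """Sketch: append trial documents to trials1, re-numbering tids to avoid clashes."""
        max_tid = max((t["tid"] for t in trials1.trials), default=-1)
        for trial in new_trial_docs:
            tid = trial["tid"] + max_tid + 1
            doc = Trials().new_trial_docs(
                tids=[None], specs=[None], results=[None], miscs=[None]
            )
            doc[0] = trial
            doc[0]["tid"] = tid
            doc[0]["misc"]["tid"] = tid
            for key in doc[0]["misc"]["idxs"]:
                doc[0]["misc"]["idxs"][key] = [tid]
            trials1.insert_trial_docs(doc)
            trials1.refresh()
        return trials1
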
 