Commit 1443fba, committed by MilesCranmer
Parent(s): 2ff5ae9

Switch CamelCase to snake_case for parameters
Files changed:
- benchmarks/README.md        +1  -1
- benchmarks/benchmark.sh     +5  -5
- benchmarks/hyperparamopt.py +32 -32
- benchmarks/space.py         +34 -34
- pysr/sr.py                  +163 -106
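
For quick reference, here is the rename in user-facing terms: the same model configured with the old CamelCase keywords and the new snake_case ones. A minimal sketch (the toy data is made up; the keyword mapping comes from the deprecation handler added in `pysr/sr.py` below):

```python
import numpy as np
from pysr import PySRRegressor

X = np.random.randn(100, 2)
y = X[:, 0] ** 2 + np.sin(X[:, 1])

# Before this commit (now deprecated, still accepted with a warning):
#   model = PySRRegressor(npop=33, fractionReplaced=0.000364,
#                         batchSize=50, useFrequency=True)
# After this commit:
model = PySRRegressor(
    population_size=33,
    fraction_replaced=0.000364,
    batch_size=50,
    use_frequency=True,
)
model.fit(X, y)
```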
benchmarks/README.md  CHANGED
@@ -7,7 +7,7 @@ for x in $(cat tags.txt); do sleep 120 && git checkout $x &> /dev/null && nohup
 ```
 with this API call in `benchmark.sh`
 ```python
-eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000)
+eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000)
 ```


benchmarks/benchmark.sh  CHANGED
@@ -7,13 +7,13 @@ from pysr import pysr
 X=np.random.randn(100, 2)*5
 y=2*np.sin((X[:, 0]+X[:, 1]))*np.exp(X[:, 1]/3)
 if version[1] >= 3 and version[2] >= 20:
-    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000, maxdepth=6, fast_cycle=True, batching=True, batchSize=50)
+    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000, maxdepth=6, fast_cycle=True, batching=True, batch_size=50)
 elif version[1] >= 3 and version[2] >= 17:
-    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000, maxdepth=6, fast_cycle=True)
+    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000, maxdepth=6, fast_cycle=True)
 elif version[1] >= 3 and version[2] >= 16:
-    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000, maxdepth=6)
+    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000, maxdepth=6)
 elif version[1] >= 3 and version[2] >= 2:
-    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000)
+    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, procs=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000)
 else:
-    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, threads=4, parsimony=1e-10, npop=1000, ncyclesperiteration=1000)
+    eq = pysr(X, y, binary_operators=["plus", "mult", "div", "pow"], unary_operators=["sin"], niterations=20, threads=4, parsimony=1e-10, population_size=1000, ncyclesperiteration=1000)
 ' 2>&1 | grep 'per second' | tail -n 1 | vims '%s/ //g' -l 'df:'
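
The `version` list gated on above comes from outside this hunk; a plausible construction (an assumption, not shown in the diff) splits the checked-out tag into integer fields:

```python
# Sketch under assumptions: benchmark.sh loops over git tags from tags.txt
# (e.g. "v0.3.20"), so `version` is presumably the tag split into integers.
# The names and tag format here are illustrative, not taken from the script.
tag = "v0.3.20"
version = [int(part) for part in tag.lstrip("v").split(".")]

# version[1] and version[2] are the minor/patch fields compared above:
if version[1] >= 3 and version[2] >= 20:
    print("this tag accepts the batching/batch_size kwargs")
```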
benchmarks/hyperparamopt.py  CHANGED
@@ -42,7 +42,7 @@ def run_trial(args):
     "populations",
     "niterations",
     "ncyclesperiteration",
-    "npop",
+    "population_size",
     "topn",
     "maxsize",
     "optimizer_nrestarts",
@@ -57,7 +57,7 @@ def run_trial(args):
     args["tournament_selection_n"] = args["topn"]

     # Invalid hyperparams:
-    invalid = args["npop"] < args["topn"]
+    invalid = args["population_size"] < args["topn"]
     if invalid:
         return dict(status="fail", loss=float("inf"))

@@ -125,42 +125,42 @@ init_vals = [
        ncyclesperiteration=rand_between(50, 150),
        alpha=rand_between(0.05, 0.2),
        annealing=0,
-       # fractionReplaced=0.01,
-       fractionReplaced=0.01,
-       # fractionReplacedHof=0.005,
-       fractionReplacedHof=0.005,
-       # npop=100,
-       npop=rand_between(50, 200),
+       # fraction_replaced=0.01,
+       fraction_replaced=0.01,
+       # fraction_replaced_hof=0.005,
+       fraction_replaced_hof=0.005,
+       # population_size=100,
+       population_size=rand_between(50, 200),
        # parsimony=1e-4,
        parsimony=1e-4,
        # topn=10,
        topn=10.0,
-       # weightAddNode=1,
-       weightAddNode=1.0,
-       # weightInsertNode=3,
-       weightInsertNode=3.0,
-       # weightDeleteNode=3,
-       weightDeleteNode=3.0,
-       # weightDoNothing=1,
-       weightDoNothing=1.0,
-       # weightMutateConstant=10,
-       weightMutateConstant=10.0,
-       # weightMutateOperator=1,
-       weightMutateOperator=1.0,
-       # weightRandomize=1,
-       weightRandomize=1.0,
-       # weightSimplify=0.002,
-       weightSimplify=0,  # One of these is fixed.
-       # crossoverProbability=0.01
-       crossoverProbability=0.01,
-       # perturbationFactor=1.0,
-       perturbationFactor=1.0,
+       # weight_add_node=1,
+       weight_add_node=1.0,
+       # weight_insert_node=3,
+       weight_insert_node=3.0,
+       # weight_delete_node=3,
+       weight_delete_node=3.0,
+       # weight_do_nothing=1,
+       weight_do_nothing=1.0,
+       # weight_mutate_constant=10,
+       weight_mutate_constant=10.0,
+       # weight_mutate_operator=1,
+       weight_mutate_operator=1.0,
+       # weight_randomize=1,
+       weight_randomize=1.0,
+       # weight_simplify=0.002,
+       weight_simplify=0,  # One of these is fixed.
+       # crossover_probability=0.01
+       crossover_probability=0.01,
+       # perturbation_factor=1.0,
+       perturbation_factor=1.0,
        # maxsize=20,
        maxsize=0,
-       # warmupMaxsizeBy=0.0,
-       warmupMaxsizeBy=0.0,
-       # useFrequency=True,
-       useFrequency=1,
+       # warmup_maxsize_by=0.0,
+       warmup_maxsize_by=0.0,
+       # use_frequency=True,
+       use_frequency=1,
        # optimizer_nrestarts=3,
        optimizer_nrestarts=3.0,
        # optimize_probability=1.0,
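
`run_trial` follows hyperopt's objective protocol: return a dict with a `loss` and a `status`, and flag impossible configurations, here `population_size < topn`, as failed trials instead of evaluating them. A self-contained sketch of that pattern (the stand-in loss is made up; the real script trains PySR and scores the result):

```python
import numpy as np
from hyperopt import Trials, fmin, hp, tpe

# A two-entry stand-in for the full search space in benchmarks/space.py.
space = dict(
    population_size=hp.qloguniform("population_size", np.log(20), np.log(1000), 1),
    topn=hp.qloguniform("topn", np.log(2), np.log(50), 1),
)

def run_trial(args):
    # Invalid hyperparams: can't migrate more individuals than the population holds.
    if args["population_size"] < args["topn"]:
        return dict(status="fail", loss=float("inf"))
    # Stand-in loss so the sketch runs end to end.
    return dict(status="ok", loss=(args["population_size"] - 100) ** 2)

trials = Trials()
best = fmin(run_trial, space, algo=tpe.suggest, max_evals=25, trials=trials)
print(best)
```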

benchmarks/space.py  CHANGED
@@ -25,50 +25,50 @@ space = dict(
     alpha=hp.loguniform("alpha", np.log(0.0001), np.log(1000)),
     # annealing=False,
     annealing=hp.choice("annealing", [False, True]),
-    # fractionReplaced=0.01,
-    fractionReplaced=hp.loguniform("fractionReplaced", np.log(0.0001), np.log(0.5)),
-    # fractionReplacedHof=0.005,
-    fractionReplacedHof=hp.loguniform(
-        "fractionReplacedHof", np.log(0.0001), np.log(0.5)
+    # fraction_replaced=0.01,
+    fraction_replaced=hp.loguniform("fraction_replaced", np.log(0.0001), np.log(0.5)),
+    # fraction_replaced_hof=0.005,
+    fraction_replaced_hof=hp.loguniform(
+        "fraction_replaced_hof", np.log(0.0001), np.log(0.5)
     ),
-    # npop=100,
-    npop=hp.qloguniform("npop", np.log(20), np.log(1000), 1),
+    # population_size=100,
+    population_size=hp.qloguniform("population_size", np.log(20), np.log(1000), 1),
     # parsimony=1e-4,
     parsimony=hp.loguniform("parsimony", np.log(0.0001), np.log(0.5)),
     # topn=10,
     topn=hp.qloguniform("topn", np.log(2), np.log(50), 1),
-    # weightAddNode=1,
-    weightAddNode=hp.loguniform("weightAddNode", np.log(0.0001), np.log(100)),
-    # weightInsertNode=3,
-    weightInsertNode=hp.loguniform("weightInsertNode", np.log(0.0001), np.log(100)),
-    # weightDeleteNode=3,
-    weightDeleteNode=hp.loguniform("weightDeleteNode", np.log(0.0001), np.log(100)),
-    # weightDoNothing=1,
-    weightDoNothing=hp.loguniform("weightDoNothing", np.log(0.0001), np.log(100)),
-    # weightMutateConstant=10,
-    weightMutateConstant=hp.loguniform(
-        "weightMutateConstant", np.log(0.0001), np.log(100)
+    # weight_add_node=1,
+    weight_add_node=hp.loguniform("weight_add_node", np.log(0.0001), np.log(100)),
+    # weight_insert_node=3,
+    weight_insert_node=hp.loguniform("weight_insert_node", np.log(0.0001), np.log(100)),
+    # weight_delete_node=3,
+    weight_delete_node=hp.loguniform("weight_delete_node", np.log(0.0001), np.log(100)),
+    # weight_do_nothing=1,
+    weight_do_nothing=hp.loguniform("weight_do_nothing", np.log(0.0001), np.log(100)),
+    # weight_mutate_constant=10,
+    weight_mutate_constant=hp.loguniform(
+        "weight_mutate_constant", np.log(0.0001), np.log(100)
     ),
-    # weightMutateOperator=1,
-    weightMutateOperator=hp.loguniform(
-        "weightMutateOperator", np.log(0.0001), np.log(100)
+    # weight_mutate_operator=1,
+    weight_mutate_operator=hp.loguniform(
+        "weight_mutate_operator", np.log(0.0001), np.log(100)
     ),
-    # weightRandomize=1,
-    weightRandomize=hp.loguniform("weightRandomize", np.log(0.0001), np.log(100)),
-    # weightSimplify=0.002,
-    weightSimplify=hp.choice("weightSimplify", [0.002]),  # One of these is fixed.
-    # crossoverProbability=0.01,
-    crossoverProbability=hp.loguniform(
-        "crossoverProbability", np.log(0.00001), np.log(0.2)
+    # weight_randomize=1,
+    weight_randomize=hp.loguniform("weight_randomize", np.log(0.0001), np.log(100)),
+    # weight_simplify=0.002,
+    weight_simplify=hp.choice("weight_simplify", [0.002]),  # One of these is fixed.
+    # crossover_probability=0.01,
+    crossover_probability=hp.loguniform(
+        "crossover_probability", np.log(0.00001), np.log(0.2)
     ),
-    # perturbationFactor=1.0,
-    perturbationFactor=hp.loguniform("perturbationFactor", np.log(0.0001), np.log(100)),
+    # perturbation_factor=1.0,
+    perturbation_factor=hp.loguniform("perturbation_factor", np.log(0.0001), np.log(100)),
     # maxsize=20,
     maxsize=hp.choice("maxsize", [30]),
-    # warmupMaxsizeBy=0.0,
-    warmupMaxsizeBy=hp.uniform("warmupMaxsizeBy", 0.0, 0.5),
-    # useFrequency=True,
-    useFrequency=hp.choice("useFrequency", [True, False]),
+    # warmup_maxsize_by=0.0,
+    warmup_maxsize_by=hp.uniform("warmup_maxsize_by", 0.0, 0.5),
+    # use_frequency=True,
+    use_frequency=hp.choice("use_frequency", [True, False]),
     # optimizer_nrestarts=3,
     optimizer_nrestarts=hp.quniform("optimizer_nrestarts", 1, 10, 1),
     # optimize_probability=1.0,
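
Each `hp.*` call defines a stochastic node, not a plain value: `hp.loguniform` samples log-uniformly over the given range, and `hp.qloguniform(..., 1)` quantizes the draw to integer steps (used above for counts like `population_size` and `topn`). One way to sanity-check the ranges is to sample the space directly; a small sketch with two entries copied from the file:

```python
import numpy as np
from hyperopt import hp
from hyperopt.pyll import stochastic

# Two entries copied from benchmarks/space.py after the rename.
space = dict(
    fraction_replaced=hp.loguniform("fraction_replaced", np.log(0.0001), np.log(0.5)),
    population_size=hp.qloguniform("population_size", np.log(20), np.log(1000), 1),
)

# Draw a few configurations to eyeball what the optimizer will propose.
for _ in range(3):
    print(stochastic.sample(space))
```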
pysr/sr.py  CHANGED
@@ -16,6 +16,7 @@ from collections import OrderedDict
 from hashlib import sha256

 from .version import __version__, __symbolic_regression_jl_version__
+from .deprecated import make_deprecated_kwargs_for_pysr_regressor


 def install(julia_project=None, quiet=False):  # pragma: no cover
@@ -356,24 +357,24 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         timeout_in_seconds=None,
         alpha=0.1,
         annealing=False,
-        fractionReplaced=0.000364,
-        fractionReplacedHof=0.035,
-        npop=33,
+        fraction_replaced=0.000364,
+        fraction_replaced_hof=0.035,
+        population_size=33,
         parsimony=0.0032,
         migration=True,
-        hofMigration=True,
-        shouldOptimizeConstants=True,
+        hof_migration=True,
+        should_optimize_constants=True,
         topn=12,
-        weightAddNode=0.79,
-        weightDeleteNode=1.7,
-        weightDoNothing=0.21,
-        weightInsertNode=5.1,
-        weightMutateConstant=0.048,
-        weightMutateOperator=0.47,
-        weightRandomize=0.00023,
-        weightSimplify=0.0020,
-        crossoverProbability=0.066,
-        perturbationFactor=0.076,
+        weight_add_node=0.79,
+        weight_delete_node=1.7,
+        weight_do_nothing=0.21,
+        weight_insert_node=5.1,
+        weight_mutate_constant=0.048,
+        weight_mutate_operator=0.47,
+        weight_randomize=0.00023,
+        weight_simplify=0.0020,
+        crossover_probability=0.066,
+        perturbation_factor=0.076,
         extra_sympy_mappings=None,
         extra_torch_mappings=None,
         extra_jax_mappings=None,
@@ -386,12 +387,12 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         maxdepth=None,
         variable_names=None,
         batching=False,
-        batchSize=50,
+        batch_size=50,
         select_k_features=None,
-        warmupMaxsizeBy=0.0,
+        warmup_maxsize_by=0.0,
         constraints=None,
-        useFrequency=True,
-        useFrequencyInTournament=True,
+        use_frequency=True,
+        use_frequency_in_tournament=True,
         tempdir=None,
         delete_tempfiles=True,
         julia_project=None,
@@ -411,6 +412,8 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         multithreading=None,
         use_symbolic_utils=False,
         skip_mutation_failures=True,
+        # To support deprecated kwargs:
+        **kwargs,
     ):
         """Initialize settings for an equation search in PySR.

@@ -442,8 +445,8 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         :type multithreading: bool
         :param batching: whether to compare population members on small batches during evolution. Still uses full dataset for comparing against hall of fame.
         :type batching: bool
-        :param batchSize: the amount of data to use if doing batching.
-        :type batchSize: int
+        :param batch_size: the amount of data to use if doing batching.
+        :type batch_size: int
         :param maxsize: Max size of an equation.
         :type maxsize: int
         :param ncyclesperiteration: Number of total mutations to run, per 10 samples of the population, per iteration.
@@ -454,42 +457,42 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         :type alpha: float
         :param annealing: Whether to use annealing. You should (and it is default).
         :type annealing: bool
-        :param fractionReplaced: How much of population to replace with migrating equations from other populations.
-        :type fractionReplaced: float
-        :param fractionReplacedHof: How much of population to replace with migrating equations from hall of fame.
-        :type fractionReplacedHof: float
-        :param npop: Number of individuals in each population
-        :type npop: int
+        :param fraction_replaced: How much of population to replace with migrating equations from other populations.
+        :type fraction_replaced: float
+        :param fraction_replaced_hof: How much of population to replace with migrating equations from hall of fame.
+        :type fraction_replaced_hof: float
+        :param population_size: Number of individuals in each population
+        :type population_size: int
         :param parsimony: Multiplicative factor for how much to punish complexity.
         :type parsimony: float
         :param migration: Whether to migrate.
         :type migration: bool
-        :param hofMigration: Whether to have the hall of fame migrate.
-        :type hofMigration: bool
-        :param shouldOptimizeConstants: Whether to numerically optimize constants (Nelder-Mead/Newton) at the end of each iteration.
-        :type shouldOptimizeConstants: bool
+        :param hof_migration: Whether to have the hall of fame migrate.
+        :type hof_migration: bool
+        :param should_optimize_constants: Whether to numerically optimize constants (Nelder-Mead/Newton) at the end of each iteration.
+        :type should_optimize_constants: bool
         :param topn: How many top individuals migrate from each population.
         :type topn: int
-        :param perturbationFactor: Constants are perturbed by a max factor of (perturbationFactor*T + 1). Either multiplied by this or divided by this.
-        :type perturbationFactor: float
-        :param weightAddNode: Relative likelihood for mutation to add a node
-        :type weightAddNode: float
-        :param weightInsertNode: Relative likelihood for mutation to insert a node
-        :type weightInsertNode: float
-        :param weightDeleteNode: Relative likelihood for mutation to delete a node
-        :type weightDeleteNode: float
-        :param weightDoNothing: Relative likelihood for mutation to leave the individual
-        :type weightDoNothing: float
-        :param weightMutateConstant: Relative likelihood for mutation to change the constant slightly in a random direction.
-        :type weightMutateConstant: float
-        :param weightMutateOperator: Relative likelihood for mutation to swap an operator.
-        :type weightMutateOperator: float
-        :param weightRandomize: Relative likelihood for mutation to completely delete and then randomly generate the equation
-        :type weightRandomize: float
-        :param weightSimplify: Relative likelihood for mutation to simplify constant parts by evaluation
-        :type weightSimplify: float
-        :param crossoverProbability: Absolute probability of crossover-type genetic operation, instead of a mutation.
-        :type crossoverProbability: float
+        :param perturbation_factor: Constants are perturbed by a max factor of (perturbation_factor*T + 1). Either multiplied by this or divided by this.
+        :type perturbation_factor: float
+        :param weight_add_node: Relative likelihood for mutation to add a node
+        :type weight_add_node: float
+        :param weight_insert_node: Relative likelihood for mutation to insert a node
+        :type weight_insert_node: float
+        :param weight_delete_node: Relative likelihood for mutation to delete a node
+        :type weight_delete_node: float
+        :param weight_do_nothing: Relative likelihood for mutation to leave the individual
+        :type weight_do_nothing: float
+        :param weight_mutate_constant: Relative likelihood for mutation to change the constant slightly in a random direction.
+        :type weight_mutate_constant: float
+        :param weight_mutate_operator: Relative likelihood for mutation to swap an operator.
+        :type weight_mutate_operator: float
+        :param weight_randomize: Relative likelihood for mutation to completely delete and then randomly generate the equation
+        :type weight_randomize: float
+        :param weight_simplify: Relative likelihood for mutation to simplify constant parts by evaluation
+        :type weight_simplify: float
+        :param crossover_probability: Absolute probability of crossover-type genetic operation, instead of a mutation.
+        :type crossover_probability: float
         :param equation_file: Where to save the files (.csv separated by |)
         :type equation_file: str
         :param verbosity: What verbosity level to use. 0 means minimal print statements.
@@ -504,14 +507,14 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         :type fast_cycle: bool
         :param variable_names: a list of names for the variables, other than "x0", "x1", etc.
         :type variable_names: list
-        :param warmupMaxsizeBy: whether to slowly increase max size from a small number up to the maxsize (if greater than 0). If greater than 0, says the fraction of training time at which the current maxsize will reach the user-passed maxsize.
-        :type warmupMaxsizeBy: float
+        :param warmup_maxsize_by: whether to slowly increase max size from a small number up to the maxsize (if greater than 0). If greater than 0, says the fraction of training time at which the current maxsize will reach the user-passed maxsize.
+        :type warmup_maxsize_by: float
         :param constraints: dictionary of int (unary) or 2-tuples (binary), this enforces maxsize constraints on the individual arguments of operators. E.g., `'pow': (-1, 1)` says that power laws can have any complexity left argument, but only 1 complexity exponent. Use this to force more interpretable solutions.
         :type constraints: dict
-        :param useFrequency: whether to measure the frequency of complexities, and use that instead of parsimony to explore equation space. Will naturally find equations of all complexities.
-        :type useFrequency: bool
-        :param useFrequencyInTournament: whether to use the frequency mentioned above in the tournament, rather than just the simulated annealing.
-        :type useFrequencyInTournament: bool
+        :param use_frequency: whether to measure the frequency of complexities, and use that instead of parsimony to explore equation space. Will naturally find equations of all complexities.
+        :type use_frequency: bool
+        :param use_frequency_in_tournament: whether to use the frequency mentioned above in the tournament, rather than just the simulated annealing.
+        :type use_frequency_in_tournament: bool
         :param tempdir: directory for the temporary files
         :type tempdir: str/None
         :param delete_tempfiles: whether to delete the temporary files after finishing
@@ -536,11 +539,65 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         :type use_symbolic_utils: bool
         :param skip_mutation_failures: Whether to skip mutation and crossover failures, rather than simply re-sampling the current member.
         :type skip_mutation_failures: bool
+        :param kwargs: Supports deprecated keyword arguments. Other arguments
+            will result in an error.
+        :type kwargs: dict
         :returns: Initialized model. Call `.fit(X, y)` to fit your data!
         :type: PySRRegressor
         """
         super().__init__()
-        #
+        # First, check for deprecated kwargs:
+        if len(kwargs) > 0:  # pragma: no cover
+            deprecated_kwargs = make_deprecated_kwargs_for_pysr_regressor()
+            for k, v in kwargs.items():
+                if k == "fractionReplaced":
+                    fraction_replaced = v
+                elif k == "fractionReplacedHof":
+                    fraction_replaced_hof = v
+                elif k == "npop":
+                    population_size = v
+                elif k == "hofMigration":
+                    hof_migration = v
+                elif k == "shouldOptimizeConstants":
+                    should_optimize_constants = v
+                elif k == "weightAddNode":
+                    weight_add_node = v
+                elif k == "weightDeleteNode":
+                    weight_delete_node = v
+                elif k == "weightDoNothing":
+                    weight_do_nothing = v
+                elif k == "weightInsertNode":
+                    weight_insert_node = v
+                elif k == "weightMutateConstant":
+                    weight_mutate_constant = v
+                elif k == "weightMutateOperator":
+                    weight_mutate_operator = v
+                elif k == "weightRandomize":
+                    weight_randomize = v
+                elif k == "weightSimplify":
+                    weight_simplify = v
+                elif k == "crossoverProbability":
+                    crossover_probability = v
+                elif k == "perturbationFactor":
+                    perturbation_factor = v
+                elif k == "batchSize":
+                    batch_size = v
+                elif k == "warmupMaxsizeBy":
+                    warmup_maxsize_by = v
+                elif k == "useFrequency":
+                    use_frequency = v
+                elif k == "useFrequencyInTournament":
+                    use_frequency_in_tournament = v
+                else:
+                    raise TypeError(
+                        f"{k} is not a valid keyword argument for PySRRegressor"
+                    )
+
+                updated_name = deprecated_kwargs[k]
+                warnings.warn(
+                    f"{k} has been renamed to {updated_name} in PySRRegressor. "
+                    "Please use that instead.",
+                )
         self.model_selection = model_selection

         if binary_operators is None:
@@ -572,7 +629,7 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         progress = buffer_available

         assert optimizer_algorithm in ["NelderMead", "BFGS"]
-        assert tournament_selection_n < npop
+        assert tournament_selection_n < population_size

         if extra_jax_mappings is not None:
             for value in extra_jax_mappings.values():
@@ -594,7 +651,7 @@ class PySRRegressor(BaseEstimator, RegressorMixin):

         if maxsize > 40:
             warnings.warn(
-                "Note: Using a large maxsize for the equation search will be exponentially slower and use significant memory. You should consider turning `useFrequency` to False, and perhaps use `warmupMaxsizeBy`."
+                "Note: Using a large maxsize for the equation search will be exponentially slower and use significant memory. You should consider turning `use_frequency` to False, and perhaps use `warmup_maxsize_by`."
             )
         elif maxsize < 7:
             raise NotImplementedError("PySR requires a maxsize of at least 7")
@@ -620,24 +677,24 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
             timeout_in_seconds=timeout_in_seconds,
             alpha=alpha,
             annealing=annealing,
-            fractionReplaced=fractionReplaced,
-            fractionReplacedHof=fractionReplacedHof,
-            npop=npop,
+            fraction_replaced=fraction_replaced,
+            fraction_replaced_hof=fraction_replaced_hof,
+            population_size=population_size,
             parsimony=float(parsimony),
             migration=migration,
-            hofMigration=hofMigration,
-            shouldOptimizeConstants=shouldOptimizeConstants,
+            hof_migration=hof_migration,
+            should_optimize_constants=should_optimize_constants,
             topn=topn,
-            weightAddNode=weightAddNode,
-            weightInsertNode=weightInsertNode,
-            weightDeleteNode=weightDeleteNode,
-            weightDoNothing=weightDoNothing,
-            weightMutateConstant=weightMutateConstant,
-            weightMutateOperator=weightMutateOperator,
-            weightRandomize=weightRandomize,
-            weightSimplify=weightSimplify,
-            crossoverProbability=crossoverProbability,
-            perturbationFactor=perturbationFactor,
+            weight_add_node=weight_add_node,
+            weight_insert_node=weight_insert_node,
+            weight_delete_node=weight_delete_node,
+            weight_do_nothing=weight_do_nothing,
+            weight_mutate_constant=weight_mutate_constant,
+            weight_mutate_operator=weight_mutate_operator,
+            weight_randomize=weight_randomize,
+            weight_simplify=weight_simplify,
+            crossover_probability=crossover_probability,
+            perturbation_factor=perturbation_factor,
             verbosity=verbosity,
             update_verbosity=update_verbosity,
             progress=progress,
@@ -645,12 +702,12 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
             fast_cycle=fast_cycle,
             maxdepth=maxdepth,
             batching=batching,
-            batchSize=batchSize,
+            batch_size=batch_size,
             select_k_features=select_k_features,
-            warmupMaxsizeBy=warmupMaxsizeBy,
+            warmup_maxsize_by=warmup_maxsize_by,
             constraints=constraints,
-            useFrequency=useFrequency,
-            useFrequencyInTournament=useFrequencyInTournament,
+            use_frequency=use_frequency,
+            use_frequency_in_tournament=use_frequency_in_tournament,
             tempdir=tempdir,
             delete_tempfiles=delete_tempfiles,
             update=update,
@@ -976,14 +1033,14 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         constraints = self.params["constraints"]
         update = self.params["update"]
         loss = self.params["loss"]
-        weightMutateConstant = self.params["weightMutateConstant"]
-        weightMutateOperator = self.params["weightMutateOperator"]
-        weightAddNode = self.params["weightAddNode"]
-        weightInsertNode = self.params["weightInsertNode"]
-        weightDeleteNode = self.params["weightDeleteNode"]
-        weightSimplify = self.params["weightSimplify"]
-        weightRandomize = self.params["weightRandomize"]
-        weightDoNothing = self.params["weightDoNothing"]
+        weight_mutate_constant = self.params["weight_mutate_constant"]
+        weight_mutate_operator = self.params["weight_mutate_operator"]
+        weight_add_node = self.params["weight_add_node"]
+        weight_insert_node = self.params["weight_insert_node"]
+        weight_delete_node = self.params["weight_delete_node"]
+        weight_simplify = self.params["weight_simplify"]
+        weight_randomize = self.params["weight_randomize"]
+        weight_do_nothing = self.params["weight_do_nothing"]

         if Main is None:
             if multithreading:
@@ -1129,14 +1186,14 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
         Main.custom_loss = Main.eval(loss)

         mutationWeights = [
-            float(weightMutateConstant),
-            float(weightMutateOperator),
-            float(weightAddNode),
-            float(weightInsertNode),
-            float(weightDeleteNode),
-            float(weightSimplify),
-            float(weightRandomize),
-            float(weightDoNothing),
+            float(weight_mutate_constant),
+            float(weight_mutate_operator),
+            float(weight_add_node),
+            float(weight_insert_node),
+            float(weight_delete_node),
+            float(weight_simplify),
+            float(weight_randomize),
+            float(weight_do_nothing),
         ]

         params_to_hash = {
@@ -1182,7 +1239,7 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
             npopulations=int(self.params["populations"]),
             batching=batching,
             batchSize=int(
-                min([self.params["batchSize"], len(X)]) if batching else len(X)
+                min([self.params["batch_size"], len(X)]) if batching else len(X)
             ),
             mutationWeights=mutationWeights,
             probPickFirst=self.params["tournament_selection_p"],
@@ -1193,28 +1250,28 @@ class PySRRegressor(BaseEstimator, RegressorMixin):
             maxdepth=self.params["maxdepth"],
             fast_cycle=self.params["fast_cycle"],
             migration=self.params["migration"],
-            hofMigration=self.params["hofMigration"],
-            fractionReplacedHof=self.params["fractionReplacedHof"],
-            shouldOptimizeConstants=self.params["shouldOptimizeConstants"],
-            warmupMaxsizeBy=self.params["warmupMaxsizeBy"],
-            useFrequency=self.params["useFrequency"],
-            useFrequencyInTournament=self.params["useFrequencyInTournament"],
-            npop=self.params["npop"],
+            hofMigration=self.params["hof_migration"],
+            fractionReplacedHof=self.params["fraction_replaced_hof"],
+            shouldOptimizeConstants=self.params["should_optimize_constants"],
+            warmupMaxsizeBy=self.params["warmup_maxsize_by"],
+            useFrequency=self.params["use_frequency"],
+            useFrequencyInTournament=self.params["use_frequency_in_tournament"],
+            npop=self.params["population_size"],
             ncyclesperiteration=self.params["ncyclesperiteration"],
-            fractionReplaced=self.params["fractionReplaced"],
+            fractionReplaced=self.params["fraction_replaced"],
             topn=self.params["topn"],
             verbosity=self.params["verbosity"],
             optimizer_algorithm=self.params["optimizer_algorithm"],
             optimizer_nrestarts=self.params["optimizer_nrestarts"],
             optimize_probability=self.params["optimize_probability"],
             optimizer_iterations=self.params["optimizer_iterations"],
-            perturbationFactor=self.params["perturbationFactor"],
+            perturbationFactor=self.params["perturbation_factor"],
             annealing=self.params["annealing"],
             stateReturn=True,  # Required for state saving.
             use_symbolic_utils=self.params["use_symbolic_utils"],
             progress=self.params["progress"],
             timeout_in_seconds=self.params["timeout_in_seconds"],
-            crossoverProbability=self.params["crossoverProbability"],
+            crossoverProbability=self.params["crossover_probability"],
             skip_mutation_failures=self.params["skip_mutation_failures"],
         )