MilesCranmer committed on
Commit
211c6a7
1 Parent(s): 0d82add

Force all operators to be fast versions

Browse files
Files changed (4) hide show
  1. TODO.md +1 -0
  2. julia/operators.jl +20 -9
  3. julia/sr.jl +5 -4
  4. pysr/sr.py +2 -2
TODO.md CHANGED
@@ -78,6 +78,7 @@
78
 
79
  ## Algorithmic performance ideas:
80
 
 
81
  - [ ] Idea: use gradient of equation with respect to each operator (perhaps simply add to each operator) to tell which part is the most "sensitive" to changes. Then, perhaps insert/delete/mutate on that part of the tree?
82
  - [ ] Start populations staggered; so that there is more frequent printing (and pops that start a bit later get hall of fame already)?
83
  - [ ] Consider adding mutation for constant<->variable
 
78
 
79
  ## Algorithmic performance ideas:
80
 
81
+ - [ ] Use package compiler and compile sr.jl into a standalone binary that can be used by pysr.
82
  - [ ] Idea: use gradient of equation with respect to each operator (perhaps simply add to each operator) to tell which part is the most "sensitive" to changes. Then, perhaps insert/delete/mutate on that part of the tree?
83
  - [ ] Start populations staggered; so that there is more frequent printing (and pops that start a bit later get hall of fame already)?
84
  - [ ] Consider adding mutation for constant<->variable
julia/operators.jl CHANGED
@@ -1,15 +1,26 @@
1
  import SpecialFunctions: gamma, lgamma, erf, erfc, beta
2
 
 
 
 
 
 
 
 
 
 
 
 
3
  # Define allowed operators. Any julia operator can also be used.
4
- plus(x::Float32, y::Float32)::Float32 = x+y #Do not change the name of this operator.
5
- mult(x::Float32, y::Float32)::Float32 = x*y #Do not change the name of this operator.
6
- pow(x::Float32, y::Float32)::Float32 = sign(x)*abs(x)^y
7
- div(x::Float32, y::Float32)::Float32 = x/y
8
- logm(x::Float32)::Float32 = log(abs(x) + 1f-8)
9
- logm2(x::Float32)::Float32 = log2(abs(x) + 1f-8)
10
- logm10(x::Float32)::Float32 = log10(abs(x) + 1f-8)
11
- sqrtm(x::Float32)::Float32 = sqrt(abs(x))
12
- neg(x::Float32)::Float32 = -x
13
 
14
  function greater(x::Float32, y::Float32)::Float32
15
  if x > y
 
1
  import SpecialFunctions: gamma, lgamma, erf, erfc, beta
2
 
3
+
4
+ import Base.FastMath: sqrt_llvm_fast, neg_float_fast,
5
+ add_float_fast, sub_float_fast, mul_float_fast, div_float_fast, rem_float_fast,
6
+ eq_float_fast, ne_float_fast, lt_float_fast, le_float_fast,
7
+ sign_fast, abs_fast, log_fast, log2_fast, log10_fast, sqrt_fast
8
+
9
+ # Implicitly defined:
10
+ #binary: mod
11
+ #unary: exp, abs, log1p, sin, cos, tan, sinh, cosh, tanh, asin, acos, atan, asinh, acosh, atanh, erf, erfc, gamma, relu, round, floor, ceil, sign.
12
+
13
+ # Use some fast operators from https://github.com/JuliaLang/julia/blob/81597635c4ad1e8c2e1c5753fda4ec0e7397543f/base/fastmath.jl
14
  # Define allowed operators. Any julia operator can also be used.
15
+ plus(x::Float32, y::Float32)::Float32 = add_float_fast(x, y) #Do not change the name of this operator.
16
+ mult(x::Float32, y::Float32)::Float32 = mul_float_fast(x, y) #Do not change the name of this operator.
17
+ pow(x::Float32, y::Float32)::Float32 = sign_fast(x)*ccall(("powf",libm), Float32, (Float32,Float32), abs_fast(x), y)
18
+ div(x::Float32, y::Float32)::Float32 = div_float_fast(x, y)
19
+ logm(x::Float32)::Float32 = log_fast(abs_fast(x) + 1f-8)
20
+ logm2(x::Float32)::Float32 = log2_fast(abs_fast(x) + 1f-8)
21
+ logm10(x::Float32)::Float32 = log10_fast(abs_fast(x) + 1f-8)
22
+ sqrtm(x::Float32)::Float32 = sqrt_fast(abs_fast(x))
23
+ neg(x::Float32)::Float32 = neg_float_fast(x)
24
 
25
  function greater(x::Float32, y::Float32)::Float32
26
  if x > y
julia/sr.jl CHANGED
@@ -282,10 +282,10 @@ function evalTreeArray(tree::Node, cX::Array{Float32, 2})::Union{Array{Float32,
282
  return nothing
283
  end
284
  op = unaops[tree.op]
285
- @fastmath @inbounds @simd for i=1:clen
286
  cumulator[i] = op(cumulator[i])
287
  end
288
- @inbounds @simd for i=1:clen
289
  if isinf(cumulator[i]) || isnan(cumulator[i])
290
  return nothing
291
  end
@@ -302,10 +302,10 @@ function evalTreeArray(tree::Node, cX::Array{Float32, 2})::Union{Array{Float32,
302
  return nothing
303
  end
304
 
305
- @fastmath @inbounds @simd for i=1:clen
306
  cumulator[i] = op(cumulator[i], array2[i])
307
  end
308
- @inbounds @simd for i=1:clen
309
  if isinf(cumulator[i]) || isnan(cumulator[i])
310
  return nothing
311
  end
@@ -1032,6 +1032,7 @@ function fullRun(niterations::Integer;
1032
  end
1033
  if shouldOptimizeConstants
1034
  #pass #(We already calculate full scores in the optimizer)
 
1035
  else
1036
  tmp_pop = finalizeScores(tmp_pop)
1037
  end
 
282
  return nothing
283
  end
284
  op = unaops[tree.op]
285
+ @inbounds @simd for i=1:clen
286
  cumulator[i] = op(cumulator[i])
287
  end
288
+ @inbounds for i=1:clen
289
  if isinf(cumulator[i]) || isnan(cumulator[i])
290
  return nothing
291
  end
 
302
  return nothing
303
  end
304
 
305
+ @inbounds @simd for i=1:clen
306
  cumulator[i] = op(cumulator[i], array2[i])
307
  end
308
+ @inbounds for i=1:clen
309
  if isinf(cumulator[i]) || isnan(cumulator[i])
310
  return nothing
311
  end
 
1032
  end
1033
  if shouldOptimizeConstants
1034
  #pass #(We already calculate full scores in the optimizer)
1035
+ #TODO - not correct. only randomly calculate!
1036
  else
1037
  tmp_pop = finalizeScores(tmp_pop)
1038
  end
pysr/sr.py CHANGED
@@ -241,8 +241,8 @@ def pysr(X=None, y=None, weights=None,
241
  op_list[i] = function_name
242
 
243
  def_hyperparams += f"""include("{pkg_directory}/operators.jl")
244
- const binops = {'[' + ', '.join(binary_operators) + ']'}
245
- const unaops = {'[' + ', '.join(unary_operators) + ']'}
246
  const ns=10;
247
  const parsimony = {parsimony:f}f0
248
  const alpha = {alpha:f}f0
 
241
  op_list[i] = function_name
242
 
243
  def_hyperparams += f"""include("{pkg_directory}/operators.jl")
244
+ const binops = @fastmath {'[' + ', '.join(binary_operators) + ']'}
245
+ const unaops = @fastmath {'[' + ', '.join(unary_operators) + ']'}
246
  const ns=10;
247
  const parsimony = {parsimony:f}f0
248
  const alpha = {alpha:f}f0