Commit 9c27796 by MilesCranmer
Parent(s): 907cc73

Fix parallelization issue

Files changed (2):
  1. eureqa.jl (+1 -0)
  2. paralleleureqa.jl (+38 -16)
eureqa.jl CHANGED

@@ -420,6 +420,7 @@ function run(
         ncycles::Int,
         annealing::Bool=false,
        )::Population
+    pop = deepcopy(pop)
 
     allT = LinRange(1.0, 0.0, ncycles)
     for iT in 1:size(allT)[1]
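The eureqa.jl hunk is a defensive copy: `run` now rebinds `pop` to a private `deepcopy` before evolving it, so a population the caller passes in (and may reuse across iterations) is never mutated in place. A minimal sketch of the pattern, with illustrative names (`Pop`, `evolve!`, and `evolve` are not from the repository):

    # Sketch: copy-on-entry keeps a mutating routine from aliasing the caller's data.
    struct Pop
        members::Vector{Float64}
    end

    # In-place variant: mutates whatever it is handed.
    function evolve!(pop::Pop)
        pop.members .+= randn(length(pop.members))
        return pop
    end

    # Defensive variant, same shape as the commit's fix:
    # rebind to a private copy, then mutate freely.
    function evolve(pop::Pop)
        pop = deepcopy(pop)
        return evolve!(pop)
    end

    p = Pop(zeros(3))
    q = evolve(p)
    @assert p.members == zeros(3)  # the caller's population is untouched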
paralleleureqa.jl CHANGED

@@ -1,36 +1,58 @@
 using Distributed
-const nthreads = 10
+const nthreads = 8
 addprocs(nthreads)
+
 @everywhere include("eureqa.jl")
 
 println("Lets try to learn (x2^2 + cos(x3) + 5) using regularized evolution from scratch")
 const npop = 100
 const annealing = false
 const niterations = 10
-bestScore = Inf
+const ncyclesperiteration = 1000
 
 # Generate random initial populations
-allPops = [Population(npop, 3) for i=1:nthreads]
 
 # Create a mapping for running the algorithm on all processes
-@everywhere f = (pop,)->run(pop, 1000, annealing)
+@everywhere f = (pop,)->run(pop, ncyclesperiteration, annealing)
+
 
-# Do niterations cycles
-for i=1:niterations
+function update(allPops::Array{Population, 1}, bestScore::Float64, pool::AbstractWorkerPool)
     # Map it over our workers
-    global allPops = deepcopy(pmap(f, allPops))
+    #global allPops = deepcopy(pmap(f, deepcopy(allPops)))
+    curAllPops = deepcopy(pmap(f, allPops))
+    for j=1:nthreads
+        allPops[j] = curAllPops[j]
+    end
 
     # Get best 10 models for each processes
-    bestPops = Population(vcat(map(((pop,)->bestSubPop(pop).members), allPops)...))
-    for pop in bestPops
-        bestCurScoreIdx = argmin([pop.members[member].score for member=1:pop.n])
-        bestCurScore = pop.members[bestCurScoreIdx].score
-        if bestCurScore < bestScore
-            global bestScore = bestCurScore
-            println(bestScore, " is the score for ", stringTree(pop.members[bestCurScoreIdx].tree))
-        end
+    bestPops = Population([member for pop in allPops for member in bestSubPop(pop).members])
+    bestCurScoreIdx = argmin([bestPops.members[member].score for member=1:bestPops.n])
+    bestCurScore = bestPops.members[bestCurScoreIdx].score
+    if bestCurScore < bestScore
+        bestScore = bestCurScore
+        println(bestScore, " is the score for ", stringTree(bestPops.members[bestCurScoreIdx].tree))
+    end
+
+    # Migration
+    for j=1:nthreads
+        allPops[j].members[1:50] = deepcopy(bestPops.members[rand(1:bestPops.n, 50)])
     end
-    exit()
+    return allPops, bestScore
+end
+
+
+function runExperiment()
+    # Do niterations cycles
+    allPops = [Population(npop, 3) for j=1:nthreads]
+    bestScore = Inf
+    #pool = CachingPool(workers())
+    pool = WorkerPool(workers())
+    for i=1:niterations
+        allPops, bestScore = update(allPops, bestScore, pool)
+    end
+
+    return bestScore
 end
 
+runExperiment()
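After this commit, `update` pools the best sub-population from each process, tracks the best score, and migrates 50 randomly drawn elites back into every population, while `runExperiment` owns the loop state that previously lived in globals. Note that `update` accepts a `pool::AbstractWorkerPool` which `pmap(f, allPops)` does not yet pass (and the `CachingPool` line is commented out). If the pool were wired in, `Distributed.pmap` takes it as the second argument; a minimal sketch under that assumption (the `slow` function and worker count are illustrative, not from the repository):

    # Sketch: routing pmap through an explicit worker pool. A CachingPool
    # sends each closure to a worker once and reuses it across calls,
    # rather than reserializing it on every pmap.
    using Distributed
    addprocs(4)

    @everywhere slow(x) = sum(sin, 1:x)  # hypothetical stand-in for f

    pool = CachingPool(workers())
    results = pmap(slow, pool, fill(10_000, 8))  # pool as the 2nd argument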