evanlohn committed
Commit 1dc7ce1 · Parent(s): 9caccca

correct file context
Files changed:
- README.md +3 -1
- baseline.py +2 -2
README.md
CHANGED

@@ -2,11 +2,13 @@
 license: apache-2.0
 ---
 First install [Lean 4](https://leanprover-community.github.io/get_started.html). Then clone this repo:
+
 `git clone --recurse-submodules https://huggingface.co/datasets/elohn/miniCodeProps`
+
 The outer LeanSrc folder is a [Lean Project](https://leanprover-community.github.io/install/project.html). You can open that folder directly in VSCode and check that the proofs in `LeanSrc/Sorts.lean` type check after following the instructions for working on an existing lean project in the Lean 4 documentation.
 The main miniCodeProps folder handles extracting the benchmark and calculating baselines. If anything fails when building Lean or running `lake exe cache get` from LeanSrc, the [Zulip Chat](https://leanprover.zulipchat.com/) is the best resource for troubleshooting.
 
-After cloning the repo, you will need to install [Lean REPL](https://github.com/leanprover-community/repl). By default, our scripts expect the `repl` folder to be directly inside the miniCodeProps folder. run
+After cloning the repo, you will need to install [Lean REPL](https://github.com/leanprover-community/repl). By default, our scripts expect the `repl` folder to be directly inside the miniCodeProps folder. run `lake build` from within the `repl` folder.
 
 The `extract.py` script is used only to create the json-formatted benchmark.
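The changed README line completes an instruction that previously broke off after "run": Lean REPL is built by running `lake build` inside the `repl` folder. To illustrate the layout the scripts assume, here is a minimal sketch of starting the REPL from the miniCodeProps folder. This is not code from the repo; it assumes the newline-delimited JSON protocol documented in the Lean REPL README.

```python
import json
import pathlib
import subprocess

# Assumed layout from the README: repl/ cloned and built with `lake build`,
# sitting directly inside the miniCodeProps folder.
repl_dir = pathlib.Path(__file__).resolve().parent / "repl"

proc = subprocess.Popen(
    ["lake", "exe", "repl"],
    cwd=repl_dir,
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,
)

# Lean REPL accepts JSON commands terminated by a blank line and replies
# with JSON (response framing is an assumption based on the repl README).
proc.stdin.write(json.dumps({"cmd": "theorem two : 1 + 1 = 2 := by rfl"}) + "\n\n")
proc.stdin.flush()
print(proc.stdout.readline().strip())  # expect something like {"env": 0}
```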
baseline.py
CHANGED

@@ -76,7 +76,7 @@ def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, repl_type
         if search_lvl > 0:
             print(f'search at level {search_lvl}')
         for (curr_goal, ps, tac_seq) in old_ps:
-            next_tactics = get_tactics(curr_goal, prev_lines)
+            next_tactics = get_tactics(curr_goal, prev_lines + '\n'.join(tac_seq))
             for next_tactic, _scr in sorted(next_tactics, key=lambda p: -p[1])[:3]:
                 print('\n'.join(tac_seq + [next_tactic]))
                 outp, new_proofState = send_tactic(lean_repl, next_tactic, ps)
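This hunk is the "correct file context" fix named in the commit message. At search levels beyond the root, a candidate proof state has already had the tactics in `tac_seq` applied, but the generator was still prompted with only the file prefix `prev_lines`; appending the partial tactic script lets it condition on the proof so far. A stand-alone sketch of the change follows (the helper name and example strings are hypothetical, not from baseline.py):

```python
# Hypothetical helper isolating the fixed context construction.
def build_context(prev_lines: str, tac_seq: list[str]) -> str:
    # After the fix, the tactic generator sees the file prefix plus the
    # tactics applied so far, so distinct partial proofs of the same theorem
    # no longer present identical contexts.
    return prev_lines + '\n'.join(tac_seq)

# Illustrative only: a partial induction proof two tactics deep.
ctx = build_context(
    "theorem rev_rev (xs : List Nat) : xs.reverse.reverse = xs := by\n",
    ["induction xs with", "| nil => rfl"],
)
print(ctx)
```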
@@ -340,7 +340,7 @@ if __name__ == '__main__':
 
     #benchmark_nextstep(pwd, get_tactics_interactive, send_command, repl_type=repl_type) # get_tactics_interactive for testing
 
-    pwd = parse_benchmark_input('
+    pwd = parse_benchmark_input('codeprops_bench_sorts.jsonl')
 
     if bench_type == 'nextstep':
         benchmark_nextstep(pwd, get_tactics_llmstep, send_command, repl_type=repl_type) # get_tactics_llmstep for benchmarking