import json
import os
import time

import requests
from openai import OpenAI

from utils import make_lean_repl, send_tactic, send_command_icanon, send_command_zsh, get_errs


def get_tactics_interactive(goal, prev_file):
    print(f'output:<{goal}>')
    print(f'file context: <{prev_file}>')
    return [(input('give the next tactic to execute:'), 0)]

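
# NOTE (setup assumption, not stated in the original): get_tactics_llmstep below expects an
# llmstep suggestion server to already be listening on http://localhost:6000; see server.py
# in the llmstep repo for how to launch one.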

# the goal is to directly call the llmstep server.py
def get_tactics_llmstep(goal, prev_file):
    # this is the function the llmstep Lean tactic calls to interact with the server
    def suggest(host, tactic_state, prefix, context):
        data = {'tactic_state': tactic_state, 'prefix': prefix, 'context': context}
        response = json.loads(requests.post(host, json=data).content)
        return response['suggestions']  # modified to directly return the suggestion list

    HOST = 'localhost'
    PORT = '6000'
    default_host = f'http://{HOST}:{PORT}'
    suggestions = suggest(default_host, goal, '', prev_file)  # trying to match what the tactic sends
    return suggestions


# for benchmarking 'get_tactics' functions that suggest several possible next steps for a given
# proof state + optionally the file context.
def benchmark_nextstep(pwd, get_tactics, send_command, search_depth=3, repl_type='zsh'):
    lean_repl = make_lean_repl(repl_type=repl_type)

    # get the first command out of the way, which has a weird "expect" behavior using icanon mode
    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)

    num_proved = 0
    num_attempted = 0
    for prop_name in pwd:
        print(prop_name)
        #time.sleep(5)
        num_attempted += 1
        #if num_attempted < 30:
        #    continue

        # replay the file line by line so the theorem's `sorry` yields a proof state;
        # restart the REPL and retry from scratch if it times out or errors.
        successful_def = False
        while not successful_def:
            successful_def = True
            env = None
            all_lines = []
            for _loc, line in pwd[prop_name]:
                if line.strip() == 'import Mathlib':
                    outp, env = mathlib_out, mathlib_env
                else:
                    outp, env = send_command(lean_repl, line, env=env)
                if outp is None:
                    print('restarting repl')
                    successful_def = False
                    lean_repl.close()
                    lean_repl = make_lean_repl(repl_type=repl_type)
                    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
                    break
                all_lines.append(line)
                assert len(get_errs(outp)) == 0, str(outp.get('messages', []))

        proofState = int(outp['sorries'][0]['proofState'])
        goal = outp['sorries'][0]['goal']
        prev_lines = '\n'.join(all_lines)
        prev_lines = prev_lines.replace(':= by sorry', ':= by\n')

        # breadth-limited search: keep the top 3 suggestions per state, up to search_depth levels deep
        solution_tac_seq = None
        old_ps = [(goal, proofState, [])]
        new_ps = []
        found_proof = False
        for search_lvl in range(search_depth):
            if search_lvl > 0:
                print(f'search at level {search_lvl}')
            for (curr_goal, ps, tac_seq) in old_ps:
                next_tactics = get_tactics(curr_goal, prev_lines)
                for next_tactic, _scr in sorted(next_tactics, key=lambda p: -p[1])[:3]:
                    print('\n'.join(tac_seq + [next_tactic]))
                    outp, new_proofState = send_tactic(lean_repl, next_tactic, ps)
                    if outp is None:
                        continue  # i.e. timeout/error on tactic sending
                    #print(outp)
                    error_msgs = get_errs(outp)
                    if len(error_msgs) > 0:
                        # invalid next proof step. sometimes there are invalid intermediate
                        # states that lead to a successful proof, but for efficiency we enforce this.
                        continue
                    if len(outp['goals']) == 0 and len(error_msgs) == 0:
                        #print(outp)
                        found_proof = True
                        solution_tac_seq = tac_seq + [next_tactic]
                        break
                    new_ps.append(('\n'.join(outp['goals']), new_proofState, tac_seq + [next_tactic]))
                    #print(f'final output: {outp}')
                if found_proof:
                    break
            if found_proof:
                break
            old_ps = new_ps
            new_ps = []

        if found_proof:
            num_proved += 1
            nl = '\n'
            print(f'prop {prop_name} with goal <{goal}> solved by: <\n{nl.join([str(s) for s in solution_tac_seq])}\n>')
        else:
            print(f'failed to prove {prop_name}')
        print(f'proved {num_proved}/{num_attempted}')
        #exit()


def get_proof_gpt(theorem_defn, goal, context):
    #openai_api_key = os.environ['OPENAI_API_KEY']
    client = OpenAI()
    # decided I don't need the goal; it doesn't look very useful in most cases when the theorem
    # statement and context are given. Future work can confirm or invalidate this.
    encoded = f'<code>\n{context}\n</code>\n\n<theorem>\n{theorem_defn}\n</theorem>\n'
    return client.chat.completions.create(
        model=gpt_model,  # see main block
        messages=[
            {"role": "system", "content": "You are a Lean 4 expert tasked with completing proofs of program properties. You will be shown the relevant programs and definitions in <code>...</code> tags, the theorem to be proven in <theorem>...</theorem> tags. Please output your proof containing only Lean 4 proof code between <proof>...</proof> tags. The generated proof should never contain the word `sorry`. Here are some examples:"},
            {"role": "user", "content": """<code>
import Mathlib

inductive MyTree (α: Type) where
| leaf : MyTree α
| node : MyTree α → α → MyTree α → MyTree α

def tree_size : MyTree α → ℕ
| .leaf => 1
| .node l _x r => 1 + (tree_size l) + (tree_size r)

def balanced : MyTree α → Prop
| .leaf => true
| .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)
</code>

<theorem>
theorem balanced_tree_size_odd (t: MyTree α) (hb: balanced t): Odd (tree_size t) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
cases t with
| leaf => simp [tree_size]
| node p x q =>
  unfold tree_size
  unfold balanced at hb
  simp [hb.1]
</proof>"""},
            {"role": "user", "content": """<code>
import Mathlib

inductive MyTree (α: Type) where
| leaf : MyTree α
| node : MyTree α → α → MyTree α → MyTree α

def balanced : MyTree α → Prop
| .leaf => true
| .node l _x r => ((tree_size l) = (tree_size r)) ∧ (balanced l) ∧ (balanced r)

def swap_branches : MyTree α → MyTree α
| MyTree.leaf => MyTree.leaf
| MyTree.node p x q => MyTree.node q x p
</code>

<theorem>
theorem swap_preserves_balance (t: MyTree α) (hb: balanced t): balanced (swap_branches t) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
cases t with
| leaf => simp [swap_branches]
| node p x q =>
  simp [swap_branches, balanced] at hb ⊢
  split
  { simp [← hb.1] }
  { split; assumption }
</proof>"""},
            {"role": "user", "content": """<code>
import Mathlib

inductive PairList where
| empty : PairList
| node : Nat → Nat → PairList → PairList

def len_pairlist : PairList → Nat
| .empty => 0
| .node _n1 _n2 l => len_pairlist l + 2

lemma even_plus_two (x: Nat) (h: Even x): Even (x + 2) := by
  unfold Even at h
  rcases h with ⟨y, hy⟩
  use y + 1
  linarith [hy]
</code>

<theorem>
theorem len_pairlist_even (l: PairList): Even (len_pairlist l) := by
</theorem>"""},
            {"role": "assistant", "content": """<proof>
generalize hl: len_pairlist l = pl
induction pl using Nat.strong_induction_on generalizing l with
| h n ih =>
  cases l with
  | empty => simp [len_pairlist] at hl; simp [←hl];
  | node n1 n2 l2 =>
    unfold len_pairlist at hl
    simp [←hl]
    apply even_plus_two
    exact ih (len_pairlist l2) (by linarith [hl]) l2 (by rfl)
</proof>"""},
            {"role": "user", "content": encoded}]
    ).choices[0].message.content.replace('<proof>', '').replace('</proof>', '').strip()
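
# hypothetical convenience analogue of get_tactics_interactive above: a manual stand-in for
# get_proof_gpt (same signature), handy for exercising the full-proof benchmark below without
# an OpenAI key. Not part of the original pipeline.
def get_proof_interactive(theorem_defn, goal, context):
    print(f'file context: <{context}>')
    print(f'theorem: <{theorem_defn}>')
    print(f'goal: <{goal}>')
    return input('give a full Lean 4 proof of the theorem:')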

# for benchmarking full proof generation methods, where the input is the file context, theorem
# definition, and initial proof state, and the output is a full proof of the theorem.
def benchmark_full_proofgen(pwd, get_proof, send_command, num_gen=5, repl_type='icanon'):
    lean_repl = make_lean_repl(repl_type=repl_type)

    # get the first command out of the way, which has a weird "expect" behavior using icanon mode
    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)

    num_proved = 0
    num_attempted = 0
    for prop_name in pwd:
        print(prop_name)
        #time.sleep(5)
        num_attempted += 1
        #if num_attempted < 30:
        #    continue
        successful_def = False
        penult_env = None  # environment just before the theorem itself, used to check candidate proofs
        while not successful_def:
            successful_def = True
            env = None
            all_lines = []
            for _loc, line in pwd[prop_name]:
                penult_env = env
                if line.strip() == 'import Mathlib':
                    outp, env = mathlib_out, mathlib_env
                else:
                    outp, env = send_command(lean_repl, line, env=env)
                if outp is None:
                    print('restarting repl')
                    successful_def = False
                    lean_repl.close()
                    lean_repl = make_lean_repl(repl_type=repl_type)
                    mathlib_out, mathlib_env = send_command(lean_repl, 'import Mathlib', env=None, first=True)
                    break
                all_lines.append(line)
                assert len(get_errs(outp)) == 0, str(outp.get('messages', []))

        context = '\n\n'.join([line for _loc, line in pwd[prop_name][:-1]])
        theorem_defn = pwd[prop_name][-1][1].replace('by sorry', 'by\n')  # give the llm a clean place to begin generating
        goal = outp['sorries'][0]['goal']

        found_proof = False
        for gen_i in range(num_gen):
            print(f'generating proof {gen_i}')
            suggested_proof = get_proof(theorem_defn, goal, context)
            full_thm = theorem_defn + suggested_proof
            print('suggested proof: ' + full_thm)
            outp, _result_env = send_command(lean_repl, full_thm, env=penult_env)
            if outp is not None and len(get_errs(outp)) == 0:  # outp is None on timeout/REPL error
                num_proved += 1
                found_proof = True
                print('successful proof!')
                print(f'prop {prop_name} with goal <{goal}> solved by: <\n{suggested_proof}\n>')
                break
        if not found_proof:
            print(f'failed to prove {prop_name}')
        print(f'proved {num_proved}/{num_attempted}')


def parse_benchmark_output(fname, pwd, loc2comm):
    with open(fname, 'r') as f:
        lines = f.readlines()
    failures = set()
    for line in lines:
        if 'failed to prove' in line:
            failures.add(line.strip().split(' ')[-1])

    by_score = {i: [0, 0] for i in range(1, 6)}  # score -> [num proved, num failed]
    by_custom = [0, 0]
    custom_proved = []
    all_proved = []
    results = {}
    for i in range(1, 87):
        key = f'prop_{i}' if i >= 10 else f'prop_0{i}'
        if key not in pwd:
            continue
        loc = [loc[0] for loc, line in pwd[key] if key in line][0]
        line_num = int(loc.strip().split(':')[1])
        comm = loc2comm[line_num - 1]
        print(comm)
        score = int(comm.split(':')[1].strip().split('/')[0].strip())
        is_custom = 'custom' in comm
        results[key] = {'score': score, 'result': key not in failures, 'custom': is_custom}
        if key in failures:
            by_score[score][1] += 1
            if is_custom:
                by_custom[1] += 1
            print(f'could not prove {key}')
        else:
            by_score[score][0] += 1
            if is_custom:
                by_custom[0] += 1
                custom_proved.append(key)
            all_proved.append((score, key))
            print(f'proved {key}')

    print('by score', by_score)
    print('by custom', by_custom)
    print('custom proved', custom_proved)
    print('all proved 5', [name for score, name in all_proved if score == 5])
    print(f'total: {len(all_proved)}/{len(pwd)}')
    return results, by_score


def parse_benchmark_input(fname):
    with open(fname, 'r') as f:
        lines = f.readlines()
    jl = [json.loads(line.strip()) for line in lines if len(line.strip()) > 0]
    # dummy locations via enumerate, since they're unused during baseline calculation
    return {dct['full_name']: list(enumerate(dct['deps'].split('\n\n') + [dct['prop_defn']]))
            for dct in jl}
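
# illustrative sketch of one line of the expected JSONL input (field values invented for
# illustration; only the 'full_name', 'deps', and 'prop_defn' keys are read above):
# {"full_name": "prop_01",
#  "deps": "import Mathlib\n\ndef double (n: Nat): Nat := 2 * n",
#  "prop_defn": "theorem prop_01 (n: Nat): Even (double n) := by sorry"}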

if __name__ == '__main__':
    # if any single command is >1024 characters, use_icanon=True is necessary.
    # unfortunately there are still some bugs where a theorem is actually proven,
    # but the messages from the Lean REPL indicate an error when using this mode.
    use_icanon = True
    #bench_type = 'fullproof'
    bench_type = 'nextstep'
    gpt_model = 'gpt-4-turbo'

    if use_icanon:
        send_command = send_command_icanon
        repl_type = 'icanon'
    else:
        send_command = send_command_zsh
        repl_type = 'zsh'

    #benchmark_nextstep(pwd, get_tactics_interactive, send_command, repl_type=repl_type)  # get_tactics_interactive for testing
    pwd = parse_benchmark_input('codeprops_bench_lemmas.jsonl')
    if bench_type == 'nextstep':
        benchmark_nextstep(pwd, get_tactics_llmstep, send_command, repl_type=repl_type)  # get_tactics_llmstep for benchmarking
    elif bench_type == 'fullproof':
        benchmark_full_proofgen(pwd, get_proof_gpt, send_command, repl_type=repl_type)
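    # usage note (assumption about the environment, not stated in the original): the 'fullproof'
    # path constructs OpenAI() with no arguments, so the OPENAI_API_KEY environment variable
    # must be set before running this script.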