import os

import gradio as gr
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Model selection; earlier candidates are kept for reference. Gated
# checkpoints require an HF token (see login below).
#model_name = "RickyDeSkywalker/TheoremLlama"
#model_name = "internlm/internlm2-math-plus-7b"
#model_name = "deepseek-ai/DeepSeek-Prover-V1.5-RL"
model_name = "unsloth/Llama-3.2-1B-Instruct"

device = "cuda" if torch.cuda.is_available() else "cpu"

# Log in only when a token is actually provided (required for gated checkpoints).
HF_TOKEN = os.environ.get("HF_TOKEN")
if HF_TOKEN:
    login(HF_TOKEN)

tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
# Load in float16; the default float32 roughly doubles memory use and can OOM.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map="auto",
).eval()

# Stop generation at EOS, Llama-3's end-of-turn token, or the reserved token
# that some of the candidate fine-tunes use as a stop marker.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    tokenizer.convert_tokens_to_ids("<|reserved_special_token_26|>"),
]


# Generate Lean 4 code from a natural-language theorem statement.
@torch.inference_mode()
def generate_lean4_code(prompt):
    chat = [
        {"role": "system", "content": "You are a Lean 4 expert who writes correct Lean 4 code from a natural-language mathematical theorem and its proof."},
        {"role": "user", "content": prompt},
    ]
    # add_generation_prompt=True appends the assistant header so the model
    # begins a new reply rather than continuing the user turn.
    input_ids = tokenizer.apply_chat_template(
        chat, add_generation_prompt=True, return_tensors="pt"
    ).to(device)
    results = model.generate(
        input_ids,
        max_new_tokens=1024,
        eos_token_id=terminators,
        do_sample=True,
        temperature=0.85,
        top_p=0.9,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    return tokenizer.decode(results[0][input_ids.shape[-1]:], skip_special_tokens=True)
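
# For quick local debugging the generator can be called directly, e.g.:
#   print(generate_lean4_code("Prove that the sum of two even numbers is even."))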

# Gradio interface
title = "Lean 4 Code Generation with Llama-3.2-1B-Instruct"
description = "Enter a natural-language theorem to generate Lean 4 code."

interface = gr.Interface(
    fn=generate_lean4_code,
    inputs=gr.Textbox(
        label="Prompt",
        placeholder="Prove that the sum of two even numbers is even.",
        lines=4,
    ),
    # gr.Code has no Lean syntax highlighting, so no language is passed.
    outputs=gr.Code(label="Generated Lean 4 Code"),
    title=title,
    description=description,
)

# Launch the app; queue() serializes requests so GPU generations don't overlap.
interface.queue().launch(ssr_mode=False)