# app.py — Hugging Face Space "sabssag/Latex_to_Python_T5-small" (commit d74b748)
# NOTE: the lines above this file in the original capture ("raw / history blame /
# 2.02 kB") were Hugging Face file-viewer chrome, not source code.
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
# Load the fine-tuned seq2seq model and its tokenizer from the Hugging Face Hub.
# NOTE: this runs at import time and will download the checkpoint on first use
# (subsequent runs read from the local HF cache).
model_repo_path = 'sabssag/Latex_to_Python_T5-small'
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
def generate_code(latex_expr, tokenizer, model, max_length=512):
    """Translate a LaTeX expression into Python code with a seq2seq model.

    Args:
        latex_expr: LaTeX source string to convert.
        tokenizer: Tokenizer paired with ``model`` (encode/decode).
        model: Seq2seq model exposing a ``generate`` method.
        max_length: Cap on both the encoded input and the generated output.

    Returns:
        The generated Python code as a plain string.
    """
    # Encode the LaTeX input as a batch-of-one tensor, truncating long inputs.
    input_ids = tokenizer.encode(
        latex_expr,
        return_tensors='pt',
        max_length=max_length,
        truncation=True,
    )
    # Inference only — no gradients needed. Beam search picks the best of 5 beams.
    with torch.no_grad():
        output_ids = model.generate(
            input_ids,
            max_length=max_length,
            num_beams=5,
            early_stopping=True,
        )
    # Turn the best beam's token ids back into text, dropping special tokens.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
# ---- Streamlit page layout ----
st.title("LaTeX to Python Code Generator")

# Persist the most recently submitted expression across Streamlit reruns.
if 'latex_expr' not in st.session_state:
    st.session_state.latex_expr = ""

# Input box, pre-filled with whatever was submitted last.
latex_input = st.text_area(
    "Enter the LaTeX Expression",
    value=st.session_state.latex_expr,
    height=150,
)

if st.button("Generate Code"):
    # Guard clause: nothing to do without an expression.
    if not latex_input:
        st.warning("Please enter a LaTeX expression to generate Python code.")
    else:
        # Remember the submission so the text area keeps it on the next rerun.
        st.session_state.latex_expr = latex_input
        with st.spinner("Generating Python Code..."):
            try:
                code_text = generate_code(
                    latex_expr=st.session_state.latex_expr,
                    tokenizer=tokenizer,
                    model=model,
                )
            except Exception as e:
                st.error(f"Error during code generation: {e}")
            else:
                # Show the model output with Python syntax highlighting.
                st.subheader("Generated Python Code")
                st.code(code_text, language='python')