Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -2,15 +2,13 @@ import streamlit as st
 import torch
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
-hf_token = st.secrets["TOKEN"]
-
 # Load the fine-tuned model and tokenizer
 model_repo_path = 'sabssag/Code-T5'
 model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
 tokenizer = AutoTokenizer.from_pretrained(model_repo_path)
 
 # Function to generate Python code from LaTeX expression
-def
+def generate_code(latex_expression, max_length=256):
     inputs = tokenizer(f"Latex Expression: {latex_expression} Solution:", return_tensors="pt").to(model.device)
 
     # Generate the output
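The diff is truncated at the "# Generate the output" comment, so the rest of generate_code and the surrounding Streamlit UI are not visible in this commit view. Below is a minimal sketch of how the completed app.py might look, assuming a standard model.generate / tokenizer.decode decoding step and a simple st.text_area + st.button interface; the decoding arguments (num_beams, early_stopping) and the UI widgets are assumptions, not taken from the commit.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned model and tokenizer
model_repo_path = 'sabssag/Code-T5'
model = AutoModelForSeq2SeqLM.from_pretrained(model_repo_path)
tokenizer = AutoTokenizer.from_pretrained(model_repo_path)

# Function to generate Python code from a LaTeX expression
def generate_code(latex_expression, max_length=256):
    inputs = tokenizer(f"Latex Expression: {latex_expression} Solution:", return_tensors="pt").to(model.device)

    # Generate the output (decoding settings below are assumed, not from the diff)
    with torch.no_grad():
        outputs = model.generate(**inputs, max_length=max_length, num_beams=4, early_stopping=True)

    # Decode the generated token IDs back into text
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Assumed Streamlit interface around the function
st.title("LaTeX to Python Code Generator")
latex_expression = st.text_area("Enter a LaTeX expression:")
if st.button("Generate Code") and latex_expression:
    st.code(generate_code(latex_expression), language="python")

Note that the commit also drops the hf_token = st.secrets["TOKEN"] line, so the sketch above loads the public sabssag/Code-T5 checkpoint without an access token.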