# Hugging Face Space: Terjman-Large — English → Moroccan Darija translator.
# (Header reconstructed; the original lines were Spaces page-scrape residue:
# "Spaces: / Runtime error / Runtime error".)
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import time
import os

# Read the Hugging Face API token from the environment. Indexing (rather than
# .get) fails fast with KeyError on a misconfigured deployment.
HF_TOKEN = os.environ['HF_TOKEN']
# Bearer-auth header for Hugging Face API requests.
# NOTE(review): `headers` is never used in this file — presumably kept for
# direct Inference API calls; confirm before removing.
headers = {"Authorization": "Bearer " + HF_TOKEN}
# Define the model and tokenizer loading
def load_model_and_tokenizer():
    """Load the Terjman-Large seq2seq model and build a translation pipeline.

    Returns:
        A transformers translation pipeline (English -> target language), or
        ``None`` if loading failed (e.g. download/network errors), so the UI
        can degrade gracefully instead of crashing at startup.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained("atlasia/Terjman-Large")
        model = AutoModelForSeq2SeqLM.from_pretrained("atlasia/Terjman-Large")
        return pipeline("translation_en_to_XX", model=model, tokenizer=tokenizer)
    except Exception as e:
        # Broad catch is acceptable at this startup boundary: log the cause
        # and signal failure with None rather than raising.
        print(f"Error loading model and tokenizer: {e}")
        return None
# Load the model and tokenizer once at startup so every request reuses the
# same pipeline instead of reloading weights per call.
model_pipeline = load_model_and_tokenizer()
# Define the response function
def respond(english_text, max_retries=12, retry_delay=5):
    """Translate English text to Moroccan Darija via the loaded pipeline.

    Args:
        english_text: Source text in English.
        max_retries: Maximum attempts while the backend is still warming up
            (the original code retried forever; this bounds it).
        retry_delay: Seconds to sleep between retries.

    Returns:
        The translated text, or a human-readable error message string.
    """
    if model_pipeline is None:
        return "Model failed to load."
    for _ in range(max_retries):
        try:
            result = model_pipeline(english_text)
            return result[0]['translation_text']
        except Exception as e:
            # The hosted backend reports "estimated_time" in the error while
            # the model is still loading — wait and retry in that case only.
            if "estimated_time" in str(e):
                time.sleep(retry_delay)  # Wait before retrying
            else:
                return f"An error occurred: {e}"
    return "An error occurred: model did not become ready in time."
# Create the Gradio interface: one text box in, translated text out.
# NOTE(review): the emoji below were mojibake in the original source
# ("π¨βπ»π₯°" / "π€") and have been reconstructed — confirm against the
# deployed Space.
app = gr.Interface(
    fn=respond,
    inputs="text",
    outputs="text",
    title="Terjman-Large 👨‍💻🥰",
    description="Translate English text to Moroccan Darija using our Large model (240M) 🤗"
)
if __name__ == "__main__":
    # Launch the Gradio server only when run as a script, not on import.
    app.launch()