# NOTE: Hugging Face Spaces page metadata (status, file size, commit hashes)
# was accidentally captured into this file by a web scrape; commented out so
# the module remains valid Python.
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import time
import os
# Read the Hugging Face token from the environment; raises KeyError at import
# time if HF_TOKEN is unset (fail-fast on misconfigured deployments).
HF_TOKEN = os.environ['HF_TOKEN']
# NOTE(review): `headers` is never used in this file — it looks like a leftover
# from an Inference-API (HTTP) version of this app; confirm before removing.
headers = {"Authorization": "Bearer "+ HF_TOKEN}
# Define the model and tokenizer loading
def load_model_and_tokenizer():
    """Build and return the English->Darija translation pipeline.

    Loads the atlasia/Terjman-Large tokenizer and seq2seq model, then wraps
    them in a `transformers` translation pipeline. Returns ``None`` on any
    failure so the caller can degrade gracefully instead of crashing.
    """
    checkpoint = "atlasia/Terjman-Large"
    try:
        tokenizer = AutoTokenizer.from_pretrained(checkpoint)
        model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
        return pipeline("translation_en_to_XX", model=model, tokenizer=tokenizer)
    except Exception as e:
        # Download/load can fail for many reasons (network, disk, bad weights);
        # report and signal failure with None.
        print(f"Error loading model and tokenizer: {e}")
        return None
# Load the model and tokenizer once at startup; `model_pipeline` is None when
# loading failed, and respond() checks for that case before translating.
model_pipeline = load_model_and_tokenizer()
# Define the response function
def respond(english_text):
    """Translate *english_text* to Moroccan Darija.

    Returns the translated string on success, or a human-readable error
    message when the model failed to load, translation raised, or the model
    never became ready.
    """
    if model_pipeline is None:
        return "Model failed to load."
    # Bound the retry loop: the original `while True` could spin forever if
    # the "estimated_time" (model warming up) error persisted. 12 attempts at
    # 5 s apart gives the model ~1 minute to come up.
    max_retries = 12
    for _ in range(max_retries):
        try:
            result = model_pipeline(english_text)
            return result[0]['translation_text']
        except Exception as e:
            if "estimated_time" in str(e):
                time.sleep(5)  # Wait for 5 seconds before retrying
            else:
                return f"An error occurred: {e}"
    return "An error occurred: the model did not become ready in time."
# Create the Gradio interface
_interface_config = dict(
    fn=respond,
    inputs="text",
    outputs="text",
    title="Terjman-Large 👨💻🥰",
    description="Translate English text to Moroccan Darija using our Large model (240M) 🤗",
)
app = gr.Interface(**_interface_config)

# Launch only when executed as a script, not when imported.
if __name__ == "__main__":
    app.launch()