BounharAbdelaziz commited on
Commit
2139159
β€’
1 Parent(s): 495ce27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +36 -16
app.py CHANGED
@@ -1,28 +1,48 @@
1
  import gradio as gr
2
- import os
3
- import requests
4
  import time
 
5
 
6
# Hosted Inference API endpoint for the 3.3B Terjman-Supreme model.
API_URL = "https://api-inference.huggingface.co/models/atlasia/Terjman-Supreme"
# NOTE(review): raises KeyError at import time when HF_TOKEN is unset —
# intentional fail-fast for a Space, but confirm before reusing elsewhere.
HF_TOKEN = os.environ['HF_TOKEN']
# Bearer-token header sent with every Inference API request.
headers = {"Authorization": "Bearer "+ HF_TOKEN}
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
def respond(english_text):
    """Translate English text to Moroccan Darija via the hosted Inference API.

    Polls the endpoint every 5 seconds while the model is still loading
    (the API reports this with an ``estimated_time`` field); returns the
    translation on success or an error string on any other reply.
    """
    body = {"inputs": english_text}

    while True:
        reply = requests.post(API_URL, headers=headers, json=body).json()

        # Success: the API answers with a list of generation dicts.
        if isinstance(reply, list) and 'generated_text' in reply[0]:
            return reply[0]['generated_text']

        # Anything without an estimated_time marker is a real error.
        if 'estimated_time' not in reply:
            return "An error occurred, please refresh the webpage: " + str(reply)

        time.sleep(5)  # model still warming up; poll again shortly
24
 
25
- app = gr.Interface(fn=respond, inputs="text", outputs="text", title="Terjman-Supreme πŸ‘¨β€πŸ’»πŸ€―", description="Translate English text to Moroccan Darija using our top and biggest model (3.3B) πŸ€—")
 
 
 
 
 
 
 
26
 
27
# Start the Gradio server only when executed directly (not on import).
if __name__ == "__main__":
    app.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
 
3
  import time
4
+ import os
5
 
 
6
# NOTE(review): HF_TOKEN/headers served the old Inference-API client; the
# local transformers pipeline below makes no HTTP requests, so both appear
# unused in this version — confirm before removing. Reading
# os.environ['HF_TOKEN'] still fails fast (KeyError) when the var is unset.
HF_TOKEN = os.environ['HF_TOKEN']
headers = {"Authorization": "Bearer "+ HF_TOKEN}
8
 
9
+ # Define the model and tokenizer loading
10
def load_model_and_tokenizer():
    """Build the EN→Darija translation pipeline for atlasia/Terjman-Large.

    Returns the pipeline on success, or None when any part of loading
    fails — callers are expected to check for None and report the failure
    instead of crashing at import time.
    """
    repo_id = "atlasia/Terjman-Large"
    try:
        tok = AutoTokenizer.from_pretrained(repo_id)
        mdl = AutoModelForSeq2SeqLM.from_pretrained(repo_id)
        return pipeline("translation_en_to_XX", model=mdl, tokenizer=tok)
    except Exception as err:
        # Best-effort: log and signal failure rather than raising.
        print(f"Error loading model and tokenizer: {err}")
        return None
19
+
20
# Load the translation pipeline once at import time; None when loading
# failed (respond() checks for this and reports the failure to the user).
model_pipeline = load_model_and_tokenizer()
22
+
23
+ # Define the response function
24
def respond(english_text):
    """Translate ``english_text`` (English) to Moroccan Darija.

    Returns the translated string, or a human-readable error message when
    the model failed to load or translation raises.
    """
    if model_pipeline is None:
        return "Model failed to load."

    # The "estimated_time" retry branch is a leftover from the removed
    # Inference-API client; a local pipeline never raises it. Keep the
    # retry for safety but bound it — the original `while True` had no
    # exit path if such an error kept recurring, spinning forever.
    max_retries = 12
    for _ in range(max_retries):
        try:
            result = model_pipeline(english_text)
            return result[0]['translation_text']
        except Exception as e:
            if "estimated_time" in str(e):
                time.sleep(5)  # model still warming up; retry shortly
            else:
                return f"An error occurred: {e}"
    return "An error occurred: translation kept failing after repeated retries."
37
 
38
+ # Create the Gradio interface
39
# Gradio wiring: one English text box in, one Darija text box out.
_title = "Terjman-Large 👨‍💻🥰"
_description = "Translate English text to Moroccan Darija using our Large model (240M) 🤗"
app = gr.Interface(fn=respond, inputs="text", outputs="text", title=_title, description=_description)
46
 
47
# Start the Gradio server only when executed directly (not on import).
if __name__ == "__main__":
    app.launch()