JJFrancisco committed on
Commit edcca00
1 Parent(s): 2173dd0

Update app.py

Files changed (1)
  app.py +32 -38
app.py CHANGED
@@ -1,39 +1,33 @@
- import requests
  import gradio as gr
- from dotenv import load_dotenv
- import os
-
- # Load environment variables from .env file
- load_dotenv()
- HF_TOKEN = os.getenv("HF_TOKEN")
-
- model_name = "Helsinki-NLP/opus-mt-en-fr"
- API_URL = f"https://api-inference.huggingface.co/models/{model_name}"
- headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-
-
- def query(payload):
-     # HTTP POST Request
-     response = requests.post(API_URL, headers=headers, json=payload)
-     return response.json()
-
-
- def translate(input_text):
-     # API Request:
-     response = query({
-         "inputs": input_text,
-         "options": {"wait_for_model": True}
-     })
-
-     translation = response[0]["translation_text"]
-
-     return translation
-
-
- translator = gr.Interface(fn=translate,
-                           inputs=[gr.Textbox(label="Input Text", placeholder="Input Text To Be Translated")],
-                           outputs=gr.Textbox(label="Translation"),
-                           title="tTranslatorR-Opus"
-                           )
-
- translator.launch()
 
 
  import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
+ import torch
+
+ # this model was loaded from https://hf.co/models
+ model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
+ tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M")
+ device = 0 if torch.cuda.is_available() else -1
+ LANGS = ["ace_Arab", "eng_Latn", "fra_Latn", "spa_Latn"]
+
+ def translate(text, src_lang, tgt_lang):
+     """
+     Translate the text from source lang to target lang
+     """
+     translation_pipeline = pipeline("translation", model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang, max_length=400, device=device)
+     result = translation_pipeline(text)
+     return result[0]['translation_text']
+
+ demo = gr.Interface(
+     fn=translate,
+     inputs=[
+         gr.components.Textbox(label="Text"),
+         gr.components.Dropdown(label="Source Language", choices=LANGS),
+         gr.components.Dropdown(label="Target Language", choices=LANGS),
+     ],
+     outputs=["text"],
+     examples=[["Building a translation demo with Gradio is so easy!", "eng_Latn", "spa_Latn"]],
+     cache_examples=False,
+     title="Translation Demo",
+     description="This demo is a simplified version of the original [NLLB-Translator](https://huggingface.co/spaces/Narrativaai/NLLB-Translator) space"
+ )
+
+ demo.launch()
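
For reference, a minimal sketch of how the new translate() function could be exercised directly, assuming the Space's dependencies (gradio, transformers, torch) are installed. The sample sentence and language codes below are illustrative only and are not part of the commit:

# Hypothetical direct call, bypassing the Gradio UI (not part of the commit):
text_es = translate("Building a translation demo with Gradio is so easy!", "eng_Latn", "spa_Latn")
print(text_es)  # expected: a Spanish translation produced by facebook/nllb-200-distilled-600M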