import os
import torch
import gradio as gr
import time
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

def load_models():
    """Load the NLLB-200 distilled 1.3B model and its tokenizer."""
    model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-1.3B")
    tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-1.3B")
    return model, tokenizer
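
# from_pretrained() downloads the checkpoint on the first run and then reuses the
# local Hugging Face cache, so later startups are much faster.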

def translation(source, target, text):
    """Translate text from the source to the target language with an NLLB translation pipeline."""
    start_time = time.time()
    translator = pipeline('translation', model=model, tokenizer=tokenizer, src_lang=source, tgt_lang=target)
    output = translator(text, max_length=400)
    end_time = time.time()
    output = output[0]['translation_text']
    print(f"Translation took {end_time - start_time:.2f} seconds")
    return output
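
# Example call (once model and tokenizer have been loaded):
#   translation("fra_Latn", "fuv_Latn", "Bonjour tout le monde.")
# Note: a new pipeline is built on every request. For heavier traffic, the pipeline could be
# created once and reused, and device=0 passed to pipeline() to run on a GPU if one is available.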

if __name__ == '__main__':
    print('\tinit models')
    # model and tokenizer become module-level globals here, so translation() can use them.
    model, tokenizer = load_models()

    # Define the Gradio demo.
    # Language codes follow the FLORES-200 scheme used by NLLB:
    # eng_Latn = English, fuv_Latn = Nigerian Fulfulde, fra_Latn = French, arb_Arab = Modern Standard Arabic.
    lang_codes = ["eng_Latn", "fuv_Latn", "fra_Latn", "arb_Arab"]
    # inputs = [gr.inputs.Radio(['nllb-distilled-600M', 'nllb-1.3B', 'nllb-distilled-1.3B'], label='NLLB Model'),
    inputs = [gr.inputs.Dropdown(lang_codes, default='fra_Latn', label='Source'),
              gr.inputs.Dropdown(lang_codes, default='fuv_Latn', label='Target'),
              gr.inputs.Textbox(lines=5, label="Input text"),
              ]

    title = "Fulfulde translator"
    demo_status = "Demo is running on CPU"
    description = f"Fulfulde to French, English or Arabic and vice versa translation demo using NLLB. {demo_status}."
    examples = [
        ['fra_Latn', 'fuv_Latn', 'La traduction est une tâche facile.']
    ]

    # Output component for the translated text.
    outputs = gr.outputs.Textbox(label="Output text")

    gr.Interface(translation,
                 inputs,
                 outputs,
                 title=title,
                 description=description,
                 examples=examples,
                 examples_per_page=50,
                 ).launch()
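
# To run the demo locally (assuming these dependencies match what the Space uses):
#   pip install torch transformers sentencepiece gradio
#   python app.py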