himanishprak23's picture
Upload app.py
238ef35 verified
raw
history blame
1.15 kB
import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
# Hub IDs: the fine-tuned EN->HI translation model, and the base tokenizer
# it presumably shares its vocabulary with (Helsinki-NLP's Marian en-hi
# tokenizer) -- NOTE(review): confirm the fine-tune really used this tokenizer.
model_checkpoint = "himanishprak23/neural_machine_translation"
tokenizer_checkpoint = "Helsinki-NLP/opus-mt-en-hi"
# Load (downloading on first run, then from local cache) the tokenizer and the
# TensorFlow seq2seq model at import time, so requests don't pay the load cost.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
def translate_text(input_text):
    """Translate an English string to Hindi with the loaded seq2seq model.

    Args:
        input_text: English source text entered by the user.

    Returns:
        The Hindi translation decoded from the model's generated tokens,
        with special tokens (padding/BOS/EOS) stripped.
    """
    # Tokenize to TensorFlow tensors; truncate so the encoder input never
    # exceeds 128 tokens.
    encoded = tokenizer(input_text, return_tensors='tf', max_length=128, truncation=True)
    # Generate the target sequence, capped at 128 tokens to match the input limit.
    output_ids = model.generate(**encoded, max_length=128)
    # Decode the first (and only) sequence in the batch back to plain text.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
# Build the Gradio UI: a two-line text box for English input, a plain text
# box for the Hindi output.
english_input = gr.components.Textbox(
    lines=2,
    placeholder="Enter text to translate from English to Hindi...",
)
hindi_output = gr.components.Textbox()

iface = gr.Interface(
    fn=translate_text,
    inputs=english_input,
    outputs=hindi_output,
    title="English to Hindi Translator",
    description="Enter English text and get the Hindi translation.",
)

# Start the web server (blocking call).
iface.launch()