File size: 1,150 Bytes
238ef35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
import gradio as gr
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

# Define the model repository and tokenizer checkpoint.
# NOTE(review): the model weights come from a custom fine-tuned repo, while the
# tokenizer is the base Helsinki-NLP en->hi one — presumably the fine-tune kept
# the original vocabulary; verify the two are actually compatible.
model_checkpoint = "himanishprak23/neural_machine_translation"
tokenizer_checkpoint = "Helsinki-NLP/opus-mt-en-hi"

# Load the tokenizer from Helsinki-NLP and model from Hugging Face repository.
# Both calls hit the network/cache at import time; `tokenizer` and `model` are
# module-level globals read by translate_text() below.
tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)

def translate_text(input_text, max_length=128):
    """Translate English text to Hindi using the module-level model.

    Args:
        input_text: English source string to translate.
        max_length: Token budget applied both to the (truncated) input
            encoding and to generation. Defaults to 128, preserving the
            original behavior.

    Returns:
        The decoded Hindi translation as a plain string (special tokens
        stripped).
    """
    # Tokenize to TF tensors; truncate so over-long inputs cannot exceed
    # the model's supported sequence length.
    tokenized_input = tokenizer(
        input_text, return_tensors="tf", max_length=max_length, truncation=True
    )
    # Greedy/default generation; cap output length with the same budget.
    generated_tokens = model.generate(**tokenized_input, max_length=max_length)
    # Single input -> single output sequence; decode the first (only) one.
    return tokenizer.decode(generated_tokens[0], skip_special_tokens=True)

# Build the Gradio UI: a two-line input textbox feeding translate_text,
# with the translation shown in a plain output textbox.
input_box = gr.components.Textbox(
    lines=2,
    placeholder="Enter text to translate from English to Hindi...",
)
output_box = gr.components.Textbox()

iface = gr.Interface(
    fn=translate_text,
    inputs=input_box,
    outputs=output_box,
    title="English to Hindi Translator",
    description="Enter English text and get the Hindi translation.",
)

# Start the app (blocks until the server is stopped).
iface.launch()