import sys
import subprocess

# Install sentencepiece (required by the Marian tokenizer) via pip as a subprocess:
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '--quiet', 'sentencepiece==0.1.95'])

import gradio as gr
import torch
from transformers import AutoTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Tokenizer of the base English->Arabic model, plus the fine-tuned checkpoint
# (a full model object saved with torch.save).
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-ar")
model = torch.load("helsinki_fineTuned.pt", map_location=device)
model.eval()

def translate_gradio(text):
    # Tokenize the English input, generate the Arabic translation, and decode it.
    batch = tokenizer(text, return_tensors='pt').to(device)
    encoded = model.generate(**batch)
    text_ar = tokenizer.batch_decode(encoded, skip_special_tokens=True)[0]
    return text_ar

translate_interface = gr.Interface(fn=translate_gradio,
                                   inputs="text",
                                   outputs="text")
translate_interface.launch(inline=False)
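
# Alternative sketch, assuming the fine-tuned weights were instead exported with
# model.save_pretrained("helsinki_fineTuned/"): the `pipeline` helper from
# transformers can wrap tokenization, generation, and decoding in one call,
# so translate_gradio reduces to a single line. The directory name and device
# index below are illustrative, not part of the original script.
#
# from transformers import pipeline
#
# translation_pipeline = pipeline("translation_en_to_ar",
#                                 model="helsinki_fineTuned/",
#                                 tokenizer="Helsinki-NLP/opus-mt-en-ar",
#                                 device=0)  # 0 = first GPU, -1 = CPU
#
# def translate_gradio(text):
#     return translation_pipeline(text)[0]["translation_text"]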