import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Load the M2M100 multilingual translation model and its tokenizer
tokenizer = AutoTokenizer.from_pretrained("facebook/m2m100_418M")
model = AutoModelForSeq2SeqLM.from_pretrained("facebook/m2m100_418M")

def launch(text):
    # Translate the English input into Italian
    tokenizer.src_lang = "en"
    encoded = tokenizer(text, return_tensors="pt")
    generated_tokens = model.generate(
        **encoded, forced_bos_token_id=tokenizer.get_lang_id("it")
    )
    # batch_decode returns a list; the text interface expects a single string
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

iface = gr.Interface(launch, inputs="text", outputs="text")
iface.launch()
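
The translation function can also be sanity-checked on its own before starting the Gradio interface; a minimal sketch (the example sentence is just an illustration, not from the original):

# Run once locally; should print an Italian translation of the English input
print(launch("Hello, how are you?"))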