Spaces:
Sleeping
Sleeping
File size: 639 Bytes
70ceed9 fe31714 70ceed9 fe31714 2b5ef4f fe31714 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 |
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import gradio as gr
import torch
# Load the multilingual M2M100 (418M-parameter) tokenizer and model.
# from_pretrained downloads/caches the weights from the Hugging Face Hub
# on first run, so module import may take a while and needs network access.
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
def launch(input):
    """Translate English text to Italian with M2M100.

    Args:
        input: English source text to translate. (Name kept for
            interface stability even though it shadows the builtin.)

    Returns:
        The Italian translation as a single string.
    """
    # Tell the tokenizer the source language so it prepends the right
    # language token to the encoded input.
    tokenizer.src_lang = "en"
    encoded = tokenizer(input, return_tensors="pt")
    # Force the decoder to begin with the Italian language token so the
    # model generates Italian output.
    generated_tokens = model.generate(
        **encoded, forced_bos_token_id=tokenizer.get_lang_id("it")
    )
    decoded = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
    # batch_decode returns a list of strings (one per batch item); the
    # Gradio output component is "text", so return the single translation
    # rather than letting the UI render the list's repr like "['...']".
    return decoded[0]
# Wire the translation function into a simple Gradio UI: one text input
# (English) mapped to one text output (Italian), then start the web server.
iface = gr.Interface(launch, inputs="text", outputs="text")
iface.launch()
|