File size: 1,550 Bytes
2570705
 
 
 
c2b09ef
2570705
 
fe7b5d7
2570705
 
 
 
 
b8b2797
 
 
 
2570705
 
 
 
 
 
 
 
43a9af8
2570705
43a9af8
 
2570705
 
 
 
 
 
d15c3cf
5c13be2
2570705
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
from pypinyin import pinyin
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from LAC import LAC
import gradio as gr
import torch

# Load the 1.2B-parameter M2M-100 multilingual translation model and its
# tokenizer from the Hugging Face hub (downloads the weights on first run).
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_1.2B")
model.eval()  # inference only — disables dropout / training-mode layers
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_1.2B")
# Baidu LAC in segmentation mode: splits Chinese text into a list of words.
lac = LAC(mode="seg")

def make_request(chinese_text):
  """Translate *chinese_text* (Chinese) to English with M2M-100.

  Returns the list of decoded translations from ``batch_decode`` (one
  entry for the single input string).
  """
  # The shared tokenizer must know the source language before encoding.
  tokenizer.src_lang = "zh"
  english_bos = tokenizer.get_lang_id("en")
  with torch.no_grad():  # inference only — skip autograd bookkeeping
      inputs = tokenizer(chinese_text, return_tensors="pt")
      outputs = model.generate(**inputs, forced_bos_token_id=english_bos)
  return tokenizer.batch_decode(outputs, skip_special_tokens=True)

def generatepinyin(input):
  """Return the pinyin reading of *input* as one string.

  Each syllable is prefixed with a single space, so the result keeps the
  original formatting (including its leading space).
  """
  # pypinyin returns a list of [syllable] candidate lists; keep the first
  # candidate of each and glue them together with the space prefix.
  return "".join(" " + candidates[0] for candidates in pinyin(input))

def generate_response(Chinese_to_translate):
    """Build the translation table for the UI.

    Returns a list of ``[chinese, translation, pinyin]`` rows: the first
    row covers the whole input text, followed by one row per word from
    LAC segmentation.
    """
    # Treat the full text as just another piece, ahead of the segments.
    pieces = [Chinese_to_translate] + lac.run(Chinese_to_translate)
    return [
        [piece, make_request(piece), generatepinyin(piece)]
        for piece in pieces
    ]

# Wire the translator into a simple Gradio web UI: one multiline textbox in,
# plain text out, backed by generate_response.
iface = gr.Interface(
    fn=generate_response,
    title="Chinese to English",
    description="Chinese to English with a state-of-the-art model (facebook-research m2m-100 1.2B)",
    # NOTE(review): gr.inputs.Textbox is the legacy (pre-3.x) Gradio input
    # API — confirm the installed gradio version still provides it.
    inputs=gr.inputs.Textbox(lines=5, placeholder="Enter text in Chinese"),
    outputs="text")

# Start the local Gradio server (blocks until shut down).
iface.launch()