# NLLB-Translator / app.py
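#
# Gradio demo that chains four models/Spaces:
#   1. facebook/nllb-200-distilled-600M translates the Chinese input into English.
#   2. RamAnanth1/distilgpt2-sd-prompts expands the English text into a detailed Stable Diffusion prompt.
#   3. The runwayml/stable-diffusion-v1-5 Space renders an image from that prompt.
#   4. The fffiloni/CLIP-Interrogator-2 Space turns the image back into a refined prompt, which is shown to the user.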
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch
from langs import LANGS
import os
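# distilgpt2 fine-tuned on Stable Diffusion prompts: given a short English phrase,
# it generates a longer, more detailed prompt for image generation.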
pipe1 = pipeline('text-generation', model='RamAnanth1/distilgpt2-sd-prompts')
TASK = "translation"
CKPT = "facebook/nllb-200-distilled-600M"
model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
tokenizer = AutoTokenizer.from_pretrained(CKPT)
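# Run the pipelines on GPU 0 when CUDA is available, otherwise fall back to CPU (-1).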
device = 0 if torch.cuda.is_available() else -1
def translate(text):
    """
    Translate text from Simplified Chinese (zho_Hans) to English (eng_Latn) with NLLB-200.
    """
    src_lang = "zho_Hans"
    tgt_lang = "eng_Latn"
    max_length = 400
    # Build the translation pipeline on each call with the hard-coded language pair.
    translation_pipeline = pipeline(TASK,
                                    model=model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    max_length=max_length,
                                    device=device)
    result = translation_pipeline(text)
    return result[0]['translation_text']
# Remote Hugging Face Spaces used as back ends: runwayml/stable-diffusion-v1-5 renders images,
# fffiloni/CLIP-Interrogator-2 turns an image back into a prompt.
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
def get_images(prompt):
    """Render the prompt with the remote Stable Diffusion Space and return the first generated image."""
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]
def get_new_prompt(img, mode):
    """Run the CLIP-Interrogator-2 Space on the image to recover a detailed text prompt."""
    interrogate = clip_interrogator_2(img, mode, 12, api_name="clipi2")
    return interrogate
def infer(input):
    """Expand the translated text into a detailed prompt: generate a prompt, render it, then interrogate the image."""
    prompt = pipe1(input + ',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt)
    result = get_new_prompt(img, 'fast')
    return result[0]
with gr.Blocks() as demo:
    gr.Markdown(
        """ # <center>🥳💬💕 - Stable Diffusion: draw whatever you want, anytime, anywhere!</center>
        ### <center>🤖 - Let AI with humanistic care benefit everyone! AI for good, a brilliant civilization! 山水AI - Enable the future!</center>
        """
    )
    with gr.Row().style(equal_height=True):
        inp1 = gr.Textbox(label="Chinese text to translate", lines=1)
        button = gr.Button("Translate")
        out1 = gr.Textbox(label="English translation", lines=1)
    button.click(translate, [inp1], [out1])
    with gr.Row().style(equal_height=True):
        inp2 = out1
        btn1 = gr.Button("Make your prompt more detailed")
        out2 = gr.Textbox(label="Refined Stable Diffusion prompt", lines=1)
    btn1.click(infer, [inp2], [out2])
    gr.Markdown(
        """ ### <center>Note❗: Please do not enter or generate content that could harm individuals or organizations. This program is for research, learning, and entertainment use only. Content entered or generated by users is unrelated to the program's developer; use it lawfully and in compliance with regulations, and violators bear all consequences themselves.</center>
        ### <center>Translation by [NLLB-200](https://huggingface.co/facebook/nllb-200-distilled-600M) and prompt expansion by [distilgpt2-sd-prompts](https://huggingface.co/RamAnanth1/distilgpt2-sd-prompts). Please follow me on [Bilibili](https://space.bilibili.com/501495851?spm_id_from=333.1007.0.0).</center>
        """
    )
    gr.HTML('''
        <div class="footer">
            <p>🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing. - Steve Jobs
            </p>
        </div>
    ''')

demo.queue().launch(show_error=True)