import os

import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from diffusers import DiffusionPipeline
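
# Image generation backend: Stable Diffusion 2.1, moved to GPU when available (safety checker disabled).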
device1 = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", safety_checker=None)
pipe = pipe.to(device1)
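# Prompt expansion: a distilgpt2 model fine-tuned on Stable Diffusion prompts.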
pipe1 = pipeline('text-generation', model='RamAnanth1/distilgpt2-sd-prompts')
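# Translation: Meta's NLLB-200 (distilled, 600M parameters), used here for Chinese -> English.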
TASK = "translation"
CKPT = "facebook/nllb-200-distilled-600M"
model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
tokenizer = AutoTokenizer.from_pretrained(CKPT)
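# transformers pipelines take a device index: 0 for the first GPU, -1 for CPU.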
device = 0 if torch.cuda.is_available() else -1
def translate(text):
    """
    Translate text from Chinese (Simplified) to English with NLLB-200.
    """
    src_lang = "zho_Hans"
    tgt_lang = "eng_Latn"
    max_length = 400
    translation_pipeline = pipeline(TASK,
                                    model=model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    max_length=max_length,
                                    device=device)
    result = translation_pipeline(text)
    return result[0]['translation_text']
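
# Note: NLLB-200 expects FLORES-200 language codes such as "zho_Hans", "eng_Latn", "fra_Latn", or "jpn_Jpan".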
# Prompt refinement: remote Spaces draft an image from the prompt and re-describe it with CLIP Interrogator 2.
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
def get_images(prompt):
    """Generate images in the remote Stable Diffusion Space and return the first one."""
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]
    return img_results[0]
def get_new_prompt(img, mode):
    """Ask CLIP Interrogator 2 to turn the image back into a richer text prompt."""
    interrogate = clip_interrogator_2(img, mode, 12, api_name="clipi2")
    return interrogate
def infer(input):
    """Expand the translated text into a detailed prompt, render it remotely, then re-caption the result."""
    prompt = pipe1(input + ',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt)
    result = get_new_prompt(img, 'fast')
    return result[0]
# Final image generation with the local Stable Diffusion 2.1 pipeline.
def genie(prompt, negative_prompt, scale, steps, seed):
    generator = torch.Generator(device=device1).manual_seed(seed)
    image = pipe(prompt, negative_prompt=negative_prompt, width=768, height=768,
                 num_inference_steps=steps, guidance_scale=scale,
                 num_images_per_prompt=1, generator=generator).images[0]
    return image
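
# UI: translate Chinese input to English, optionally expand it into a detailed prompt, then generate a 768x768 image.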
with gr.Blocks() as demo:
    gr.Markdown(
        """ # <center>🥳💬💕 - Stable Diffusion: draw whatever you want, anytime, anywhere!</center>
        ### <center>🤖 - Humane AI that benefits everyone! AI for good, for a brighter civilization! 山水AI - Enable the future!</center>
        """
    )
    with gr.Row().style(equal_height=True):
        inp1 = gr.Textbox(label="Chinese text to translate", lines=1)
        button = gr.Button("Translate")
        out1 = gr.Textbox(label="English translation", lines=1)
    button.click(translate, [inp1], [out1])
    with gr.Row().style(equal_height=True):
        inp2 = out1
        btn1 = gr.Button("Make the prompt more detailed")
        out2 = gr.Textbox(label="Detailed prompt (editable here)", lines=1)
    btn1.click(infer, [inp2], [out2])
    with gr.Row().style(equal_height=True):
        inp3 = out2
        inp4 = gr.Textbox(label="Features you do not want (negative prompt; optional)", placeholder="low quality", lines=1)
        inp5 = gr.Slider(1, 25, 10, label="Guidance scale")
        inp6 = gr.Slider(30, maximum=75, value=50, step=1, label="Inference steps")
        inp7 = gr.Slider(minimum=1, step=1, maximum=999999999999999999, randomize=True, label="Seed")
    with gr.Row().style(equal_height=True):
        btn2 = gr.Button("Generate the image")
        out3 = gr.Image(label="Your generated image")
    btn2.click(genie, [inp3, inp4, inp5, inp6, inp7], [out3])
    gr.Markdown(
        """ ### <center>Note❗: Please do not enter or generate content that could harm any individual or organization. This program is for research, study, and entertainment use only. Content entered or generated by users is unrelated to the developers; use it lawfully and responsibly, and violators bear all consequences themselves.</center>
        ### <center>Model by [Stable Diffusion](https://github.com/Stability-AI/stablediffusion). Thanks to [Manjushri](https://huggingface.co/Manjushri).</center>
        """
    )
    gr.HTML('''
        <div class="footer">
            <p>🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing. - Steve Jobs
            </p>
        </div>
    ''')
demo.queue().launch(show_error=True)