File size: 3,528 Bytes
b82b703
 
 
 
cbdedd5
 
 
b82b703
 
 
 
 
 
 
 
 
 
cce8d20
b82b703
 
 
cce8d20
 
 
 
b82b703
 
 
 
 
 
 
 
 
 
 
8f3256e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cbdedd5
 
 
 
 
 
 
 
 
 
 
1dfb18f
cbdedd5
 
 
 
8f3256e
 
1dfb18f
8f3256e
 
 
 
cbdedd5
 
 
 
 
 
 
 
 
 
 
 
 
 
b82b703
8f3256e
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
import torch
from langs import LANGS
import os

pipe1 = pipeline('text-generation', model='RamAnanth1/distilgpt2-sd-prompts')

TASK = "translation"
CKPT = "facebook/nllb-200-distilled-600M"

model = AutoModelForSeq2SeqLM.from_pretrained(CKPT)
tokenizer = AutoTokenizer.from_pretrained(CKPT)

device = 0 if torch.cuda.is_available() else -1


def translate(text):
    """
    Translate the text from source lang to target lang
    """
    src_lang = "zho-Hans"
    tgt_lang = "eng_Latn"
    max_length = 400
    
    translation_pipeline = pipeline(TASK,
                                    model=model,
                                    tokenizer=tokenizer,
                                    src_lang=src_lang,
                                    tgt_lang=tgt_lang,
                                    max_length=max_length,
                                    device=device)

    result = translation_pipeline(text)
    return result[0]['translation_text']

#prompt

stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")

def get_images(prompt):
    gallery_dir = stable_diffusion(prompt, fn_index=2)    
    img_results = [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)]  
    return img_results[0]

def get_new_prompt(img, mode):
    interrogate = clip_interrogator_2(img, mode, 12, api_name="clipi2")
    return interrogate

def infer(input):
    prompt = pipe1(input+',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt) 
    result = get_new_prompt(img, 'fast')    
    return result[0]

with gr.Blocks() as demo:
    gr.Markdown(
            """ # <center>🥳💬💕 - Stable Diffusion,随时随地,想画就画!</center>
            
            ### <center>🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!山水AI - Enable the future!</center>
            
        """
    )
    
    with gr.Row().style(equal_height=True):
        inp1 = gr.Textbox(label = "您要翻译的中文内容", lines=1)
        button = gr.Button("翻译一下吧")
        out1 = gr.Textbox(label = "翻译后的英文内容", lines=1)
    
    button.click(translate, [inp1], [out1])
    
    with gr.Row().style(equal_height=True):
        inp2 = out1
        btn1 = gr.Button("让您的提示词更详细一些吧")
        out2 = gr.Textbox(label = "翻译后的英文内容", lines=1)
    
    btn1.click(infer, [inp2], [out2])
   
    gr.Markdown(
            """ ### <center>注意❗:请不要输入或生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及娱乐使用。用户输入或生成的内容与程序开发者无关,请自觉合法合规使用,违反者一切后果自负。</center>
            
            ### <center>Model by [ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b). Thanks to [THUDM](https://github.com/THUDM) and [CjangCjengh](https://github.com/CjangCjengh). Please follow me on [Bilibili](https://space.bilibili.com/501495851?spm_id_from=333.1007.0.0).</center>
            
        """
    )
        
    gr.HTML('''
        <div class="footer">
                    <p>🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing. - Steve Jobs
                    </p>
        </div>
    ''')     

demo.queue().launch(show_error=True)