Spaces:
Build error
Build error
Update app.py
Browse files
app.py
CHANGED
@@ -2,6 +2,9 @@ import gradio as gr
|
|
2 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
|
3 |
import torch
|
4 |
from langs import LANGS
|
|
|
|
|
|
|
5 |
|
6 |
TASK = "translation"
|
7 |
CKPT = "facebook/nllb-200-distilled-600M"
|
@@ -31,11 +34,62 @@ def translate(text):
|
|
31 |
result = translation_pipeline(text)
|
32 |
return result[0]['translation_text']
|
33 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
34 |
|
35 |
-
|
36 |
-
translate,
|
37 |
-
[
|
38 |
-
gr.components.Textbox(label="Text"),
|
39 |
-
],
|
40 |
-
["text"],
|
41 |
-
).launch()
|
|
|
2 |
import os

import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

from langs import LANGS

# GPT-2 model fine-tuned to expand a short phrase into a detailed
# Stable Diffusion prompt. Loaded once at import time (model download on
# first run).
pipe1 = pipeline('text-generation', model='RamAnanth1/distilgpt2-sd-prompts')

# Translation setup: NLLB-200 distilled checkpoint, used by the
# `translate` pipeline defined later in this file (Chinese -> English).
TASK = "translation"
CKPT = "facebook/nllb-200-distilled-600M"
|
|
|
34 |
result = translation_pipeline(text)
|
35 |
return result[0]['translation_text']
|
36 |
|
37 |
+
# Remote Spaces used as backends:
#  - stable_diffusion renders images from an English prompt
#  - clip_interrogator_2 turns an image back into a descriptive prompt
# NOTE(review): gr.Blocks.load is the Gradio 3.x API; newer Gradio exposes
# this as gr.load — confirm the pinned gradio version before upgrading.
stable_diffusion = gr.Blocks.load(name="spaces/runwayml/stable-diffusion-v1-5")
clip_interrogator_2 = gr.Blocks.load(name="spaces/fffiloni/CLIP-Interrogator-2")
|
41 |
+
|
42 |
+
def get_images(prompt):
    """Render *prompt* via the remote Stable Diffusion Space and return the
    path of one generated image.

    The Space call returns a directory holding the generated images. The
    listing is sorted before picking the first entry because os.listdir
    returns files in arbitrary order — the original ``[0]`` pick was
    nondeterministic.

    Raises:
        IndexError: if the gallery directory is empty.
    """
    # fn_index=2 targets the Space's text-to-image endpoint.
    gallery_dir = stable_diffusion(prompt, fn_index=2)
    img_results = sorted(
        os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)
    )
    return img_results[0]
|
46 |
+
|
47 |
+
def get_new_prompt(img, mode, max_flavors=12):
    """Describe *img* as a text prompt via the remote CLIP-Interrogator-2 Space.

    Args:
        img: path (or URL) of the image to interrogate.
        mode: interrogation mode accepted by the Space (e.g. 'fast').
        max_flavors: flavor count forwarded to the Space; the default keeps
            the previously hard-coded value of 12, so existing callers are
            unaffected.

    Returns:
        The Space's response for the ``clipi2`` endpoint, unchanged.
    """
    return clip_interrogator_2(img, mode, max_flavors, api_name="clipi2")
|
50 |
+
|
51 |
+
def infer(text):
    """Expand *text* into a richer Stable Diffusion prompt.

    Pipeline: seed the distilgpt2 prompt generator with the input, render
    one image from the generated prompt, then interrogate that image to
    recover a detailed textual prompt.

    Args:
        text: short English phrase to expand. A trailing comma is appended
            so the generator continues the phrase rather than ending it.
            (Parameter renamed from ``input``, which shadowed the builtin;
            gradio invokes this callback positionally, so callers are safe.)

    Returns:
        The first element of the interrogation result.
    """
    prompt = pipe1(text + ',', num_return_sequences=1)[0]["generated_text"]
    img = get_images(prompt)
    result = get_new_prompt(img, 'fast')
    return result[0]
|
56 |
+
|
57 |
+
# UI: row 1 translates Chinese -> English; row 2 feeds that translation
# into `infer` to produce a detailed Stable Diffusion prompt.
# NOTE(review): .style(equal_height=True) is Gradio 3.x syntax (removed in
# Gradio 4, where Row takes equal_height directly) — confirm pinned version.
with gr.Blocks() as demo:
    gr.Markdown(
        """ # <center>🥳💬💕 - Stable Diffusion,随时随地,想画就画!</center>

        ### <center>🤖 - 让有人文关怀的AI造福每一个人!AI向善,文明璀璨!山水AI - Enable the future!</center>

        """
    )

    # Row 1: translate the Chinese input into English.
    with gr.Row().style(equal_height=True):
        inp1 = gr.Textbox(label="您要翻译的中文内容", lines=1)
        button = gr.Button("翻译一下")
        out1 = gr.Textbox(label="翻译后的英文内容", lines=1)

    button.click(translate, [inp1], [out1])

    # Row 2: enrich the translated text into a detailed prompt.
    with gr.Row().style(equal_height=True):
        inp2 = out1  # reuse the translation output box as this row's input
        btn1 = gr.Button("让您的提示词更详细一下吧")
        # Label fixed: this box shows the enriched prompt, not the
        # translation — the original label was copy-pasted from out1.
        out2 = gr.Textbox(label="优化后的英文提示词", lines=1)

    btn1.click(infer, [inp2], [out2])

    # NOTE(review): the credit below mentions ChatGLM-6B, which this app
    # does not appear to use — likely copied from a sibling Space; verify.
    gr.Markdown(
        """ ### <center>注意❗:请不要输入或生成会对个人以及组织造成侵害的内容,此程序仅供科研、学习及娱乐使用。用户输入或生成的内容与程序开发者无关,请自觉合法合规使用,违反者一切后果自负。</center>

        ### <center>Model by [ChatGLM-6B](https://huggingface.co/THUDM/chatglm-6b). Thanks to [THUDM](https://github.com/THUDM) and [CjangCjengh](https://github.com/CjangCjengh). Please follow me on [Bilibili](https://space.bilibili.com/501495851?spm_id_from=333.1007.0.0).</center>

        """
    )

    gr.HTML('''
        <div class="footer">
            <p>🎶🖼️🎡 - It’s the intersection of technology and liberal arts that makes our hearts sing. - Steve Jobs
            </p>
        </div>
    ''')

# show_error surfaces callback exceptions in the UI instead of failing
# silently; queue() is required for the long-running remote Space calls.
demo.queue().launch(show_error=True)
|
|
|
|
|
|
|
|
|
|
|
|