Spaces:
Runtime error
Runtime error
Commit
·
db2d287
1
Parent(s):
40eb3d0
Upload C
Browse files
C
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
from PIL import Image
|
3 |
+
import torch
|
4 |
+
from muse import PipelineMuse
|
5 |
+
from diffusers import AutoPipelineForText2Image, UniPCMultistepScheduler
|
6 |
+
|
7 |
+
# Base MUSE 512px pipeline, loaded in fp16 on GPU; xformers memory-efficient
# attention cuts VRAM use in the transformer.
muse_512 = PipelineMuse.from_pretrained("openMUSE/muse-512").to("cuda", dtype=torch.float16)
muse_512.transformer.enable_xformers_memory_efficient_attention()

# Fine-tuned variant of the same 512px MUSE checkpoint, identical setup.
muse_512_fine = PipelineMuse.from_pretrained("openMUSE/muse-512-finetuned").to("cuda", dtype=torch.float16)
muse_512_fine.transformer.enable_xformers_memory_efficient_attention()

# Stable Diffusion v1.5 as a side-by-side baseline. UniPC scheduler is swapped
# in for fast sampling (the demo runs it at 25 steps).
sdv1_5 = AutoPipelineForText2Image.from_pretrained("runwayml/stable-diffusion-v1-5", variant="fp16").to("cuda", dtype=torch.float16)
sdv1_5.scheduler = UniPCMultistepScheduler.from_config(sdv1_5.scheduler.config)
sdv1_5.enable_xformers_memory_efficient_attention()
18 |
+
def infer(prompt, negative="", guidance_scale=10):
    """Generate one image per model: MUSE 512, fine-tuned MUSE 512, and SD v1.5.

    FIX: the UI wires three inputs (text, negative, guidance_scale) into this
    callback, but the original signature accepted only (prompt, negative), so
    every invocation raised TypeError. The extra parameters now have defaults
    that reproduce the old hard-coded values, and the previously ignored
    negative prompt / slider value are actually used.

    Args:
        prompt: Text prompt shared by all three pipelines.
        negative: Negative prompt. Only the Stable Diffusion pipeline consumes
            it here — the MUSE pipeline call in this file exposes no
            negative-prompt argument.
        guidance_scale: Classifier-free guidance scale (was hard-coded to 10).

    Returns:
        A list of three PIL images: [muse_512, muse_512_fine, sdv1_5].
    """
    print("Generating:")

    muse_512_image = muse_512(
        prompt, timesteps=16, guidance_scale=guidance_scale, transformer_seq_len=1024, use_fp16=True, temperature=(2, 0),
    )[0]

    muse_512_fine_image = muse_512_fine(
        prompt, timesteps=16, guidance_scale=guidance_scale, transformer_seq_len=1024, use_fp16=True, temperature=(2, 0),
    )[0]

    # Empty string -> None so an untouched field keeps SD's default behavior.
    sdv1_5_image = sdv1_5(
        prompt,
        negative_prompt=negative or None,
        guidance_scale=guidance_scale,
        num_inference_steps=25,
    ).images[0]

    return [muse_512_image, muse_512_fine_image, sdv1_5_image]
|
34 |
+
|
35 |
+
|
36 |
+
# Demo presets. Each row mirrors the input widgets in order:
# (prompt, negative prompt, guidance scale); 10 matches the slider default.
_EXAMPLE_ROWS = (
    ("A high tech solarpunk utopia in the Amazon rainforest", "low quality"),
    ("A pikachu fine dining with a view to the Eiffel Tower", "low quality"),
    ("A mecha robot in a favela in expressionist style", "low quality, 3d, photorealistic"),
    ("an insect robot preparing a delicious meal", "low quality, illustration"),
    ("A small cabin on top of a snowy mountain in the style of Disney, artstation", "low quality, ugly"),
)
examples = [[prompt_text, negative_text, 10] for prompt_text, negative_text in _EXAMPLE_ROWS]
|
63 |
+
|
64 |
+
|
65 |
+
css = """
|
66 |
+
h1 {
|
67 |
+
text-align: center;
|
68 |
+
}
|
69 |
+
|
70 |
+
#component-0 {
|
71 |
+
max-width: 730px;
|
72 |
+
margin: auto;
|
73 |
+
}
|
74 |
+
"""
|
75 |
+
|
76 |
+
# Gradio UI.
# FIX: the original wired a `negative` component into gr.Examples,
# negative.submit, and every inputs=[...] list without ever creating it,
# raising NameError as soon as the script started (the Space's "Runtime
# error"). A negative-prompt Textbox is now defined next to the prompt box.
block = gr.Blocks(css=css)

with block:
    gr.Markdown("MUSE is an upcoming fast text2image model.")
    with gr.Group():
        # NOTE(review): .style() is the gradio 3.x API; remove if upgrading to 4.x.
        with gr.Row(elem_id="prompt-container").style(mobile_collapse=False, equal_height=True):
            with gr.Column():
                text = gr.Textbox(
                    label="Enter your prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter your prompt",
                    container=False,
                )
                negative = gr.Textbox(
                    label="Enter a negative prompt",
                    show_label=False,
                    max_lines=1,
                    placeholder="Enter a negative prompt",
                    container=False,
                )
            btn = gr.Button("Generate image", scale=0)

    gallery = gr.Gallery(
        label="Generated images", show_label=False,
    ).style()

    with gr.Accordion("Advanced settings", open=False):
        guidance_scale = gr.Slider(
            label="Guidance Scale", minimum=0, maximum=20, value=10, step=0.1
        )

    ex = gr.Examples(examples=examples, fn=infer, inputs=[text, negative, guidance_scale], outputs=gallery, cache_examples=False)
    ex.dataset.headers = [""]

    # All three triggers share the same callback and input wiring.
    text.submit(infer, inputs=[text, negative, guidance_scale], outputs=gallery)
    negative.submit(infer, inputs=[text, negative, guidance_scale], outputs=gallery)
    btn.click(infer, inputs=[text, negative, guidance_scale], outputs=gallery)

block.launch()
|