Raushan-123 committed on
Commit a0b32cd · verified · 1 Parent(s): eb153f8

Delete app.py

Files changed (1)
  1. app.py +0 -207
app.py DELETED
@@ -1,207 +0,0 @@
- #!/usr/bin/env python
-
- import os
- import random
- import uuid
-
- import gradio as gr
- import numpy as np
- from PIL import Image
- import spaces
- import torch
- from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-
- DESCRIPTION = """
- # Image Generator
- """
-
-
- def save_image(img):
-     # Persist a generated PIL image under a unique filename and return the path.
-     unique_name = str(uuid.uuid4()) + ".png"
-     img.save(unique_name)
-     return unique_name
-
-
- def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
-     if randomize_seed:
-         seed = random.randint(0, MAX_SEED)
-     return seed
-
-
- MAX_SEED = np.iinfo(np.int32).max
-
- if not torch.cuda.is_available():
-     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
-
- USE_TORCH_COMPILE = 0
- ENABLE_CPU_OFFLOAD = 0
-
-
- if torch.cuda.is_available():
-     # Load the SDXL base model, switch to the Euler Ancestral scheduler,
-     # attach the DALL-E-3-style LoRA, and move everything to the GPU.
-     pipe = StableDiffusionXLPipeline.from_pretrained(
-         "fluently/Fluently-XL-v2",
-         torch_dtype=torch.float16,
-         use_safetensors=True,
-     )
-     pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
-     pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
-     pipe.set_adapters("dalle")
-
-     pipe.to("cuda")
-
-
- @spaces.GPU(enable_queue=True)
- def generate(
-     prompt: str,
-     negative_prompt: str = "",
-     use_negative_prompt: bool = False,
-     seed: int = 0,
-     width: int = 1024,
-     height: int = 1024,
-     guidance_scale: float = 3,
-     randomize_seed: bool = False,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     seed = int(randomize_seed_fn(seed, randomize_seed))
-
-     if not use_negative_prompt:
-         negative_prompt = ""  # type: ignore
-
-     images = pipe(
-         prompt=prompt,
-         negative_prompt=negative_prompt,
-         width=width,
-         height=height,
-         guidance_scale=guidance_scale,
-         num_inference_steps=25,
-         num_images_per_prompt=1,
-         cross_attention_kwargs={"scale": 0.65},
-         output_type="pil",
-     ).images
-     image_paths = [save_image(img) for img in images]
-     print(image_paths)
-     return image_paths, seed
-
-
- examples = [
-     "neon holography crystal cat",
-     "newton thinking about the falling of an apple from the tree",
-     "a cat eating a piece of cheese",
-     "an astronaut riding a horse in space",
-     "a beautiful farmhouse",
-     "a cartoon of a boy playing with a tiger",
-     "a cute robot artist painting on an easel, concept art",
-     "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the shoulder pose, brown skin-tone",
- ]
-
- css = '''
- .gradio-container{max-width: 560px !important}
- h1{text-align:center}
- footer {
-     visibility: hidden
- }
- '''
-
- with gr.Blocks(css=css, theme="pseudolab/huggingface-korea-theme") as demo:
-     gr.Markdown(DESCRIPTION)
-     gr.DuplicateButton(
-         value="Duplicate Space for private use",
-         elem_id="duplicate-button",
-         visible=False,
-     )
-
-     with gr.Group():
-         with gr.Row():
-             prompt = gr.Text(
-                 label="Prompt",
-                 show_label=False,
-                 max_lines=1,
-                 placeholder="Enter your prompt",
-                 container=False,
-             )
-             run_button = gr.Button("Run", scale=0)
-         result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
-     with gr.Accordion("Advanced options", open=False):
-         use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
-         negative_prompt = gr.Text(
-             label="Negative prompt",
-             lines=4,
-             max_lines=6,
-             value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation, (NSFW:1.25)""",
-             placeholder="Enter a negative prompt",
-             visible=True,
-         )
-         seed = gr.Slider(
-             label="Seed",
-             minimum=0,
-             maximum=MAX_SEED,
-             step=1,
-             value=0,
-             visible=True,
-         )
-         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-         with gr.Row(visible=True):
-             width = gr.Slider(
-                 label="Width",
-                 minimum=512,
-                 maximum=2048,
-                 step=8,
-                 value=1024,
-             )
-             height = gr.Slider(
-                 label="Height",
-                 minimum=512,
-                 maximum=2048,
-                 step=8,
-                 value=1024,
-             )
-         with gr.Row():
-             guidance_scale = gr.Slider(
-                 label="Guidance Scale",
-                 minimum=0.1,
-                 maximum=20.0,
-                 step=0.1,
-                 value=6,
-             )
-
-     gr.Examples(
-         examples=examples,
-         inputs=prompt,
-         outputs=[result, seed],
-         fn=generate,
-         cache_examples=False,
-     )
-
-     # Show or hide the negative-prompt box to match the checkbox state.
-     use_negative_prompt.change(
-         fn=lambda x: gr.update(visible=x),
-         inputs=use_negative_prompt,
-         outputs=negative_prompt,
-         api_name=False,
-     )
-
-     # Run generation on Enter in either text box or on the Run button.
-     gr.on(
-         triggers=[
-             prompt.submit,
-             negative_prompt.submit,
-             run_button.click,
-         ],
-         fn=generate,
-         inputs=[
-             prompt,
-             negative_prompt,
-             use_negative_prompt,
-             seed,
-             width,
-             height,
-             guidance_scale,
-             randomize_seed,
-         ],
-         outputs=[result, seed],
-         api_name="run",
-     )
-
- if __name__ == "__main__":
-     demo.queue(max_size=20).launch(show_api=False, debug=False)
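
For reference, below is a minimal, stand-alone sketch of the generation path the deleted app.py implemented: the SDXL base model fluently/Fluently-XL-v2 with the dalle-3-xl-v2 LoRA, the Euler Ancestral scheduler, 25 inference steps, and a LoRA scale of 0.65. It assumes the same dependencies (torch, diffusers) and a CUDA GPU; the example prompt and the output filename sample.png are illustrative, not part of the original file.

import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

# Same model, scheduler, and LoRA setup as the deleted app.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "fluently/Fluently-XL-v2",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
pipe.load_lora_weights(
    "ehristoforu/dalle-3-xl-v2",
    weight_name="dalle-3-xl-lora-v2.safetensors",
    adapter_name="dalle",
)
pipe.set_adapters("dalle")
pipe.to("cuda")

# One generation with the same settings the app used (example prompt is illustrative).
image = pipe(
    prompt="an astronaut riding a horse in space",
    negative_prompt="",
    width=1024,
    height=1024,
    guidance_scale=6,
    num_inference_steps=25,
    cross_attention_kwargs={"scale": 0.65},  # LoRA strength used by the app
).images[0]
image.save("sample.png")

This strips away the Gradio UI and the ZeroGPU decorator, leaving only the diffusers calls the Space relied on.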