Spaces: Running on Zero
kayfahaarukku committed on
Commit • 0ea7716
Parent(s): 1f4d65d
Update app.py
app.py CHANGED
@@ -18,12 +18,16 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
 pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

 # Function to generate an image
-@spaces.GPU
+@spaces.GPU
 def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
-    pipe.to('cuda')
+    pipe.to('cuda')

     if randomize_seed:
         seed = random.randint(0, 99999999)
+
+    original_prompt = prompt
+    original_negative_prompt = negative_prompt
+
     if use_defaults:
         prompt = f"{prompt}, masterpiece, best quality"
         negative_prompt = f"nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, {negative_prompt}"

@@ -48,18 +52,26 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s

     torch.cuda.empty_cache()

-    return image, seed
+    return image, seed, original_prompt, original_negative_prompt

 # Define Gradio interface
 def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
-    image, seed = generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
+    image, seed, original_prompt, original_negative_prompt = generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)

-
-
-
-
-
-
+    details = f"""Prompt: {original_prompt}
+
+Negative prompt: {original_negative_prompt}
+
+Steps: {num_inference_steps}, CFG scale: {guidance_scale}, Seed: {seed}, Size: {resolution}
+
+Default quality tags: {"Enabled" if use_defaults else "Disabled"}"""
+
+    if use_defaults:
+        details += f"""
+
+Default prompt addition: , masterpiece, best quality
+
+Default negative prompt addition: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"""

     return image, seed, gr.update(value=seed), details

@@ -92,9 +104,15 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
             reset_button = gr.Button("Reset")

         with gr.Column():
-
-
-
+            output_image = gr.Image(type="pil", label="Generated Image")
+            with gr.Accordion("Generation Details", open=False):
+                generation_info = gr.Textbox(
+                    label="",
+                    max_lines=15,
+                    interactive=False,
+                    elem_id="generation_info",
+                    show_copy_button=True
+                )
             gr.Markdown(
                 """
                 ### Recommended prompt formatting:
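
For illustration only: the details string assembled in the second hunk interpolates the pre-augmentation values, so the Generation Details box echoes what the user actually typed and lists the default additions separately. A minimal standalone sketch of that construction, with hypothetical input values that are not taken from this Space:

# Standalone sketch of the details construction above; all values are hypothetical.
use_defaults = True
original_prompt = "1girl, solo, looking at viewer"   # user's prompt before quality tags are appended
original_negative_prompt = "lowres"                  # user's negative prompt before defaults are prepended
num_inference_steps, guidance_scale, seed, resolution = 28, 7.0, 12345678, "832x1216"

details = f"""Prompt: {original_prompt}

Negative prompt: {original_negative_prompt}

Steps: {num_inference_steps}, CFG scale: {guidance_scale}, Seed: {seed}, Size: {resolution}

Default quality tags: {"Enabled" if use_defaults else "Disabled"}"""

if use_defaults:
    details += """

Default prompt addition: , masterpiece, best quality

Default negative prompt addition: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"""

print(details)  # this text is what lands in the "Generation Details" textbox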
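
The third hunk adds output_image and the Generation Details accordion but does not show how they are bound to the generate event; presumably an existing .click() call routes interface_fn's four return values (image, seed, gr.update(value=seed), details) to four output components. Below is a self-contained sketch of that wiring pattern only; the component names, input list, and stub callback are assumptions, not the Space's actual app.py:

import gradio as gr

# Hypothetical wiring sketch: only output_image, the accordion, and generation_info
# mirror the commit; every other name and the callback body are assumed.
def interface_fn_stub(prompt, seed):
    details = f"Prompt: {prompt}\n\nSeed: {seed}"
    # Same return shape as interface_fn in the commit: (image, seed, seed update, details)
    return None, seed, gr.update(value=seed), details

with gr.Blocks() as demo:
    prompt_input = gr.Textbox(label="Prompt")
    seed_input = gr.Number(label="Seed", value=0)
    generate_button = gr.Button("Generate")
    output_image = gr.Image(type="pil", label="Generated Image")
    with gr.Accordion("Generation Details", open=False):
        generation_info = gr.Textbox(label="", interactive=False, show_copy_button=True)
    used_seed = gr.Number(label="Seed used", interactive=False)

    # Four outputs, matching the four returned values in order.
    generate_button.click(
        fn=interface_fn_stub,
        inputs=[prompt_input, seed_input],
        outputs=[output_image, used_seed, seed_input, generation_info],
    )

# demo.launch()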