Spaces:
Running
on
Zero
Running
on
Zero
kayfahaarukku
committed on
Commit
•
f223a90
1
Parent(s):
2c40df6
Revert
Browse files
app.py
CHANGED
@@ -18,19 +18,15 @@ pipe = StableDiffusionXLPipeline.from_pretrained(
|
|
18 |
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
|
19 |
|
20 |
# Function to generate an image
|
21 |
-
@spaces.GPU
|
22 |
def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
|
23 |
-
pipe.to('cuda')
|
24 |
|
25 |
if randomize_seed:
|
26 |
seed = random.randint(0, 99999999)
|
27 |
-
|
28 |
-
original_prompt = prompt
|
29 |
-
original_negative_prompt = negative_prompt
|
30 |
-
|
31 |
if use_defaults:
|
32 |
prompt = f"{prompt}, masterpiece, best quality"
|
33 |
-
negative_prompt = f"
|
34 |
generator = torch.manual_seed(seed)
|
35 |
|
36 |
def callback(step, timestep, latents):
|
@@ -52,31 +48,15 @@ def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_s
|
|
52 |
|
53 |
torch.cuda.empty_cache()
|
54 |
|
55 |
-
return image, seed
|
56 |
|
57 |
# Define Gradio interface
|
58 |
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
|
59 |
-
image, seed
|
60 |
-
|
61 |
-
details = f"""Prompt: {original_prompt}
|
62 |
-
|
63 |
-
Negative prompt: {original_negative_prompt}
|
64 |
-
|
65 |
-
Steps: {num_inference_steps}, CFG scale: {guidance_scale}, Seed: {seed}, Size: {resolution}
|
66 |
-
|
67 |
-
Default quality tags: {"Enabled" if use_defaults else "Disabled"}"""
|
68 |
-
|
69 |
-
if use_defaults:
|
70 |
-
details += f"""
|
71 |
-
|
72 |
-
Default prompt addition: , masterpiece, best quality
|
73 |
-
|
74 |
-
Default negative prompt addition: nsfw, lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name"""
|
75 |
-
|
76 |
-
return image, seed, gr.update(value=seed), details
|
77 |
|
78 |
def reset_inputs():
|
79 |
-
return gr.update(value=''), gr.update(value=''), gr.update(value=True), gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28), gr.update(value=0), gr.update(value=True)
|
80 |
|
81 |
with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
|
82 |
gr.HTML(
|
@@ -86,7 +66,7 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
|
|
86 |
with gr.Row():
|
87 |
with gr.Column():
|
88 |
prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
|
89 |
-
negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt")
|
90 |
use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
|
91 |
resolution_input = gr.Radio(
|
92 |
choices=[
|
@@ -105,14 +85,6 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
|
|
105 |
|
106 |
with gr.Column():
|
107 |
output_image = gr.Image(type="pil", label="Generated Image")
|
108 |
-
with gr.Accordion("Generation Details", open=False):
|
109 |
-
generation_info = gr.Textbox(
|
110 |
-
label="",
|
111 |
-
max_lines=15,
|
112 |
-
interactive=False,
|
113 |
-
elem_id="generation_info",
|
114 |
-
show_copy_button=True
|
115 |
-
)
|
116 |
gr.Markdown(
|
117 |
"""
|
118 |
### Recommended prompt formatting:
|
@@ -131,7 +103,7 @@ with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as d
|
|
131 |
inputs=[
|
132 |
prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
|
133 |
],
|
134 |
-
outputs=[output_image, seed_input
|
135 |
)
|
136 |
|
137 |
reset_button.click(
|
|
|
18 |
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
|
19 |
|
20 |
# Function to generate an image
|
21 |
+
@spaces.GPU # Adjust the duration as needed
|
22 |
def generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
|
23 |
+
pipe.to('cuda') # Move the model to GPU when the function is called
|
24 |
|
25 |
if randomize_seed:
|
26 |
seed = random.randint(0, 99999999)
|
|
|
|
|
|
|
|
|
27 |
if use_defaults:
|
28 |
prompt = f"{prompt}, masterpiece, best quality"
|
29 |
+
negative_prompt = f"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, {negative_prompt}"
|
30 |
generator = torch.manual_seed(seed)
|
31 |
|
32 |
def callback(step, timestep, latents):
|
|
|
48 |
|
49 |
torch.cuda.empty_cache()
|
50 |
|
51 |
+
return image, seed
|
52 |
|
53 |
# Define Gradio interface
|
54 |
def interface_fn(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress=gr.Progress()):
|
55 |
+
image, seed = generate_image(prompt, negative_prompt, use_defaults, resolution, guidance_scale, num_inference_steps, seed, randomize_seed, progress)
|
56 |
+
return image, seed, gr.update(value=seed)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
57 |
|
58 |
def reset_inputs():
|
59 |
+
return gr.update(value=''), gr.update(value='realistic, 3d,'), gr.update(value=True), gr.update(value='832x1216'), gr.update(value=7), gr.update(value=28), gr.update(value=0), gr.update(value=True)
|
60 |
|
61 |
with gr.Blocks(title="UrangDiffusion 1.0 Demo", theme="NoCrypt/[email protected]") as demo:
|
62 |
gr.HTML(
|
|
|
66 |
with gr.Row():
|
67 |
with gr.Column():
|
68 |
prompt_input = gr.Textbox(lines=2, placeholder="Enter prompt here", label="Prompt")
|
69 |
+
negative_prompt_input = gr.Textbox(lines=2, placeholder="Enter negative prompt here", label="Negative Prompt", value="realistic, 3d,")
|
70 |
use_defaults_input = gr.Checkbox(label="Use Default Quality Tags and Negative Prompt", value=True)
|
71 |
resolution_input = gr.Radio(
|
72 |
choices=[
|
|
|
85 |
|
86 |
with gr.Column():
|
87 |
output_image = gr.Image(type="pil", label="Generated Image")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
88 |
gr.Markdown(
|
89 |
"""
|
90 |
### Recommended prompt formatting:
|
|
|
103 |
inputs=[
|
104 |
prompt_input, negative_prompt_input, use_defaults_input, resolution_input, guidance_scale_input, num_inference_steps_input, seed_input, randomize_seed_input
|
105 |
],
|
106 |
+
outputs=[output_image, seed_input]
|
107 |
)
|
108 |
|
109 |
reset_button.click(
|