Spaces:
Running
on
Zero
Running
on
Zero
<feat> add diffusion inference steps control.
Browse files
app.py
CHANGED
@@ -98,7 +98,7 @@ def init_basemodel():
|
|
98 |
|
99 |
|
100 |
@spaces.GPU
|
101 |
-
def process_image_and_text(condition_image, target_prompt, condition_image_prompt, task, random_seed, inpainting, fill_x1, fill_x2, fill_y1, fill_y2):
|
102 |
# set up the model
|
103 |
global pipe, current_task, transformer
|
104 |
if current_task != task:
|
@@ -265,7 +265,7 @@ def process_image_and_text(condition_image, target_prompt, condition_image_promp
|
|
265 |
height=512,
|
266 |
width=512,
|
267 |
num_frames=5,
|
268 |
-
num_inference_steps=50,
|
269 |
guidance_scale=6.0,
|
270 |
num_videos_per_prompt=1,
|
271 |
generator=torch.Generator(device=pipe.transformer.device).manual_seed(random_seed),
|
@@ -315,8 +315,9 @@ def create_app():
|
|
315 |
)
|
316 |
gr.Markdown(notice, elem_id="notice")
|
317 |
target_prompt = gr.Textbox(lines=2, label="Target Prompt", elem_id="tp")
|
318 |
-
condition_image_prompt = gr.Textbox(lines=2, label="Condition Image Prompt", elem_id="cp")
|
319 |
random_seed = gr.Number(label="Random Seed", precision=0, value=0, elem_id="seed")
|
|
|
320 |
inpainting = gr.Checkbox(label="Inpainting", value=False, elem_id="inpainting")
|
321 |
fill_x1 = gr.Number(label="In/Out-painting Box Left Boundary", precision=0, value=128, elem_id="fill_x1")
|
322 |
fill_x2 = gr.Number(label="In/Out-painting Box Right Boundary", precision=0, value=384, elem_id="fill_x2")
|
@@ -338,7 +339,7 @@ def create_app():
|
|
338 |
|
339 |
submit_btn.click(
|
340 |
fn=process_image_and_text,
|
341 |
-
inputs=[condition_image, target_prompt, condition_image_prompt, task, random_seed, inpainting, fill_x1, fill_x2, fill_y1, fill_y2],
|
342 |
outputs=output_images,
|
343 |
)
|
344 |
|
|
|
98 |
|
99 |
|
100 |
@spaces.GPU
|
101 |
+
def process_image_and_text(condition_image, target_prompt, condition_image_prompt, task, random_seed, num_steps, inpainting, fill_x1, fill_x2, fill_y1, fill_y2):
|
102 |
# set up the model
|
103 |
global pipe, current_task, transformer
|
104 |
if current_task != task:
|
|
|
265 |
height=512,
|
266 |
width=512,
|
267 |
num_frames=5,
|
268 |
+
num_inference_steps=num_steps,
|
269 |
guidance_scale=6.0,
|
270 |
num_videos_per_prompt=1,
|
271 |
generator=torch.Generator(device=pipe.transformer.device).manual_seed(random_seed),
|
|
|
315 |
)
|
316 |
gr.Markdown(notice, elem_id="notice")
|
317 |
target_prompt = gr.Textbox(lines=2, label="Target Prompt", elem_id="tp")
|
318 |
+
condition_image_prompt = gr.Textbox(lines=2, label="Condition Image Prompt (Only required by Subject-driven Image Generation and Style Transfer tasks)", elem_id="cp")
|
319 |
random_seed = gr.Number(label="Random Seed", precision=0, value=0, elem_id="seed")
|
320 |
+
num_steps = gr.Number(label="Diffusion Inference Steps", precision=0, value=50, elem_id="steps")
|
321 |
inpainting = gr.Checkbox(label="Inpainting", value=False, elem_id="inpainting")
|
322 |
fill_x1 = gr.Number(label="In/Out-painting Box Left Boundary", precision=0, value=128, elem_id="fill_x1")
|
323 |
fill_x2 = gr.Number(label="In/Out-painting Box Right Boundary", precision=0, value=384, elem_id="fill_x2")
|
|
|
339 |
|
340 |
submit_btn.click(
|
341 |
fn=process_image_and_text,
|
342 |
+
inputs=[condition_image, target_prompt, condition_image_prompt, task, random_seed, num_steps, inpainting, fill_x1, fill_x2, fill_y1, fill_y2],
|
343 |
outputs=output_images,
|
344 |
)
|
345 |
|