import os
import random

import gradio as gr
import numpy as np
import spaces
import torch
from diffusers import AutoPipelineForText2Image

hf_token = os.environ.get("HF_TOKEN")

device = "cuda"  # ZeroGPU Spaces always provide CUDA; switch to "cpu" for a local CPU run

# Load the BRIA-2.3 text-to-image pipeline and attach the BRIA IP-Adapter weights
pipe = AutoPipelineForText2Image.from_pretrained(
    "briaai/BRIA-2.3",
    torch_dtype=torch.float16,
    force_zeros_for_empty_prompt=False,
    token=hf_token,  # pass the Space's HF_TOKEN secret in case the checkpoint requires authentication
).to(device)
pipe.load_ip_adapter("briaai/Image-Prompt", subfolder="models", weight_name="ip_adapter_bria.bin")

MAX_SEED = np.iinfo(np.int32).max
@spaces.GPU(enable_queue=True)
def predict(prompt, ip_adapter_images, ip_adapter_scale=0.5, negative_prompt="", seed=100, randomize_seed=False, center_crop=False, width=1024, height=1024, guidance_scale=5.0, num_inference_steps=50, progress=gr.Progress(track_tqdm=True)):
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    # gr.Gallery(type="pil") delivers (image, caption) tuples; keep only the images
    ip_adapter_images = [image for image, _ in ip_adapter_images]

    # Resize the reference images unless center crop was requested
    if not center_crop:
        ip_adapter_images = [image.resize((224, 224)) for image in ip_adapter_images]

    # Seed a generator so results are reproducible
    generator = torch.Generator(device=device).manual_seed(seed)
    pipe.set_ip_adapter_scale([ip_adapter_scale])

    # Pass all reference images to the pipeline in a single call
    result_images = pipe(
        prompt=prompt,
        ip_adapter_image=ip_adapter_images,
        negative_prompt=negative_prompt,
        height=height,
        width=width,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        num_images_per_prompt=1,
        generator=generator,
    ).images

    return result_images, seed
examples = [
    ["high quality", ["example1.png", "example2.png"], 1.0, "", 1000, False, False, 1152, 896],
]
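
# Note: gr.Examples below references example1.png and example2.png; these image
# files must exist at the root of the Space repository for the example row
# (and its lazy cache) to load.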
css = """
#col-container {
    display: flex;
    flex-direction: column;
    align-items: center;
    padding: 10px;
}
"""
with gr.Blocks(css=css) as demo:
    with gr.Column():
        with gr.Row():
            prompt = gr.Textbox(label="Prompt", lines=1)
        ip_adapter_images = gr.Gallery(label="Input Images", elem_id="image-gallery", type="pil", columns=2, preview=True)
        ip_adapter_scale = gr.Slider(label="IP Adapter Scale", minimum=0.0, maximum=1.0, step=0.1, value=0.5)
        negative_prompt = gr.Textbox(label="Negative Prompt", placeholder="Optional", lines=1)
        with gr.Row():
            seed = gr.Number(label="Seed", value=100)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
            center_crop = gr.Checkbox(label="Center Crop Image", value=False, info="If not checked, the images will be resized.")
        with gr.Row():
            # Output resolution controls, referenced by predict() and the example row
            width = gr.Slider(label="Width", minimum=256, maximum=2048, step=64, value=1024)
            height = gr.Slider(label="Height", minimum=256, maximum=2048, step=64, value=1024)
        with gr.Row():
            guidance_scale = gr.Slider(
                label="Guidance Scale", minimum=0.0, maximum=10.0, step=0.1, value=7.0
            )
            num_inference_steps = gr.Slider(
                label="Number of Inference Steps", minimum=1, maximum=100, step=1, value=25
            )
        result = gr.Gallery(label="Generated Images", columns=2, preview=True)
        run_button = gr.Button("Run")

        run_button.click(
            predict,
            inputs=[prompt, ip_adapter_images, ip_adapter_scale, negative_prompt, seed, randomize_seed, center_crop, width, height, guidance_scale, num_inference_steps],
            outputs=[result, seed],
        )

        gr.Examples(
            examples=examples,
            fn=predict,
            inputs=[prompt, ip_adapter_images, ip_adapter_scale, negative_prompt, seed, randomize_seed, center_crop, width, height],
            outputs=[result, seed],
            cache_examples="lazy",
        )

demo.queue(max_size=25, api_open=False).launch(show_api=False)
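
# A rough sketch of running this app locally, assuming the standard PyPI
# packages and a CUDA GPU; outside Hugging Face hardware the @spaces.GPU
# decorator acts as a pass-through:
#
#   pip install gradio spaces torch diffusers transformers accelerate
#   HF_TOKEN=<your-token> python app.py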