# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from PIL import Image
import subprocess
import spaces
import torch
import gradio as gr
import string
import random, time, os, math
from src.flux.generate import generate_from_test_sample, seed_everything
from src.flux.pipeline_tools import CustomFluxPipeline, load_modulation_adapter, load_dit_lora
from src.utils.data_utils import get_train_config, image_grid, pil2tensor, json_dump, pad_to_square, cv2pil, merge_bboxes
from eval.tools.face_id import FaceID
from eval.tools.florence_sam import ObjectDetector
import shutil
import yaml
import numpy as np
from huggingface_hub import snapshot_download
print(os.getcwd())
os.environ["TORCH_HOME"] = os.path.join(os.getcwd(), "checkpoints")
dtype = torch.bfloat16
device = "cuda"
config_path = "train/config/XVerse_config_demo.yaml"
config = config_train = get_train_config(config_path)
config["model"]["dit_quant"] = "int8-quanto"
config["model"]["use_dit_lora"] = False
model = CustomFluxPipeline(
    config, device, torch_dtype=dtype,
)
model.pipe.set_progress_bar_config(leave=False)
face_model = FaceID(device)
detector = ObjectDetector(device)
config = get_train_config(config_path)
model.config = config
store_attn_map = False
ckpt_root = snapshot_download(repo_id="ByteDance/XVerse")
modulation_adapter = load_modulation_adapter(model, config, dtype, device, f"{ckpt_root}/modulation_adapter", is_training=False)
model.add_modulation_adapter(modulation_adapter)
if config["model"]["use_dit_lora"]:
load_dit_lora(model, model.pipe, config, dtype, device, f"{ckpt_root}", is_training=False)
num_inputs = 4
# Clear all image inputs by returning one None per slot.
def clear_images():
    return [None] * num_inputs
@spaces.GPU()
def det_seg_img(image, label):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    instance_result_dict = detector.get_multiple_instances(image, label, min_size=image.size[0] // 20)
    indices = list(range(len(instance_result_dict["instance_images"])))
    ins, bbox = merge_instances(image, indices, instance_result_dict["instance_bboxes"], instance_result_dict["instance_images"])
    return ins
@spaces.GPU()
def crop_face_img(image):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    # image = resize_keep_aspect_ratio(image, 1024)
    image = pad_to_square(image).resize((2048, 2048))
    face_bbox = face_model.detect(
        (pil2tensor(image).unsqueeze(0) * 255).to(torch.uint8).to(device), 1.4
    )[0]
    face = image.crop(face_bbox)
    return face
@spaces.GPU()
def vlm_img_caption(image):
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    try:
        caption = detector.detector.caption(image, "<CAPTION>").strip()
        if caption.endswith("."):
            caption = caption[:-1]
    except Exception as e:
        print(e)
        caption = ""
    caption = caption.lower()
    return caption
def generate_random_string(length=4):
    letters = string.ascii_letters  # upper- and lowercase ASCII letters
    result_str = ''.join(random.choice(letters) for _ in range(length))
    return result_str
def resize_keep_aspect_ratio(pil_image, target_size=1024):
    H, W = pil_image.height, pil_image.width
    target_area = target_size * target_size
    current_area = H * W
    scaling_factor = (target_area / current_area) ** 0.5  # sqrt(target_area / current_area)
    new_H = int(round(H * scaling_factor))
    new_W = int(round(W * scaling_factor))
    return pil_image.resize((new_W, new_H))
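# Example: a 1920x1080 input gets scaling_factor = sqrt(1024^2 / (1920*1080)) ~= 0.711
# and is resized to 1365x768, so the output area matches target_size^2 while the
# aspect ratio is preserved.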
# Containers for the per-slot input widgets, filled by the UI loop below
# (num_inputs = 4 image slots).
images = []
captions = []
face_btns = []
det_btns = []
vlm_btns = []
idip_checkboxes = []
def open_accordion_on_example_selection(*args):
    # Selecting an example row clears the fourth input slot.
    return None, "", False
@spaces.GPU
def generate_image(
    prompt,
    cond_size, target_height, target_width,
    seed,
    vae_skip_iter, control_weight_lambda,
    double_attention,  # hidden attention-mode checkbox
    single_attention,  # hidden attention-mode checkbox
    latent_dblora_scale_str,
    latent_sblora_scale_str, vae_lora_scale,
    *images_captions_faces,  # all image, caption, and checkbox values, flattened
):
    torch.cuda.empty_cache()
    num_images = 1
    # Split the flattened inputs back into images, captions, and ID checkboxes.
    images = list(images_captions_faces[:num_inputs])
    captions = list(images_captions_faces[num_inputs:2 * num_inputs])
    idips_checkboxes = list(images_captions_faces[2 * num_inputs:3 * num_inputs])
    print(f"Length of images: {len(images)}")
    print(f"Length of captions: {len(captions)}")
    print(f"Control weight lambda: {control_weight_lambda}")
if control_weight_lambda != "no":
parts = control_weight_lambda.split(',')
new_parts = []
for part in parts:
if ':' in part:
left, right = part.split(':')
values = right.split('/')
# 保存整体值
global_value = values[0]
id_value = values[1]
ip_value = values[2]
new_values = [global_value]
for is_id in idips_checkboxes:
if is_id:
new_values.append(id_value)
else:
new_values.append(ip_value)
new_part = f"{left}:{('/'.join(new_values))}"
new_parts.append(new_part)
else:
new_parts.append(part)
control_weight_lambda = ','.join(new_parts)
print(f"Control weight lambda: {control_weight_lambda}")
    src_inputs = []
    use_words = []
    cur_run_time = time.strftime("%m%d-%H%M%S")
    tmp_dir_root = "tmp/gradio_demo"
    temp_dir = f"{tmp_dir_root}/{cur_run_time}_{generate_random_string(4)}"
    os.makedirs(temp_dir, exist_ok=True)
    print(f"Temporary directory created: {temp_dir}")
    for i, (image_path, caption) in enumerate(zip(images, captions)):
        if image_path:
            if caption.startswith("a ") or caption.startswith("A "):
                word = caption[2:]
            else:
                word = caption
            # Replace the ENTi placeholder in the prompt with this image's caption.
            if f"ENT{i+1}" in prompt:
                prompt = prompt.replace(f"ENT{i+1}", caption)
            image = resize_keep_aspect_ratio(Image.open(image_path), 768)
            save_path = f"{temp_dir}/tmp_resized_input_{i}.png"
            image.save(save_path)
            input_image_path = save_path
            src_inputs.append(
                {
                    "image_path": input_image_path,
                    "caption": caption,
                }
            )
            use_words.append((i, word, word))
    test_sample = dict(
        input_images=[], position_delta=[0, -32],
        prompt=prompt,
        target_height=target_height,
        target_width=target_width,
        seed=seed,
        cond_size=cond_size,
        vae_skip_iter=vae_skip_iter,
        lora_scale=latent_dblora_scale_str,
        control_weight_lambda=control_weight_lambda,
        latent_sblora_scale=latent_sblora_scale_str,
        condition_sblora_scale=vae_lora_scale,
        double_attention=double_attention,
        single_attention=single_attention,
    )
    if len(src_inputs) > 0:
        test_sample["modulation"] = [
            dict(
                type="adapter",
                src_inputs=src_inputs,
                use_words=use_words,
            ),
        ]
    json_dump(test_sample, f"{temp_dir}/test_sample.json", 'utf-8')
    assert single_attention
    target_size = int(round((target_width * target_height) ** 0.5) // 16 * 16)
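    # e.g. 768x768 -> 768; 1024x768 -> round(sqrt(786432)) = 887 -> 880,
    # snapped down to a multiple of 16.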
    print(test_sample)
    model.config["train"]["dataset"]["val_condition_size"] = cond_size
    model.config["train"]["dataset"]["val_target_size"] = target_size
    if control_weight_lambda == "no":
        control_weight_lambda = None
    if vae_skip_iter == "no":
        vae_skip_iter = None
    use_condition_sblora_control = True
    use_latent_sblora_control = True
    image = generate_from_test_sample(
        test_sample, model.pipe, model.config,
        num_images=num_images,
        target_height=target_height,
        target_width=target_width,
        seed=seed,
        store_attn_map=store_attn_map,
        vae_skip_iter=vae_skip_iter,
        control_weight_lambda=control_weight_lambda,
        double_attention=double_attention,
        single_attention=single_attention,
        ip_scale=latent_dblora_scale_str,
        use_latent_sblora_control=use_latent_sblora_control,
        latent_sblora_scale=latent_sblora_scale_str,
        use_condition_sblora_control=use_condition_sblora_control,
        condition_sblora_scale=vae_lora_scale,
    )
    if isinstance(image, list):
        num_cols = 2
        num_rows = int(math.ceil(num_images / num_cols))
        image = image_grid(image, num_rows, num_cols)
    save_path = f"{temp_dir}/tmp_result.png"
    image.save(save_path)
    return image
def create_image_input(index):
    with gr.Column():
        image = gr.Image(type="filepath", label=f"Image {index + 1}")
        caption = gr.Textbox(label=f"Caption {index + 1}", value="")
        id_ip_checkbox = gr.Checkbox(value=False, label=f"ID or not {index + 1}", visible=True)
        with gr.Row():
            vlm_btn = gr.Button("Auto Caption")
            det_btn = gr.Button("Det & Seg")
            face_btn = gr.Button("Crop Face")
    return image, caption, face_btn, det_btn, vlm_btn, id_ip_checkbox
def merge_instances(orig_img, indices, ins_bboxes, ins_images):
    orig_image_width, orig_image_height = orig_img.width, orig_img.height
    final_img = Image.new("RGB", (orig_image_width, orig_image_height), color=(255, 255, 255))
    bboxes = []
    for i in indices:
        bbox = np.array(ins_bboxes[i], dtype=int).tolist()
        bboxes.append(bbox)
        img = cv2pil(ins_images[i])
        # Non-white pixels form the paste mask, so each instance crop keeps its
        # white background transparent when composited onto the canvas.
        mask = (np.array(img)[..., :3] != 255).any(axis=-1)
        mask = Image.fromarray(mask.astype(np.uint8) * 255, mode='L')
        final_img.paste(img, (bbox[0], bbox[1]), mask)
    # Crop the canvas to the union of all instance boxes.
    bbox = merge_bboxes(bboxes)
    img = final_img.crop(bbox)
    return img, bbox
if __name__ == "__main__":
with gr.Blocks() as demo:
gr.Markdown("""
## XVerse Demo
- **Paper**: [XVerse: A Versatile Image Generation Framework for Subject Consistency](https://arxiv.org/abs/2506.21416)
- **GitHub**: [ByteDance/XVerse](https://github.com/bytedance/XVerse)
- **Project Page**: [ByteDance/XVerse](https://bytedance.github.io/XVerse/)
#### Input Images and Prompts
* **Prompt**: The textual description guiding the image generation.
* **Upload Image**: Click "Image X" to upload your desired reference image.
* **Image Description**: Enter a description in the "Caption X" input box. You can also click "Auto Caption" to generate a description automatically.
* **Detection & Segmentation**: Click "Det & Seg" to perform detection and segmentation on the uploaded image.
* **Crop Face**: Use "Crop Face" to automatically crop the face from the image.
* **ID Checkbox**: Check or uncheck "ID or not" to determine whether to use ID-related weights for that specific input image.
> **⚠️ Important Usage Notes:**
>
> The main text prompt **MUST** include the exact text you entered in the `Image Description` field for each active image. **Generation will fail if this description is missing from the prompt.**
> * *Example*: If you upload two images and set their descriptions as "a man with red hair" (for Image 1) and "a woman with blue eyes" (for Image 2), your main prompt might be: "A `a man with red hair` walking beside `a woman with blue eyes` in a park."
> * You can then write your main prompt simply as: "`ENT1` walking beside `ENT2` in a park." The code will **automatically replace** these placeholders with the full description text before generation.
""")
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(label="Prompt", value="")
                clear_btn = gr.Button("Clear Input Images")
                with gr.Row():
                    for i in range(num_inputs):
                        image, caption, face_btn, det_btn, vlm_btn, id_ip_checkbox = create_image_input(i)
                        images.append(image)
                        idip_checkboxes.append(id_ip_checkbox)
                        captions.append(caption)
                        face_btns.append(face_btn)
                        det_btns.append(det_btn)
                        vlm_btns.append(vlm_btn)
            with gr.Column():
                output = gr.Image(label="Generated Image")
                seed = gr.Number(value=42, label="Seed", info="")
                gen_btn = gr.Button("Generate Image")
        with gr.Row():
            # Collect the remaining settings inside an Advanced accordion.
            with gr.Accordion("Advanced Settings", open=False):
                gr.Markdown("""The Gradio demo provides several parameters to control the image generation process:
* **Generated Height/Width**: Use the sliders to set the shape of the output image.
* **Weight_id/ip**: Adjust these weights. Higher values generally improve subject consistency but may slightly reduce the naturalness of the generated image.
* **latent_lora_scale and vae_lora_scale**: Control the LoRA scales. As with Weight_id/ip, larger values can improve subject consistency but may reduce naturalness.
* **vae_skip_iter_before and vae_skip_iter_after**: Configure the VAE skip iterations. Skipping more steps can improve naturalness but may compromise subject consistency.
""")
                # Lay out the size sliders and weight sliders in rows.
                with gr.Row():
                    target_height = gr.Slider(512, 1024, step=128, value=768, label="Generated Height", info="")
                    target_width = gr.Slider(512, 1024, step=128, value=768, label="Generated Width", info="")
                    cond_size = gr.Slider(256, 384, step=128, value=256, label="Condition Size", info="")
                with gr.Row():
                    # weight_id_ip_str is exposed as two separate sliders.
                    weight_id = gr.Slider(0.1, 5, step=0.1, value=3, label="weight_id")
                    weight_ip = gr.Slider(0.1, 5, step=0.1, value=5, label="weight_ip")
                with gr.Row():
                    # ip_scale_str is exposed as a slider; hidden Textboxes below hold the converted format.
                    ip_scale_str = gr.Slider(0.5, 1.5, step=0.01, value=0.85, label="latent_lora_scale")
                    vae_lora_scale = gr.Slider(0.5, 1.5, step=0.01, value=1.3, label="vae_lora_scale")
                with gr.Row():
                    # vae_skip_iter is exposed as two separate sliders.
                    vae_skip_iter_s1 = gr.Slider(0, 1, step=0.01, value=0.05, label="vae_skip_iter_before")
                    vae_skip_iter_s2 = gr.Slider(0, 1, step=0.01, value=0.8, label="vae_skip_iter_after")
                with gr.Row():
                    weight_id_ip_str = gr.Textbox(
                        value="0-1:1/3/5",
                        label="weight_id_ip_str",
                        interactive=False, visible=False
                    )
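                    # The two sliders are folded into the "start-end:global/id/ip"
                    # string consumed by generate_image; e.g. weight_id=3 and
                    # weight_ip=5 give "0-1:1/3/5".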
                    weight_id.change(
                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
                        inputs=[weight_id, weight_ip],
                        outputs=weight_id_ip_str
                    )
                    weight_ip.change(
                        lambda s1, s2: f"0-1:1/{s1}/{s2}",
                        inputs=[weight_id, weight_ip],
                        outputs=weight_id_ip_str
                    )
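                    # vae_skip_iter encodes two ranges of the denoising schedule;
                    # with the default sliders the string is "0-0.05:1,0.8-1:1",
                    # covering the first 5% and the final 20% of steps.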
                    vae_skip_iter = gr.Textbox(
                        value="0-0.05:1,0.8-1:1",
                        label="vae_skip_iter",
                        interactive=False, visible=False
                    )
                    vae_skip_iter_s1.change(
                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
                        outputs=vae_skip_iter
                    )
                    vae_skip_iter_s2.change(
                        lambda s1, s2: f"0-{s1}:1,{s2}-1:1",
                        inputs=[vae_skip_iter_s1, vae_skip_iter_s2],
                        outputs=vae_skip_iter
                    )
                with gr.Row():
                    db_latent_lora_scale_str = gr.Textbox(
                        value="0-1:0.85",
                        label="db_latent_lora_scale_str",
                        interactive=False, visible=False
                    )
                    sb_latent_lora_scale_str = gr.Textbox(
                        value="0-1:0.85",
                        label="sb_latent_lora_scale_str",
                        interactive=False, visible=False
                    )
                    vae_lora_scale_str = gr.Textbox(
                        value="0-1:1.3",
                        label="vae_lora_scale_str",
                        interactive=False, visible=False
                    )
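                    # All three LoRA-scale strings share the "start-end:scale"
                    # format, applied over the whole schedule (0-1).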
                    vae_lora_scale.change(
                        lambda s: f"0-1:{s}",
                        inputs=vae_lora_scale,
                        outputs=vae_lora_scale_str
                    )
                    ip_scale_str.change(
                        lambda s: [f"0-1:{s}", f"0-1:{s}"],
                        inputs=ip_scale_str,
                        outputs=[db_latent_lora_scale_str, sb_latent_lora_scale_str]
                    )
                with gr.Row():
                    double_attention = gr.Checkbox(value=False, label="Double Attention", visible=False)
                    single_attention = gr.Checkbox(value=True, label="Single Attention", visible=False)
        gr.Markdown("### Examples")
        gen_btn.click(
            generate_image,
            inputs=[
                prompt, cond_size, target_height, target_width, seed,
                vae_skip_iter, weight_id_ip_str,
                double_attention, single_attention,
                db_latent_lora_scale_str, sb_latent_lora_scale_str, vae_lora_scale_str,
                *images,
                *captions,
                *idip_checkboxes,
            ],
            outputs=output
        )
        # Clear all image inputs.
        clear_btn.click(clear_images, outputs=images)
        # Bind the Crop Face, Det & Seg, and Auto Caption buttons for each input slot.
        for i in range(num_inputs):
            face_btns[i].click(crop_face_img, inputs=[images[i]], outputs=[images[i]])
            det_btns[i].click(det_seg_img, inputs=[images[i], captions[i]], outputs=[images[i]])
            vlm_btns[i].click(vlm_img_caption, inputs=[images[i]], outputs=[captions[i]])
        examples = gr.Examples(
            examples=[
                [
                    "sample/hamster.jpg", None, None,
                    "a hamster", None, None,
                    False, False, False,
                    "ENT1 wearing a tiny hat",
                    42, 256, 768, 768,
                    3, 5,
                    0.85, 1.3,
                    0.05, 0.8,
                ],
                [
                    "sample/woman.jpg", None, None,
                    "a woman", None, None,
                    True, False, False,
                    "ENT1 in a red dress is smiling",
                    42, 256, 768, 768,
                    3, 5,
                    0.85, 1.3,
                    0.05, 0.8,
                ],
                [
                    "sample/woman.jpg", "sample/girl.jpg", None,
                    "a woman", "a girl", None,
                    True, True, False,
                    "ENT1 and ENT2 standing together in a park.",
                    42, 256, 768, 768,
                    2, 5,
                    0.85, 1.3,
                    0.05, 0.8,
                ],
                [
                    "sample/woman.jpg", "sample/girl.jpg", "sample/old_man.jpg",
                    "a woman", "a girl", "an old man",
                    True, True, True,
                    "ENT1, ENT2, and ENT3 standing together in a park.",
                    42, 256, 768, 768,
                    2.5, 5,
                    0.8, 1.2,
                    0.05, 0.8,
                ],
            ],
            inputs=[
                images[0], images[1], images[2],
                captions[0], captions[1], captions[2],
                idip_checkboxes[0], idip_checkboxes[1], idip_checkboxes[2],
                prompt, seed,
                cond_size,
                target_height,
                target_width,
                weight_id,
                weight_ip,
                ip_scale_str,
                vae_lora_scale,
                vae_skip_iter_s1,
                vae_skip_iter_s2,
            ],
            outputs=[images[3], captions[3], idip_checkboxes[3]],
            fn=open_accordion_on_example_selection,
            run_on_click=True,
            cache_examples=False,
            label="Examples"
        )
    demo.queue()
    demo.launch()