team11aiml committed on
Commit 839bc4d · verified · 1 Parent(s): db953e2

Update app.py

Files changed (1): app.py (+610, -603)
app.py CHANGED
@@ -1,604 +1,611 @@
1
+ import cv2
2
+ import torch
3
+ import random
4
+ import numpy as np
5
+
6
+ import spaces
7
+
8
+ import PIL
9
+ from PIL import Image
10
+ from typing import Tuple
11
+
12
+ import diffusers
13
+ from diffusers.utils import load_image
14
+ from diffusers.models import ControlNetModel
15
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
16
+
17
+ from huggingface_hub import hf_hub_download
18
+
19
+ from insightface.app import FaceAnalysis
20
+
21
+ from style_template import styles
22
+ from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline, draw_kps
23
+
24
+ from controlnet_aux import OpenposeDetector
25
+
26
+ import gradio as gr
27
+
28
+ from depth_anything.dpt import DepthAnything
29
+ from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
30
+
31
+ import torch.nn.functional as F
32
+ from torchvision.transforms import Compose
33
+
34
+ # global variable
35
+ MAX_SEED = np.iinfo(np.int32).max
36
+ device = "cuda" if torch.cuda.is_available() else "cpu"
37
+ dtype = torch.float16 if str(device).__contains__("cuda") else torch.float32
38
+ STYLE_NAMES = list(styles.keys())
39
+ DEFAULT_STYLE_NAME = "Spring Festival"
40
+ enable_lcm_arg = False
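+ # float16 on GPU, float32 on CPU; MAX_SEED is the upper bound used by the seed slider
+ # and by randomize_seed_fn below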
41
+
42
+ # download the InstantID checkpoints (hf_hub_download is already imported above)
45
+ hf_hub_download(repo_id="InstantX/InstantID", filename="ControlNetModel/config.json", local_dir="./checkpoints")
46
+ hf_hub_download(
47
+ repo_id="InstantX/InstantID",
48
+ filename="ControlNetModel/diffusion_pytorch_model.safetensors",
49
+ local_dir="./checkpoints",
50
+ )
51
+ hf_hub_download(repo_id="InstantX/InstantID", filename="ip-adapter.bin", local_dir="./checkpoints")
52
+
53
+ # Load face encoder
54
+ app = FaceAnalysis(
55
+ name="antelopev2",
56
+ root="./",
57
+ providers=["CPUExecutionProvider"],
58
+ )
59
+ app.prepare(ctx_id=0, det_size=(640, 640))
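+ # app.get() returns, for each detected face, a bounding box ("bbox"), facial keypoints
+ # ("kps") and an identity embedding ("embedding") that conditions the IP-Adapter below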
60
+
61
+ # Load OpenPose
62
+ openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")
63
+
64
+ depth_anything = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(device).eval()
65
+
66
+ transform = Compose([
67
+ Resize(
68
+ width=518,
69
+ height=518,
70
+ resize_target=False,
71
+ keep_aspect_ratio=True,
72
+ ensure_multiple_of=14,
73
+ resize_method='lower_bound',
74
+ image_interpolation_method=cv2.INTER_CUBIC,
75
+ ),
76
+ NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
77
+ PrepareForNet(),
78
+ ])
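+ # Depth Anything preprocessing: resize so the shorter side is at least 518 px with both
+ # sides a multiple of 14 (the ViT patch size), then normalize with ImageNet statistics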
79
+
80
+ # Path to InstantID models
81
+ face_adapter = f"./checkpoints/ip-adapter.bin"
82
+ controlnet_path = f"./checkpoints/ControlNetModel"
83
+
84
+ # Load pipeline face ControlNetModel
85
+ controlnet_identitynet = ControlNetModel.from_pretrained(
86
+ controlnet_path, torch_dtype=dtype
87
+ )
88
+
89
+ # controlnet-pose/canny/depth
90
+ controlnet_pose_model = "thibaud/controlnet-openpose-sdxl-1.0"
91
+ controlnet_canny_model = "diffusers/controlnet-canny-sdxl-1.0"
92
+ controlnet_depth_model = "diffusers/controlnet-depth-sdxl-1.0-small"
93
+
94
+ controlnet_pose = ControlNetModel.from_pretrained(
95
+ controlnet_pose_model, torch_dtype=dtype
96
+ ).to(device)
97
+ controlnet_canny = ControlNetModel.from_pretrained(
98
+ controlnet_canny_model, torch_dtype=dtype
99
+ ).to(device)
100
+ controlnet_depth = ControlNetModel.from_pretrained(
101
+ controlnet_depth_model, torch_dtype=dtype
102
+ ).to(device)
103
+
104
+ def get_depth_map(image):
105
+
106
+ image = np.array(image) / 255.0
107
+
108
+ h, w = image.shape[:2]
109
+
110
+ image = transform({'image': image})['image']
111
+ image = torch.from_numpy(image).unsqueeze(0).to(device)
112
+
113
+ with torch.no_grad():
114
+ depth = depth_anything(image)
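+ # resize the raw prediction back to the input resolution and min-max normalize it
+ # to 0-255 so it can be rendered as a grayscale depth image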
115
+
116
+ depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
117
+ depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
118
+
119
+ depth = depth.cpu().numpy().astype(np.uint8)
120
+
121
+ depth_image = Image.fromarray(depth)
122
+
123
+ return depth_image
124
+
125
+ def get_canny_image(image, t1=100, t2=200):
126
+ image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
127
+ edges = cv2.Canny(image, t1, t2)
128
+ return Image.fromarray(edges, "L")
129
+
130
+ controlnet_map = {
131
+ "pose": controlnet_pose,
132
+ "canny": controlnet_canny,
133
+ "depth": controlnet_depth,
134
+ }
135
+ controlnet_map_fn = {
136
+ "pose": openpose,
137
+ "canny": get_canny_image,
138
+ "depth": get_depth_map,
139
+ }
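+ # a "pose"/"canny"/"depth" selection is resolved through these two maps: controlnet_map
+ # picks the ControlNet weights and controlnet_map_fn the preprocessor that builds the
+ # corresponding conditioning image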
140
+
141
+ pretrained_model_name_or_path = "wangqixun/YamerMIX_v8"
142
+
143
+ pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
144
+ pretrained_model_name_or_path,
145
+ controlnet=[controlnet_identitynet],
146
+ torch_dtype=dtype,
147
+ safety_checker=None,
148
+ feature_extractor=None,
149
+ ).to(device)
150
+
151
+ pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(
152
+ pipe.scheduler.config
153
+ )
154
+
155
+ # load and disable LCM
156
+ pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
157
+ pipe.disable_lora()
158
+
159
+ pipe.cuda()
160
+ pipe.load_ip_adapter_instantid(face_adapter)
161
+ pipe.image_proj_model.to("cuda")
162
+ pipe.unet.to("cuda")
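+ # the LCM LoRA stays loaded but disabled; generate_image() re-enables it (and swaps in
+ # LCMScheduler) only when "Enable Fast Inference with LCM" is checked in the UI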
163
+
164
+ def toggle_lcm_ui(value):
165
+ if value:
166
+ return (
167
+ gr.update(minimum=0, maximum=100, step=1, value=5),
168
+ gr.update(minimum=0.1, maximum=20.0, step=0.1, value=1.5),
169
+ )
170
+ else:
171
+ return (
172
+ gr.update(minimum=5, maximum=100, step=1, value=30),
173
+ gr.update(minimum=0.1, maximum=20.0, step=0.1, value=5),
174
+ )
175
+
176
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
177
+ if randomize_seed:
178
+ seed = random.randint(0, MAX_SEED)
179
+ return seed
180
+
181
+ def remove_tips():
182
+ return gr.update(visible=False)
183
+
184
+ def get_example():
185
+ case = [
186
+
187
+ [
188
+ "./examples/musk_resize.jpeg",
189
+ "./examples/poses/pose2.jpg",
190
+ "a man flying in the sky in Mars",
191
+ "Mars",
192
+ "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
193
+ ],
194
+
195
+ ]
196
+ return case
197
+
198
+ def run_for_examples(face_file, pose_file, prompt, style, negative_prompt):
199
+ return generate_image(
200
+ face_file,
201
+ pose_file,
202
+ prompt,
203
+ negative_prompt,
204
+ style,
205
+ 20, # num_steps
206
+ 0.8, # identitynet_strength_ratio
207
+ 0.8, # adapter_strength_ratio
208
+ 0.4, # pose_strength
209
+ 0.3, # canny_strength
210
+ 0.5, # depth_strength
211
+ ["depth", "canny", "pose"], # controlnet_selection
212
+ 5.0, # guidance_scale
213
+ 42, # seed
214
+ "EulerDiscreteScheduler", # scheduler
215
+ False, # enable_LCM
216
+ True, # enable_Face_Region
217
+ )
218
+
219
+ def convert_from_cv2_to_image(img: np.ndarray) -> Image:
220
+ return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
221
+
222
+ def convert_from_image_to_cv2(img: Image) -> np.ndarray:
223
+ return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
224
+
225
+ def resize_img(
226
+ input_image,
227
+ max_side=1280,
228
+ min_side=1024,
229
+ size=None,
230
+ pad_to_max_side=False,
231
+ mode=PIL.Image.BILINEAR,
232
+ base_pixel_number=64,
233
+ ):
234
+ w, h = input_image.size
235
+ if size is not None:
236
+ w_resize_new, h_resize_new = size
237
+ else:
238
+ ratio = min_side / min(h, w)
239
+ w, h = round(ratio * w), round(ratio * h)
240
+ ratio = max_side / max(h, w)
241
+ input_image = input_image.resize([round(ratio * w), round(ratio * h)], mode)
242
+ w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
243
+ h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
244
+ input_image = input_image.resize([w_resize_new, h_resize_new], mode)
245
+
246
+ if pad_to_max_side:
247
+ res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
248
+ offset_x = (max_side - w_resize_new) // 2
249
+ offset_y = (max_side - h_resize_new) // 2
250
+ res[
251
+ offset_y : offset_y + h_resize_new, offset_x : offset_x + w_resize_new
252
+ ] = np.array(input_image)
253
+ input_image = Image.fromarray(res)
254
+ return input_image
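+ # worked example for resize_img: a 3000x2000 input with max_side=1024 (as called in
+ # generate_image) is scaled to 1536x1024, then to 1024x683, and finally snapped down to
+ # 1024x640 so both sides are multiples of base_pixel_number=64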
255
+
256
+ def apply_style(
257
+ style_name: str, positive: str, negative: str = ""
258
+ ) -> Tuple[str, str]:
259
+ p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
260
+ return p.replace("{prompt}", positive), n + " " + negative
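+ # a style template such as "cinematic photo of {prompt}, dramatic lighting" (illustrative
+ # only; the real templates live in style_template.py) has the user prompt substituted in,
+ # and the style's negative prompt is concatenated with the user-supplied one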
261
+
262
+ @spaces.GPU
263
+ def generate_image(
264
+ face_image_path,
265
+ pose_image_path,
266
+ prompt,
267
+ negative_prompt,
268
+ style_name,
269
+ num_steps,
270
+ identitynet_strength_ratio,
271
+ adapter_strength_ratio,
272
+ pose_strength,
273
+ canny_strength,
274
+ depth_strength,
275
+ controlnet_selection,
276
+ guidance_scale,
277
+ seed,
278
+ scheduler,
279
+ enable_LCM,
280
+ enhance_face_region,
281
+ progress=gr.Progress(track_tqdm=True),
282
+ ):
283
+
284
+ if enable_LCM:
285
+ pipe.scheduler = diffusers.LCMScheduler.from_config(pipe.scheduler.config)
286
+ pipe.enable_lora()
287
+ else:
288
+ pipe.disable_lora()
289
+ scheduler_class_name = scheduler.split("-")[0]
290
+
291
+ add_kwargs = {}
292
+ if len(scheduler.split("-")) > 1:
293
+ add_kwargs["use_karras_sigmas"] = True
294
+ if len(scheduler.split("-")) > 2:
295
+ add_kwargs["algorithm_type"] = "sde-dpmsolver++"
296
+ scheduler = getattr(diffusers, scheduler_class_name)
297
+ pipe.scheduler = scheduler.from_config(pipe.scheduler.config, **add_kwargs)
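+ # e.g. "DPMSolverMultistepScheduler-Karras-SDE" resolves to DPMSolverMultistepScheduler
+ # with use_karras_sigmas=True and algorithm_type="sde-dpmsolver++"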
298
+
299
+ if face_image_path is None:
300
+ raise gr.Error(
301
+ f"Cannot find any input face image! Please upload the face image"
302
+ )
303
+
304
+ if prompt is None:
305
+ prompt = "a person"
306
+
307
+ # apply the style template
308
+ prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
309
+
310
+ face_image = load_image(face_image_path)
311
+ face_image = resize_img(face_image, max_side=1024)
312
+ face_image_cv2 = convert_from_image_to_cv2(face_image)
313
+ height, width, _ = face_image_cv2.shape
314
+
315
+ # Extract face features
316
+ face_info = app.get(face_image_cv2)
317
+
318
+ if len(face_info) == 0:
319
+ raise gr.Error(
320
+ f"Unable to detect a face in the image. Please upload a different photo with a clear face."
321
+ )
322
+
323
+ face_info = sorted(
+ face_info,
+ key=lambda x: (x["bbox"][2] - x["bbox"][0]) * (x["bbox"][3] - x["bbox"][1]),
+ )[-1] # only use the largest face (by bounding-box area)
329
+ face_emb = face_info["embedding"]
330
+ face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info["kps"])
331
+ img_controlnet = face_image
332
+ if pose_image_path is not None:
333
+ pose_image = load_image(pose_image_path)
334
+ pose_image = resize_img(pose_image, max_side=1024)
335
+ img_controlnet = pose_image
336
+ pose_image_cv2 = convert_from_image_to_cv2(pose_image)
337
+
338
+ face_info = app.get(pose_image_cv2)
339
+
340
+ if len(face_info) == 0:
341
+ raise gr.Error(
342
+ f"Cannot find any face in the reference image! Please upload another person image"
343
+ )
344
+
345
+ face_info = face_info[-1]
346
+ face_kps = draw_kps(pose_image, face_info["kps"])
347
+
348
+ width, height = face_kps.size
349
+
350
+ if enhance_face_region:
351
+ control_mask = np.zeros([height, width, 3])
352
+ x1, y1, x2, y2 = face_info["bbox"]
353
+ x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
354
+ control_mask[y1:y2, x1:x2] = 255
355
+ control_mask = Image.fromarray(control_mask.astype(np.uint8))
356
+ else:
357
+ control_mask = None
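+ # when enabled, the mask is white over the detected face box and black elsewhere; it is
+ # passed to the pipeline as control_mask so the face region can be treated separately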
358
+
359
+ if len(controlnet_selection) > 0:
360
+ controlnet_scales = {
361
+ "pose": pose_strength,
362
+ "canny": canny_strength,
363
+ "depth": depth_strength,
364
+ }
365
+ pipe.controlnet = MultiControlNetModel(
366
+ [controlnet_identitynet]
367
+ + [controlnet_map[s] for s in controlnet_selection]
368
+ )
369
+ control_scales = [float(identitynet_strength_ratio)] + [
370
+ controlnet_scales[s] for s in controlnet_selection
371
+ ]
372
+ control_images = [face_kps] + [
373
+ controlnet_map_fn[s](img_controlnet).resize((width, height))
374
+ for s in controlnet_selection
375
+ ]
376
+ else:
377
+ pipe.controlnet = controlnet_identitynet
378
+ control_scales = float(identitynet_strength_ratio)
379
+ control_images = face_kps
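+ # with extra ControlNets selected, the pipeline receives aligned lists (IdentityNet first,
+ # then each selection) of models, conditioning scales and conditioning images; otherwise
+ # it falls back to IdentityNet alone with just the face-keypoint image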
380
+
381
+ generator = torch.Generator(device=device).manual_seed(seed)
382
+
383
+ print("Start inference...")
384
+ print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
385
+
386
+ pipe.set_ip_adapter_scale(adapter_strength_ratio)
387
+ images = pipe(
388
+ prompt=prompt,
389
+ negative_prompt=negative_prompt,
390
+ image_embeds=face_emb,
391
+ image=control_images,
392
+ control_mask=control_mask,
393
+ controlnet_conditioning_scale=control_scales,
394
+ num_inference_steps=num_steps,
395
+ guidance_scale=guidance_scale,
396
+ height=height,
397
+ width=width,
398
+ generator=generator,
399
+ ).images
400
+
401
+ return images[0], gr.update(visible=True)
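+ # the second return value un-hides the usage-tips Markdown rendered below the gallery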
402
+
403
+ # Description
404
+ title = r"""
405
+ <h1 align="center">Progressive Pose Attention for Person Image Gen (PPAG)</h1>
406
+ """
407
+
408
+ description = r"""
409
+ <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/YourRepo/YourProject' target='_blank'><b>Pose Transfer</b></a>.<br>
410
+ This demo allows you to transfer poses from one image to another.<br>
411
+ How to use:<br>
412
+ 1. Upload an image with a face. If the image contains multiple faces, only the largest one is used. Make sure the face is not too small and is clearly visible, without significant obstruction or blurring.
413
+ 2. Upload another image as a reference for the pose.
414
+ 3. (Optional) Select one or more ControlNet models (pose skeleton, canny edges, depth) to further guide generation; IdentityNet is always used, and depth is selected by default. You can adjust the strength of each ControlNet to control its influence.
415
+ 4. Enter a text prompt, as done in normal text-to-image models.
416
+ 5. Click the <b>Submit</b> button to begin pose transfer.
417
+ 6. Share your customized photo with your friends and enjoy! 😊"""
418
+
419
+ article = r"""
420
+ """
421
+ tips = r"""
422
+ ### Usage tips for InstantID
+ 1. If you're not satisfied with the similarity, try increasing "IdentityNet strength" and "Adapter strength".
+ 2. If the saturation looks too high, first decrease the Adapter strength; if it is still too high, decrease the IdentityNet strength.
+ 3. If text control is weaker than expected, decrease the Adapter strength.
+ 4. If the realistic style is not good enough, visit our GitHub repo and switch to a more realistic base model.
427
+ """
428
+
429
+
430
+ css = """
431
+ .gradio-container {width: 85% !important}
432
+ """
433
+ with gr.Blocks(css=css) as demo:
434
+ # description
435
+ gr.Markdown(title)
436
+ gr.Markdown(description)
437
+
438
+ with gr.Row():
439
+ with gr.Column():
440
+ with gr.Row(equal_height=True):
441
+ # upload face image
442
+ face_file = gr.Image(
443
+ label="Upload a photo of your face", type="filepath"
444
+ )
445
+ # optional: upload a reference pose image
446
+ pose_file = gr.Image(
447
+ label="Upload a reference pose image (Optional)",
448
+ type="filepath",
449
+ )
450
+
451
+ # prompt
452
+ prompt = gr.Textbox(
453
+ label="Prompt",
454
+ info="Give simple prompt is enough to achieve good face fidelity",
455
+ placeholder="A photo of a person",
456
+ value="",
457
+ )
458
+
459
+ submit = gr.Button("Submit", variant="primary")
460
+ enable_LCM = gr.Checkbox(
461
+ label="Enable Fast Inference with LCM", value=enable_lcm_arg,
462
+ info="LCM speeds up the inference step, the trade-off is the quality of the generated image. It performs better with portrait face images rather than distant faces",
463
+ )
464
+ style = gr.Dropdown(
465
+ label="Style template",
466
+ choices=STYLE_NAMES,
467
+ value=DEFAULT_STYLE_NAME,
468
+ )
469
+
470
+ # strength
471
+ identitynet_strength_ratio = gr.Slider(
472
+ label="IdentityNet strength (for fidelity)",
473
+ minimum=0,
474
+ maximum=1.5,
475
+ step=0.05,
476
+ value=0.80,
477
+ )
478
+ adapter_strength_ratio = gr.Slider(
479
+ label="Image adapter strength (for detail)",
480
+ minimum=0,
481
+ maximum=1.5,
482
+ step=0.05,
483
+ value=0.80,
484
+ )
485
+ with gr.Accordion("Controlnet"):
486
+ controlnet_selection = gr.CheckboxGroup(
487
+ ["pose", "canny", "depth"], label="Controlnet", value=["depth"],
488
+ info="Use pose for skeleton inference, canny for edge detection, and depth for depth map estimation. You can try all three to control the generation process"
489
+ )
490
+ pose_strength = gr.Slider(
491
+ label="Pose strength",
492
+ minimum=0,
493
+ maximum=1.5,
494
+ step=0.05,
495
+ value=0.40,
496
+ )
497
+ canny_strength = gr.Slider(
498
+ label="Canny strength",
499
+ minimum=0,
500
+ maximum=1.5,
501
+ step=0.05,
502
+ value=0.40,
503
+ )
504
+ depth_strength = gr.Slider(
505
+ label="Depth strength",
506
+ minimum=0,
507
+ maximum=1.5,
508
+ step=0.05,
509
+ value=0.40,
510
+ )
511
+ with gr.Accordion(open=False, label="Advanced Options"):
512
+ negative_prompt = gr.Textbox(
513
+ label="Negative Prompt",
514
+ placeholder="low quality",
515
+ value="(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
516
+ )
517
+ num_steps = gr.Slider(
518
+ label="Number of sample steps",
519
+ minimum=1,
520
+ maximum=100,
521
+ step=1,
522
+ value=5 if enable_lcm_arg else 30,
523
+ )
524
+ guidance_scale = gr.Slider(
525
+ label="Guidance scale",
526
+ minimum=0.1,
527
+ maximum=20.0,
528
+ step=0.1,
529
+ value=0.0 if enable_lcm_arg else 5.0,
530
+ )
531
+ seed = gr.Slider(
532
+ label="Seed",
533
+ minimum=0,
534
+ maximum=MAX_SEED,
535
+ step=1,
536
+ value=42,
537
+ )
538
+ schedulers = [
539
+ "DEISMultistepScheduler",
540
+ "HeunDiscreteScheduler",
541
+ "EulerDiscreteScheduler",
542
+ "DPMSolverMultistepScheduler",
543
+ "DPMSolverMultistepScheduler-Karras",
544
+ "DPMSolverMultistepScheduler-Karras-SDE",
545
+ ]
546
+ scheduler = gr.Dropdown(
547
+ label="Schedulers",
548
+ choices=schedulers,
549
+ value="EulerDiscreteScheduler",
550
+ )
551
+ randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
552
+ enhance_face_region = gr.Checkbox(label="Enhance non-face region", value=True)
553
+
554
+ with gr.Column(scale=1):
555
+ gallery = gr.Image(label="Generated Images")
556
+ usage_tips = gr.Markdown(
557
+ label="InstantID Usage Tips", value=tips, visible=False
558
+ )
559
+
560
+ submit.click(
561
+ fn=remove_tips,
562
+ outputs=usage_tips,
563
+ ).then(
564
+ fn=randomize_seed_fn,
565
+ inputs=[seed, randomize_seed],
566
+ outputs=seed,
567
+ queue=False,
568
+ api_name=False,
569
+ ).then(
570
+ fn=generate_image,
571
+ inputs=[
572
+ face_file,
573
+ pose_file,
574
+ prompt,
575
+ negative_prompt,
576
+ style,
577
+ num_steps,
578
+ identitynet_strength_ratio,
579
+ adapter_strength_ratio,
580
+ pose_strength,
581
+ canny_strength,
582
+ depth_strength,
583
+ controlnet_selection,
584
+ guidance_scale,
585
+ seed,
586
+ scheduler,
587
+ enable_LCM,
588
+ enhance_face_region,
589
+ ],
590
+ outputs=[gallery, usage_tips],
591
+ )
592
+
593
+ enable_LCM.input(
594
+ fn=toggle_lcm_ui,
595
+ inputs=[enable_LCM],
596
+ outputs=[num_steps, guidance_scale],
597
+ queue=False,
598
+ )
599
+
600
+ gr.Examples(
601
+ examples=get_example(),
602
+ inputs=[face_file, pose_file, prompt, style, negative_prompt],
603
+ fn=run_for_examples,
604
+ outputs=[gallery, usage_tips],
605
+ cache_examples=True,
606
+ )
607
+
608
+ gr.Markdown(article)
609
+
610
+ demo.queue(api_open=False)
611
  demo.launch()