Jacobmadwed committed (verified)
Commit 2a344a4 · 1 Parent(s): bd0fe83

Update app.py

Files changed (1):
  1. app.py +451 -1
app.py CHANGED
@@ -1,3 +1,453 @@
+import sys
+sys.path.append('./')
+
+from typing import Tuple
+
+import os
+import cv2
+import math
+import torch
+import random
+import numpy as np
+import argparse
+
+import PIL
+from PIL import Image
+
+import diffusers
+from diffusers.utils import load_image
+from diffusers.models import ControlNetModel
+from diffusers import LCMScheduler
+
+from huggingface_hub import hf_hub_download
+
+import insightface
+from insightface.app import FaceAnalysis
+
+from style_template import styles
+from pipeline_stable_diffusion_xl_instantid_full import StableDiffusionXLInstantIDPipeline
+from model_util import load_models_xl, get_torch_device, torch_gc
+
 import gradio as gr
 
-gr.load("models/InstantX/InstantID").launch()
+# global variable
+MAX_SEED = np.iinfo(np.int32).max
+device = get_torch_device()
+dtype = torch.float16 if "cuda" in str(device) else torch.float32
+STYLE_NAMES = list(styles.keys())
+DEFAULT_STYLE_NAME = "Watercolor"
+
+# Load face encoder
+app = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+app.prepare(ctx_id=0, det_size=(640, 640))
+
+# Path to InstantID models
+face_adapter = './checkpoints/ip-adapter.bin'
+controlnet_path = './checkpoints/ControlNetModel'
+
+# Load pipeline
+controlnet = ControlNetModel.from_pretrained(controlnet_path, torch_dtype=dtype)
+
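+# main() builds the SDXL + InstantID pipeline for the requested base model and wires it
+# into the Gradio UI defined below.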
+def main(pretrained_model_name_or_path="wangqixun/YamerMIX_v8", enable_lcm_arg=False):
+
+    if pretrained_model_name_or_path.endswith(
+        ".ckpt"
+    ) or pretrained_model_name_or_path.endswith(".safetensors"):
+        scheduler_kwargs = hf_hub_download(
+            repo_id="wangqixun/YamerMIX_v8",
+            subfolder="scheduler",
+            filename="scheduler_config.json",
+        )
+
+        (tokenizers, text_encoders, unet, _, vae) = load_models_xl(
+            pretrained_model_name_or_path=pretrained_model_name_or_path,
+            scheduler_name=None,
+            weight_dtype=dtype,
+        )
+
+        scheduler = diffusers.EulerDiscreteScheduler.from_config(scheduler_kwargs)
+        pipe = StableDiffusionXLInstantIDPipeline(
+            vae=vae,
+            text_encoder=text_encoders[0],
+            text_encoder_2=text_encoders[1],
+            tokenizer=tokenizers[0],
+            tokenizer_2=tokenizers[1],
+            unet=unet,
+            scheduler=scheduler,
+            controlnet=controlnet,
+        ).to(device)
+
+    else:
+        pipe = StableDiffusionXLInstantIDPipeline.from_pretrained(
+            pretrained_model_name_or_path,
+            controlnet=controlnet,
+            torch_dtype=dtype,
+            safety_checker=None,
+            feature_extractor=None,
+        ).to(device)
+
+        pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+
+    pipe.load_ip_adapter_instantid(face_adapter)
+    # load and disable LCM
+    pipe.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+    pipe.disable_lora()
+
+    def toggle_lcm_ui(value):
+        if value:
+            return (
+                gr.update(minimum=0, maximum=100, step=1, value=5),
+                gr.update(minimum=0.1, maximum=20.0, step=0.1, value=1.5)
+            )
+        else:
+            return (
+                gr.update(minimum=5, maximum=100, step=1, value=30),
+                gr.update(minimum=0.1, maximum=20.0, step=0.1, value=5)
+            )
+
+    def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+        if randomize_seed:
+            seed = random.randint(0, MAX_SEED)
+        return seed
+
+    def remove_tips():
+        return gr.update(visible=False)
+
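+    # Example rows for gr.Examples below: (face image, prompt, style, negative prompt).
+    # run_for_examples renders them with fixed settings: 30 steps, 0.8 IdentityNet/adapter
+    # strength, guidance 5, seed 42, LCM off, face-region enhancement on.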
+    def get_example():
+        case = [
+            [
+                './examples/yann-lecun_resize.jpg',
+                "a man",
+                "Snow",
+                "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+            ],
+            [
+                './examples/musk_resize.jpeg',
+                "a man",
+                "Mars",
+                "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+            ],
+            [
+                './examples/sam_resize.png',
+                "a man",
+                "Jungle",
+                "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+            ],
+            [
+                './examples/schmidhuber_resize.png',
+                "a man",
+                "Neon",
+                "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+            ],
+            [
+                './examples/kaifu_resize.png',
+                "a man",
+                "Vibrant Color",
+                "(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, photo, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+            ],
+        ]
+        return case
+
+    def run_for_examples(face_file, prompt, style, negative_prompt):
+        return generate_image(face_file, None, prompt, negative_prompt, style, 30, 0.8, 0.8, 5, 42, False, True)
+
+    def convert_from_cv2_to_image(img: np.ndarray) -> Image:
+        return Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
+
+    def convert_from_image_to_cv2(img: Image) -> np.ndarray:
+        return cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
+
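+    # Render the five facial keypoints (eyes, nose, mouth corners) as coloured stick
+    # segments and dots on a black canvas; this is the ControlNet conditioning image.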
+    def draw_kps(image_pil, kps, color_list=[(255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255)]):
+        stickwidth = 4
+        limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
+        kps = np.array(kps)
+
+        w, h = image_pil.size
+        out_img = np.zeros([h, w, 3])
+
+        for i in range(len(limbSeq)):
+            index = limbSeq[i]
+            color = color_list[index[0]]
+
+            x = kps[index][:, 0]
+            y = kps[index][:, 1]
+            length = ((x[0] - x[1]) ** 2 + (y[0] - y[1]) ** 2) ** 0.5
+            angle = math.degrees(math.atan2(y[0] - y[1], x[0] - x[1]))
+            polygon = cv2.ellipse2Poly((int(np.mean(x)), int(np.mean(y))), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
+            out_img = cv2.fillConvexPoly(out_img.copy(), polygon, color)
+        out_img = (out_img * 0.6).astype(np.uint8)
+
+        for idx_kp, kp in enumerate(kps):
+            color = color_list[idx_kp]
+            x, y = kp
+            out_img = cv2.circle(out_img.copy(), (int(x), int(y)), 10, color, -1)
+
+        out_img_pil = Image.fromarray(out_img.astype(np.uint8))
+        return out_img_pil
+
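+    # Rescale the face/pose image to the min_side/max_side bounds and snap both
+    # dimensions down to multiples of base_pixel_number.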
+    def resize_img(input_image, max_side=1280, min_side=1024, size=None,
+                   pad_to_max_side=False, mode=PIL.Image.BILINEAR, base_pixel_number=64):
+
+        w, h = input_image.size
+        if size is not None:
+            w_resize_new, h_resize_new = size
+        else:
+            ratio = min_side / min(h, w)
+            w, h = round(ratio*w), round(ratio*h)
+            ratio = max_side / max(h, w)
+            input_image = input_image.resize([round(ratio*w), round(ratio*h)], mode)
+            w_resize_new = (round(ratio * w) // base_pixel_number) * base_pixel_number
+            h_resize_new = (round(ratio * h) // base_pixel_number) * base_pixel_number
+            input_image = input_image.resize([w_resize_new, h_resize_new], mode)
+
+        if pad_to_max_side:
+            res = np.ones([max_side, max_side, 3], dtype=np.uint8) * 255
+            offset_x = (max_side - w_resize_new) // 2
+            offset_y = (max_side - h_resize_new) // 2
+            res[offset_y:offset_y+h_resize_new, offset_x:offset_x+w_resize_new] = np.array(input_image)
+            input_image = Image.fromarray(res)
+        return input_image
+
+    def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
+        p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
+        return p.replace("{prompt}", positive), n + ' ' + negative
+
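+    # End-to-end generation: apply the style template, detect the largest face to get its
+    # identity embedding and keypoints, optionally take keypoints from a pose reference,
+    # then run the pipeline with the chosen IdentityNet and adapter strengths.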
+    def generate_image(face_image_path, pose_image_path, prompt, negative_prompt, style_name, num_steps, identitynet_strength_ratio, adapter_strength_ratio, guidance_scale, seed, enable_LCM, enhance_face_region, progress=gr.Progress(track_tqdm=True)):
+        if enable_LCM:
+            pipe.enable_lora()
+            pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
+        else:
+            pipe.disable_lora()
+            pipe.scheduler = diffusers.EulerDiscreteScheduler.from_config(pipe.scheduler.config)
+
+        if face_image_path is None:
+            raise gr.Error("Cannot find any input face image! Please upload a face image")
+
+        if prompt is None:
+            prompt = "a person"
+
+        # apply the style template
+        prompt, negative_prompt = apply_style(style_name, prompt, negative_prompt)
+
+        face_image = load_image(face_image_path)
+        face_image = resize_img(face_image)
+        face_image_cv2 = convert_from_image_to_cv2(face_image)
+        height, width, _ = face_image_cv2.shape
+
+        # Extract face features
+        face_info = app.get(face_image_cv2)
+
+        if len(face_info) == 0:
+            raise gr.Error("Cannot find any face in the image! Please upload another image of a person")
+
+        face_info = sorted(face_info, key=lambda x: (x['bbox'][2]-x['bbox'][0])*(x['bbox'][3]-x['bbox'][1]))[-1]  # only use the largest face
+        face_emb = face_info['embedding']
+        face_kps = draw_kps(convert_from_cv2_to_image(face_image_cv2), face_info['kps'])
+
+        if pose_image_path is not None:
+            pose_image = load_image(pose_image_path)
+            pose_image = resize_img(pose_image)
+            pose_image_cv2 = convert_from_image_to_cv2(pose_image)
+
+            face_info = app.get(pose_image_cv2)
+
+            if len(face_info) == 0:
+                raise gr.Error("Cannot find any face in the reference image! Please upload another image of a person")
+
+            face_info = face_info[-1]
+            face_kps = draw_kps(pose_image, face_info['kps'])
+
+            width, height = face_kps.size
+
+        if enhance_face_region:
+            control_mask = np.zeros([height, width, 3])
+            x1, y1, x2, y2 = face_info["bbox"]
+            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
+            control_mask[y1:y2, x1:x2] = 255
+            control_mask = Image.fromarray(control_mask.astype(np.uint8))
+        else:
+            control_mask = None
+
+        generator = torch.Generator(device=device).manual_seed(seed)
+
+        print("Start inference...")
+        print(f"[Debug] Prompt: {prompt}, \n[Debug] Neg Prompt: {negative_prompt}")
+
+        pipe.set_ip_adapter_scale(adapter_strength_ratio)
+        images = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image_embeds=face_emb,
+            image=face_kps,
+            control_mask=control_mask,
+            controlnet_conditioning_scale=float(identitynet_strength_ratio),
+            num_inference_steps=num_steps,
+            guidance_scale=guidance_scale,
+            height=height,
+            width=width,
+            generator=generator
+        ).images
+
+        return images[0], gr.update(visible=True)
+
+    ### Description
+    title = r"""
+    <h1 align="center">InstantID: Zero-shot Identity-Preserving Generation in Seconds</h1>
+    """
+
+    description = r"""
+    <b>Official 🤗 Gradio demo</b> for <a href='https://github.com/InstantID/InstantID' target='_blank'><b>InstantID: Zero-shot Identity-Preserving Generation in Seconds</b></a>.<br>
+
+    How to use:<br>
+    1. Upload an image with a face. For images with multiple faces, we will only detect the largest face. Ensure the face is not too small and is clearly visible without significant obstructions or blurring.
+    2. (Optional) You can upload another image as a reference for the face pose. If you don't, we will use the first detected face image to extract facial landmarks. If you used a cropped face in step 1, it is recommended to upload a pose reference to define a new face pose.
+    3. Enter a text prompt, as done in normal text-to-image models.
+    4. Click the <b>Submit</b> button to begin customization.
+    5. Share your customized photo with your friends and enjoy! 😊
+    """
+
+    article = r"""
+    ---
+    📝 **Citation**
+    <br>
+    If our work is helpful for your research or applications, please cite us via:
+    ```bibtex
+    @article{wang2024instantid,
+      title={InstantID: Zero-shot Identity-Preserving Generation in Seconds},
+      author={Wang, Qixun and Bai, Xu and Wang, Haofan and Qin, Zekui and Chen, Anthony},
+      journal={arXiv preprint arXiv:2401.07519},
+      year={2024}
+    }
+    ```
+    📧 **Contact**
+    <br>
+    If you have any questions, please feel free to open an issue or reach out to us directly at <b>[email protected]</b>.
+    """
+
+    tips = r"""
+    ### Usage tips of InstantID
+    1. If you're not satisfied with the similarity, try increasing the weight of "IdentityNet Strength" and "Adapter Strength".
+    2. If you feel that the saturation is too high, first decrease the Adapter strength. If it remains too high, then decrease the IdentityNet strength.
+    3. If you find that text control is not as expected, decrease the Adapter strength.
+    4. If you find that the realistic style is not good enough, switch to a more realistic base model from our GitHub repo.
+    """
+
+    css = '''
+    .gradio-container {width: 85% !important}
+    '''
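+    # Gradio layout: left column for face/pose uploads, prompt and advanced options;
+    # right column for the generated image and the usage-tips panel shown after a run.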
+    with gr.Blocks(css=css) as demo:
+
+        # description
+        gr.Markdown(title)
+        gr.Markdown(description)
+
+        with gr.Row():
+            with gr.Column():
+
+                # upload face image
+                face_file = gr.Image(label="Upload a photo of your face", type="filepath")
+
+                # optional: upload a reference pose image
+                pose_file = gr.Image(label="Upload a reference pose image (optional)", type="filepath")
+
+                # prompt
+                prompt = gr.Textbox(label="Prompt",
+                                    info="A simple prompt is enough to achieve good face fidelity",
+                                    placeholder="A photo of a person",
+                                    value="")
+
+                submit = gr.Button("Submit", variant="primary")
+
+                enable_LCM = gr.Checkbox(
+                    label="Enable Fast Inference with LCM", value=enable_lcm_arg,
+                    info="LCM speeds up inference; the trade-off is the quality of the generated image. It performs better with portrait face images than with distant faces",
+                )
+                style = gr.Dropdown(label="Style template", choices=STYLE_NAMES, value=DEFAULT_STYLE_NAME)
+
+                # strength
+                identitynet_strength_ratio = gr.Slider(
+                    label="IdentityNet strength (for fidelity)",
+                    minimum=0,
+                    maximum=1.5,
+                    step=0.05,
+                    value=0.80,
+                )
+                adapter_strength_ratio = gr.Slider(
+                    label="Image adapter strength (for detail)",
+                    minimum=0,
+                    maximum=1.5,
+                    step=0.05,
+                    value=0.80,
+                )
+
+                with gr.Accordion(open=False, label="Advanced Options"):
+                    negative_prompt = gr.Textbox(
+                        label="Negative Prompt",
+                        placeholder="low quality",
+                        value="(lowres, low quality, worst quality:1.2), (text:1.2), watermark, (frame:1.2), deformed, ugly, deformed eyes, blur, out of focus, blurry, deformed cat, deformed, photo, anthropomorphic cat, monochrome, pet collar, gun, weapon, blue, 3d, drones, drone, buildings in background, green",
+                    )
+                    num_steps = gr.Slider(
+                        label="Number of sample steps",
+                        minimum=20,
+                        maximum=100,
+                        step=1,
+                        value=5 if enable_lcm_arg else 30,
+                    )
+                    guidance_scale = gr.Slider(
+                        label="Guidance scale",
+                        minimum=0.1,
+                        maximum=10.0,
+                        step=0.1,
+                        value=0 if enable_lcm_arg else 5,
+                    )
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=0,
+                        maximum=MAX_SEED,
+                        step=1,
+                        value=42,
+                    )
+                    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                    enhance_face_region = gr.Checkbox(label="Enhance non-face region", value=True)
+
+            with gr.Column():
+                gallery = gr.Image(label="Generated Images")
+                usage_tips = gr.Markdown(label="Usage tips of InstantID", value=tips, visible=False)
+
+        submit.click(
+            fn=remove_tips,
+            outputs=usage_tips,
+        ).then(
+            fn=randomize_seed_fn,
+            inputs=[seed, randomize_seed],
+            outputs=seed,
+            queue=False,
+            api_name=False,
+        ).then(
+            fn=generate_image,
+            inputs=[face_file, pose_file, prompt, negative_prompt, style, num_steps, identitynet_strength_ratio, adapter_strength_ratio, guidance_scale, seed, enable_LCM, enhance_face_region],
+            outputs=[gallery, usage_tips]
+        )
+
+        enable_LCM.input(fn=toggle_lcm_ui, inputs=[enable_LCM], outputs=[num_steps, guidance_scale], queue=False)
+
+        gr.Examples(
+            examples=get_example(),
+            inputs=[face_file, prompt, style, negative_prompt],
+            run_on_click=True,
+            fn=run_for_examples,
+            outputs=[gallery, usage_tips],
+            cache_examples=True,
+        )
+
+        gr.Markdown(article)
+
+    demo.launch()
+
+if __name__ == "__main__":
+    def str2bool(v):
+        # argparse's bare type=bool treats any non-empty string (even "False") as True,
+        # so parse the flag and the ENABLE_LCM env var explicitly
+        return str(v).lower() in ("1", "true", "yes")
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--pretrained_model_name_or_path", type=str, default="wangqixun/YamerMIX_v8")
+    parser.add_argument("--enable_LCM", type=str2bool, default=str2bool(os.environ.get("ENABLE_LCM", False)))
+
+    args = parser.parse_args()
+
+    main(args.pretrained_model_name_or_path, args.enable_LCM)