Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -30,13 +30,14 @@ pipe = StableDiffusionPipeline.from_pretrained(
|
|
30 |
base_model_path,
|
31 |
torch_dtype=torch.float16,
|
32 |
scheduler=noise_scheduler,
|
33 |
-
vae=vae
|
34 |
-
safety_checker=None # Disconnected the safety checker
|
35 |
).to(device)
|
36 |
|
|
|
37 |
ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
|
38 |
ip_model_plus = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_plus_ckpt, device)
|
39 |
|
|
|
40 |
app = FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
|
41 |
app.prepare(ctx_id=0, det_size=(640, 640))
|
42 |
|
@@ -52,7 +53,7 @@ def generate_image(images, prompt, negative_prompt, preserve_face_structure, fac
|
|
52 |
faceid_embed = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
|
53 |
faceid_all_embeds.append(faceid_embed)
|
54 |
if(first_iteration and preserve_face_structure):
|
55 |
-
face_image = face_align.norm_crop(face, landmark=faces[0].kps, image_size=224)
|
56 |
first_iteration = False
|
57 |
|
58 |
average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
|
|
|
30 |
base_model_path,
|
31 |
torch_dtype=torch.float16,
|
32 |
scheduler=noise_scheduler,
|
33 |
+
vae=vae
|
|
|
34 |
).to(device)
|
35 |
|
36 |
+
# Instantiate IPAdapter models
|
37 |
ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
|
38 |
ip_model_plus = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_plus_ckpt, device)
|
39 |
|
40 |
+
# Initialize the FaceAnalysis application
|
41 |
app = FaceAnalysis(name="buffalo_l", providers=['CPUExecutionProvider'])
|
42 |
app.prepare(ctx_id=0, det_size=(640, 640))
|
43 |
|
|
|
53 |
faceid_embed = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)
|
54 |
faceid_all_embeds.append(faceid_embed)
|
55 |
if(first_iteration and preserve_face_structure):
|
56 |
+
face_image = face_align.norm_crop(face, landmark=faces[0].kps, image_size=224) # you can also segment the face
|
57 |
first_iteration = False
|
58 |
|
59 |
average_embedding = torch.mean(torch.stack(faceid_all_embeds, dim=0), dim=0)
|