ohayonguy committed
Commit: 3dc9cef
Parent(s): 3b1fe09
fixing sizes
app.py CHANGED
@@ -66,7 +66,7 @@ def enhance_face(img, face_helper, has_aligned, num_flow_steps, only_center_face
         face_helper.cropped_faces = [img]
     else:
         face_helper.read_image(img)
-        face_helper.get_face_landmarks_5(only_center_face=only_center_face,
+        face_helper.get_face_landmarks_5(only_center_face=only_center_face, eye_dist_threshold=5)
         # eye_dist_threshold=5: skip faces whose eye distance is smaller than 5 pixels
         # TODO: even with eye_dist_threshold, it will still introduce wrong detections and restorations.
         # align and warp each face
@@ -74,14 +74,18 @@ def enhance_face(img, face_helper, has_aligned, num_flow_steps, only_center_face
     # face restoration
     for cropped_face in face_helper.cropped_faces:
         # prepare data
+        h, w = cropped_face.shape[0], cropped_face.shape[1]
+        cropped_face = cv2.resize(cropped_face, (512, 512), interpolation=cv2.INTER_LINEAR)
         cropped_face_t = img2tensor(cropped_face / 255., bgr2rgb=True, float32=True)
         cropped_face_t = cropped_face_t.unsqueeze(0).to(device)

         dummy_x = torch.zeros_like(cropped_face_t)
-        with torch.autocast("cuda", dtype=torch.bfloat16):
-            output = generate_reconstructions(pmrf, dummy_x, cropped_face_t, None, num_flow_steps, device)
+        # with torch.autocast("cuda", dtype=torch.bfloat16):
+        output = generate_reconstructions(pmrf, dummy_x, cropped_face_t, None, num_flow_steps, device)
         restored_face = tensor2img(output.to(torch.float32).squeeze(0), rgb2bgr=True, min_max=(0, 1))
         # restored_face = cropped_face
+        restored_face = cv2.resize(restored_face, (h, w), interpolation=cv2.INTER_LINEAR)
+

         restored_face = restored_face.astype('uint8')
         face_helper.add_restored_face(restored_face)
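The substance of the change is a resize round-trip around the restoration call: each face crop is scaled to the fixed 512x512 resolution the restorer works at, and the restored output is scaled back to the crop's original size. Below is a minimal sketch of that pattern in isolation, assuming a BGR uint8 crop; restore_512 is a hypothetical stand-in for the PMRF reconstruction step and is not part of the committed code.

    import cv2
    import numpy as np

    def restore_with_resize(cropped_face: np.ndarray, restore_512) -> np.ndarray:
        # Remember the crop's original height and width so the result can be mapped back.
        h, w = cropped_face.shape[0], cropped_face.shape[1]

        # Scale the crop to the fixed 512x512 working resolution.
        face_512 = cv2.resize(cropped_face, (512, 512), interpolation=cv2.INTER_LINEAR)

        # Hypothetical restoration step, assumed to return a float image in [0, 1].
        restored = restore_512(face_512)

        # Convert back to 8-bit and to the original crop size.
        restored = np.clip(restored * 255.0, 0, 255)
        restored = cv2.resize(restored, (w, h), interpolation=cv2.INTER_LINEAR)
        return restored.astype('uint8')

One detail worth noting: cv2.resize takes its dsize argument as (width, height), which is why the sketch passes (w, h) on the way back; passing (h, w) as in the committed line restores the original shape only when the crop is square, which aligned face crops usually are.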