sczhou committed · Commit e137bbe · Parent(s): 1fb2362

add grayscale judgement (#32)

facelib/detection/yolov5face/face_detector.py CHANGED
```diff
@@ -17,7 +17,7 @@ from facelib.detection.yolov5face.utils.general import (
     scale_coords_landmarks,
 )
 
-IS_HIGH_VERSION = tuple(map(int, torch.__version__.split('+')[0].split('.'))) >= (1, 9, 0)
+IS_HIGH_VERSION = tuple(map(int, torch.__version__.split('+')[0].split('.')[:3])) >= (1, 9, 0)
 
 
 def isListempty(inList):
```
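
Why the `[:3]` slice matters: nightly and local torch builds append extra dotted fields to the version string, which breaks the bare `int()` parse. A minimal check, assuming a nightly-style version string (the exact failing version isn't named in the commit):

```python
# Hypothetical nightly-style version string; dev builds carry a fourth
# dotted field that is not an integer.
version = "1.13.0.dev20220101"

# Old parse: int('dev20220101') raises ValueError.
try:
    tuple(map(int, version.split('+')[0].split('.')))
except ValueError as e:
    print('old parse fails:', e)

# New parse: only the numeric major.minor.patch fields are compared.
print(tuple(map(int, version.split('+')[0].split('.')[:3])) >= (1, 9, 0))  # True
```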
facelib/utils/face_restoration_helper.py CHANGED
```diff
@@ -299,6 +299,8 @@ class FaceRestoreHelper(object):
 
 
     def add_restored_face(self, face):
+        if self.is_gray:
+            face = bgr2gray(face) # convert img into grayscale
         self.restored_faces.append(face)
 
 
@@ -419,9 +421,6 @@ class FaceRestoreHelper(object):
         fuse_mask = (inv_soft_parse_mask<inv_soft_mask).astype('int')
         inv_soft_mask = inv_soft_parse_mask*fuse_mask + inv_soft_mask*(1-fuse_mask)
 
-        if self.is_gray:
-            pasted_face = bgr2gray(pasted_face) # convert img into grayscale
-
         if len(upsample_img.shape) == 3 and upsample_img.shape[2] == 4: # alpha channel
             alpha = upsample_img[:, :, 3:]
             upsample_img = inv_soft_mask * pasted_face + (1 - inv_soft_mask) * upsample_img[:, :, 0:3]
```
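
The net effect: the grayscale conversion moves from the paste-back step into `add_restored_face`, so every restored face is converted exactly once, including the `--has_aligned` path that never reaches `paste_faces_to_input_image`. `bgr2gray` itself isn't shown in this commit; a minimal sketch of what such a helper typically does, assuming OpenCV conventions (hypothetical body, name taken from the diff):

```python
import cv2
import numpy as np

def bgr2gray(img: np.ndarray) -> np.ndarray:
    # Collapse BGR to luminance, then replicate back to three channels so
    # downstream blending code keeps a uniform (H, W, 3) shape.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return cv2.cvtColor(gray, cv2.COLOR_GRAY2BGR)
```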
inference_codeformer.py CHANGED
```diff
@@ -1,4 +1,3 @@
-# Modified by Shangchen Zhou from: https://github.com/TencentARC/GFPGAN/blob/master/inference_gfpgan.py
 import os
 import cv2
 import argparse
@@ -8,6 +7,7 @@ from torchvision.transforms.functional import normalize
 from basicsr.utils import imwrite, img2tensor, tensor2img
 from basicsr.utils.download_util import load_file_from_url
 from facelib.utils.face_restoration_helper import FaceRestoreHelper
+from facelib.utils.misc import is_gray
 import torch.nn.functional as F
 
 from basicsr.utils.registry import ARCH_REGISTRY
@@ -122,6 +122,9 @@ if __name__ == '__main__':
     if args.has_aligned:
         # the input faces are already cropped and aligned
         img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_LINEAR)
+        face_helper.is_gray = is_gray(img, threshold=5)
+        if face_helper.is_gray:
+            print('Grayscale input: True')
         face_helper.cropped_faces = [img]
     else:
         face_helper.read_image(img)
```
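
In the non-aligned branch, `read_image` presumably handles grayscale detection itself; the aligned branch bypasses it by setting `cropped_faces` directly, hence the explicit `is_gray(img, threshold=5)` call. The body of `is_gray` isn't part of this diff; a plausible channel-difference check consistent with that call signature (a sketch under that assumption, not the verified implementation):

```python
import numpy as np

def is_gray(img: np.ndarray, threshold: int = 10) -> bool:
    # Single-channel inputs are grayscale by definition.
    if img.ndim == 2 or img.shape[2] == 1:
        return True
    b, g, r = (img[:, :, i].astype(np.int16) for i in range(3))
    # If the three channels barely differ on average, treat the image as gray.
    mean_diff = (np.abs(b - g).mean() + np.abs(g - r).mean() + np.abs(r - b).mean()) / 3.0
    return mean_diff <= threshold
```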