TDN-M committed
Commit ddf1f82 · verified · 1 Parent(s): 93ac1b5

Update avatar.py

Files changed (1)
  1. avatar.py +14 -14
avatar.py CHANGED
@@ -183,21 +183,21 @@ class Avatar:
 
     def create_face_detection_results(self, full_frames, save_result=True):
        detector = FaceAlignment(LandmarksType.TWO_D, flip_input=False, device=self.device)
-       images=full_frames
+       images = full_frames
        while 1:
-           predictions = []
-           try:
-               for i in tqdm(range(0, len(images), self.face_detect_batch_size)):
-                   batch_images = np.array(images[i:i + self.face_detect_batch_size])
-                   batch_images = torch.from_numpy(batch_images).permute(0, 3, 1, 2).float().to(self.device)
-                   predictions.extend(detector.face_detector.detect_from_batch(batch_images))
-           except RuntimeError:
-               if self.face_detect_batch_size == 1:
-                   raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
-               self.face_detect_batch_size //= 2
-               print('Recovering from OOM error; New batch size: {}'.format(self.face_detect_batch_size))
-               continue
-           break
+           predictions = []
+           try:
+               for i in tqdm(range(0, len(images), self.face_detect_batch_size)):
+                   batch_images = np.array(images[i:i + self.face_detect_batch_size])
+                   batch_images = torch.from_numpy(batch_images).permute(0, 3, 1, 2).float().to(self.device)
+                   predictions.extend(detector.face_detector.detect_from_batch(batch_images))
+           except RuntimeError:
+               if self.face_detect_batch_size == 1:
+                   raise RuntimeError('Image too big to run face detection on GPU. Please use the --resize_factor argument')
+               self.face_detect_batch_size //= 2
+               print('Recovering from OOM error; New batch size: {}'.format(self.face_detect_batch_size))
+               continue
+           break
 
        face_detect_results = []
        pady1, pady2, padx1, padx2 = [0, 10, 0, 0]
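
The hunk above is a common OOM-recovery pattern: run face detection in fixed-size batches and, if a RuntimeError (typically CUDA out of memory) is raised, halve the batch size and retry until a single image still fails. Below is a minimal standalone sketch of that idea, assuming a hypothetical detect_batch callable and starting batch size; it stands in for detector.face_detector.detect_from_batch and self.face_detect_batch_size in the commit and is not the repository's actual API.

import numpy as np
import torch

def detect_with_oom_fallback(frames, detect_batch, batch_size=16, device="cuda"):
    """Run detect_batch over frames, halving the batch size on RuntimeError.

    detect_batch is a hypothetical callable taking an (N, C, H, W) float tensor
    and returning a list of per-image detections.
    """
    while True:
        predictions = []
        try:
            for i in range(0, len(frames), batch_size):
                batch = np.array(frames[i:i + batch_size])                   # (N, H, W, C) frames
                batch = torch.from_numpy(batch).permute(0, 3, 1, 2).float()  # -> (N, C, H, W)
                predictions.extend(detect_batch(batch.to(device)))
        except RuntimeError:
            if batch_size == 1:
                # Even a single frame does not fit on the device; re-raise so the
                # caller can resize the input instead.
                raise
            batch_size //= 2
            print(f"Recovering from OOM error; new batch size: {batch_size}")
            continue
        break
    return predictions

Halving geometrically means at most log2 of the initial batch size retries before the single-image case either succeeds or surfaces the original error.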