rangm committed
Commit fdad090 · verified · 1 Parent(s): 894e005

Update webgui.py

Files changed (1)
  1. webgui.py +1 -64
webgui.py CHANGED
@@ -155,71 +155,8 @@ def select_face(det_bboxes, probs):
     sorted_bboxes = sorted(filtered_bboxes, key=lambda x:(x[3]-x[1]) * (x[2] - x[0]), reverse=True)
     return sorted_bboxes[0]
 
-@spaces.GPU
 lmk_extractor = LMKExtractor()
-# def process_video(uploaded_img, uploaded_audio, width, height, length, seed, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):
-
-#     if seed is not None and seed > -1:
-#         generator = torch.manual_seed(seed)
-#     else:
-#         generator = torch.manual_seed(random.randint(100, 1000000))
-
-#     #### face musk prepare
-#     face_img = cv2.imread(uploaded_img)
-#     face_mask = np.zeros((face_img.shape[0], face_img.shape[1])).astype('uint8')
-#     det_bboxes, probs = face_detector.detect(face_img)
-#     select_bbox = select_face(det_bboxes, probs)
-#     if select_bbox is None:
-#         face_mask[:, :] = 255
-#     else:
-#         xyxy = select_bbox[:4]
-#         xyxy = np.round(xyxy).astype('int')
-#         rb, re, cb, ce = xyxy[1], xyxy[3], xyxy[0], xyxy[2]
-#         r_pad = int((re - rb) * facemask_dilation_ratio)
-#         c_pad = int((ce - cb) * facemask_dilation_ratio)
-#         face_mask[rb - r_pad : re + r_pad, cb - c_pad : ce + c_pad] = 255
-
-#         #### face crop
-#         r_pad_crop = int((re - rb) * facecrop_dilation_ratio)
-#         c_pad_crop = int((ce - cb) * facecrop_dilation_ratio)
-#         crop_rect = [max(0, cb - c_pad_crop), max(0, rb - r_pad_crop), min(ce + c_pad_crop, face_img.shape[1]), min(re + r_pad_crop, face_img.shape[0])]
-#         face_img = crop_and_pad(face_img, crop_rect)
-#         face_mask = crop_and_pad(face_mask, crop_rect)
-#     face_img = cv2.resize(face_img, (width, height))
-#     face_mask = cv2.resize(face_mask, (width, height))
-
-#     ref_image_pil = Image.fromarray(face_img[:, :, [2, 1, 0]])
-#     face_mask_tensor = torch.Tensor(face_mask).to(dtype=weight_dtype, device="cuda").unsqueeze(0).unsqueeze(0).unsqueeze(0) / 255.0
-
-#     video = pipe(
-#         ref_image_pil,
-#         uploaded_audio,
-#         face_mask_tensor,
-#         width,
-#         height,
-#         length,
-#         steps,
-#         cfg,
-#         generator=generator,
-#         audio_sample_rate=sample_rate,
-#         context_frames=context_frames,
-#         fps=fps,
-#         context_overlap=context_overlap
-#     ).videos
-
-#     save_dir = Path("output/tmp")
-#     save_dir.mkdir(exist_ok=True, parents=True)
-#     output_video_path = save_dir / "output_video.mp4"
-#     save_videos_grid(video, str(output_video_path), n_rows=1, fps=fps)
-
-#     video_clip = VideoFileClip(str(output_video_path))
-#     audio_clip = AudioFileClip(uploaded_audio)
-#     final_output_path = save_dir / "output_video_with_audio.mp4"
-#     video_clip = video_clip.set_audio(audio_clip)
-#     video_clip.write_videofile(str(final_output_path), codec="libx264", audio_codec="aac")
-
-#     return final_output_path
-
+@spaces.GPU
 def process_video(uploaded_img, uploaded_audio, width, height, length, facemask_dilation_ratio, facecrop_dilation_ratio, context_frames, context_overlap, cfg, steps, sample_rate, fps, device):
     #### face musk prepare
     face_img = cv2.imread(uploaded_img)
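For context on the decorator this commit repositions: in Hugging Face ZeroGPU Spaces, `@spaces.GPU` from the `spaces` package reserves a GPU only for the duration of each call to the function it decorates, so it belongs directly above the inference entry point (here `process_video`) rather than on a module-level statement. A minimal sketch of that usage pattern, assuming a ZeroGPU Space; the `run_on_gpu` function, the Gradio wiring, and the `duration` value are illustrative and not part of this repository:

import gradio as gr
import spaces  # ZeroGPU helper package available inside Hugging Face Spaces
import torch

@spaces.GPU(duration=120)  # illustrative: request a GPU slot of up to ~120 s per call
def run_on_gpu(prompt: str) -> str:
    # CUDA is only guaranteed to be attached while a @spaces.GPU-decorated call is running
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return f"computed on {device}: {prompt}"

demo = gr.Interface(fn=run_on_gpu, inputs="text", outputs="text")

if __name__ == "__main__":
    demo.launch()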