Spanicin committed on
Commit
7e5f34c
·
verified ·
1 Parent(s): 8e2394b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +43 -43
app.py CHANGED
@@ -23,9 +23,9 @@ import uuid
23
  import time
24
  from PIL import Image
25
  import moviepy.editor as mp
26
- from videoretalking import inference_function
27
- import base64
28
- import gfpgan_enhancer
29
 
30
  start_time = time.time()
31
 
@@ -192,55 +192,55 @@ def main(args):
192
  result, base64_video,temp_file_path,new_audio_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
193
  enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
194
 
195
- face_path = temp_file_path
196
- audio_path = new_audio_path
197
- temp_file = tempfile.NamedTemporaryFile(delete=False, dir=TEMP_DIR.name, suffix='.mp4')
198
- video_lipsync_file_path = temp_file.name
199
- output_path = video_lipsync_file_path
200
-
201
- # Call the function
202
- inference_function.video_lipsync_correctness(
203
- face=face_path,
204
- audio_path=audio_path,
205
- face3d_net_path = path_of_net_recon_model,
206
- outfile=output_path,
207
- tmp_dir="temp",
208
- crop=[0, -1, 0, -1],
209
- re_preprocess=True, # Set to True if you want to reprocess; False otherwise
210
- exp_img="neutral", # Can be 'smile', 'neutral', or path to an expression image
211
- one_shot=False,
212
- up_face="original", # Options: 'original', 'sad', 'angry', 'surprise'
213
- LNet_batch_size=16,
214
- without_rl1=False
215
- )
216
 
217
- print('The video with lip sync is generated')
218
- print("GFPGAN Activated")
219
 
220
- gfpgan_enhancer.process_video_with_gfpgan(output_path, output_path)
221
- audio_clip = mp.AudioFileClip(new_audio_path)
222
- video_clip = mp.VideoFileClip(output_path)
223
- # Combine audio and video
224
- final_clip = video_clip.set_audio(audio_clip)
225
 
226
- temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', dir=TEMP_DIR.name, delete=False)
227
- temp_file.close()
228
- final_video_path = temp_file.name
229
- final_clip.write_videofile(final_video_path)
230
 
231
- with open(final_video_path, 'rb') as f:
232
- video_content = f.read()
233
 
234
- base64_lipsync_video = base64.b64encode(video_content).decode('utf-8')
235
 
236
- video_clip = mp.VideoFileClip(final_video_path)
237
- duration = video_clip.duration
238
 
239
- app.config['temp_response'] = base64_lipsync_video
240
- app.config['final_video_path'] = final_video_path
241
  app.config['final_video_duration'] = duration
242
 
243
- return base64_lipsync_video, final_video_path, duration
244
 
245
  # shutil.move(result, save_dir+'.mp4')
246
 
 
23
  import time
24
  from PIL import Image
25
  import moviepy.editor as mp
26
+ # from videoretalking import inference_function
27
+ # import base64
28
+ # import gfpgan_enhancer
29
 
30
  start_time = time.time()
31
 
 
192
  result, base64_video,temp_file_path,new_audio_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
193
  enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
194
 
195
+ # face_path = temp_file_path
196
+ # audio_path = new_audio_path
197
+ # temp_file = tempfile.NamedTemporaryFile(delete=False, dir=TEMP_DIR.name, suffix='.mp4')
198
+ # video_lipsync_file_path = temp_file.name
199
+ # output_path = video_lipsync_file_path
200
+
201
+ # # Call the function
202
+ # inference_function.video_lipsync_correctness(
203
+ # face=face_path,
204
+ # audio_path=audio_path,
205
+ # face3d_net_path = path_of_net_recon_model,
206
+ # outfile=output_path,
207
+ # tmp_dir="temp",
208
+ # crop=[0, -1, 0, -1],
209
+ # re_preprocess=True, # Set to True if you want to reprocess; False otherwise
210
+ # exp_img="neutral", # Can be 'smile', 'neutral', or path to an expression image
211
+ # one_shot=False,
212
+ # up_face="original", # Options: 'original', 'sad', 'angry', 'surprise'
213
+ # LNet_batch_size=16,
214
+ # without_rl1=False
215
+ # )
216
 
217
+ # print('The video with lip sync is generated')
218
+ # print("GFPGAN Activated")
219
 
220
+ # gfpgan_enhancer.process_video_with_gfpgan(output_path, output_path)
221
+ # audio_clip = mp.AudioFileClip(new_audio_path)
222
+ # video_clip = mp.VideoFileClip(output_path)
223
+ # # Combine audio and video
224
+ # final_clip = video_clip.set_audio(audio_clip)
225
 
226
+ # temp_file = tempfile.NamedTemporaryFile(suffix='.mp4', dir=TEMP_DIR.name, delete=False)
227
+ # temp_file.close()
228
+ # final_video_path = temp_file.name
229
+ # final_clip.write_videofile(final_video_path)
230
 
231
+ # with open(final_video_path, 'rb') as f:
232
+ # video_content = f.read()
233
 
234
+ # base64_lipsync_video = base64.b64encode(video_content).decode('utf-8')
235
 
236
+ # video_clip = mp.VideoFileClip(final_video_path)
237
+ # duration = video_clip.duration
238
 
239
+ app.config['temp_response'] = base64_video
240
+ app.config['final_video_path'] = temp_file_path
241
  app.config['final_video_duration'] = duration
242
 
243
+ return base64_video, temp_file_path, duration
244
 
245
  # shutil.move(result, save_dir+'.mp4')
246