Spanicin committed on
Commit
39245e9
·
verified ·
1 Parent(s): 7b303d5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -29
app.py CHANGED
@@ -23,7 +23,7 @@ import uuid
23
  import time
24
  from PIL import Image
25
  import moviepy.editor as mp
26
- from videoretalking import inference_function
27
 
28
  start_time = time.time()
29
 
@@ -190,43 +190,43 @@ def main(args):
190
  result, base64_video,temp_file_path,new_audio_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
191
  enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
192
 
193
- face_path = temp_file_path
194
- audio_path = new_audio_path
195
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
196
- video_lipsync_file_path = temp_file.name
197
- output_path = video_lipsync_file_path
198
-
199
- # Call the function
200
- inference_function.video_lipsync_correctness(
201
- face=face_path,
202
- audio_path=audio_path,
203
- face3d_net_path = path_of_net_recon_model,
204
- outfile=output_path,
205
- tmp_dir="temp",
206
- crop=[0, -1, 0, -1],
207
- re_preprocess=True, # Set to True if you want to reprocess; False otherwise
208
- exp_img="neutral", # Can be 'smile', 'neutral', or path to an expression image
209
- one_shot=False,
210
- up_face="original", # Options: 'original', 'sad', 'angry', 'surprise'
211
- LNet_batch_size=16,
212
- without_rl1=False
213
- )
214
 
215
  print('The video is generated')
216
 
217
- with open(output_path, 'rb') as f:
218
- video_content = f.read()
219
 
220
- base64_lipsync_video = base64.b64encode(video_content).decode('utf-8')
221
 
222
- video_clip = mp.VideoFileClip(output_path)
223
  duration = video_clip.duration
224
 
225
- app.config['temp_response'] = base64_lipsync_video
226
- app.config['final_video_path'] = output_path
227
  app.config['final_video_duration'] = duration
228
 
229
- return base64_lipsync_video, output_path, duration
230
 
231
  # shutil.move(result, save_dir+'.mp4')
232
 
 
23
  import time
24
  from PIL import Image
25
  import moviepy.editor as mp
26
+ # from videoretalking import inference_function
27
 
28
  start_time = time.time()
29
 
 
190
  result, base64_video,temp_file_path,new_audio_path = animate_from_coeff.generate(data, save_dir, pic_path, crop_info, \
191
  enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
192
 
193
+ # face_path = temp_file_path
194
+ # audio_path = new_audio_path
195
+ # temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
196
+ # video_lipsync_file_path = temp_file.name
197
+ # output_path = video_lipsync_file_path
198
+
199
+ # # Call the function
200
+ # inference_function.video_lipsync_correctness(
201
+ # face=face_path,
202
+ # audio_path=audio_path,
203
+ # face3d_net_path = path_of_net_recon_model,
204
+ # outfile=output_path,
205
+ # tmp_dir="temp",
206
+ # crop=[0, -1, 0, -1],
207
+ # re_preprocess=True, # Set to True if you want to reprocess; False otherwise
208
+ # exp_img="neutral", # Can be 'smile', 'neutral', or path to an expression image
209
+ # one_shot=False,
210
+ # up_face="original", # Options: 'original', 'sad', 'angry', 'surprise'
211
+ # LNet_batch_size=16,
212
+ # without_rl1=False
213
+ # )
214
 
215
  print('The video is generated')
216
 
217
+ # with open(output_path, 'rb') as f:
218
+ # video_content = f.read()
219
 
220
+ # base64_lipsync_video = base64.b64encode(video_content).decode('utf-8')
221
 
222
+ video_clip = mp.VideoFileClip(temp_file_path)
223
  duration = video_clip.duration
224
 
225
+ app.config['temp_response'] = base64_video
226
+ app.config['final_video_path'] = temp_file_path
227
  app.config['final_video_duration'] = duration
228
 
229
+ return base64_video, temp_file_path, duration
230
 
231
  # shutil.move(result, save_dir+'.mp4')
232