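# Human-portrait model set. Each entry names a model wrapper and its backend:
# predict_type "ort" presumably selects an ONNX Runtime session for the given
# model_path, while "mp" uses MediaPipe face detection (no ONNX file needed).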
models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/motion_extractor.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/stitching_lip.onnx"
animal_models:
  warping_spade:
    name: "WarpingSpadeModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/warping_spade.onnx"
  motion_extractor:
    name: "MotionExtractorModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/motion_extractor.onnx"
  app_feat_extractor:
    name: "AppearanceFeatureExtractorModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/appearance_feature_extractor.onnx"
  stitching:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/stitching.onnx"
  stitching_eye_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/stitching_eye.onnx"
  stitching_lip_retarget:
    name: "StitchingModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_animal_onnx/stitching_lip.onnx"
  landmark:
    name: "LandmarkModel"
    predict_type: "ort"
    model_path: "./checkpoints/liveportrait_onnx/landmark.onnx"
  face_analysis:
    name: "MediaPipeFaceModel"
    predict_type: "mp"
joyvasa_models:
  motion_model_path: "checkpoints/JoyVASA/motion_generator/motion_generator_hubert_chinese.pt"
  audio_model_path: "checkpoints/chinese-hubert-base"
  motion_template_path: "checkpoints/JoyVASA/motion_template/motion_template.pkl"
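# Face-crop settings: src_* apply to the source image, dri_* to the driving
# video. dsize is the crop resolution, scale the crop-box enlargement, and the
# vx/vy ratios shift the crop box horizontally/vertically (interpretation
# assumed from the parameter names).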
crop_params:
  src_dsize: 512
  src_scale: 2.3
  src_vx_ratio: 0.0
  src_vy_ratio: -0.125
  dri_scale: 2.2
  dri_vx_ratio: 0.0
  dri_vy_ratio: -0.1
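# Inference flags controlling cropping, eye/lip retargeting, stitching,
# relative motion, and pasting the animated crop back onto the source frame.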
infer_params:
  flag_crop_driving_video: False
  flag_normalize_lip: True
  flag_source_video_eye_retargeting: False
  flag_video_editing_head_rotation: False
  flag_eye_retargeting: False
  flag_lip_retargeting: False
  flag_stitching: True
  flag_relative_motion: True
  flag_pasteback: True
  flag_do_crop: True
  flag_do_rot: True
  # NOT EXPORTED PARAMS
  lip_normalize_threshold: 0.03 # threshold for flag_normalize_lip
  source_video_eye_retargeting_threshold: 0.18 # threshold for eye retargeting when the input is a source video
  driving_smooth_observation_variance: 1e-7 # smoothing strength for the animated video when the input is a source video; larger values give a smoother result, but too much smoothing loses motion accuracy
  anchor_frame: 0 # TO IMPLEMENT
  mask_crop_path: "./assets/mask_template.png"
  driving_multiplier: 1.0
  animation_region: "all"
  cfg_mode: "incremental"
  cfg_scale: 1.2
  source_max_dim: 1280 # maximum height/width of the source image
  source_division: 2 # the source height and width are made divisible by this number
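# Usage sketch (an assumption, not part of the original config): a pipeline would
# typically load this file with a YAML/OmegaConf reader, e.g.
#   cfg = OmegaConf.load("configs/onnx_mp_infer.yaml")
# and instantiate each entry under models/animal_models from its name,
# predict_type, and model_path. The file name above is hypothetical.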