import pickle
import shutil
import sys

# Make the local ROME and DECA checkouts importable before importing from them.
sys.path.append("./rome/")
sys.path.append("./DECA")

import gradio as gr
import torch
from easydict import EasyDict as edict
from huggingface_hub import hf_hub_download

from rome.infer import Infer
from rome.src.utils.processing import tensor2image


# Download pretrained checkpoints from the Hugging Face model repo.
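# hf_hub_download caches each file locally and returns the cached path on reruns.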

default_modnet_path = hf_hub_download(
    'Pie31415/rome', 'modnet_photographic_portrait_matting.ckpt')
default_model_path = hf_hub_download('Pie31415/rome', 'rome.pth')

# Inference configuration, mirroring the ROME argument-parser defaults.

args = edict({
    "save_dir": ".",
    "save_render": True,
    "model_checkpoint": default_model_path,
    "modnet_path": default_modnet_path,
    "random_seed": 0,
    "debug": False,
    "verbose": False,
    "model_image_size": 256,
    "align_source": True,
    "align_target": False,
    "align_scale": 1.25,
    "use_mesh_deformations": False,
    "subdivide_mesh": False,
    "renderer_sigma": 1e-08,
    "renderer_zfar": 100.0,
    "renderer_type": "soft_mesh",
    "renderer_texture_type": "texture_uv",
    "renderer_normalized_alphas": False,
    "deca_path": "DECA",
    "rome_data_dir": "rome/data",
    "autoenc_cat_alphas": False,
    "autoenc_align_inputs": False,
    "autoenc_use_warp": False,
    "autoenc_num_channels": 64,
    "autoenc_max_channels": 512,
    "autoenc_num_groups": 4,
    "autoenc_num_bottleneck_groups": 0,
    "autoenc_num_blocks": 2,
    "autoenc_num_layers": 4,
    "autoenc_block_type": "bottleneck",
    "neural_texture_channels": 8,
    "num_harmonic_encoding_funcs": 6,
    "unet_num_channels": 64,
    "unet_max_channels": 512,
    "unet_num_groups": 4,
    "unet_num_blocks": 1,
    "unet_num_layers": 2,
    "unet_block_type": "conv",
    "unet_skip_connection_type": "cat",
    "unet_use_normals_cond": True,
    "unet_use_vertex_cond": False,
    "unet_use_uvs_cond": False,
    "unet_pred_mask": False,
    "use_separate_seg_unet": True,
    "norm_layer_type": "gn",
    "activation_type": "relu",
    "conv_layer_type": "ws_conv",
    "deform_norm_layer_type": "gn",
    "deform_activation_type": "relu",
    "deform_conv_layer_type": "ws_conv",
    "unet_seg_weight": 0.0,
    "unet_seg_type": "bce_with_logits",
    "deform_face_tightness": 0.0001,
    "use_whole_segmentation": False,
    "mask_hair_for_neck": False,
    "use_hair_from_avatar": False,
    "use_scalp_deforms": True,
    "use_neck_deforms": True,
    "use_basis_deformer": False,
    "use_unet_deformer": True,
    "pretrained_encoder_basis_path": "",
    "pretrained_vertex_basis_path": "",
    "num_basis": 50,
    "basis_init": "pca",
    "num_vertex": 5023,
    "train_basis": True,
    "path_to_deca": "DECA",
    "path_to_linear_hair_model": "data/linear_hair.pth",  # N/A
    "path_to_mobile_model": "data/disp_model.pth",  # N/A
    "n_scalp": 60,
    "use_distill": False,
    "use_mobile_version": False,
    "deformer_path": "data/rome.pth",
    "output_unet_deformer_feats": 32,
    "use_deca_details": False,
    "use_flametex": False,
    "upsample_type": "nearest",
    "num_frequencies": 6,
    "deform_face_scale_coef": 0.0,
    "device": "cpu"
})
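# Assumption: the demo targets CPU so it runs anywhere; set "device": "cuda"
# above if a GPU is available.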

# Download the FLAME generic model and the DECA checkpoint, then place them
# where DECA expects to find them.
generic_model_path = hf_hub_download('Pie31415/rome', 'generic_model.pkl')
deca_model_path = hf_hub_download('Pie31415/rome', 'deca_model.tar')

# The FLAME pickle was produced with Python 2, so load it with latin1 encoding
# and re-serialize it for Python 3; the DECA checkpoint is copied as-is.
with open(generic_model_path, 'rb') as f:
    ss = pickle.load(f, encoding='latin1')
with open('./DECA/data/generic_model.pkl', 'wb') as out:
    pickle.dump(ss, out)

shutil.copyfile(deca_model_path, './DECA/data/deca_model.tar')

# load ROME inference model
infer = Infer(args)
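# Note: constructing Infer loads the ROME checkpoint plus the DECA/FLAME assets
# staged above, so expect a slow startup, particularly on CPU.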

def image_inference(source_img, driver_img):
    """Drive the source portrait with the driver image; return a 4-panel strip."""
    out = infer.evaluate(source_img, driver_img, crop_center=False)
    # Concatenate source, driver, masked render, and predicted shape along width.
    res = tensor2image(torch.cat([
        out['source_information']['data_dict']['source_img'][0].cpu(),
        out['source_information']['data_dict']['target_img'][0].cpu(),
        out['render_masked'].cpu(),
        out['pred_target_shape_img'][0].cpu(),
    ], dim=2))
    return res[..., ::-1]  # flip channel order for display
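
# Hypothetical local sanity check (assumes the example images below exist):
#   from PIL import Image
#   strip = image_inference(Image.open("examples/lincoln.jpg"),
#                           Image.open("examples/tars2.jpg"))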

def folder_inference():
    # TODO: batch inference over a folder of images (the tab below is a placeholder).
    pass

title = "ROME: Realistic one-shot mesh-based head avatars"

with gr.Blocks(title=title) as demo:
    gr.Markdown(f"# {title}")
    with gr.Tab("Image Inference"):
        image_input = [gr.Image(type="pil", label="Source"),
                       gr.Image(type="pil", label="Driver")]
        image_output = gr.Image()
        image_button = gr.Button("Predict")
    with gr.Tab("Inference Over Folder"):
        pass  # placeholder, see folder_inference above
    with gr.Tab("Video Inference"):
        pass  # placeholder

    image_button.click(image_inference, inputs=image_input, outputs=image_output)
    gr.Examples(examples=[["examples/lincoln.jpg", "examples/tars2.jpg"]],
                inputs=image_input)

demo.launch()
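
# Tip: when running locally (outside a hosted Space), demo.launch(share=True)
# serves a temporary public URL.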