import os
import sys
from pathlib import Path

import onnxruntime as ort
import torch

# Make the local parsing_api module importable regardless of the working directory.
PROJECT_ROOT = Path(__file__).absolute().parent
sys.path.insert(0, str(PROJECT_ROOT))
from parsing_api import onnx_inference


class Parsing:
    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id
        torch.cuda.set_device(gpu_id)

        session_options = ort.SessionOptions()
        session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        session_options.add_session_config_entry('gpu_id', str(gpu_id))

        # Download the ATR and LIP human-parsing ONNX models into the shared
        # checkpoints directory if they are not already present. The URLs must
        # use 'resolve' (raw file) rather than 'blob' (HTML page) on the Hub.
        parsing_ckpt_path = os.path.join(Path(__file__).absolute().parents[2], 'checkpoints/humanparsing')
        atr_model_url = 'https://huggingface.co/levihsu/OOTDiffusion/resolve/main/checkpoints/humanparsing/parsing_atr.onnx'
        lip_model_url = 'https://huggingface.co/levihsu/OOTDiffusion/resolve/main/checkpoints/humanparsing/parsing_lip.onnx'

        from basicsr.utils.download_util import load_file_from_url
        load_file_from_url(atr_model_url, model_dir=parsing_ckpt_path)
        load_file_from_url(lip_model_url, model_dir=parsing_ckpt_path)

        self.session = ort.InferenceSession(os.path.join(parsing_ckpt_path, 'parsing_atr.onnx'),
                                            sess_options=session_options, providers=['CPUExecutionProvider'])
        self.lip_session = ort.InferenceSession(os.path.join(parsing_ckpt_path, 'parsing_lip.onnx'),
                                                sess_options=session_options, providers=['CPUExecutionProvider'])
        

    def __call__(self, input_image):
        # Run both ONNX parsing models on the input image via parsing_api;
        # returns the parsed segmentation image and the face mask.
        torch.cuda.set_device(self.gpu_id)
        parsed_image, face_mask = onnx_inference(self.session, self.lip_session, input_image)
        return parsed_image, face_mask
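

# Minimal usage sketch. Assumptions: parsing_api.onnx_inference accepts a
# PIL.Image, and 'person.jpg' is a placeholder input path (not part of this
# repo); a CUDA device is required because __init__ calls torch.cuda.set_device.
if __name__ == '__main__':
    from PIL import Image

    parser = Parsing(gpu_id=0)
    input_image = Image.open('person.jpg')
    parsed_image, face_mask = parser(input_image)
    print(type(parsed_image), type(face_mask))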