from pathlib import Path
import sys

import onnxruntime as ort
import requests
import torch

# Make the project root importable so parsing_api resolves regardless of the
# current working directory.
PROJECT_ROOT = Path(__file__).resolve().parent
sys.path.insert(0, str(PROJECT_ROOT))

from parsing_api import onnx_inference


class Parsing:
    """Human-parsing wrapper around two ONNX models (ATR and LIP)."""

    def __init__(self, gpu_id: int):
        self.gpu_id = gpu_id
        # Select the CUDA device only when one is available; the ONNX sessions
        # below run on the CPU execution provider either way.
        if torch.cuda.is_available():
            torch.cuda.set_device(gpu_id)
# Define the URLs for the models on Hugging Face
atr_url = 'https://huggingface.co/basso4/humanparsing/resolve/main/parsing_atr.onnx'
lip_url = 'https://huggingface.co/basso4/humanparsing/resolve/main/parsing_lip.onnx'
# Define local paths for storing the models
model_dir = Path.home() / '.cache/humanparsing_models'
model_dir.mkdir(parents=True, exist_ok=True)
atr_path = model_dir / 'parsing_atr.onnx'
lip_path = model_dir / 'parsing_lip.onnx'
# Download models if they don't already exist locally
self.download_model(atr_url, atr_path)
self.download_model(lip_url, lip_path)
        # Configure ONNX Runtime session options
        session_options = ort.SessionOptions()
        session_options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        session_options.execution_mode = ort.ExecutionMode.ORT_SEQUENTIAL
        session_options.add_session_config_entry('gpu_id', str(gpu_id))
        # Initialize the ONNX inference sessions on the CPU execution provider
        self.session = ort.InferenceSession(
            str(atr_path), sess_options=session_options,
            providers=['CPUExecutionProvider'])
        self.lip_session = ort.InferenceSession(
            str(lip_path), sess_options=session_options,
            providers=['CPUExecutionProvider'])

    def download_model(self, url, path):
        # Download the model once and cache it at the given path.
        if not path.exists():
            print(f"Downloading model from {url}...")
            response = requests.get(url, stream=True, timeout=60)
            response.raise_for_status()
            with open(path, 'wb') as file:
                for chunk in response.iter_content(chunk_size=8192):
                    file.write(chunk)
            print(f"Model downloaded and saved to {path}")
def __call__(self, input_image):
# Perform inference using both ONNX models and return the results
parsed_image, face_mask = onnx_inference(self.session, self.lip_session, input_image)
return parsed_image, face_mask
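

if __name__ == '__main__':
    # Minimal usage sketch. Assumptions: a test image named 'example.jpg'
    # exists next to this script, and onnx_inference accepts a PIL image;
    # adjust both to the actual parsing_api contract.
    from PIL import Image

    parser = Parsing(gpu_id=0)
    parsed_image, face_mask = parser(Image.open('example.jpg'))
    print(type(parsed_image), type(face_mask))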