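"""
Standalone test driver for the OOTDiffusion "DC" (dress-code) virtual try-on
pipeline. It runs OpenPose and human parsing on a model photo, builds the
inpainting mask for the chosen garment category, estimates the model's
shoulder width from the detected keypoints, and saves the intermediate masks
to disk. The diffusion call itself is left disabled (see process_dc below).
"""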
import os
import sys
from pathlib import Path

import numpy as np
import torch
from PIL import Image, ImageOps

from utils_ootd import get_mask_location
from cloths_db import cloths_map, modeL_db

PROJECT_ROOT = Path(__file__).absolute().parents[1]
sys.path.insert(0, str(PROJECT_ROOT))

from preprocess.openpose.run_openpose import OpenPose
from preprocess.humanparsing.run_parsing import Parsing
from ootd.inference_ootd_hd import OOTDiffusionHD
from ootd.inference_ootd_dc import OOTDiffusionDC
from preprocess.openpose.annotator.openpose.util import draw_bodypose
# Instantiate the preprocessing and diffusion models on GPU 0.
openpose_model = OpenPose(0)
parsing_model_dc = Parsing(0)
ootd_model_dc = OOTDiffusionDC(0)

category_dict = ['upperbody', 'lowerbody', 'dress']
category_dict_utils = ['upper_body', 'lower_body', 'dresses']

example_path = os.path.join(os.path.dirname(__file__), 'examples')
garment_path = os.path.join(example_path, 'garment')

# Move every sub-model to CUDA once, up front.
openpose_model.preprocessor.body_estimation.model.to('cuda')
ootd_model_dc.pipe.to('cuda')
ootd_model_dc.image_encoder.to('cuda')
ootd_model_dc.text_encoder.to('cuda')
def process_dc(vton_img, garm_img, category):
    model_type = 'dc'
    # Map the category label to an index into category_dict / category_dict_utils.
    if category == 'Upper-body':
        category = 0
    elif category == 'Lower-body':
        category = 1
    else:
        category = 2

    with torch.no_grad():
        garm_img = Image.open(garm_img).resize((768, 1024))
        vton_img = Image.open(vton_img).resize((768, 1024))

        keypoints = openpose_model(vton_img.resize((384, 512)))
        print(len(keypoints["pose_keypoints_2d"]))
        print(keypoints["pose_keypoints_2d"])
        # Indices follow the 18-point COCO convention (1 = neck, 2/5 = the two
        # shoulders, 8 = hip). The left/right naming below is cosmetic; only
        # the distance between the two shoulder points is used.
        left_point = keypoints["pose_keypoints_2d"][2]
        right_point = keypoints["pose_keypoints_2d"][5]
        neck_point = keypoints["pose_keypoints_2d"][1]
        hip_point = keypoints["pose_keypoints_2d"][8]
        print(f'left shoulder - {left_point}')
        print(f'right shoulder - {right_point}')

        # Euclidean distances in pixel space; doubling the neck-to-hip
        # distance serves as a rough proxy for full body height.
        shoulder_width_pixels = round(np.hypot(right_point[0] - left_point[0],
                                               right_point[1] - left_point[1]), 2)
        height_pixels = round(np.hypot(neck_point[0] - hip_point[0],
                                       neck_point[1] - hip_point[1]), 2) * 2

        # Assume an average human height of 172.72 cm (5'8"); the 1.5
        # multiplier is a heuristic calibration constant from the original code.
        average_height_cm = 172.72 * 1.5
        # Conversion factor from pixels to cm.
        conversion_factor = average_height_cm / height_pixels
        # Convert shoulder width to real-world units.
        shoulder_width_cm = shoulder_width_pixels * conversion_factor

        print(f'Shoulder width (in pixels): {shoulder_width_pixels}')
        print(f'Estimated height (in pixels): {height_pixels}')
        print(f'Conversion factor (pixels to cm): {conversion_factor}')
        print(f'Shoulder width (in cm): {shoulder_width_cm}')
        print(f'Shoulder width (in inches): {round(shoulder_width_cm / 2.54, 1)}')
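        # Worked example (hypothetical numbers, not from a real run): with
        # height_pixels = 400 and shoulder_width_pixels = 90, the factor is
        # 259.08 / 400 ≈ 0.648 cm/px, giving ≈ 58.3 cm (≈ 23.0 inches).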

        model_parse, face_mask = parsing_model_dc(vton_img.resize((384, 512)))
        mask, mask_gray = get_mask_location(model_type, category_dict_utils[category], model_parse, keypoints)
        # An earlier experiment combined the upper- and lower-body masks instead:
        # up_mask, up_mask_gray = get_mask_location(model_type, category_dict_utils[0], model_parse, keypoints)
        # lo_mask, lo_mask_gray = get_mask_location(model_type, category_dict_utils[1], model_parse, keypoints)
        # mask = Image.composite(up_mask, lo_mask, up_mask)
        # mask_gray = Image.composite(up_mask_gray, lo_mask_gray, up_mask)

        mask = mask.resize((768, 1024), Image.NEAREST)
        mask_gray = mask_gray.resize((768, 1024), Image.NEAREST)

        # Save the resized masks.
        mask.save("mask_resized.png")
        mask_gray.save("mask_gray_resized.png")

        # Image.composite picks mask_gray where the mask is white and keeps the
        # original photo elsewhere, graying out the region to be inpainted.
        masked_vton_img = Image.composite(mask_gray, vton_img, mask)
        masked_vton_img.save("masked_vton_img.png")
        print(f'category is {category}')

        # The try-on generation itself is left disabled in this test script;
        # enabling it would call the pipeline like this:
        # images = ootd_model_dc(
        #     model_type=model_type,
        #     category=category_dict[category],
        #     image_garm=garm_img,
        #     image_vton=masked_vton_img,
        #     mask=mask,
        #     image_ori=vton_img,
        #     num_samples=1,
        #     num_steps=10,
        #     image_scale=2.0,
        #     seed=-1,
        # )
    return None
if __name__ == '__main__':
    model_dc = os.path.join(example_path, 'model/model_8.png')
    garment_dc = os.path.join(example_path, 'garment/048554_1.jpg')
    # Pass the category by its label; a bare 0 would fall through the
    # if/elif chain in process_dc and select 'dresses' instead.
    print(process_dc(model_dc, garment_dc, 'Upper-body'))
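    # A minimal batch sketch (assumption: every file in examples/garment is an
    # upper-body garment; adjust the category per item as needed):
    # for fname in sorted(os.listdir(garment_path)):
    #     process_dc(model_dc, os.path.join(garment_path, fname), 'Upper-body')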