import cv2
import numpy as np
import torch
from PIL import Image

from controlnet_aux import OpenposeDetector
from transformers import DPTImageProcessor, DPTForDepthEstimation

from model_util import get_torch_device

device = get_torch_device()

# Depth estimator (MiDaS DPT-Hybrid) used to build depth conditioning images.
depth_estimator = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(device)
feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")

# OpenPose detector used to build pose conditioning images.
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")


def get_depth_map(image):
    # Preprocess the PIL image and run monocular depth estimation on the selected device.
    image = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
    with torch.no_grad(), torch.autocast(str(device).split(":")[0]):
        depth_map = depth_estimator(image).predicted_depth

    # Upsample the predicted depth to 1024x1024 and normalize it to [0, 1].
    depth_map = torch.nn.functional.interpolate(
        depth_map.unsqueeze(1),
        size=(1024, 1024),
        mode="bicubic",
        align_corners=False,
    )
    depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)
    depth_map = (depth_map - depth_min) / (depth_max - depth_min)

    # Replicate the single depth channel to three channels and convert to a PIL image.
    image = torch.cat([depth_map] * 3, dim=1)
    image = image.permute(0, 2, 3, 1).cpu().numpy()[0]
    image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))
    return image


def get_canny_image(image, t1=100, t2=200):
    # Run Canny edge detection with low/high hysteresis thresholds t1 and t2,
    # returning the edge map as a single-channel ("L") PIL image.
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    edges = cv2.Canny(image, t1, t2)
    return Image.fromarray(edges, "L")
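

# A small helper around the OpenPose detector loaded above, mirroring get_depth_map
# and get_canny_image. This is a minimal sketch: the helper name is chosen here for
# illustration, and it assumes controlnet_aux's default callable behavior, which
# returns the rendered pose map as a PIL image.
def get_openpose_image(image):
    # Detect body keypoints and return the drawn pose map as a conditioning image.
    return openpose(image)


# Usage sketch ("input.png" is a placeholder path, not part of this module):
#   img = Image.open("input.png").convert("RGB")
#   depth = get_depth_map(img)
#   canny = get_canny_image(img)
#   pose = get_openpose_image(img)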