import torch

# ZoeDepth's hub code runs some modules through torch.jit.script; patching it to a
# no-op lets the model run eagerly without scripting errors.
torch.jit.script = lambda f: f

import gradio as gr
import spaces  # Hugging Face ZeroGPU helper
from zoedepth.utils.misc import colorize  # save_raw_16bit is redefined locally below
from zoedepth.utils.geometry import depth_to_points, create_triangles  # unused in this demo
from PIL import Image
import numpy as np
css = """ | |
img { | |
max-height: 500px; | |
object-fit: contain; | |
} | |
""" | |
# DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# Load the pretrained ZoeD_N checkpoint from the official ZoeDepth hub repo and keep it in eval mode.
MODEL = torch.hub.load('isl-org/ZoeDepth', "ZoeD_N", pretrained=True).eval()
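# The same hub repo also provides "ZoeD_K" (outdoor/KITTI) and "ZoeD_NK" (mixed) checkpoints;
# swapping the name is untested here but follows the ZoeDepth README, e.g.:
# MODEL = torch.hub.load('isl-org/ZoeDepth', "ZoeD_NK", pretrained=True).eval()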
# ----------- Depth functions
def save_raw_16bit(depth, fpath="raw.png"):
    """Scale a depth map (metres) by 256 and cast to uint16, the format used for 16-bit PNG export.

    The fpath argument is unused here: the array is returned instead of being written to disk.
    """
    if isinstance(depth, torch.Tensor):
        depth = depth.squeeze().cpu().numpy()

    # assert isinstance(depth, np.ndarray), "Depth must be a torch tensor or numpy array"
    # assert depth.ndim == 2, "Depth must be 2D"

    depth = depth * 256  # scale for 16-bit png
    depth = depth.astype(np.uint16)
    return depth
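# A minimal sketch (not part of the app) of how the uint16 array could be written to disk:
# recent Pillow versions store a 2-D uint16 array as a 16-bit ("I;16") PNG, and dividing
# by 256 recovers metric depth. "example.jpg" is a hypothetical input file.
#
#   raw = save_raw_16bit(MODEL.infer_pil(Image.open("example.jpg").convert("RGB")))
#   Image.fromarray(raw).save("raw_depth.png")
#   metres = np.asarray(Image.open("raw_depth.png"), dtype=np.float32) / 256.0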
@spaces.GPU  # request a ZeroGPU slot while this function runs
def process_image(image: Image.Image):
    global MODEL
    image = image.convert("RGB")

    device = "cuda" if torch.cuda.is_available() else "cpu"
    MODEL.to(device)

    depth = MODEL.infer_pil(image)  # per-pixel metric depth (metres) as a float numpy array
    # Colourize the depth map, keep its first channel, and rescale it to 16-bit for display.
    processed_array = save_raw_16bit(colorize(depth)[:, :, 0])
    return Image.fromarray(processed_array)
# ----------- Gradio UI
title = "# ZoeDepth"
description = """Unofficial demo for **ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth**."""
with gr.Blocks(css=css) as API:
    gr.Markdown(title)
    gr.Markdown(description)
    with gr.Tab("Depth Prediction"):
        with gr.Row():
            inputs = gr.Image(label="Input Image", type='pil', height=500)   # input image
            outputs = gr.Image(label="Depth Map", type='pil', height=500)    # predicted depth rendered as an image
        generate_btn = gr.Button(value="Generate")
        generate_btn.click(process_image, inputs=inputs, outputs=outputs, api_name="generate_depth")
if __name__ == '__main__':
    API.launch()
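# A minimal client-side sketch (assumptions: the Space is published under a hypothetical id
# "user/zoedepth-demo" and a recent gradio_client is installed); the endpoint name matches
# api_name="generate_depth" registered above.
#
#   from gradio_client import Client, handle_file
#   client = Client("user/zoedepth-demo")
#   result = client.predict(handle_file("example.jpg"), api_name="/generate_depth")
#   print(result)  # local path of the returned depth image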