import depth_pro
import gradio as gr
import matplotlib.cm as cm
import numpy as np
from depth_pro.depth_pro import DepthProConfig
from PIL import Image


def run(input_image_path):
    config = DepthProConfig(
        patch_encoder_preset="dinov2l16_384",
        image_encoder_preset="dinov2l16_384",
        checkpoint_uri="./depth_pro.pt",
        decoder_features=256,
        use_fov_head=True,
        fov_encoder_preset="dinov2l16_384",
    )

    # Load model and preprocessing transform
    model, transform = depth_pro.create_model_and_transforms(config=config)
    model.eval()

    # Load and preprocess an image
    image, _, f_px = depth_pro.load_rgb(input_image_path)
    image = transform(image)

    # Run inference
    prediction = model.infer(image, f_px=f_px)
    depth_map = prediction["depth"].squeeze().cpu().numpy()
    focallength_px = prediction["focallength_px"]

    # Normalize the depth map to [0, 1] and render it with the viridis colormap
    depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
    colormap = cm.get_cmap("viridis")
    depth_map = colormap(depth_map)
    depth_map = (depth_map[:, :, :3] * 255).astype(np.uint8)
    depth_map = Image.fromarray(depth_map)

    return depth_map, focallength_px.item()


demo = gr.Interface(
    fn=run,
    inputs=gr.Image(label="Input Image", type="filepath"),
    outputs=[
        gr.Image(label="Depth Map"),
        gr.Number(label="Focal Length"),
    ],
    description="""
""",
)

demo.launch()