|
import os |
|
import gradio as gr |
|
import torch |
|
import torch.nn.functional as F |
|
import numpy as np |
|
from PIL import Image |
|
from transformers import pipeline |
|
|
|
# Hugging Face depth-estimation pipeline (Intel DPT-hybrid MiDaS).
# Instantiated once at module import; first run downloads the model weights.
depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-hybrid-midas")
|
|
|
def launch(input_image):
    """Estimate a depth map for *input_image* and return it as a grayscale image.

    Parameters
    ----------
    input_image : PIL.Image.Image
        Input image as supplied by the Gradio ``Image`` component.

    Returns
    -------
    PIL.Image.Image
        Depth map rescaled to the 0-255 uint8 range, resized to match the
        input image's dimensions.
    """
    out = depth_estimator(input_image)

    # The pipeline may hand back a torch.Tensor or an array-like; as_tensor
    # avoids the copy (and UserWarning) that torch.tensor() incurs when the
    # value is already a tensor.
    predicted_depth = torch.as_tensor(out["predicted_depth"])

    # F.interpolate expects a 4-D (N, C, H, W) input; add batch and channel
    # dims when the model returned a bare (H, W) map.
    if predicted_depth.dim() == 2:
        predicted_depth = predicted_depth.unsqueeze(0).unsqueeze(0)

    # PIL's .size is (width, height); interpolate wants (height, width).
    # .float() ensures a dtype bicubic interpolation supports.
    prediction = F.interpolate(
        predicted_depth.float(),
        size=input_image.size[::-1],
        mode="bicubic",
        align_corners=False,
    )

    # .cpu() makes the conversion safe even if the pipeline ran on GPU.
    output = prediction.squeeze().cpu().numpy()

    # Guard against an all-zero (or degenerate) depth map: the original
    # unconditional division by np.max(output) raised a divide-by-zero /
    # produced NaNs for such inputs.
    peak = np.max(output)
    if peak > 0:
        formatted = (output * 255 / peak).astype("uint8")
    else:
        formatted = np.zeros_like(output, dtype="uint8")

    return Image.fromarray(formatted)
|
|
|
# Simple image-in / image-out UI wrapping the depth estimator.
iface = gr.Interface(
    fn=launch,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
)
|
|
|
# Host the interface inside a Blocks container with a single tab, so more
# tabs can be added alongside it later.
demo = gr.Blocks()

with demo:
    gr.TabbedInterface([iface], ["Depth Estimation Interface"])

# debug=True surfaces tracebacks in the console while the app runs.
demo.launch(debug=True)
|
|