import numpy as np
import torch
import gradio as gr
from PIL import Image
from transformers import pipeline

# Load a monocular depth-estimation pipeline backed by Intel's DPT-Hybrid MiDaS model.
depth_estimator = pipeline(task="depth-estimation",
                           model="Intel/dpt-hybrid-midas")

def launch(input_image):
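    """Estimate a depth map for a PIL image and return it as a grayscale PIL image."""
    # The depth-estimation pipeline returns a dict with "predicted_depth"
    # (a raw torch.Tensor) and "depth" (a ready-made PIL image); the raw
    # tensor is post-processed manually below.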
    out = depth_estimator(input_image)

    # Resize the prediction to the input resolution. predicted_depth has shape
    # (1, H', W'), so unsqueeze to (1, 1, H', W') for interpolate; PIL's .size
    # is (width, height), so it is reversed to the (height, width) that
    # interpolate expects.
    prediction = torch.nn.functional.interpolate(
        out["predicted_depth"].unsqueeze(1),
        size=input_image.size[::-1],
        mode="bicubic",
        align_corners=False,
    )

    # Normalize the prediction to the 0-255 range and convert it
    # to a grayscale PIL image.
    output = prediction.squeeze().numpy()
    formatted = (output * 255 / np.max(output)).astype("uint8")
    depth = Image.fromarray(formatted)
    return depth

# Build the Gradio interface: one image in, one depth map out.
iface = gr.Interface(launch,
                     inputs=[gr.Image(label="Upload image", type="pil")],
                     outputs=[gr.Image(label="Depth Map", type="pil")],
                     title="DepthSense",
                     description="Dive into the unseen depths of your images! Simply upload and let DepthSense reveal a whole new dimension of your visuals, instantly.")

iface.launch()
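
# Note: when running locally rather than in a hosted Space, a temporary public
# URL can be requested with iface.launch(share=True) if desired.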