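# Gradio demo: monocular depth estimation with the Intel/dpt-hybrid-midas model.
# The app takes an input image, predicts a depth map, and returns it as a grayscale image.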
import gradio as gr
import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
from transformers import pipeline
depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-hybrid-midas")
def launch(input_image):
    # Run the depth-estimation pipeline on the input PIL image
    out = depth_estimator(input_image)
    predicted_depth = torch.tensor(out["predicted_depth"])
    if len(predicted_depth.shape) == 2:  # if 2D, add batch and channel dimensions
        predicted_depth = predicted_depth.unsqueeze(0).unsqueeze(0)
    # Resize the prediction back to the original image size;
    # PIL's size is (width, height), so reverse it to (height, width) for interpolate
    prediction = F.interpolate(
        predicted_depth,
        size=input_image.size[::-1],
        mode="bicubic",
        align_corners=False,
    )
    # Scale to the 0-255 range and convert to an 8-bit grayscale PIL image
    output = prediction.squeeze().numpy()
    formatted = (output * 255 / np.max(output)).astype("uint8")
    depth = Image.fromarray(formatted)
    return depth
iface = gr.Interface(
    launch,
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
)
demo = gr.Blocks()
with demo:
    gr.TabbedInterface(
        [iface],
        ["Depth Estimation Interface"],
    )
demo.launch(debug=True)
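
# A minimal sketch for exercising the pipeline without the Gradio UI
# (assumption: a local file named "example.jpg" exists; the filenames are hypothetical):
#
#   img = Image.open("example.jpg").convert("RGB")
#   depth_map = launch(img)
#   depth_map.save("example_depth.png")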