huntrezz committed (verified)
Commit 2649bd5 · 1 Parent(s): c9bb6a3

Update app.py

Files changed (1)
  1. app.py +3 -31
app.py CHANGED
@@ -8,23 +8,11 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256", torch_dtype=torch.float16).to(device)
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
-def resize_image(image, target_size=(256, 256)):
-    return cv2.resize(image, target_size)
-
-def manual_normalize(depth_map):
-    min_val = np.min(depth_map)
-    max_val = np.max(depth_map)
-    if min_val != max_val:
-        normalized = (depth_map - min_val) / (max_val - min_val)
-        return (normalized * 255).astype(np.uint8)
-    else:
-        return np.zeros_like(depth_map, dtype=np.uint8)
-
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
 
 def process_frame(image):
     rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    resized_frame = resize_image(rgb_frame)
+    resized_frame = cv2.resize(rgb_frame, (128, 128))
 
     inputs = processor(images=resized_frame, return_tensors="pt").to(device)
     inputs = {k: v.to(torch.float16) for k, v in inputs.items()}
@@ -34,26 +22,10 @@ def process_frame(image):
     predicted_depth = outputs.predicted_depth
 
     depth_map = predicted_depth.squeeze().cpu().numpy()
-
-    depth_map = np.nan_to_num(depth_map, nan=0.0, posinf=0.0, neginf=0.0)
-    depth_map = depth_map.astype(np.float32)
-
-    if depth_map.size == 0:
-        depth_map = np.zeros((256, 256), dtype=np.uint8)
-    else:
-        if np.any(depth_map) and np.min(depth_map) != np.max(depth_map):
-            depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
-        else:
-            depth_map = np.zeros_like(depth_map, dtype=np.uint8)
-
-    if np.all(depth_map == 0):
-        depth_map = manual_normalize(depth_map)
-
+    depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
     depth_map_colored = cv2.applyColorMap(depth_map, color_map)
-    depth_map_colored = cv2.resize(depth_map_colored, (image.shape[1], image.shape[0]))
 
-    combined = np.hstack((image, depth_map_colored))
-    return cv2.cvtColor(combined, cv2.COLOR_BGR2RGB)
+    return cv2.cvtColor(depth_map_colored, cv2.COLOR_BGR2RGB)
 
 interface = gr.Interface(
     fn=process_frame,
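For reference, the script after this commit reduces to the sketch below. The model/processor setup, color_map, and the process_frame body are taken directly from the diff; the imports, the no_grad forward pass between the two hunks, and the gr.Interface arguments after fn=process_frame are not visible in this diff, so they are assumptions included only to make the sketch self-contained and runnable.

# Sketch of app.py as it stands after commit 2649bd5.
# Assumptions (not visible in this diff): the imports, the forward-pass lines
# between the two hunks, and every gr.Interface argument after fn.
import cv2
import numpy as np
import torch
import gradio as gr
from transformers import DPTForDepthEstimation, DPTImageProcessor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

model = DPTForDepthEstimation.from_pretrained(
    "Intel/dpt-swinv2-tiny-256", torch_dtype=torch.float16
).to(device)
processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")

# 256-entry inferno lookup table used to colorize the 8-bit depth map.
color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)

def process_frame(image):
    # Convert the incoming BGR frame to RGB and shrink it to 128x128,
    # the change this commit makes to cut per-frame work.
    rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    resized_frame = cv2.resize(rgb_frame, (128, 128))

    inputs = processor(images=resized_frame, return_tensors="pt").to(device)
    inputs = {k: v.to(torch.float16) for k, v in inputs.items()}

    with torch.no_grad():          # assumed: these lines sit between the two hunks
        outputs = model(**inputs)
    predicted_depth = outputs.predicted_depth

    # Min-max scale the raw depth to 0-255 and apply the inferno LUT.
    depth_map = predicted_depth.squeeze().cpu().numpy()
    depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    depth_map_colored = cv2.applyColorMap(depth_map, color_map)

    return cv2.cvtColor(depth_map_colored, cv2.COLOR_BGR2RGB)

interface = gr.Interface(
    fn=process_frame,
    inputs=gr.Image(),   # assumed: the real input/output config is outside this diff
    outputs=gr.Image(),  # assumed
    live=True,           # assumed
)

if __name__ == "__main__":
    interface.launch()   # assumed

Note that the removed code (resize_image, manual_normalize, the NaN and constant-depth guards, the resize back to frame size, and the np.hstack with the original frame) means the app now returns only the colorized depth map and relies on the model producing a finite, non-constant depth tensor for cv2.normalize to spread over the 0-255 range.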