huntrezz committed (verified)
Commit a42d79c · 1 Parent(s): 79684c1

Update app.py

Files changed (1): app.py (+18, -10)
app.py CHANGED
@@ -6,24 +6,32 @@ import gradio as gr
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model = DPTForDepthEstimation.from_pretrained("./", local_files_only=True, torch_dtype=torch.float16).to(device)
+model = torch.jit.script(model)
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
 
+input_tensor = torch.zeros((1, 3, 128, 128), dtype=torch.float16, device=device)
+depth_map = np.zeros((128, 128), dtype=np.float32)
+depth_map_colored = np.zeros((128, 128, 3), dtype=np.uint8)
+
+def preprocess_image(image):
+    return cv2.resize(image, (128, 128), interpolation=cv2.INTER_AREA).transpose(2, 0, 1).astype(np.float32) / 255.0
+
 @torch.inference_mode()
 def process_frame(image):
-    rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    resized_frame = cv2.resize(rgb_frame, (128, 128), interpolation=cv2.INTER_AREA)
+    preprocessed = preprocess_image(image)
+    input_tensor[0] = torch.from_numpy(preprocessed).to(device)
 
-    inputs = processor(images=resized_frame, return_tensors="pt").to(device)
-    inputs = {k: v.to(torch.float16) for k, v in inputs.items()}
+    if torch.cuda.is_available():
+        torch.cuda.synchronize()
 
-    predicted_depth = model(**inputs).predicted_depth
-    depth_map = predicted_depth.squeeze().cpu().numpy()
-
-    depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
-    depth_map = (depth_map * 255).astype(np.uint8)
-    depth_map_colored = cv2.applyColorMap(depth_map, color_map)
+    predicted_depth = model(input_tensor).predicted_depth
+    np.subtract(predicted_depth.squeeze().cpu().numpy(), predicted_depth.min().item(), out=depth_map)
+    np.divide(depth_map, depth_map.max(), out=depth_map)
+    np.multiply(depth_map, 255, out=depth_map)
+    depth_map = depth_map.astype(np.uint8)
+    cv2.applyColorMap(depth_map, color_map, dst=depth_map_colored)
 
     return depth_map_colored
 
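The hunk above does not show how process_frame is wired into the Gradio app. A minimal usage sketch, assuming a Gradio 4.x streaming webcam interface (the gr.Interface wiring, component choices, and live=True flag are assumptions for illustration, not part of this commit):

# Usage sketch (assumption): one plausible way the updated process_frame could be
# exposed through Gradio; the actual interface code lies outside the hunk above.
import gradio as gr

demo = gr.Interface(
    fn=process_frame,                                      # function defined in app.py above
    inputs=gr.Image(sources=["webcam"], streaming=True),   # webcam frames arrive as numpy arrays
    outputs=gr.Image(label="Depth (Inferno colormap)"),    # colored depth map returned per frame
    live=True,                                             # rerun on every new frame
)

if __name__ == "__main__":
    demo.launch()

The commit's preallocated input_tensor, depth_map, and depth_map_colored buffers, together with the in-place np.subtract/np.divide/np.multiply calls and the dst= argument to cv2.applyColorMap, appear aimed at avoiding per-frame allocations in exactly this kind of live streaming loop.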