huntrezz committed
Commit 79684c1 · verified · 1 Parent(s): e4132a4

Update app.py

Files changed (1)
  1. app.py +11 -10
app.py CHANGED
@@ -5,33 +5,34 @@ from transformers import DPTForDepthEstimation, DPTImageProcessor
 import gradio as gr
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256", torch_dtype=torch.float16).to(device)
+model = DPTForDepthEstimation.from_pretrained("./", local_files_only=True, torch_dtype=torch.float16).to(device)
 processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
 
 color_map = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_INFERNO)
 
+@torch.inference_mode()
 def process_frame(image):
     rgb_frame = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-    resized_frame = cv2.resize(rgb_frame, (128, 128))
+    resized_frame = cv2.resize(rgb_frame, (128, 128), interpolation=cv2.INTER_AREA)
 
     inputs = processor(images=resized_frame, return_tensors="pt").to(device)
     inputs = {k: v.to(torch.float16) for k, v in inputs.items()}
 
-    with torch.no_grad():
-        outputs = model(**inputs)
-        predicted_depth = outputs.predicted_depth
-
+    predicted_depth = model(**inputs).predicted_depth
     depth_map = predicted_depth.squeeze().cpu().numpy()
-    depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
+
+    depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
+    depth_map = (depth_map * 255).astype(np.uint8)
     depth_map_colored = cv2.applyColorMap(depth_map, color_map)
 
-    return cv2.cvtColor(depth_map_colored, cv2.COLOR_BGR2RGB)
+    return depth_map_colored
 
 interface = gr.Interface(
     fn=process_frame,
-    inputs=gr.Image(sources="webcam", streaming=True),
+    inputs=gr.Image(source="webcam", streaming=True),
     outputs="image",
-    live=True
+    live=True,
+    refresh_rate=0.1
 )
 
 interface.launch()
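
For reference, a minimal sketch (using a hypothetical 2x2 depth array, not taken from the app) of how the manual min-max scaling introduced here compares to the cv2.normalize call it replaces:

import numpy as np
import cv2

# Hypothetical depth values for illustration only; any non-constant float array works.
depth_map = np.array([[0.2, 1.5],
                      [3.0, 0.8]], dtype=np.float32)

# New approach in this commit: manual min-max scaling to [0, 255].
scaled = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
manual_u8 = (scaled * 255).astype(np.uint8)

# Old approach removed by this commit: OpenCV's built-in min-max normalization.
cv_u8 = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)

# Both map the minimum to 0 and the maximum to 255; individual values can
# differ slightly because astype(np.uint8) truncates while cv2.normalize rounds.
print(manual_u8)
print(cv_u8)

One caveat with the manual form: a perfectly flat depth map makes the denominator zero, though that is unlikely for a real webcam frame.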