Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -219,27 +219,33 @@ def process_image(input_image):
     """
     if input_image is None:
         return None, None
-
+
     # Move model to GPU for processing
     MODEL.to('cuda')
     MODEL.eval()
-
+
     # Convert from RGB to BGR for depth processing
     input_bgr = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGR)
-
+
     with torch.no_grad():
         # Get depth map
         depth = MODEL.infer_image(input_bgr)
-
+
+        # **Apply Gaussian Blur to smooth the depth map**
+        kernel_size = (15, 15) # Size of the Gaussian kernel (must be odd and positive)
+        sigma = 0 # If 0, sigma is calculated based on kernel size
+        depth = cv2.GaussianBlur(depth, kernel_size, sigma)
+        print(f"Applied Gaussian Blur with kernel size {kernel_size} and sigma {sigma}")
+
     # Normalize depth for visualization (0-255)
     depth_normalized = ((depth - depth.min()) / (depth.max() - depth.min()) * 255).astype(np.uint8)
-
+
     # Move model back to CPU
     MODEL.to('cpu')
-
+
     # Get intensity map
     intensity_map = get_image_intensity(np.array(input_image), gamma_correction=1.0)
-
+
     # Blend depth raw with intensity map
     blended_result = blend_numpy_images(
         cv2.cvtColor(depth_normalized, cv2.COLOR_GRAY2RGB), # Convert depth to RGB
@@ -247,10 +253,10 @@ def process_image(input_image):
         blend_factor=0.25,
         mode="normal"
     )
-
+
     # Generate normal map from blended result
     normal_map = process_normal_map(blended_result)
-
+
     return depth_normalized, normal_map
 
 @spaces.GPU
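For reviewers who want to try the new smoothing step in isolation, here is a minimal standalone sketch. Only the cv2.GaussianBlur call and its (15, 15) kernel / sigma = 0 parameters come from this commit; the synthetic depth array and the 0-255 normalization are illustrative stand-ins for the MODEL.infer_image() output and the Space's depth_normalized step.

    import cv2
    import numpy as np

    # Synthetic float32 depth map: a sharp step edge plus mild noise
    # (stands in for the depth returned by MODEL.infer_image()).
    depth = np.zeros((256, 256), dtype=np.float32)
    depth[:, 128:] = 10.0
    depth += np.random.default_rng(0).normal(0.0, 0.2, depth.shape).astype(np.float32)

    # Parameters as added in this commit: 15x15 kernel, sigma derived from it.
    kernel_size = (15, 15)  # must be odd and positive
    sigma = 0               # 0 lets OpenCV compute sigma from the kernel size
    smoothed = cv2.GaussianBlur(depth, kernel_size, sigma)

    # Normalize to 0-255 for visualization, mirroring depth_normalized in app.py.
    depth_normalized = ((smoothed - smoothed.min()) / (smoothed.max() - smoothed.min()) * 255).astype(np.uint8)
    print(depth_normalized.shape, depth_normalized.dtype, depth_normalized.min(), depth_normalized.max())

With sigma = 0, OpenCV derives the standard deviation from the kernel size, so the 15x15 kernel alone controls how strongly depth discontinuities are softened before the blended result is passed to process_normal_map.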