import torch
import cv2
import time

# Dataset constants
BASELINE = 0.191 # Baseline in meters
HEIGHT_ORIGINAL = 1920 # Original image height
HEIGHT_DOWNSCALED = 960 # Downscaled height for disparity


def compute_depth_from_disparity(disparity_map: torch.Tensor) -> torch.Tensor:
    """
    Convert a disparity map to a depth map based on dataset-specific calibration.

    The depth is computed using trigonometric projection:

        depth = B * (sin(theta) / tan(disparity_rad) + cos(theta))

    where:
        - B is the baseline (BASELINE, in meters).
        - theta is the vertical angle corresponding to each pixel in the y-grid.
        - disparity_rad is the disparity converted from pixels to radians
          (scaled by pi / HEIGHT_DOWNSCALED).

    Parameters:
        disparity_map (torch.Tensor): Input tensor of shape (bs, 1, h, w) or (bs, h, w).

    Returns:
        torch.Tensor: Depth map with the same shape as the input.
    """
# Ensure input is 3D (batch, height, width)
has_channel_dim = disparity_map.dim() == 4 and disparity_map.shape[1] == 1
if has_channel_dim:
disparity_map = disparity_map.squeeze(1)
bs, height, width = disparity_map.shape
    # y-grid of original-resolution row coordinates (offset by 512, step 2 to undo the 2x downscaling)
y_grid = (
torch.arange(512 + 2 * height - 1, 512, step=-2, device=disparity_map.device)
.unsqueeze(0)
.unsqueeze(-1)
.expand(bs, -1, width)
)
# Compute angle and disparity in radians
theta_grid = y_grid * torch.pi / HEIGHT_ORIGINAL
disparity_map_rad = (torch.pi / HEIGHT_DOWNSCALED) * disparity_map
# Initialize depth map
depth_map = torch.zeros_like(disparity_map, dtype=torch.float32)
# Compute depth only where disparity is valid
non_zero_disparity = disparity_map != 0
depth_map[non_zero_disparity] = (
(torch.sin(theta_grid[non_zero_disparity]) / torch.tan(disparity_map_rad[non_zero_disparity]))
+ torch.cos(theta_grid[non_zero_disparity])
) * BASELINE
# Restore channel dimension if input had it
if has_channel_dim:
depth_map = depth_map.unsqueeze(1)
return depth_map
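

# Illustrative usage (hypothetical shapes and values, not real dataset data):
#   disp = torch.rand(2, 1, 64, 128) * 5 + 0.5
#   depth = compute_depth_from_disparity(disp)  # -> shape (2, 1, 64, 128)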


def compute_disparity_from_depth(depth_map: torch.Tensor) -> torch.Tensor:
    """
    Convert a depth map to a disparity map based on dataset-specific calibration.

    This function inverts compute_depth_from_disparity, based on the relationship:

        tan(disparity_rad) = sin(theta) / (depth / B - cos(theta))

    The disparity in pixel units is then:

        disparity = (H / pi) * atan(sin(theta) / (depth / B - cos(theta)))

    where:
        - B is the baseline (BASELINE, in meters).
        - theta is the vertical angle corresponding to each pixel in the y-grid.
        - H is the downscaled image height (HEIGHT_DOWNSCALED).

    Parameters:
        depth_map (torch.Tensor): Input tensor of shape (bs, 1, h, w) or (bs, h, w).

    Returns:
        torch.Tensor: Disparity map with the same shape as the input.
    """
# Ensure input is 3D (batch, height, width)
has_channel_dim: bool = depth_map.dim() == 4 and depth_map.shape[1] == 1
if has_channel_dim:
depth_map = depth_map.squeeze(1)
bs, height, width = depth_map.shape
    # y-grid of original-resolution row coordinates (offset by 512, step 2 to undo the 2x downscaling)
y_grid = (
torch.arange(512 + 2 * height - 1, 512, step=-2, device=depth_map.device)
.unsqueeze(0)
.unsqueeze(-1)
.expand(bs, -1, width)
)
# Compute theta (polar angle)
theta_grid = y_grid * torch.pi / HEIGHT_ORIGINAL
    # Initialize disparity map
disparity_map = torch.zeros_like(depth_map, dtype=torch.float32)
# Compute disparity only where depth is valid
non_zero_depth = depth_map != 0
tan_disparity_rad = torch.sin(theta_grid[non_zero_depth]) / (
(depth_map[non_zero_depth] / BASELINE) - torch.cos(theta_grid[non_zero_depth])
)
disparity_map_rad = torch.atan(tan_disparity_rad)
disparity_map[non_zero_depth] = (HEIGHT_DOWNSCALED / torch.pi) * disparity_map_rad
# Restore channel dimension if input had it
if has_channel_dim:
disparity_map = disparity_map.unsqueeze(1)
return disparity_map
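

# Illustrative usage (hypothetical values): zeros in the depth map are treated
# as invalid and produce zero disparity.
#   depth = torch.rand(2, 64, 128) * 10 + 1.0
#   disp = compute_disparity_from_depth(depth)  # -> shape (2, 64, 128)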


def disp_deg_to_disp_pix(disp_deg: float) -> float:
    """
    Convert a disparity value from degrees to pixels.

    The relationship is:

        disp_pix = (H / 180) * disp_deg

    where:
        - H is the downscaled image height (HEIGHT_DOWNSCALED).
        - disp_deg is the disparity value in degrees.

    Parameters:
        disp_deg (float): Disparity in degrees.

    Returns:
        float: Disparity in pixels.
    """
    return (HEIGHT_DOWNSCALED / 180) * disp_deg
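
# e.g., disp_deg_to_disp_pix(0.1875) == 1.0 pixel (960 / 180 * 0.1875 == 1.0).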


def disp_pix_to_disp_deg(disp_pix: float) -> float:
    """
    Convert a disparity value from pixels to degrees.

    The relationship is:

        disp_deg = (180 / H) * disp_pix

    where:
        - H is the downscaled image height (HEIGHT_DOWNSCALED).
        - disp_pix is the disparity value in pixels.

    Parameters:
        disp_pix (float): Disparity in pixels.

    Returns:
        float: Disparity in degrees.
    """
    return (180 / HEIGHT_DOWNSCALED) * disp_pix
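

# e.g., disp_pix_to_disp_deg(1.0) == 0.1875 degrees (inverse of the above).

# NOTE: `load_depth_with_infinite_retry` is called by the readers below but is
# not defined in this file. The following is a minimal sketch of what such a
# loader could look like, inferred from the otherwise-unused cv2 and time
# imports: read the 16-bit image with OpenCV and retry indefinitely on
# transient I/O failures. The `retry_delay` parameter is an assumption.
def load_depth_with_infinite_retry(filename: str, retry_delay: float = 1.0) -> torch.Tensor:
    """Load a 16-bit map as a float32 tensor, retrying until the read succeeds."""
    while True:
        raw = cv2.imread(filename, cv2.IMREAD_ANYDEPTH)  # preserve 16-bit values
        if raw is not None:
            return torch.from_numpy(raw.astype("float32"))
        time.sleep(retry_delay)  # back off briefly before retrying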


def readDepthHelvipad(filename: str) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Read a depth map from a Helvipad dataset file.

    The depth is stored in a 16-bit format and must be scaled by 1/256.

    Parameters:
        filename (str): Path to the depth file.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            - Depth map tensor (H, W).
            - Validity mask tensor (H, W) indicating valid depth values.
    """
depth = load_depth_with_infinite_retry(filename) / 256.0
valid = depth > 0.0
return depth, valid


def readDisparityHelvipad(filename: str) -> tuple[torch.Tensor, torch.Tensor]:
    """
    Read a disparity map from a Helvipad dataset file.

    The disparity is stored in a 16-bit format and must be scaled by 1/2048.

    Parameters:
        filename (str): Path to the disparity file.

    Returns:
        tuple[torch.Tensor, torch.Tensor]:
            - Disparity map tensor (H, W).
            - Validity mask tensor (H, W) indicating valid disparity values.
    """
disp = load_depth_with_infinite_retry(filename) / 2048.0
valid = disp > 0.0
return disp, valid
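

if __name__ == "__main__":
    # Minimal round-trip sanity check (illustrative only: random values, not
    # real Helvipad data). Converting disparity -> depth -> disparity should
    # recover the input wherever the disparity is non-zero.
    disp = torch.rand(1, 64, 128) * 5 + 0.5  # hypothetical pixel disparities
    depth = compute_depth_from_disparity(disp)
    disp_rec = compute_disparity_from_depth(depth)
    print("max round-trip error:", (disp - disp_rec).abs().max().item())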