diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tar filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..832b9d621852cb1036394519f5fc9c43744516fd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,148 @@
+*.png
+**.gif
+.vscode/
+*.rdb
+**.xml
+wandb/
+slurm/
+tmp/
+.logs/
+checkpoints/
+external_jobs/
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+ptlflow_logs/
+output/
+log/
+.idea/
+# C extensions
+*.so
+results/
+**.DS_Store
+**.pt
+demo/
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+~shortcuts/
+**/wandb_logs/
+**.db
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..8abe402a00792595111a1a7395dc6a10413d2ffe
--- /dev/null
+++ b/README.md
@@ -0,0 +1,10 @@
+---
+title: Invisible Stitch
+emoji: 🪡
+colorFrom: pink
+colorTo: purple
+sdk: gradio
+sdk_version: 4.27.0
+app_file: app.py
+pinned: false
+---
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9f59a339a6268ac34a1cf723f06ffb501ee9ef6
--- /dev/null
+++ b/app.py
@@ -0,0 +1,257 @@
+import spaces
+import os
+
+# this is an HF Spaces-specific hack, as
+# (i) building pytorch3d with GPU support is a bit tricky here
+# (ii) installing the wheel via requirements.txt breaks ZeroGPU
+os.system("pip install pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt221/download.html")
+
+import torch
+import torch.nn.functional as F
+import matplotlib.pyplot as plt
+import numpy as np
+
+import skimage
+from PIL import Image
+
+import gradio as gr
+
+from utils.render import PointsRendererWithMasks, render
+from utils.ops import snap_high_gradients_to_nn, project_points, get_pointcloud, merge_pointclouds, outpaint_with_depth_estimation
+from utils.gs import gs_options, read_cameras_from_optimization_bundle, Scene, run_gaussian_splatting, get_blank_gs_bundle
+
+from pytorch3d.utils import opencv_from_cameras_projection
+from utils.ops import focal2fov, fov2focal
+from utils.models import infer_with_zoe_dc
+from utils.scene import GaussianModel
+from utils.demo import downsample_point_cloud
+from typing import Iterable, Tuple, Dict, Optional
+import itertools
+
+from pytorch3d.structures import Pointclouds
+from pytorch3d.renderer import (
+ look_at_view_transform,
+ PerspectiveCameras,
+)
+
+from pytorch3d.io import IO
+
+def get_blank_gs_bundle(h, w):
+ return {
+ "camera_angle_x": focal2fov(torch.tensor([w], dtype=torch.float32), w),
+ "W": w,
+ "H": h,
+ "pcd_points": None,
+ "pcd_colors": None,
+ 'frames': [],
+ }
+
+@spaces.GPU(duration=30)
+def extrapolate_point_cloud(prompt: str, image_size: Tuple[int, int], look_at_params: Iterable[Tuple[float, float, float, Tuple[float, float, float]]], point_cloud: Pointclouds = None, dry_run: bool = False, discard_mask: bool = False, initial_image: Optional[Image.Image] = None, depth_scaling: float = 1, **render_kwargs):
+ w, h = image_size
+ optimization_bundle_frames = []
+
+ for azim, elev, dist, at in look_at_params:
+ R, T = look_at_view_transform(device=device, azim=azim, elev=elev, dist=dist, at=at)
+ cameras = PerspectiveCameras(R=R, T=T, focal_length=torch.tensor([w], dtype=torch.float32), principal_point=(((h-1)/2, (w-1)/2),), image_size=(image_size,), device=device, in_ndc=False)
+
+ if point_cloud is not None:
+ images, masks, depths = render(cameras, point_cloud, **render_kwargs)
+
+ if not dry_run:
+ eroded_mask = skimage.morphology.binary_erosion((depths[0] > 0).cpu().numpy(), footprint=None)#skimage.morphology.disk(1))
+ eroded_depth = depths[0].clone()
+ eroded_depth[torch.from_numpy(eroded_mask).to(depths.device) <= 0] = 0
+
+ outpainted_img, aligned_depth = outpaint_with_depth_estimation(images[0], masks[0], eroded_depth, h, w, pipe, zoe_dc_model, prompt, cameras, dilation_size=2, depth_scaling=depth_scaling, generator=torch.Generator(device=pipe.device).manual_seed(0))
+
+ aligned_depth = torch.from_numpy(aligned_depth).to(device)
+
+ else:
+ # in a dry run, we do not actually outpaint the image
+ outpainted_img = Image.fromarray((255*images[0].cpu().numpy()).astype(np.uint8))
+
+ else:
+ assert initial_image is not None
+ assert not dry_run
+
+ # jumpstart the point cloud with a regular depth estimation
+ t_initial_image = torch.from_numpy(np.asarray(initial_image)/255.).permute(2,0,1).float()
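+            # an all-zero sparse depth (and thus an empty validity mask) makes the depth-completion
+            # model fall back to plain monocular depth estimation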
+ depth = aligned_depth = infer_with_zoe_dc(zoe_dc_model, t_initial_image, torch.zeros(h, w))
+ outpainted_img = initial_image
+ images = [t_initial_image.to(device)]
+ masks = [torch.ones(h, w, dtype=torch.bool).to(device)]
+
+ if not dry_run:
+ # snap high gradients to nearest neighbor, which eliminates noodle artifacts
+ aligned_depth = snap_high_gradients_to_nn(aligned_depth, threshold=12).cpu()
+ xy_depth_world = project_points(cameras, aligned_depth)
+
+ c2w = cameras.get_world_to_view_transform().get_matrix()[0]
+
+ optimization_bundle_frames.append({
+ "image": outpainted_img,
+ "mask": masks[0].cpu().numpy(),
+ "transform_matrix": c2w.tolist(),
+ "azim": azim,
+ "elev": elev,
+ "dist": dist,
+ })
+
+ if discard_mask:
+ optimization_bundle_frames[-1].pop("mask")
+
+ if not dry_run:
+ optimization_bundle_frames[-1]["center_point"] = xy_depth_world[0].mean(dim=0).tolist()
+ optimization_bundle_frames[-1]["depth"] = aligned_depth.cpu().numpy()
+ optimization_bundle_frames[-1]["mean_depth"] = aligned_depth.mean().item()
+
+ else:
+ # in a dry run, we do not modify the point cloud
+ continue
+
+ rgb = (torch.from_numpy(np.asarray(outpainted_img).copy()).reshape(-1, 3).float() / 255).to(device)
+
+ if point_cloud is None:
+ point_cloud = get_pointcloud(xy_depth_world[0], device=device, features=rgb)
+
+ else:
+ # pytorch 3d's mask might be slightly too big (subpixels), so we erode it a little to avoid seams
+ # in theory, 1 pixel is sufficient but we use 2 to be safe
+ masks[0] = torch.from_numpy(skimage.morphology.binary_erosion(masks[0].cpu().numpy(), footprint=skimage.morphology.disk(2))).to(device)
+
+ partial_outpainted_point_cloud = get_pointcloud(xy_depth_world[0][~masks[0].view(-1)], device=device, features=rgb[~masks[0].view(-1)])
+
+ point_cloud = merge_pointclouds([point_cloud, partial_outpainted_point_cloud])
+
+ return optimization_bundle_frames, point_cloud
+
+@spaces.GPU(duration=30)
+def generate_point_cloud(initial_image: Image.Image, prompt: str):
+ image_size = initial_image.size
+ w, h = image_size
+
+ optimization_bundle = get_blank_gs_bundle(h, w)
+
+ step_size = 25
+
+ azim_steps = [0, step_size, -step_size]
+ look_at_params = [(azim, 0, 0.01, torch.zeros((1, 3))) for azim in azim_steps]
+
+ optimization_bundle["frames"], point_cloud = extrapolate_point_cloud(prompt, image_size, look_at_params, discard_mask=True, initial_image=initial_image, depth_scaling=0.5, fill_point_cloud_holes=True)
+
+ optimization_bundle["pcd_points"] = point_cloud.points_padded()[0].cpu().numpy()
+ optimization_bundle["pcd_colors"] = point_cloud.features_padded()[0].cpu().numpy()
+
+ return optimization_bundle, point_cloud
+
+@spaces.GPU(duration=30)
+def supplement_point_cloud(optimization_bundle: Dict, point_cloud: Pointclouds, prompt: str):
+ w, h = optimization_bundle["W"], optimization_bundle["H"]
+
+ supporting_frames = []
+
+ for i, frame in enumerate(optimization_bundle["frames"]):
+ # skip supporting views
+ if frame.get("supporting", False):
+ continue
+
+ center_point = torch.tensor(frame["center_point"]).to(device)
+ mean_depth = frame["mean_depth"]
+ azim, elev = frame["azim"], frame["elev"]
+
+ azim_jitters = torch.linspace(-5, 5, 3).tolist()
+ elev_jitters = torch.linspace(-5, 5, 3).tolist()
+
+ # build the product of azim and elev jitters
+ camera_jitters = [{"azim": azim + azim_jitter, "elev": elev + elev_jitter} for azim_jitter, elev_jitter in itertools.product(azim_jitters, elev_jitters)]
+
+ look_at_params = [(camera_jitter["azim"], camera_jitter["elev"], mean_depth, center_point.unsqueeze(0)) for camera_jitter in camera_jitters]
+
+ local_supporting_frames, point_cloud = extrapolate_point_cloud(prompt, (w, h), look_at_params, point_cloud, dry_run=True, depth_scaling=0.5, antialiasing=3)
+
+ for local_supporting_frame in local_supporting_frames:
+ local_supporting_frame["supporting"] = True
+
+ supporting_frames.extend(local_supporting_frames)
+
+ optimization_bundle["pcd_points"] = point_cloud.points_padded()[0].cpu().numpy()
+ optimization_bundle["pcd_colors"] = point_cloud.features_padded()[0].cpu().numpy()
+
+ return optimization_bundle, point_cloud
+
+@spaces.GPU(duration=30)
+def generate_scene(img: Image.Image, prompt: str):
+ assert isinstance(img, Image.Image)
+
+ # resize image maintaining the aspect ratio so the longest side is 720 pixels
+ max_size = 720
+ img.thumbnail((max_size, max_size))
+
+ # crop to ensure the image dimensions are divisible by 8
+ img = img.crop((0, 0, img.width - img.width % 8, img.height - img.height % 8))
+
+ from hashlib import sha1
+ from datetime import datetime
+
+ run_id = sha1(datetime.now().isoformat().encode()).hexdigest()[:6]
+
+ run_name = f"gradio_{run_id}"
+
+ gs_optimization_bundle, point_cloud = generate_point_cloud(img, prompt)
+
+ downsampled_point_cloud = downsample_point_cloud(gs_optimization_bundle, device=device)
+
+ gs_optimization_bundle["pcd_points"] = downsampled_point_cloud.points_padded()[0].cpu().numpy()
+ gs_optimization_bundle["pcd_colors"] = downsampled_point_cloud.features_padded()[0].cpu().numpy()
+
+ scene = Scene(gs_optimization_bundle, GaussianModel(gs_options.sh_degree), gs_options)
+
+ scene.gaussians._opacity = torch.ones_like(scene.gaussians._opacity)
+ #scene = run_gaussian_splatting(scene, gs_optimization_bundle)
+
+ # coordinate system transformation
+ scene.gaussians._xyz = scene.gaussians._xyz.detach()
+ scene.gaussians._xyz[:, 1] = -scene.gaussians._xyz[:, 1]
+ scene.gaussians._xyz[:, 2] = -scene.gaussians._xyz[:, 2]
+
+ save_path = os.path.join("outputs", f"{run_name}.ply")
+
+ scene.gaussians.save_ply(save_path)
+
+ return save_path
+
+if __name__ == "__main__":
+ global device
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ from utils.models import get_zoe_dc_model, get_sd_pipeline
+
+ global zoe_dc_model
+ from huggingface_hub import hf_hub_download
+ zoe_dc_model = get_zoe_dc_model(ckpt_path=hf_hub_download(repo_id="paulengstler/invisible-stitch", filename="invisible-stitch.pt")).to(device)
+
+ global pipe
+ pipe = get_sd_pipeline().to(device)
+
+ demo = gr.Interface(
+ fn=generate_scene,
+ inputs=[
+ gr.Image(label="Input Image", sources=["upload", "clipboard"], type="pil"),
+ gr.Textbox(label="Scene Hallucination Prompt")
+ ],
+ outputs=gr.Model3D(label="Generated Scene"),
+ allow_flagging="never",
+ title="Invisible Stitch: Generating Smooth 3D Scenes with Depth Inpainting",
+        description="Hallucinate geometrically coherent 3D scenes from a single input image in less than 30 seconds.\n\n[Project Page](https://research.paulengstler.com/invisible-stitch) | [GitHub](https://github.com/paulengstler/invisible-stitch) | [Paper](#)\n\nTo keep this demo snappy, we have limited its functionality. Scenes are generated at a low resolution without densification, supporting views are not inpainted, and we do not optimize the resulting point cloud. Imperfections are to be expected, in particular around object borders. Please allow a couple of seconds for the generated scene to be downloaded (about 40 megabytes).",
+        article="Please consider running this demo locally to obtain high-quality results (see the GitHub repository).\n\nHere are some observations we made that might help you to get better results:\n\n- Use generic prompts that match the surroundings of your input image.\n- Ensure that the borders of your input image are free from partially visible objects.\n- Keep your prompts simple and avoid adding specific details.\n",
+ examples=[
+ ["examples/photo-1667788000333-4e36f948de9a.jpeg", "a street with traditional buildings in Kyoto, Japan"],
+ ["examples/photo-1628624747186-a941c476b7ef.jpeg", "a suburban street in North Carolina on a bright, sunny day"],
+ ["examples/photo-1469559845082-95b66baaf023.jpeg", "a view of Zion National Park"],
+ ["examples/photo-1514984879728-be0aff75a6e8.jpeg", "a close-up view of a muddy path in a forest"],
+ ["examples/photo-1618197345638-d2df92b39fe1.jpeg", "a close-up view of a white linen bed in a minimalistic room"],
+ ["examples/photo-1546975490-e8b92a360b24.jpeg", "a warm living room with plants"],
+ ["examples/photo-1499916078039-922301b0eb9b.jpeg", "a cozy bedroom on a bright day"],
+ ])
+ demo.queue().launch(share=True)
diff --git a/examples/photo-1469559845082-95b66baaf023.jpeg b/examples/photo-1469559845082-95b66baaf023.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..3b8f3ecadc4dd40ed369d8e7611e041bb07680f5
Binary files /dev/null and b/examples/photo-1469559845082-95b66baaf023.jpeg differ
diff --git a/examples/photo-1499916078039-922301b0eb9b.jpeg b/examples/photo-1499916078039-922301b0eb9b.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..fb0b35f37f5e666113aa542e846a2a814736459d
Binary files /dev/null and b/examples/photo-1499916078039-922301b0eb9b.jpeg differ
diff --git a/examples/photo-1514984879728-be0aff75a6e8.jpeg b/examples/photo-1514984879728-be0aff75a6e8.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..7f776652f1a51841ac5a7885ecabd7f80d9edb7e
Binary files /dev/null and b/examples/photo-1514984879728-be0aff75a6e8.jpeg differ
diff --git a/examples/photo-1546975490-e8b92a360b24.jpeg b/examples/photo-1546975490-e8b92a360b24.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..385e12631c21714a2389d2ae7bd06fd967e84a43
Binary files /dev/null and b/examples/photo-1546975490-e8b92a360b24.jpeg differ
diff --git a/examples/photo-1618197345638-d2df92b39fe1.jpeg b/examples/photo-1618197345638-d2df92b39fe1.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..19d6deaeb45cc54f24e946036dc83a9b5d2810a8
Binary files /dev/null and b/examples/photo-1618197345638-d2df92b39fe1.jpeg differ
diff --git a/examples/photo-1628624747186-a941c476b7ef.jpeg b/examples/photo-1628624747186-a941c476b7ef.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..c003150a9b1f50d8754a687fd8ecdd69f764e688
Binary files /dev/null and b/examples/photo-1628624747186-a941c476b7ef.jpeg differ
diff --git a/examples/photo-1667788000333-4e36f948de9a.jpeg b/examples/photo-1667788000333-4e36f948de9a.jpeg
new file mode 100644
index 0000000000000000000000000000000000000000..79bded5d636fecd1d426d11a0b06619df7a65274
Binary files /dev/null and b/examples/photo-1667788000333-4e36f948de9a.jpeg differ
diff --git a/packages.txt b/packages.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8504f5c96cd44f720ac45715c09352e5804dac10
--- /dev/null
+++ b/packages.txt
@@ -0,0 +1 @@
+python3-dev
\ No newline at end of file
diff --git a/pre-requirements.txt b/pre-requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b6c6cfbe17f74c1b32f23321d0a9782fe61c3853
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,26 @@
+datasets==2.19.0
+diffusers==0.26.3
+fire==0.5.0
+gradio==4.27.0
+h5py==3.10.0
+huggingface_hub==0.22.2
+imageio==2.33.1
+jaxtyping==0.2.28
+matplotlib==3.7.5
+numpy==1.22.4
+opencv_python==4.8.0.76
+pandas==1.5.1
+Pillow==10.3.0
+plyfile==1.0.3
+scipy==1.8.1
+scikit-image
+submitit==1.5.1
+tqdm==4.66.1
+trimesh==3.21.7
+wandb==0.16.3
+xformers==0.0.25
+spaces
+timm==0.6.7
+transformers==4.37.2
+accelerate==0.27.2
+easydict
\ No newline at end of file
diff --git a/utils/demo.py b/utils/demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..acb32d8db198ade17f55f175fd5ae27e38ff61bc
--- /dev/null
+++ b/utils/demo.py
@@ -0,0 +1,54 @@
+import copy
+import torch
+import numpy as np
+
+import skimage
+from pytorch3d.renderer import (
+ look_at_view_transform,
+ PerspectiveCameras,
+)
+
+from .render import render
+from .ops import project_points, get_pointcloud, merge_pointclouds
+
+def downsample_point_cloud(optimization_bundle, device="cpu"):
+ point_cloud = None
+
+ for i, frame in enumerate(optimization_bundle["frames"]):
+ if frame.get("supporting", False):
+ continue
+
+ downsampled_image = copy.deepcopy(frame["image"])
+ downsampled_image.thumbnail((360, 360))
+
+ image_size = downsampled_image.size
+ w, h = image_size
+
+ # regenerate the point cloud at a lower resolution
+ R, T = look_at_view_transform(device=device, azim=frame["azim"], elev=frame["elev"], dist=frame["dist"])#, dist=1+0.15*step)
+ cameras = PerspectiveCameras(R=R, T=T, focal_length=torch.tensor([w], dtype=torch.float32), principal_point=(((h-1)/2, (w-1)/2),), image_size=(image_size,), device=device, in_ndc=False)
+
+ # downsample the depth
+ downsampled_depth = torch.nn.functional.interpolate(torch.tensor(frame["depth"]).unsqueeze(0).unsqueeze(0).float().to(device), size=(h, w), mode="nearest").squeeze()
+
+ xy_depth_world = project_points(cameras, downsampled_depth)
+
+ rgb = (torch.from_numpy(np.asarray(downsampled_image).copy()).reshape(-1, 3).float() / 255).to(device)
+
+ c2w = cameras.get_world_to_view_transform().get_matrix()[0]
+
+ if i == 0:
+ point_cloud = get_pointcloud(xy_depth_world[0], device=device, features=rgb)
+
+ else:
+ images, masks, depths = render(cameras, point_cloud, radius=1e-2)
+
+ # pytorch 3d's mask might be slightly too big (subpixels), so we erode it a little to avoid seams
+            # in theory, 1 pixel is sufficient, which is what we use here
+ masks[0] = torch.from_numpy(skimage.morphology.binary_erosion(masks[0].cpu().numpy(), footprint=skimage.morphology.disk(1))).to(device)
+
+ partial_outpainted_point_cloud = get_pointcloud(xy_depth_world[0][~masks[0].view(-1)], device=device, features=rgb[~masks[0].view(-1)])
+
+ point_cloud = merge_pointclouds([point_cloud, partial_outpainted_point_cloud])
+
+ return point_cloud
diff --git a/utils/gaussian_renderer/__init__.py b/utils/gaussian_renderer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f74e336af41e042dfb9f1c308e40caf17d0b3211
--- /dev/null
+++ b/utils/gaussian_renderer/__init__.py
@@ -0,0 +1,100 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import math
+from diff_gaussian_rasterization import GaussianRasterizationSettings, GaussianRasterizer
+from scene.gaussian_model import GaussianModel
+from utils.sh_utils import eval_sh
+
+def render(viewpoint_camera, pc : GaussianModel, pipe, bg_color : torch.Tensor, scaling_modifier = 1.0, override_color = None):
+ """
+ Render the scene.
+
+ Background tensor (bg_color) must be on GPU!
+ """
+
+ # Create zero tensor. We will use it to make pytorch return gradients of the 2D (screen-space) means
+ screenspace_points = torch.zeros_like(pc.get_xyz, dtype=pc.get_xyz.dtype, requires_grad=True, device="cuda") + 0
+ try:
+ screenspace_points.retain_grad()
+ except:
+ pass
+
+ # Set up rasterization configuration
+ tanfovx = math.tan(viewpoint_camera.FoVx * 0.5)
+ tanfovy = math.tan(viewpoint_camera.FoVy * 0.5)
+
+ raster_settings = GaussianRasterizationSettings(
+ image_height=int(viewpoint_camera.image_height),
+ image_width=int(viewpoint_camera.image_width),
+ tanfovx=tanfovx,
+ tanfovy=tanfovy,
+ bg=bg_color,
+ scale_modifier=scaling_modifier,
+ viewmatrix=viewpoint_camera.world_view_transform,
+ projmatrix=viewpoint_camera.full_proj_transform,
+ sh_degree=pc.active_sh_degree,
+ campos=viewpoint_camera.camera_center,
+ prefiltered=False,
+ debug=pipe.debug
+ )
+
+ rasterizer = GaussianRasterizer(raster_settings=raster_settings)
+
+ means3D = pc.get_xyz
+ means2D = screenspace_points
+ opacity = pc.get_opacity
+
+ # If precomputed 3d covariance is provided, use it. If not, then it will be computed from
+ # scaling / rotation by the rasterizer.
+ scales = None
+ rotations = None
+ cov3D_precomp = None
+ if pipe.compute_cov3D_python:
+ cov3D_precomp = pc.get_covariance(scaling_modifier)
+ else:
+ scales = pc.get_scaling
+ rotations = pc.get_rotation
+
+ # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors
+ # from SHs in Python, do it. If not, then SH -> RGB conversion will be done by rasterizer.
+ shs = None
+ colors_precomp = None
+ if override_color is None:
+ if pipe.convert_SHs_python:
+ shs_view = pc.get_features.transpose(1, 2).view(-1, 3, (pc.max_sh_degree+1)**2)
+ dir_pp = (pc.get_xyz - viewpoint_camera.camera_center.repeat(pc.get_features.shape[0], 1))
+ dir_pp_normalized = dir_pp/dir_pp.norm(dim=1, keepdim=True)
+ sh2rgb = eval_sh(pc.active_sh_degree, shs_view, dir_pp_normalized)
+ colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)
+ else:
+ shs = pc.get_features
+ else:
+ colors_precomp = override_color
+
+ # Rasterize visible Gaussians to image, obtain their radii (on screen).
+ rendered_image, radii = rasterizer(
+ means3D = means3D,
+ means2D = means2D,
+ shs = shs,
+ colors_precomp = colors_precomp,
+ opacities = opacity,
+ scales = scales,
+ rotations = rotations,
+ cov3D_precomp = cov3D_precomp)
+
+ # Those Gaussians that were frustum culled or had a radius of 0 were not visible.
+ # They will be excluded from value updates used in the splitting criteria.
+ return {"render": rendered_image,
+ "viewspace_points": screenspace_points,
+ "visibility_filter" : radii > 0,
+ "radii": radii}
diff --git a/utils/gaussian_renderer/network_gui.py b/utils/gaussian_renderer/network_gui.py
new file mode 100644
index 0000000000000000000000000000000000000000..df2f9dae782b24527ae5b09f91ca4009361de53f
--- /dev/null
+++ b/utils/gaussian_renderer/network_gui.py
@@ -0,0 +1,86 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import traceback
+import socket
+import json
+from scene.cameras import MiniCam
+
+host = "127.0.0.1"
+port = 6009
+
+conn = None
+addr = None
+
+listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+def init(wish_host, wish_port):
+ global host, port, listener
+ host = wish_host
+ port = wish_port
+ listener.bind((host, port))
+ listener.listen()
+ listener.settimeout(0)
+
+def try_connect():
+ global conn, addr, listener
+ try:
+ conn, addr = listener.accept()
+ print(f"\nConnected by {addr}")
+ conn.settimeout(None)
+ except Exception as inst:
+ pass
+
+def read():
+ global conn
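+    # the viewer protocol is length-prefixed: a 4-byte little-endian length followed by a UTF-8 JSON payload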
+ messageLength = conn.recv(4)
+ messageLength = int.from_bytes(messageLength, 'little')
+ message = conn.recv(messageLength)
+ return json.loads(message.decode("utf-8"))
+
+def send(message_bytes, verify):
+ global conn
+ if message_bytes != None:
+ conn.sendall(message_bytes)
+ conn.sendall(len(verify).to_bytes(4, 'little'))
+ conn.sendall(bytes(verify, 'ascii'))
+
+def receive():
+ message = read()
+
+ width = message["resolution_x"]
+ height = message["resolution_y"]
+
+ if width != 0 and height != 0:
+ try:
+ do_training = bool(message["train"])
+ fovy = message["fov_y"]
+ fovx = message["fov_x"]
+ znear = message["z_near"]
+ zfar = message["z_far"]
+ do_shs_python = bool(message["shs_python"])
+ do_rot_scale_python = bool(message["rot_scale_python"])
+ keep_alive = bool(message["keep_alive"])
+ scaling_modifier = message["scaling_modifier"]
+ world_view_transform = torch.reshape(torch.tensor(message["view_matrix"]), (4, 4)).cuda()
+ world_view_transform[:,1] = -world_view_transform[:,1]
+ world_view_transform[:,2] = -world_view_transform[:,2]
+ full_proj_transform = torch.reshape(torch.tensor(message["view_projection_matrix"]), (4, 4)).cuda()
+ full_proj_transform[:,1] = -full_proj_transform[:,1]
+ custom_cam = MiniCam(width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform)
+ except Exception as e:
+ print("")
+ traceback.print_exc()
+ raise e
+ return custom_cam, do_training, do_shs_python, do_rot_scale_python, keep_alive, scaling_modifier
+ else:
+ return None, None, None, None, None, None
\ No newline at end of file
diff --git a/utils/gs.py b/utils/gs.py
new file mode 100644
index 0000000000000000000000000000000000000000..7d0aac892fe45ad754586be50f00da3d5fc14367
--- /dev/null
+++ b/utils/gs.py
@@ -0,0 +1,196 @@
+import random
+import torch
+import numpy as np
+from .scene import GaussianModel
+from .scene.dataset_readers import SceneInfo, getNerfppNorm
+from .scene.cameras import Camera
+from .ops import focal2fov, fov2focal
+from .scene.gaussian_model import BasicPointCloud
+from easydict import EasyDict as edict
+from PIL import Image
+
+from tqdm.auto import tqdm
+
+def get_blank_gs_bundle(h, w):
+ return {
+ "camera_angle_x": focal2fov(torch.tensor([w], dtype=torch.float32), w),
+ "W": w,
+ "H": h,
+ "pcd_points": None,
+ "pcd_colors": None,
+ 'frames': [],
+ }
+
+def read_cameras_from_optimization_bundle(optimization_bundle, white_background: bool = False):
+ cameras = []
+
+ fovx = optimization_bundle["camera_angle_x"]
+ frames = optimization_bundle["frames"]
+
+ # we flip the x and y axis to move from PyTorch3D's coordinate system to COLMAP's
+ coordinate_system_transform = np.array([-1, -1, 1])
+
+ for idx, frame in enumerate(frames):
+ c2w = np.array(frame["transform_matrix"])
+ c2w[:3, :3] = c2w[:3, :3] * coordinate_system_transform
+
+ # get the world-to-camera transform and set R, T
+ w2c = np.linalg.inv(c2w)
+ R = np.transpose(w2c[:3, :3]) # R is stored transposed due to 'glm' in CUDA code
+ T = c2w[-1, :3] * coordinate_system_transform
+
+ image = frame["image"]
+
+ im_data = np.array(image.convert("RGBA"))
+
+ bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
+
+ norm_data = im_data / 255.0
+ arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
+ image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
+
+ fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
+ FovY = fovy
+ FovX = fovx
+
+ image = torch.Tensor(arr).permute(2,0,1)
+
+ cameras.append(Camera(colmap_id=idx, R=R, T=T, FoVx=FovX, FoVy=FovY, image=image, mask=frame.get("mask", None),
+ gt_alpha_mask=None, image_name='', uid=idx, data_device='cuda'))
+
+ return cameras
+
+class Scene:
+ gaussians: GaussianModel
+
+ def __init__(self, traindata, gaussians: GaussianModel, gs_options, shuffle: bool = True):
+ self.traindata = traindata
+ self.gaussians = gaussians
+
+ train_cameras = read_cameras_from_optimization_bundle(traindata, gs_options.white_background)
+
+ nerf_normalization = getNerfppNorm(train_cameras)
+
+ pcd = BasicPointCloud(points=traindata['pcd_points'], colors=traindata['pcd_colors'], normals=None)
+
+ scene_info = SceneInfo(point_cloud=pcd,
+ train_cameras=train_cameras,
+ test_cameras=[],
+ nerf_normalization=nerf_normalization,
+ ply_path='')
+
+ if shuffle:
+ random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
+
+ self.cameras_extent = scene_info.nerf_normalization["radius"]
+
+ self.train_cameras = scene_info.train_cameras
+
+ bg_color = np.array([1,1,1]) if gs_options.white_background else np.array([0, 0, 0])
+ self.background = torch.tensor(bg_color, dtype=torch.float32, device='cuda')
+
+ self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
+ self.gaussians.training_setup(gs_options)
+
+ def getTrainCameras(self):
+ return self.train_cameras
+
+ def getPresetCameras(self, preset):
+ assert preset in self.preset_cameras
+ return self.preset_cameras[preset]
+
+def run_gaussian_splatting(scene, gs_optimization_bundle):
+ torch.cuda.empty_cache()
+
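+    # the demo skips the splatting optimization (gs_options.iterations is 0 and the call in app.py
+    # is commented out); we return the scene unchanged and keep the loop below for reference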
+ return scene
+
+ from random import randint
+ from .gaussian_renderer import render as gs_render
+ from .scene.utils.loss_utils import l1_loss, ssim
+
+ pbar = tqdm(range(1, gs_options.iterations + 1))
+ for iteration in pbar:
+ scene.gaussians.update_learning_rate(iteration)
+
+        # Every 1000 iterations we increase the SH levels up to a maximum degree
+ if iteration % 1000 == 0:
+ scene.gaussians.oneupSHdegree()
+
+ # Pick a random Camera
+ random_idx = randint(0, len(gs_optimization_bundle["frames"])-1)
+ viewpoint_cam = scene.getTrainCameras()[random_idx]
+
+ # Render
+ render_pkg = gs_render(viewpoint_cam, scene.gaussians, gs_options, scene.background)
+ image, viewspace_point_tensor, visibility_filter, radii = (
+ render_pkg['render'], render_pkg['viewspace_points'], render_pkg['visibility_filter'], render_pkg['radii'])
+
+ # Loss
+ gt_image = viewpoint_cam.original_image.cuda()
+ Ll1 = l1_loss(image, gt_image, reduce=False)
+ loss = (1.0 - gs_options.lambda_dssim) * Ll1
+
+ if viewpoint_cam.mask is not None:
+ mask = torch.from_numpy(viewpoint_cam.mask).to(loss.device)
+ else:
+ mask = 1
+
+ loss = (loss * mask).mean()
+ loss = loss + gs_options.lambda_dssim * (1.0 - ssim(image, gt_image))
+ loss.backward()
+
+ pbar.set_description(f"Loss: {loss.item():.4f}")
+
+ with torch.no_grad():
+ # Densification
+ if iteration < gs_options.densify_until_iter:
+ # Keep track of max radii in image-space for pruning
+ scene.gaussians.max_radii2D[visibility_filter] = torch.max(
+ scene.gaussians.max_radii2D[visibility_filter], radii[visibility_filter])
+ scene.gaussians.add_densification_stats(viewspace_point_tensor, visibility_filter)
+
+ if iteration > gs_options.densify_from_iter and iteration % gs_options.densification_interval == 0:
+ size_threshold = 20 if iteration > gs_options.opacity_reset_interval else None
+ scene.gaussians.densify_and_prune(
+ gs_options.densify_grad_threshold, 0.005, scene.cameras_extent, size_threshold)
+
+ if (iteration % gs_options.opacity_reset_interval == 0
+ or (gs_options.white_background and iteration == gs_options.densify_from_iter)
+ ):
+ scene.gaussians.reset_opacity()
+
+ # Optimizer step
+ if iteration < gs_options.iterations:
+ scene.gaussians.optimizer.step()
+ scene.gaussians.optimizer.zero_grad(set_to_none = True)
+
+ return scene
+
+gs_options = edict({
+ "sh_degree": 3,
+ "images": "images",
+ "resolution": -1,
+ "white_background": False,
+ "data_device": "cuda",
+ "eval": False,
+ "use_depth": False,
+ "iterations": 0,#250,
+ "position_lr_init": 0.00016,
+ "position_lr_final": 0.0000016,
+ "position_lr_delay_mult": 0.01,
+ "position_lr_max_steps": 2990,
+ "feature_lr": 0.0,#0.0025,
+ "opacity_lr": 0.0,#0.05,
+ "scaling_lr": 0.0,#0.005,
+ "rotation_lr": 0.0,#0.001,
+ "percent_dense": 0.01,
+ "lambda_dssim": 0.2,
+ "densification_interval": 100,
+ "opacity_reset_interval": 3000,
+ "densify_from_iter": 10_000,
+ "densify_until_iter": 15_000,
+ "densify_grad_threshold": 0.0002,
+ "convert_SHs_python": False,
+ "compute_cov3D_python": False,
+ "debug": False,
+})
diff --git a/utils/models.py b/utils/models.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b5b249ff288e66b46de134e39317ddd8941c8f4
--- /dev/null
+++ b/utils/models.py
@@ -0,0 +1,119 @@
+import glob
+import os
+
+import torch
+import torch.nn.functional as F
+import numpy as np
+
+from zoedepth.utils.misc import colorize
+from zoedepth.utils.config import get_config
+from zoedepth.models.builder import build_model
+from zoedepth.models.model_io import load_wts
+
+from diffusers import AsymmetricAutoencoderKL, StableDiffusionInpaintPipeline
+
+def load_ckpt(config, model, checkpoint_dir: str = "./checkpoints", ckpt_type: str = "best"):
+ if hasattr(config, "checkpoint"):
+ checkpoint = config.checkpoint
+ elif hasattr(config, "ckpt_pattern"):
+ pattern = config.ckpt_pattern
+ matches = glob.glob(os.path.join(
+ checkpoint_dir, f"*{pattern}*{ckpt_type}*"))
+ if not (len(matches) > 0):
+ raise ValueError(f"No matches found for the pattern {pattern}")
+
+ checkpoint = matches[0]
+
+ else:
+ return model
+ model = load_wts(model, checkpoint)
+ print("Loaded weights from {0}".format(checkpoint))
+ return model
+
+def get_zoe_dc_model(vanilla: bool = False, ckpt_path: str = None, **kwargs):
+ def ZoeD_N(midas_model_type="DPT_BEiT_L_384", vanilla=False, **kwargs):
+ if midas_model_type != "DPT_BEiT_L_384":
+ raise ValueError(f"Only DPT_BEiT_L_384 MiDaS model is supported for pretrained Zoe_N model, got: {midas_model_type}")
+
+ zoedepth_config = get_config("zoedepth", "train", **kwargs)
+ model = build_model(zoedepth_config)
+
+ if vanilla:
+ model.__setattr__("vanilla", True)
+ return model
+ else:
+ model.__setattr__("vanilla", False)
+
+ if zoedepth_config.add_depth_channel and not vanilla:
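+            # widen the first patch-embedding convolution by two input channels: one for the
+            # (scaled) sparse depth map and one for its validity mask, matching the input
+            # assembled in infer_with_zoe_dc below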
+ model.core.core.pretrained.model.patch_embed.proj = torch.nn.Conv2d(
+ model.core.core.pretrained.model.patch_embed.proj.in_channels+2,
+ model.core.core.pretrained.model.patch_embed.proj.out_channels,
+ kernel_size=model.core.core.pretrained.model.patch_embed.proj.kernel_size,
+ stride=model.core.core.pretrained.model.patch_embed.proj.stride,
+ padding=model.core.core.pretrained.model.patch_embed.proj.padding,
+ bias=True)
+
+ if ckpt_path is not None:
+ assert os.path.exists(ckpt_path)
+ zoedepth_config.__setattr__("checkpoint", ckpt_path)
+ else:
+ assert vanilla, "ckpt_path must be provided for non-vanilla model"
+
+ model = load_ckpt(zoedepth_config, model)
+
+ return model
+
+ return ZoeD_N(vanilla=vanilla, ckpt_path=ckpt_path, **kwargs)
+
+def infer_with_pad(zoe, x, pad_input: bool = True, fh: float = 3, fw: float = 3, upsampling_mode: str = "bicubic", padding_mode: str = "reflect", **kwargs):
+ assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
+
+ if pad_input:
+        assert fh > 0 or fw > 0, "at least one of fh and fw must be greater than 0"
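+        # pad by a margin proportional to sqrt(H/2) and sqrt(W/2) so the network sees some
+        # context beyond the image borders (reflection padding avoids hard edges)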
+ pad_h = int(np.sqrt(x.shape[2]/2) * fh)
+ pad_w = int(np.sqrt(x.shape[3]/2) * fw)
+ padding = [pad_w, pad_w]
+ if pad_h > 0:
+ padding += [pad_h, pad_h]
+
+ x_rgb = x[:, :3]
+ x_remaining = x[:, 3:]
+ x_rgb = F.pad(x_rgb, padding, mode=padding_mode, **kwargs)
+ x_remaining = F.pad(x_remaining, padding, mode="constant", value=0, **kwargs)
+ x = torch.cat([x_rgb, x_remaining], dim=1)
+ out = zoe(x)["metric_depth"]
+ if out.shape[-2:] != x.shape[-2:]:
+ out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
+ if pad_input:
+        # crop back to the original size, handling the cases where pad_h or pad_w is 0
+ if pad_h > 0:
+ out = out[:, :, pad_h:-pad_h,:]
+ if pad_w > 0:
+ out = out[:, :, :, pad_w:-pad_w]
+ return out
+
+@torch.no_grad()
+def infer_with_zoe_dc(zoe_dc, image, sparse_depth, scaling: float = 1):
+ sparse_depth_mask = (sparse_depth[None, None, ...] > 0).float()
+ # the metric depth range defined during training is [1e-3, 10]
+ x = torch.cat([image[None, ...], sparse_depth[None, None, ...] / (float(scaling) * 10.0), sparse_depth_mask], dim=1).to(zoe_dc.device)
+
+ out = infer_with_pad(zoe_dc, x)
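+    # average with a prediction on the horizontally flipped input (simple test-time augmentation)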
+ out_flip = infer_with_pad(zoe_dc, torch.flip(x, dims=[3]))
+ out = (out + torch.flip(out_flip, dims=[3])) / 2
+
+ pred_depth = float(scaling) * out
+
+ return torch.nn.functional.interpolate(pred_depth, image.shape[-2:], mode='bilinear', align_corners=True)[0, 0]
+
+def get_sd_pipeline():
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
+ "stabilityai/stable-diffusion-2-inpainting",
+ torch_dtype=torch.float16,
+ )
+ pipe.vae = AsymmetricAutoencoderKL.from_pretrained(
+ "cross-attention/asymmetric-autoencoder-kl-x-2",
+ torch_dtype=torch.float16
+ )
+
+ return pipe
diff --git a/utils/ops.py b/utils/ops.py
new file mode 100644
index 0000000000000000000000000000000000000000..50a7d3340857cf1367bbe9c15792f547667682bd
--- /dev/null
+++ b/utils/ops.py
@@ -0,0 +1,95 @@
+import numpy as np
+import torch
+import skimage
+from scipy import ndimage
+from PIL import Image
+from .models import infer_with_zoe_dc
+from pytorch3d.structures import Pointclouds
+
+import math
+
+def nearest_neighbor_fill(img, mask, erosion=0):
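+    # fills pixels outside the (optionally eroded) mask by sweeping outwards from the mask
+    # boundary and copying the largest valid value in each pixel's 3x3 neighborhood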
+ img_ = np.copy(img.cpu().numpy())
+
+ if erosion > 0:
+ eroded_mask = skimage.morphology.binary_erosion(mask.cpu().numpy(), footprint=skimage.morphology.disk(erosion))
+ else:
+ eroded_mask = mask.cpu().numpy()
+
+ img_[eroded_mask <= 0] = np.nan
+
+ distance_to_boundary = ndimage.distance_transform_bf((~eroded_mask>0), metric="cityblock")
+
+ for current_dist in np.unique(distance_to_boundary)[1:]:
+ ii, jj = np.where(distance_to_boundary == current_dist)
+
+ ii_ = np.array([ii - 1, ii, ii + 1, ii - 1, ii, ii + 1, ii - 1, ii, ii + 1]).reshape(9, -1)
+ jj_ = np.array([jj - 1, jj - 1, jj - 1, jj, jj, jj, jj + 1, jj + 1, jj + 1]).reshape(9, -1)
+
+ ii_ = ii_.clip(0, img_.shape[0] - 1)
+ jj_ = jj_.clip(0, img_.shape[1] - 1)
+
+ img_[ii, jj] = np.nanmax(img_[ii_, jj_], axis=0)
+
+ return torch.from_numpy(img_).to(img.device)
+
+def snap_high_gradients_to_nn(depth, threshold=20):
+ grad_depth = np.copy(depth.cpu().numpy())
+ grad_depth = grad_depth - grad_depth.min()
+ grad_depth = grad_depth / grad_depth.max()
+
+ grad = skimage.filters.rank.gradient(grad_depth, skimage.morphology.disk(1))
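+    # pixels whose local depth gradient exceeds the threshold sit on depth discontinuities and
+    # would unproject into stretched "noodle" artifacts; re-fill them from nearby reliable depths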
+ return nearest_neighbor_fill(depth, torch.from_numpy(grad < threshold), erosion=3)
+
+def project_points(cameras, depth, use_pixel_centers=True):
+ if len(cameras) > 1:
+ import warnings
+ warnings.warn("project_points assumes only a single camera is used")
+
+ depth_t = torch.from_numpy(depth) if isinstance(depth, np.ndarray) else depth
+ depth_t = depth_t.to(cameras.device)
+
+ pixel_center = 0.5 if use_pixel_centers else 0
+
+ fx, fy = cameras.focal_length[0, 1], cameras.focal_length[0, 0]
+ cx, cy = cameras.principal_point[0, 1], cameras.principal_point[0, 0]
+
+ i, j = torch.meshgrid(
+ torch.arange(cameras.image_size[0][0], dtype=torch.float32, device=cameras.device) + pixel_center,
+ torch.arange(cameras.image_size[0][1], dtype=torch.float32, device=cameras.device) + pixel_center,
+ indexing="xy",
+ )
+
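+    # back-project each pixel by scaling the pinhole ray ((i - cx) / fx, (j - cy) / fy, 1) with its
+    # depth; the negated x/y terms match PyTorch3D's camera convention (+X left, +Y up)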
+ directions = torch.stack(
+ [-(i - cx) * depth_t / fx, -(j - cy) * depth_t / fy, depth_t], -1
+ )
+
+ xy_depth_world = cameras.get_world_to_view_transform().inverse().transform_points(directions.view(-1, 3)).unsqueeze(0)
+
+ return xy_depth_world
+
+def get_pointcloud(xy_depth_world, device="cpu", features=None):
+ point_cloud = Pointclouds(points=[xy_depth_world.to(device)], features=[features] if features is not None else None)
+ return point_cloud
+
+def merge_pointclouds(point_clouds):
+ points = torch.cat([pc.points_padded() for pc in point_clouds], dim=1)
+ features = torch.cat([pc.features_padded() for pc in point_clouds], dim=1)
+ return Pointclouds(points=[points[0]], features=[features[0]])
+
+def outpaint_with_depth_estimation(image, mask, previous_depth, h, w, pipe, zoe_dc, prompt, cameras, dilation_size: int = 2, depth_scaling: float = 1, generator = None):
+ img_input = Image.fromarray((255*image[..., :3].cpu().numpy()).astype(np.uint8))
+
+ # we slightly dilate the mask as aliasing might cause us to receive a too small mask from pytorch3d
+ img_mask = Image.fromarray((255*skimage.morphology.isotropic_dilation(((~mask).cpu().numpy()), radius=dilation_size)).astype(np.uint8))#footprint=skimage.morphology.disk(dilation_size)))
+
+ out_image = pipe(prompt=prompt, image=img_input, mask_image=img_mask, height=h, width=w, generator=generator).images[0]
+ out_depth = infer_with_zoe_dc(zoe_dc, torch.from_numpy(np.asarray(out_image)/255.).permute(2,0,1).float().to(zoe_dc.device), (previous_depth * mask).to(zoe_dc.device), scaling=depth_scaling).cpu().numpy()
+
+ return out_image, out_depth
+
+def fov2focal(fov, pixels):
+ return pixels / (2 * math.tan(fov / 2))
+
+def focal2fov(focal, pixels):
+ return 2*math.atan(pixels/(2*focal))
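+
+# For reference: app.py renders with focal_length equal to the image width, so a 720 px wide image
+# corresponds to focal2fov(720, 720) = 2 * atan(0.5) ≈ 0.93 rad (≈ 53°); fov2focal inverts this exactly.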
diff --git a/utils/render.py b/utils/render.py
new file mode 100644
index 0000000000000000000000000000000000000000..fdc6d9cc89f837543981b85c5df0d3637870db00
--- /dev/null
+++ b/utils/render.py
@@ -0,0 +1,112 @@
+import torch
+import torch.nn.functional as F
+import skimage
+from pytorch3d.structures import Pointclouds
+from pytorch3d.renderer import (
+ look_at_view_transform,
+ FoVOrthographicCameras,
+ FoVPerspectiveCameras,
+ PerspectiveCameras,
+ PointsRasterizationSettings,
+ PointsRenderer,
+ PulsarPointsRenderer,
+ PointsRasterizer,
+ AlphaCompositor,
+ NormWeightedCompositor
+)
+from .ops import nearest_neighbor_fill
+
+from typing import cast, Optional
+
+class PointsRendererWithMasks(PointsRenderer):
+ def forward(self, point_clouds, **kwargs) -> torch.Tensor:
+ fragments = self.rasterizer(point_clouds, **kwargs)
+
+        # PointsRenderer would weight each point by its distance to the pixel center,
+        # e.g. 1 - dists2 / (r * r); here we use uniform weights and only keep the
+        # validity of each rasterized point.
+        r = self.rasterizer.raster_settings.radius
+
+        dists2 = fragments.dists
+        weights = torch.ones_like(dists2)  # distance-based alternative: 1 - dists2 / (r * r)
+ ok = cast(torch.BoolTensor, (fragments.idx >= 0)).float()
+
+ weights = weights * ok
+
+ fragments_prm = fragments.idx.long().permute(0, 3, 1, 2)
+ weights_prm = weights.permute(0, 3, 1, 2)
+ images = self.compositor(
+ fragments_prm,
+ weights_prm,
+ point_clouds.features_packed().permute(1, 0),
+ **kwargs,
+ )
+
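+        # composite per-pixel depth front to back: the k-th closest point contributes its zbuf value
+        # weighted by w_k * prod_{j<k}(1 - w_j); with uniform weights this reduces to the nearest valid depth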
+ cumprod = torch.cumprod(1 - weights, dim=-1)
+ cumprod = torch.cat((torch.ones_like(cumprod[..., :1]), cumprod[..., :-1]), dim=-1)
+ depths = (weights * cumprod * fragments.zbuf).sum(dim=-1)
+
+ # permute so image comes at the end
+ images = images.permute(0, 2, 3, 1)
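+        # a pixel is covered if its closest rasterized point index is valid (>= 0)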
+ masks = fragments.idx.long()[..., 0] >= 0
+
+ return images, masks, depths
+
+def render_with_settings(cameras, point_cloud, raster_settings, antialiasing: int = 1):
+ if antialiasing > 1:
+ raster_settings.image_size = (raster_settings.image_size[0] * antialiasing, raster_settings.image_size[1] * antialiasing)
+
+ rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
+
+ renderer = PointsRendererWithMasks(
+ rasterizer=rasterizer,
+ compositor=AlphaCompositor()
+ )
+
+    if antialiasing > 1:
+        images, masks, depths = renderer(point_cloud)
+
+        images = images.permute(0, 3, 1, 2) # NHWC -> NCHW
+        images = F.avg_pool2d(images, kernel_size=antialiasing, stride=antialiasing)
+        images = images.permute(0, 2, 3, 1) # NCHW -> NHWC
+
+        # bring masks and depths down to the same resolution as the anti-aliased images
+        masks = F.max_pool2d(masks.float().unsqueeze(1), kernel_size=antialiasing, stride=antialiasing).squeeze(1) > 0.5
+        depths = F.avg_pool2d(depths.unsqueeze(1), kernel_size=antialiasing, stride=antialiasing).squeeze(1)
+
+        return images, masks, depths
+
+    else:
+        return renderer(point_cloud)
+
+
+def render(cameras, point_cloud, fill_point_cloud_holes: bool = False, radius: Optional[float] = None, antialiasing: int = 1):
+ if fill_point_cloud_holes:
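+        # first render a coarse pass with a large point radius to get a conservative coverage mask;
+        # pixels it covers that the fine pass below misses are treated as holes and filled in
+        # from their nearest neighbors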
+ coarse_raster_settings = PointsRasterizationSettings(
+ image_size=(int(cameras.image_size[0, 1]), int(cameras.image_size[0, 0])),
+ radius = 1e-2,
+ points_per_pixel = 1
+ )
+
+ _, coarse_mask, _ = render_with_settings(cameras, point_cloud, coarse_raster_settings)
+
+ eroded_coarse_mask = torch.from_numpy(skimage.morphology.binary_erosion(coarse_mask[0].cpu().numpy(), footprint=skimage.morphology.disk(2)))
+
+ raster_settings = PointsRasterizationSettings(
+ image_size=(int(cameras.image_size[0, 1]), int(cameras.image_size[0, 0])),
+ radius = (1 / float(max(cameras.image_size[0, 1], cameras.image_size[0, 0])) * 2.0) if radius is None else radius,
+ points_per_pixel = 16
+ )
+
+ # Render the scene
+ images, masks, depths = render_with_settings(cameras, point_cloud, raster_settings)
+
+ holes_in_rendering = masks[0].cpu() ^ eroded_coarse_mask
+
+ images[0] = nearest_neighbor_fill(images[0], ~holes_in_rendering, 0)
+ depths[0] = nearest_neighbor_fill(depths[0], ~holes_in_rendering, 0)
+
+ return images, eroded_coarse_mask.unsqueeze(0).to(masks.device), depths
+
+ else:
+ raster_settings = PointsRasterizationSettings(
+ image_size=(int(cameras.image_size[0, 1]), int(cameras.image_size[0, 0])),
+ radius = (1 / float(max(cameras.image_size[0, 1], cameras.image_size[0, 0])) * 2.0) if radius is None else radius,
+ points_per_pixel = 16
+ )
+
+ # Render the scene
+ return render_with_settings(cameras, point_cloud, raster_settings)
diff --git a/utils/scene/__init__.py b/utils/scene/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..25d1f34e4ba0ee5268abbdea1c0349e59a159f80
--- /dev/null
+++ b/utils/scene/__init__.py
@@ -0,0 +1,92 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import os
+import random
+import json
+from .utils.system_utils import searchForMaxIteration
+from .dataset_readers import sceneLoadTypeCallbacks
+from .gaussian_model import GaussianModel
+from .utils.camera_utils import cameraList_from_camInfos, camera_to_JSON
+
+class Scene:
+
+ gaussians : GaussianModel
+
+ def __init__(self, args, gaussians : GaussianModel, load_iteration=None, shuffle=True, resolution_scales=[1.0]):
+        """
+ :param path: Path to colmap scene main folder.
+ """
+ self.model_path = args.model_path
+ self.loaded_iter = None
+ self.gaussians = gaussians
+
+ if load_iteration:
+ if load_iteration == -1:
+ self.loaded_iter = searchForMaxIteration(os.path.join(self.model_path, "point_cloud"))
+ else:
+ self.loaded_iter = load_iteration
+ print("Loading trained model at iteration {}".format(self.loaded_iter))
+
+ self.train_cameras = {}
+ self.test_cameras = {}
+
+ if os.path.exists(os.path.join(args.source_path, "sparse")):
+ scene_info = sceneLoadTypeCallbacks["Colmap"](args.source_path, args.images, args.eval)
+ elif os.path.exists(os.path.join(args.source_path, "transforms_train.json")):
+ print("Found transforms_train.json file, assuming Blender data set!")
+ scene_info = sceneLoadTypeCallbacks["Blender"](args.source_path, args.white_background, args.eval)
+ else:
+ assert False, "Could not recognize scene type!"
+
+ if not self.loaded_iter:
+ with open(scene_info.ply_path, 'rb') as src_file, open(os.path.join(self.model_path, "input.ply") , 'wb') as dest_file:
+ dest_file.write(src_file.read())
+ json_cams = []
+ camlist = []
+ if scene_info.test_cameras:
+ camlist.extend(scene_info.test_cameras)
+ if scene_info.train_cameras:
+ camlist.extend(scene_info.train_cameras)
+ for id, cam in enumerate(camlist):
+ json_cams.append(camera_to_JSON(id, cam))
+ with open(os.path.join(self.model_path, "cameras.json"), 'w') as file:
+ json.dump(json_cams, file)
+
+ if shuffle:
+ random.shuffle(scene_info.train_cameras) # Multi-res consistent random shuffling
+ random.shuffle(scene_info.test_cameras) # Multi-res consistent random shuffling
+
+ self.cameras_extent = scene_info.nerf_normalization["radius"]
+
+ for resolution_scale in resolution_scales:
+ print("Loading Training Cameras")
+ self.train_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.train_cameras, resolution_scale, args)
+ print("Loading Test Cameras")
+ self.test_cameras[resolution_scale] = cameraList_from_camInfos(scene_info.test_cameras, resolution_scale, args)
+
+ if self.loaded_iter:
+ self.gaussians.load_ply(os.path.join(self.model_path,
+ "point_cloud",
+ "iteration_" + str(self.loaded_iter),
+ "point_cloud.ply"))
+ else:
+ self.gaussians.create_from_pcd(scene_info.point_cloud, self.cameras_extent)
+
+ def save(self, iteration):
+ point_cloud_path = os.path.join(self.model_path, "point_cloud/iteration_{}".format(iteration))
+ self.gaussians.save_ply(os.path.join(point_cloud_path, "point_cloud.ply"))
+
+ def getTrainCameras(self, scale=1.0):
+ return self.train_cameras[scale]
+
+ def getTestCameras(self, scale=1.0):
+ return self.test_cameras[scale]
\ No newline at end of file
diff --git a/utils/scene/cameras.py b/utils/scene/cameras.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a02baf2ccf9a80adf54102f6d637828c910b76a
--- /dev/null
+++ b/utils/scene/cameras.py
@@ -0,0 +1,76 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import math
+from torch import nn
+import numpy as np
+from .utils.graphics_utils import getWorld2View2, getProjectionMatrix
+
+class Camera(nn.Module):
+ def __init__(self, colmap_id, R, T, FoVx, FoVy, image, gt_alpha_mask,
+ image_name, uid, crop_box=None, mask=None,
+ trans=np.array([0.0, 0.0, 0.0]), scale=1.0, data_device = "cuda"
+ ):
+ super(Camera, self).__init__()
+
+ self.uid = uid
+ self.colmap_id = colmap_id
+ self.R = R
+ self.T = T
+ self.FoVx = FoVx
+ self.FoVy = FoVy
+ self.image_name = image_name
+ self.crop_box = crop_box
+ self.mask = mask
+
+ try:
+ self.data_device = torch.device(data_device)
+ except Exception as e:
+ print(e)
+ print(f"[Warning] Custom device {data_device} failed, fallback to default cuda device" )
+ self.data_device = torch.device("cuda")
+
+ self.original_image = image.clamp(0.0, 1.0).to(self.data_device)
+ self.image_width = self.original_image.shape[2]
+ self.image_height = self.original_image.shape[1]
+
+ self.gt_alpha_mask = gt_alpha_mask
+
+ #if gt_alpha_mask is not None:
+ # self.original_image *= gt_alpha_mask.to(self.data_device)
+ #else:
+ # self.original_image *= torch.ones((1, self.image_height, self.image_width), device=self.data_device)
+
+ self.zfar = 100.0
+ self.znear = 0.01
+
+ self.trans = trans
+ self.scale = scale
+
+ self.world_view_transform = torch.tensor(getWorld2View2(R, T, trans, scale)).transpose(0, 1).cuda()
+ self.projection_matrix = getProjectionMatrix(znear=self.znear, zfar=self.zfar, fovX=self.FoVx, fovY=self.FoVy, crop_box=self.crop_box, width=self.image_width, height=self.image_height).transpose(0,1).cuda()
+ self.full_proj_transform = (self.world_view_transform.unsqueeze(0).bmm(self.projection_matrix.unsqueeze(0))).squeeze(0)
+ self.camera_center = self.world_view_transform.inverse()[3, :3]
+
+class MiniCam:
+ def __init__(self, width, height, fovy, fovx, znear, zfar, world_view_transform, full_proj_transform):
+ self.image_width = width
+ self.image_height = height
+ self.FoVy = fovy
+ self.FoVx = fovx
+ self.znear = znear
+ self.zfar = zfar
+ self.world_view_transform = world_view_transform
+ self.full_proj_transform = full_proj_transform
+ view_inv = torch.inverse(self.world_view_transform)
+ self.camera_center = view_inv[3][:3]
+
diff --git a/utils/scene/colmap_loader.py b/utils/scene/colmap_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f6fba6a9c961f52c88780ecb44d7821b4cb73ee
--- /dev/null
+++ b/utils/scene/colmap_loader.py
@@ -0,0 +1,294 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import numpy as np
+import collections
+import struct
+
+CameraModel = collections.namedtuple(
+ "CameraModel", ["model_id", "model_name", "num_params"])
+Camera = collections.namedtuple(
+ "Camera", ["id", "model", "width", "height", "params"])
+BaseImage = collections.namedtuple(
+ "Image", ["id", "qvec", "tvec", "camera_id", "name", "xys", "point3D_ids"])
+Point3D = collections.namedtuple(
+ "Point3D", ["id", "xyz", "rgb", "error", "image_ids", "point2D_idxs"])
+CAMERA_MODELS = {
+ CameraModel(model_id=0, model_name="SIMPLE_PINHOLE", num_params=3),
+ CameraModel(model_id=1, model_name="PINHOLE", num_params=4),
+ CameraModel(model_id=2, model_name="SIMPLE_RADIAL", num_params=4),
+ CameraModel(model_id=3, model_name="RADIAL", num_params=5),
+ CameraModel(model_id=4, model_name="OPENCV", num_params=8),
+ CameraModel(model_id=5, model_name="OPENCV_FISHEYE", num_params=8),
+ CameraModel(model_id=6, model_name="FULL_OPENCV", num_params=12),
+ CameraModel(model_id=7, model_name="FOV", num_params=5),
+ CameraModel(model_id=8, model_name="SIMPLE_RADIAL_FISHEYE", num_params=4),
+ CameraModel(model_id=9, model_name="RADIAL_FISHEYE", num_params=5),
+ CameraModel(model_id=10, model_name="THIN_PRISM_FISHEYE", num_params=12)
+}
+CAMERA_MODEL_IDS = dict([(camera_model.model_id, camera_model)
+ for camera_model in CAMERA_MODELS])
+CAMERA_MODEL_NAMES = dict([(camera_model.model_name, camera_model)
+ for camera_model in CAMERA_MODELS])
+
+
+def qvec2rotmat(qvec):
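+    # convert a COLMAP quaternion in (w, x, y, z) order to a 3x3 rotation matrix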
+ return np.array([
+ [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,
+ 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],
+ 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],
+ [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],
+ 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,
+ 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],
+ [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],
+ 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],
+ 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])
+
+def rotmat2qvec(R):
+ Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = R.flat
+ K = np.array([
+ [Rxx - Ryy - Rzz, 0, 0, 0],
+ [Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
+ [Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
+ [Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
+ eigvals, eigvecs = np.linalg.eigh(K)
+ qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
+ if qvec[0] < 0:
+ qvec *= -1
+ return qvec
+
+class Image(BaseImage):
+ def qvec2rotmat(self):
+ return qvec2rotmat(self.qvec)
+
+def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character="<"):
+ """Read and unpack the next bytes from a binary file.
+ :param fid:
+ :param num_bytes: Sum of combination of {2, 4, 8}, e.g. 2, 6, 16, 30, etc.
+ :param format_char_sequence: List of {c, e, f, d, h, H, i, I, l, L, q, Q}.
+ :param endian_character: Any of {@, =, <, >, !}
+ :return: Tuple of read and unpacked values.
+ """
+ data = fid.read(num_bytes)
+ return struct.unpack(endian_character + format_char_sequence, data)
+
+def read_points3D_text(path):
+ """
+ see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DText(const std::string& path)
+ void Reconstruction::WritePoints3DText(const std::string& path)
+ """
+ xyzs = None
+ rgbs = None
+ errors = None
+ num_points = 0
+ with open(path, "r") as fid:
+ while True:
+ line = fid.readline()
+ if not line:
+ break
+ line = line.strip()
+ if len(line) > 0 and line[0] != "#":
+ num_points += 1
+
+
+ xyzs = np.empty((num_points, 3))
+ rgbs = np.empty((num_points, 3))
+ errors = np.empty((num_points, 1))
+ count = 0
+ with open(path, "r") as fid:
+ while True:
+ line = fid.readline()
+ if not line:
+ break
+ line = line.strip()
+ if len(line) > 0 and line[0] != "#":
+ elems = line.split()
+ xyz = np.array(tuple(map(float, elems[1:4])))
+ rgb = np.array(tuple(map(int, elems[4:7])))
+ error = np.array(float(elems[7]))
+ xyzs[count] = xyz
+ rgbs[count] = rgb
+ errors[count] = error
+ count += 1
+
+ return xyzs, rgbs, errors
+
+def read_points3D_binary(path_to_model_file):
+ """
+ see: src/base/reconstruction.cc
+ void Reconstruction::ReadPoints3DBinary(const std::string& path)
+ void Reconstruction::WritePoints3DBinary(const std::string& path)
+ """
+
+
+ with open(path_to_model_file, "rb") as fid:
+ num_points = read_next_bytes(fid, 8, "Q")[0]
+
+ xyzs = np.empty((num_points, 3))
+ rgbs = np.empty((num_points, 3))
+ errors = np.empty((num_points, 1))
+
+ for p_id in range(num_points):
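+            # Each packed record is 43 bytes: point3D_id (Q, 8 B) + xyz (3 * d, 24 B)
+            # + rgb (3 * B, 3 B) + reprojection error (d, 8 B).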
+ binary_point_line_properties = read_next_bytes(
+ fid, num_bytes=43, format_char_sequence="QdddBBBd")
+ xyz = np.array(binary_point_line_properties[1:4])
+ rgb = np.array(binary_point_line_properties[4:7])
+ error = np.array(binary_point_line_properties[7])
+ track_length = read_next_bytes(
+ fid, num_bytes=8, format_char_sequence="Q")[0]
+ track_elems = read_next_bytes(
+ fid, num_bytes=8*track_length,
+ format_char_sequence="ii"*track_length)
+ xyzs[p_id] = xyz
+ rgbs[p_id] = rgb
+ errors[p_id] = error
+ return xyzs, rgbs, errors
+
+def read_intrinsics_text(path):
+ """
+ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
+ """
+ cameras = {}
+ with open(path, "r") as fid:
+ while True:
+ line = fid.readline()
+ if not line:
+ break
+ line = line.strip()
+ if len(line) > 0 and line[0] != "#":
+ elems = line.split()
+ camera_id = int(elems[0])
+ model = elems[1]
+                assert model == "PINHOLE", "While the loader supports other types, the rest of the code assumes PINHOLE"
+ width = int(elems[2])
+ height = int(elems[3])
+ params = np.array(tuple(map(float, elems[4:])))
+ cameras[camera_id] = Camera(id=camera_id, model=model,
+ width=width, height=height,
+ params=params)
+ return cameras
+
+def read_extrinsics_binary(path_to_model_file):
+ """
+ see: src/base/reconstruction.cc
+ void Reconstruction::ReadImagesBinary(const std::string& path)
+ void Reconstruction::WriteImagesBinary(const std::string& path)
+ """
+ images = {}
+ with open(path_to_model_file, "rb") as fid:
+ num_reg_images = read_next_bytes(fid, 8, "Q")[0]
+ for _ in range(num_reg_images):
+ binary_image_properties = read_next_bytes(
+ fid, num_bytes=64, format_char_sequence="idddddddi")
+ image_id = binary_image_properties[0]
+ qvec = np.array(binary_image_properties[1:5])
+ tvec = np.array(binary_image_properties[5:8])
+ camera_id = binary_image_properties[8]
+ image_name = ""
+ current_char = read_next_bytes(fid, 1, "c")[0]
+ while current_char != b"\x00": # look for the ASCII 0 entry
+ image_name += current_char.decode("utf-8")
+ current_char = read_next_bytes(fid, 1, "c")[0]
+ num_points2D = read_next_bytes(fid, num_bytes=8,
+ format_char_sequence="Q")[0]
+ x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,
+ format_char_sequence="ddq"*num_points2D)
+ xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),
+ tuple(map(float, x_y_id_s[1::3]))])
+ point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))
+ images[image_id] = Image(
+ id=image_id, qvec=qvec, tvec=tvec,
+ camera_id=camera_id, name=image_name,
+ xys=xys, point3D_ids=point3D_ids)
+ return images
+
+
+def read_intrinsics_binary(path_to_model_file):
+ """
+ see: src/base/reconstruction.cc
+ void Reconstruction::WriteCamerasBinary(const std::string& path)
+ void Reconstruction::ReadCamerasBinary(const std::string& path)
+ """
+ cameras = {}
+ with open(path_to_model_file, "rb") as fid:
+ num_cameras = read_next_bytes(fid, 8, "Q")[0]
+ for _ in range(num_cameras):
+ camera_properties = read_next_bytes(
+ fid, num_bytes=24, format_char_sequence="iiQQ")
+ camera_id = camera_properties[0]
+ model_id = camera_properties[1]
+ model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name
+ width = camera_properties[2]
+ height = camera_properties[3]
+ num_params = CAMERA_MODEL_IDS[model_id].num_params
+ params = read_next_bytes(fid, num_bytes=8*num_params,
+ format_char_sequence="d"*num_params)
+ cameras[camera_id] = Camera(id=camera_id,
+ model=model_name,
+ width=width,
+ height=height,
+ params=np.array(params))
+ assert len(cameras) == num_cameras
+ return cameras
+
+
+def read_extrinsics_text(path):
+ """
+ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py
+ """
+ images = {}
+ with open(path, "r") as fid:
+ while True:
+ line = fid.readline()
+ if not line:
+ break
+ line = line.strip()
+ if len(line) > 0 and line[0] != "#":
+ elems = line.split()
+ image_id = int(elems[0])
+ qvec = np.array(tuple(map(float, elems[1:5])))
+ tvec = np.array(tuple(map(float, elems[5:8])))
+ camera_id = int(elems[8])
+ image_name = elems[9]
+ elems = fid.readline().split()
+ xys = np.column_stack([tuple(map(float, elems[0::3])),
+ tuple(map(float, elems[1::3]))])
+ point3D_ids = np.array(tuple(map(int, elems[2::3])))
+ images[image_id] = Image(
+ id=image_id, qvec=qvec, tvec=tvec,
+ camera_id=camera_id, name=image_name,
+ xys=xys, point3D_ids=point3D_ids)
+ return images
+
+
+def read_colmap_bin_array(path):
+ """
+ Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_dense.py
+
+ :param path: path to the colmap binary file.
+    :return: nd array with the floating point values in the file
+ """
+ with open(path, "rb") as fid:
+ width, height, channels = np.genfromtxt(fid, delimiter="&", max_rows=1,
+ usecols=(0, 1, 2), dtype=int)
+ fid.seek(0)
+ num_delimiter = 0
+ byte = fid.read(1)
+ while True:
+ if byte == b"&":
+ num_delimiter += 1
+ if num_delimiter >= 3:
+ break
+ byte = fid.read(1)
+ array = np.fromfile(fid, np.float32)
+ array = array.reshape((width, height, channels), order="F")
+ return np.transpose(array, (1, 0, 2)).squeeze()
diff --git a/utils/scene/dataset_readers.py b/utils/scene/dataset_readers.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8cf7cf938a5b21fbc5aad815e6b64084d8f532c
--- /dev/null
+++ b/utils/scene/dataset_readers.py
@@ -0,0 +1,270 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import os
+import sys
+from PIL import Image
+from typing import NamedTuple
+from .colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \
+ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text
+from .utils.graphics_utils import getWorld2View2, focal2fov, fov2focal
+import numpy as np
+import json
+from pathlib import Path
+from plyfile import PlyData, PlyElement
+from .utils.sh_utils import SH2RGB
+from .gaussian_model import BasicPointCloud
+
+class CameraInfo(NamedTuple):
+ uid: int
+ R: np.array
+ T: np.array
+ FovY: np.array
+ FovX: np.array
+ image: np.array
+ image_path: str
+ image_name: str
+ mask: np.array
+ mask_path: str
+ width: int
+ height: int
+
+class SceneInfo(NamedTuple):
+ point_cloud: BasicPointCloud
+ train_cameras: list
+ test_cameras: list
+ nerf_normalization: dict
+ ply_path: str
+
+def getNerfppNorm(cam_info):
+ def get_center_and_diag(cam_centers):
+ cam_centers = np.hstack(cam_centers)
+ avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True)
+ center = avg_cam_center
+ dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True)
+ diagonal = np.max(dist)
+ return center.flatten(), diagonal
+
+ cam_centers = []
+
+ for cam in cam_info:
+ W2C = getWorld2View2(cam.R, cam.T)
+ C2W = np.linalg.inv(W2C)
+ cam_centers.append(C2W[:3, 3:4])
+
+ center, diagonal = get_center_and_diag(cam_centers)
+ radius = diagonal * 1.1
+
+ translate = -center
+
+ return {"translate": translate, "radius": radius}
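+
+# Sketch of downstream use (illustrative): "radius" is typically taken as the scene
+# extent (e.g. for scaling learning rates and densification thresholds), while
+# "translate" is the shift that recentres the average camera position at the origin.
+#   norm = getNerfppNorm(train_cam_infos)
+#   cameras_extent = norm["radius"]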
+
+def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder, masks_folder):
+ cam_infos = []
+ for idx, key in enumerate(cam_extrinsics):
+ sys.stdout.write('\r')
+        # report progress on a single console line, overwriting it in place
+ sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics)))
+ sys.stdout.flush()
+
+ extr = cam_extrinsics[key]
+ intr = cam_intrinsics[extr.camera_id]
+ height = intr.height
+ width = intr.width
+
+ uid = intr.id
+ R = np.transpose(qvec2rotmat(extr.qvec))
+ T = np.array(extr.tvec)
+
+ if intr.model=="SIMPLE_PINHOLE":
+ focal_length_x = intr.params[0]
+ FovY = focal2fov(focal_length_x, height)
+ FovX = focal2fov(focal_length_x, width)
+ elif intr.model=="PINHOLE":
+ focal_length_x = intr.params[0]
+ focal_length_y = intr.params[1]
+ FovY = focal2fov(focal_length_y, height)
+ FovX = focal2fov(focal_length_x, width)
+ else:
+ assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!"
+
+ image_path = os.path.join(images_folder, os.path.basename(extr.name))
+ image_name = os.path.basename(image_path).split(".")[0]
+ image = Image.open(image_path)
+
+ mask_path = os.path.join(masks_folder, os.path.basename(extr.name).replace(".jpg", ".png"))
+        try:
+            mask = Image.open(mask_path)
+        except OSError:
+            # masks are optional; fall back to training without one
+            mask = None
+
+ cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, mask=mask, mask_path=mask_path,
+ image_path=image_path, image_name=image_name, width=width, height=height)
+ cam_infos.append(cam_info)
+ sys.stdout.write('\n')
+ return cam_infos
+
+def fetchPly(path):
+ plydata = PlyData.read(path)
+ vertices = plydata['vertex']
+ positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T
+ colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0
+ normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T
+ return BasicPointCloud(points=positions, colors=colors, normals=normals)
+
+def storePly(path, xyz, rgb):
+ # Define the dtype for the structured array
+ dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
+ ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'),
+ ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
+
+ normals = np.zeros_like(xyz)
+
+ elements = np.empty(xyz.shape[0], dtype=dtype)
+ attributes = np.concatenate((xyz, normals, rgb), axis=1)
+ elements[:] = list(map(tuple, attributes))
+
+ # Create the PlyData object and write to file
+ vertex_element = PlyElement.describe(elements, 'vertex')
+ ply_data = PlyData([vertex_element])
+ ply_data.write(path)
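+
+# Round-trip sketch (illustrative only; the path is hypothetical): storePly writes
+# xyz/rgb with zero normals, and fetchPly reads the same vertex layout back.
+#   >>> xyz = np.random.rand(10, 3); rgb = np.random.randint(0, 256, (10, 3))
+#   >>> storePly("/tmp/example.ply", xyz, rgb)
+#   >>> pcd = fetchPly("/tmp/example.ply")
+#   >>> pcd.points.shape, float(pcd.colors.max()) <= 1.0
+#   ((10, 3), True)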
+
+def readColmapSceneInfo(path, images, eval, llffhold=8):
+ try:
+ cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin")
+ cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin")
+ cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file)
+ cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file)
+ except:
+ cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt")
+ cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt")
+ cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file)
+ cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file)
+
+    reading_dir = "images" if images is None else images
+    # FIXME in post
+    mask_reading_dir = "masks"  # if images == None else images
+ cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir), masks_folder=os.path.join(path, mask_reading_dir))
+ cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name)
+
+ if eval:
+ train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0]
+ test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0]
+ else:
+ train_cam_infos = cam_infos
+ test_cam_infos = []
+
+ nerf_normalization = getNerfppNorm(train_cam_infos)
+
+ ply_path = os.path.join(path, "sparse/0/points3D.ply")
+ bin_path = os.path.join(path, "sparse/0/points3D.bin")
+ txt_path = os.path.join(path, "sparse/0/points3D.txt")
+ if not os.path.exists(ply_path):
+        print("Converting points3D.bin to .ply; this will happen only the first time you open the scene.")
+ try:
+ xyz, rgb, _ = read_points3D_binary(bin_path)
+ except:
+ xyz, rgb, _ = read_points3D_text(txt_path)
+ storePly(ply_path, xyz, rgb)
+ try:
+ pcd = fetchPly(ply_path)
+ except:
+ pcd = None
+
+ scene_info = SceneInfo(point_cloud=pcd,
+ train_cameras=train_cam_infos,
+ test_cameras=test_cam_infos,
+ nerf_normalization=nerf_normalization,
+ ply_path=ply_path)
+ return scene_info
+
+def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"):
+ cam_infos = []
+
+ with open(os.path.join(path, transformsfile)) as json_file:
+ contents = json.load(json_file)
+ fovx = contents["camera_angle_x"]
+
+ frames = contents["frames"]
+ for idx, frame in enumerate(frames):
+ cam_name = os.path.join(path, frame["file_path"] + extension)
+
+ # NeRF 'transform_matrix' is a camera-to-world transform
+ c2w = np.array(frame["transform_matrix"])
+ # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward)
+ c2w[:3, 1:3] *= -1
+
+ # get the world-to-camera transform and set R, T
+ w2c = np.linalg.inv(c2w)
+ R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code
+ T = w2c[:3, 3]
+
+ image_path = os.path.join(path, cam_name)
+ image_name = Path(cam_name).stem
+ image = Image.open(image_path)
+
+ im_data = np.array(image.convert("RGBA"))
+
+ bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0])
+
+ norm_data = im_data / 255.0
+ arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4])
+ image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB")
+
+ fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1])
+ FovY = fovy
+ FovX = fovx
+
+            # The synthetic (Blender) scenes carry no masks, so fill the mask fields
+            # explicitly to satisfy the CameraInfo NamedTuple signature.
+            cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image,
+                                        image_path=image_path, image_name=image_name, mask=None, mask_path=None,
+                                        width=image.size[0], height=image.size[1]))
+
+ return cam_infos
+
+def readNerfSyntheticInfo(path, white_background, eval, extension=".png"):
+ print("Reading Training Transforms")
+ train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension)
+ print("Reading Test Transforms")
+ test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension)
+
+ if not eval:
+ train_cam_infos.extend(test_cam_infos)
+ test_cam_infos = []
+
+ nerf_normalization = getNerfppNorm(train_cam_infos)
+
+ ply_path = os.path.join(path, "points3d.ply")
+ if not os.path.exists(ply_path):
+ # Since this data set has no colmap data, we start with random points
+ num_pts = 100_000
+ print(f"Generating random point cloud ({num_pts})...")
+
+ # We create random points inside the bounds of the synthetic Blender scenes
+ xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3
+ shs = np.random.random((num_pts, 3)) / 255.0
+ pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3)))
+
+ storePly(ply_path, xyz, SH2RGB(shs) * 255)
+ try:
+ pcd = fetchPly(ply_path)
+ except:
+ pcd = None
+
+ scene_info = SceneInfo(point_cloud=pcd,
+ train_cameras=train_cam_infos,
+ test_cameras=test_cam_infos,
+ nerf_normalization=nerf_normalization,
+ ply_path=ply_path)
+ return scene_info
+
+sceneLoadTypeCallbacks = {
+ "Colmap": readColmapSceneInfo,
+ "Blender" : readNerfSyntheticInfo
+}
\ No newline at end of file
diff --git a/utils/scene/gaussian_model.py b/utils/scene/gaussian_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..16e53cb3936ff65509dd0a477b21da7d647542cf
--- /dev/null
+++ b/utils/scene/gaussian_model.py
@@ -0,0 +1,416 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import numpy as np
+from .utils.general_utils import inverse_sigmoid, get_expon_lr_func, build_rotation
+from torch import nn
+import os
+from .utils.system_utils import mkdir_p
+from plyfile import PlyData, PlyElement
+from .utils.sh_utils import RGB2SH
+from .utils.graphics_utils import BasicPointCloud
+from .utils.general_utils import strip_symmetric, build_scaling_rotation
+
+from scipy.spatial import KDTree
+
+# credit to https://github.com/graphdeco-inria/gaussian-splatting/issues/292#issuecomment-2007934451
+def distCUDA2(points):
+ points_np = points.detach().cpu().float().numpy()
+ dists, inds = KDTree(points_np).query(points_np, k=4)
+ meanDists = (dists[:, 1:] ** 2).mean(1)
+
+ return torch.tensor(meanDists, dtype=points.dtype, device=points.device)
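+
+# Illustrative check (not from upstream): per point, the mean squared distance to its
+# three nearest neighbours, used below to initialise the Gaussian scales.
+#   >>> pts = torch.tensor([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
+#   >>> distCUDA2(pts).shape
+#   torch.Size([4])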
+
+class GaussianModel:
+
+ def setup_functions(self):
+ def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation):
+ L = build_scaling_rotation(scaling_modifier * scaling, rotation)
+ actual_covariance = L @ L.transpose(1, 2)
+ symm = strip_symmetric(actual_covariance)
+ return symm
+
+ self.scaling_activation = torch.exp
+ self.scaling_inverse_activation = torch.log
+
+ self.covariance_activation = build_covariance_from_scaling_rotation
+
+ self.opacity_activation = torch.sigmoid
+ self.inverse_opacity_activation = inverse_sigmoid
+
+ self.rotation_activation = torch.nn.functional.normalize
+
+
+ def __init__(self, sh_degree : int):
+ self.active_sh_degree = 0
+ self.max_sh_degree = sh_degree
+ self._xyz = torch.empty(0)
+ self._features_dc = torch.empty(0)
+ self._features_rest = torch.empty(0)
+ self._scaling = torch.empty(0)
+ self._rotation = torch.empty(0)
+ self._opacity = torch.empty(0)
+ self.max_radii2D = torch.empty(0)
+ self.xyz_gradient_accum = torch.empty(0)
+ self.denom = torch.empty(0)
+ self.optimizer = None
+ self.percent_dense = 0
+ self.spatial_lr_scale = 0
+ self.setup_functions()
+
+ def capture(self):
+ return (
+ self.active_sh_degree,
+ self._xyz,
+ self._features_dc,
+ self._features_rest,
+ self._scaling,
+ self._rotation,
+ self._opacity,
+ self.max_radii2D,
+ self.xyz_gradient_accum,
+ self.denom,
+ self.optimizer.state_dict(),
+ self.spatial_lr_scale,
+ )
+
+ def restore(self, model_args, training_args):
+ (self.active_sh_degree,
+ self._xyz,
+ self._features_dc,
+ self._features_rest,
+ self._scaling,
+ self._rotation,
+ self._opacity,
+ self.max_radii2D,
+ xyz_gradient_accum,
+ denom,
+ opt_dict,
+ self.spatial_lr_scale) = model_args
+ self.training_setup(training_args)
+ self.xyz_gradient_accum = xyz_gradient_accum
+ self.denom = denom
+ self.optimizer.load_state_dict(opt_dict)
+
+ @property
+ def get_scaling(self):
+ return self.scaling_activation(self._scaling)
+
+ @property
+ def get_rotation(self):
+ return self.rotation_activation(self._rotation)
+
+ @property
+ def get_xyz(self):
+ return self._xyz
+
+ @property
+ def get_features(self):
+ features_dc = self._features_dc
+ features_rest = self._features_rest
+ return torch.cat((features_dc, features_rest), dim=1)
+
+ @property
+ def get_opacity(self):
+ return self.opacity_activation(self._opacity)
+
+ def get_covariance(self, scaling_modifier = 1):
+ return self.covariance_activation(self.get_scaling, scaling_modifier, self._rotation)
+
+ def oneupSHdegree(self):
+ if self.active_sh_degree < self.max_sh_degree:
+ self.active_sh_degree += 1
+
+ def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):
+ self.spatial_lr_scale = spatial_lr_scale
+ fused_point_cloud = torch.tensor(np.asarray(pcd.points)).float().cuda()
+ fused_color = RGB2SH(torch.tensor(np.asarray(pcd.colors)).float().cuda())
+ features = torch.zeros((fused_color.shape[0], 3, (self.max_sh_degree + 1) ** 2)).float().cuda()
+ features[:, :3, 0 ] = fused_color
+ features[:, 3:, 1:] = 0.0
+
+ print("Number of points at initialisation : ", fused_point_cloud.shape[0])
+
+ dist2 = torch.clamp_min(distCUDA2(torch.from_numpy(np.asarray(pcd.points)).float().cuda()), 0.0000001)
+ scales = torch.log(torch.sqrt(dist2))[...,None].repeat(1, 3)
+ rots = torch.zeros((fused_point_cloud.shape[0], 4), device="cuda")
+ rots[:, 0] = 1
+
+ opacities = inverse_sigmoid(0.1 * torch.ones((fused_point_cloud.shape[0], 1), dtype=torch.float, device="cuda"))
+
+ self._xyz = nn.Parameter(fused_point_cloud.requires_grad_(True))
+ self._features_dc = nn.Parameter(features[:,:,0:1].transpose(1, 2).contiguous().requires_grad_(True))
+ self._features_rest = nn.Parameter(features[:,:,1:].transpose(1, 2).contiguous().requires_grad_(True))
+ self._scaling = nn.Parameter(scales.requires_grad_(True))
+ self._rotation = nn.Parameter(rots.requires_grad_(True))
+ self._opacity = nn.Parameter(opacities.requires_grad_(True))
+ self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
+
+ def training_setup(self, training_args):
+ self.percent_dense = training_args.percent_dense
+ self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
+ self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
+
+ l = [
+ {'params': [self._xyz], 'lr': training_args.position_lr_init * self.spatial_lr_scale, "name": "xyz"},
+ {'params': [self._features_dc], 'lr': training_args.feature_lr, "name": "f_dc"},
+ {'params': [self._features_rest], 'lr': training_args.feature_lr / 20.0, "name": "f_rest"},
+ {'params': [self._opacity], 'lr': training_args.opacity_lr, "name": "opacity"},
+ {'params': [self._scaling], 'lr': training_args.scaling_lr, "name": "scaling"},
+ {'params': [self._rotation], 'lr': training_args.rotation_lr, "name": "rotation"}
+ ]
+
+ self.optimizer = torch.optim.Adam(l, lr=0.0, eps=1e-15)
+ self.xyz_scheduler_args = get_expon_lr_func(lr_init=training_args.position_lr_init*self.spatial_lr_scale,
+ lr_final=training_args.position_lr_final*self.spatial_lr_scale,
+ lr_delay_mult=training_args.position_lr_delay_mult,
+ max_steps=training_args.position_lr_max_steps)
+
+ def update_learning_rate(self, iteration):
+ ''' Learning rate scheduling per step '''
+ for param_group in self.optimizer.param_groups:
+ if param_group["name"] == "xyz":
+ lr = self.xyz_scheduler_args(iteration)
+ param_group['lr'] = lr
+ return lr
+
+ def construct_list_of_attributes(self):
+ l = ['x', 'y', 'z', 'nx', 'ny', 'nz']
+ # All channels except the 3 DC
+ for i in range(self._features_dc.shape[1]*self._features_dc.shape[2]):
+ l.append('f_dc_{}'.format(i))
+ for i in range(self._features_rest.shape[1]*self._features_rest.shape[2]):
+ l.append('f_rest_{}'.format(i))
+ l.append('opacity')
+ for i in range(self._scaling.shape[1]):
+ l.append('scale_{}'.format(i))
+ for i in range(self._rotation.shape[1]):
+ l.append('rot_{}'.format(i))
+ return l
+
+ def save_ply(self, path):
+ mkdir_p(os.path.dirname(path))
+
+ xyz = self._xyz.detach().cpu().numpy()
+ normals = np.zeros_like(xyz)
+ f_dc = self._features_dc.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
+ f_rest = self._features_rest.detach().transpose(1, 2).flatten(start_dim=1).contiguous().cpu().numpy()
+ opacities = self._opacity.detach().cpu().numpy()
+ scale = self._scaling.detach().cpu().numpy()
+ rotation = self._rotation.detach().cpu().numpy()
+
+ dtype_full = [(attribute, 'f4') for attribute in self.construct_list_of_attributes()]
+
+ elements = np.empty(xyz.shape[0], dtype=dtype_full)
+ attributes = np.concatenate((xyz, normals, f_dc, f_rest, opacities, scale, rotation), axis=1)
+ elements[:] = list(map(tuple, attributes))
+ el = PlyElement.describe(elements, 'vertex')
+ PlyData([el]).write(path)
+
+ def reset_opacity(self):
+ opacities_new = inverse_sigmoid(torch.min(self.get_opacity, torch.ones_like(self.get_opacity)*0.01))
+ optimizable_tensors = self.replace_tensor_to_optimizer(opacities_new, "opacity")
+ self._opacity = optimizable_tensors["opacity"]
+
+ def load_ply(self, path):
+ plydata = PlyData.read(path)
+
+ xyz = np.stack((np.asarray(plydata.elements[0]["x"]),
+ np.asarray(plydata.elements[0]["y"]),
+ np.asarray(plydata.elements[0]["z"])), axis=1)
+ opacities = np.asarray(plydata.elements[0]["opacity"])[..., np.newaxis]
+
+ features_dc = np.zeros((xyz.shape[0], 3, 1))
+ features_dc[:, 0, 0] = np.asarray(plydata.elements[0]["f_dc_0"])
+ features_dc[:, 1, 0] = np.asarray(plydata.elements[0]["f_dc_1"])
+ features_dc[:, 2, 0] = np.asarray(plydata.elements[0]["f_dc_2"])
+
+ extra_f_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("f_rest_")]
+ extra_f_names = sorted(extra_f_names, key = lambda x: int(x.split('_')[-1]))
+ assert len(extra_f_names)==3*(self.max_sh_degree + 1) ** 2 - 3
+ features_extra = np.zeros((xyz.shape[0], len(extra_f_names)))
+ for idx, attr_name in enumerate(extra_f_names):
+ features_extra[:, idx] = np.asarray(plydata.elements[0][attr_name])
+ # Reshape (P,F*SH_coeffs) to (P, F, SH_coeffs except DC)
+ features_extra = features_extra.reshape((features_extra.shape[0], 3, (self.max_sh_degree + 1) ** 2 - 1))
+
+ scale_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("scale_")]
+ scale_names = sorted(scale_names, key = lambda x: int(x.split('_')[-1]))
+ scales = np.zeros((xyz.shape[0], len(scale_names)))
+ for idx, attr_name in enumerate(scale_names):
+ scales[:, idx] = np.asarray(plydata.elements[0][attr_name])
+
+ rot_names = [p.name for p in plydata.elements[0].properties if p.name.startswith("rot")]
+ rot_names = sorted(rot_names, key = lambda x: int(x.split('_')[-1]))
+ rots = np.zeros((xyz.shape[0], len(rot_names)))
+ for idx, attr_name in enumerate(rot_names):
+ rots[:, idx] = np.asarray(plydata.elements[0][attr_name])
+
+ self._xyz = nn.Parameter(torch.tensor(xyz, dtype=torch.float, device="cuda").requires_grad_(True))
+ self._features_dc = nn.Parameter(torch.tensor(features_dc, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
+ self._features_rest = nn.Parameter(torch.tensor(features_extra, dtype=torch.float, device="cuda").transpose(1, 2).contiguous().requires_grad_(True))
+ self._opacity = nn.Parameter(torch.tensor(opacities, dtype=torch.float, device="cuda").requires_grad_(True))
+ self._scaling = nn.Parameter(torch.tensor(scales, dtype=torch.float, device="cuda").requires_grad_(True))
+ self._rotation = nn.Parameter(torch.tensor(rots, dtype=torch.float, device="cuda").requires_grad_(True))
+
+ self.active_sh_degree = self.max_sh_degree
+
+ def replace_tensor_to_optimizer(self, tensor, name):
+ optimizable_tensors = {}
+ for group in self.optimizer.param_groups:
+ if group["name"] == name:
+ stored_state = self.optimizer.state.get(group['params'][0], None)
+ stored_state["exp_avg"] = torch.zeros_like(tensor)
+ stored_state["exp_avg_sq"] = torch.zeros_like(tensor)
+
+ del self.optimizer.state[group['params'][0]]
+ group["params"][0] = nn.Parameter(tensor.requires_grad_(True))
+ self.optimizer.state[group['params'][0]] = stored_state
+
+ optimizable_tensors[group["name"]] = group["params"][0]
+ return optimizable_tensors
+
+ def _prune_optimizer(self, mask):
+ optimizable_tensors = {}
+ for group in self.optimizer.param_groups:
+ stored_state = self.optimizer.state.get(group['params'][0], None)
+ if stored_state is not None:
+ stored_state["exp_avg"] = stored_state["exp_avg"][mask]
+ stored_state["exp_avg_sq"] = stored_state["exp_avg_sq"][mask]
+
+ del self.optimizer.state[group['params'][0]]
+ group["params"][0] = nn.Parameter((group["params"][0][mask].requires_grad_(True)))
+ self.optimizer.state[group['params'][0]] = stored_state
+
+ optimizable_tensors[group["name"]] = group["params"][0]
+ else:
+ group["params"][0] = nn.Parameter(group["params"][0][mask].requires_grad_(True))
+ optimizable_tensors[group["name"]] = group["params"][0]
+ return optimizable_tensors
+
+ def prune_points(self, mask):
+ valid_points_mask = ~mask
+ optimizable_tensors = self._prune_optimizer(valid_points_mask)
+
+ self._xyz = optimizable_tensors["xyz"]
+ self._features_dc = optimizable_tensors["f_dc"]
+ self._features_rest = optimizable_tensors["f_rest"]
+ self._opacity = optimizable_tensors["opacity"]
+ self._scaling = optimizable_tensors["scaling"]
+ self._rotation = optimizable_tensors["rotation"]
+
+ self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]
+
+ self.denom = self.denom[valid_points_mask]
+ self.max_radii2D = self.max_radii2D[valid_points_mask]
+
+ def cat_tensors_to_optimizer(self, tensors_dict):
+ optimizable_tensors = {}
+ for group in self.optimizer.param_groups:
+ assert len(group["params"]) == 1
+ extension_tensor = tensors_dict[group["name"]]
+ stored_state = self.optimizer.state.get(group['params'][0], None)
+ if stored_state is not None:
+
+ stored_state["exp_avg"] = torch.cat((stored_state["exp_avg"], torch.zeros_like(extension_tensor)), dim=0)
+ stored_state["exp_avg_sq"] = torch.cat((stored_state["exp_avg_sq"], torch.zeros_like(extension_tensor)), dim=0)
+
+ del self.optimizer.state[group['params'][0]]
+ group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
+ self.optimizer.state[group['params'][0]] = stored_state
+
+ optimizable_tensors[group["name"]] = group["params"][0]
+ else:
+ group["params"][0] = nn.Parameter(torch.cat((group["params"][0], extension_tensor), dim=0).requires_grad_(True))
+ optimizable_tensors[group["name"]] = group["params"][0]
+
+ return optimizable_tensors
+
+ def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):
+ d = {"xyz": new_xyz,
+ "f_dc": new_features_dc,
+ "f_rest": new_features_rest,
+ "opacity": new_opacities,
+ "scaling" : new_scaling,
+ "rotation" : new_rotation}
+
+ optimizable_tensors = self.cat_tensors_to_optimizer(d)
+ self._xyz = optimizable_tensors["xyz"]
+ self._features_dc = optimizable_tensors["f_dc"]
+ self._features_rest = optimizable_tensors["f_rest"]
+ self._opacity = optimizable_tensors["opacity"]
+ self._scaling = optimizable_tensors["scaling"]
+ self._rotation = optimizable_tensors["rotation"]
+
+ self.xyz_gradient_accum = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
+ self.denom = torch.zeros((self.get_xyz.shape[0], 1), device="cuda")
+ self.max_radii2D = torch.zeros((self.get_xyz.shape[0]), device="cuda")
+
+ def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):
+ n_init_points = self.get_xyz.shape[0]
+ # Extract points that satisfy the gradient condition
+ padded_grad = torch.zeros((n_init_points), device="cuda")
+ padded_grad[:grads.shape[0]] = grads.squeeze()
+ selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)
+ selected_pts_mask = torch.logical_and(selected_pts_mask,
+ torch.max(self.get_scaling, dim=1).values > self.percent_dense*scene_extent)
+
+ stds = self.get_scaling[selected_pts_mask].repeat(N,1)
+        means = torch.zeros((stds.size(0), 3), device="cuda")
+ samples = torch.normal(mean=means, std=stds)
+ rots = build_rotation(self._rotation[selected_pts_mask]).repeat(N,1,1)
+ new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self.get_xyz[selected_pts_mask].repeat(N, 1)
+ new_scaling = self.scaling_inverse_activation(self.get_scaling[selected_pts_mask].repeat(N,1) / (0.8*N))
+ new_rotation = self._rotation[selected_pts_mask].repeat(N,1)
+ new_features_dc = self._features_dc[selected_pts_mask].repeat(N,1,1)
+ new_features_rest = self._features_rest[selected_pts_mask].repeat(N,1,1)
+ new_opacity = self._opacity[selected_pts_mask].repeat(N,1)
+
+ self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacity, new_scaling, new_rotation)
+
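+        # densification_postfix appended the N * mask.sum() newly sampled Gaussians at
+        # the end, so pad the mask with zeros to keep them and prune only the original
+        # (now replaced) points.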
+ prune_filter = torch.cat((selected_pts_mask, torch.zeros(N * selected_pts_mask.sum(), device="cuda", dtype=bool)))
+ self.prune_points(prune_filter)
+
+ def densify_and_clone(self, grads, grad_threshold, scene_extent):
+ # Extract points that satisfy the gradient condition
+ selected_pts_mask = torch.where(torch.norm(grads, dim=-1) >= grad_threshold, True, False)
+ selected_pts_mask = torch.logical_and(selected_pts_mask,
+ torch.max(self.get_scaling, dim=1).values <= self.percent_dense*scene_extent)
+
+ new_xyz = self._xyz[selected_pts_mask]
+ new_features_dc = self._features_dc[selected_pts_mask]
+ new_features_rest = self._features_rest[selected_pts_mask]
+ new_opacities = self._opacity[selected_pts_mask]
+ new_scaling = self._scaling[selected_pts_mask]
+ new_rotation = self._rotation[selected_pts_mask]
+
+ self.densification_postfix(new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation)
+
+ def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size):
+ grads = self.xyz_gradient_accum / self.denom
+ grads[grads.isnan()] = 0.0
+
+ self.densify_and_clone(grads, max_grad, extent)
+ self.densify_and_split(grads, max_grad, extent)
+
+ prune_mask = (self.get_opacity < min_opacity).squeeze()
+ if max_screen_size:
+ big_points_vs = self.max_radii2D > max_screen_size
+ big_points_ws = self.get_scaling.max(dim=1).values > 0.1 * extent
+ prune_mask = torch.logical_or(torch.logical_or(prune_mask, big_points_vs), big_points_ws)
+ self.prune_points(prune_mask)
+
+ torch.cuda.empty_cache()
+
+ def add_densification_stats(self, viewspace_point_tensor, update_filter):
+ self.xyz_gradient_accum[update_filter] += torch.norm(viewspace_point_tensor.grad[update_filter,:2], dim=-1, keepdim=True)
+ self.denom[update_filter] += 1
\ No newline at end of file
diff --git a/utils/scene/utils/camera_utils.py b/utils/scene/utils/camera_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fae41415fbe6887337d8016f2760667bedaa361d
--- /dev/null
+++ b/utils/scene/utils/camera_utils.py
@@ -0,0 +1,84 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+from ..cameras import Camera
+import numpy as np
+from .general_utils import PILtoTorch
+from .graphics_utils import fov2focal
+
+WARNED = False
+
+def loadCam(args, id, cam_info, resolution_scale):
+ orig_w, orig_h = cam_info.image.size
+
+ if args.resolution in [1, 2, 4, 8]:
+ resolution = round(orig_w/(resolution_scale * args.resolution)), round(orig_h/(resolution_scale * args.resolution))
+ else: # should be a type that converts to float
+ if args.resolution == -1:
+ if orig_w > 1600:
+ global WARNED
+ if not WARNED:
+ print("[ INFO ] Encountered quite large input images (>1.6K pixels width), rescaling to 1.6K.\n "
+ "If this is not desired, please explicitly specify '--resolution/-r' as 1")
+ WARNED = True
+ global_down = orig_w / 1600
+ else:
+ global_down = 1
+ else:
+ global_down = orig_w / args.resolution
+
+ scale = float(global_down) * float(resolution_scale)
+ resolution = (int(orig_w / scale), int(orig_h / scale))
+
+ resized_image_rgb = PILtoTorch(cam_info.image, resolution)
+
+ gt_image = resized_image_rgb[:3, ...]
+ loaded_mask = None
+
+ if resized_image_rgb.shape[1] == 4:
+ loaded_mask = resized_image_rgb[3:4, ...]
+ elif cam_info.mask is not None:
+ loaded_mask = ~(PILtoTorch(cam_info.mask, resolution)[0:1, ...] > 0)
+
+ return Camera(colmap_id=cam_info.uid, R=cam_info.R, T=cam_info.T,
+ FoVx=cam_info.FovX, FoVy=cam_info.FovY,
+ image=gt_image, gt_alpha_mask=loaded_mask,
+ image_name=cam_info.image_name, uid=id, data_device=args.data_device)
+
+def cameraList_from_camInfos(cam_infos, resolution_scale, args):
+ camera_list = []
+
+ for id, c in enumerate(cam_infos):
+ camera_list.append(loadCam(args, id, c, resolution_scale))
+
+ return camera_list
+
+def camera_to_JSON(id, camera : Camera):
+ Rt = np.zeros((4, 4))
+ Rt[:3, :3] = camera.R.transpose()
+ Rt[:3, 3] = camera.T
+ Rt[3, 3] = 1.0
+
+ W2C = np.linalg.inv(Rt)
+ pos = W2C[:3, 3]
+ rot = W2C[:3, :3]
+ serializable_array_2d = [x.tolist() for x in rot]
+ camera_entry = {
+ 'id' : id,
+ 'img_name' : camera.image_name,
+ 'width' : camera.width,
+ 'height' : camera.height,
+ 'position': pos.tolist(),
+ 'rotation': serializable_array_2d,
+ 'fy' : fov2focal(camera.FovY, camera.height),
+ 'fx' : fov2focal(camera.FovX, camera.width)
+ }
+ return camera_entry
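+
+# Typical chaining when a scene is assembled (sketch; variable names are illustrative):
+#   train_cams = cameraList_from_camInfos(scene_info.train_cameras, 1.0, args)
+#   cams_json = [camera_to_JSON(i, c) for i, c in enumerate(scene_info.train_cameras)]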
diff --git a/utils/scene/utils/general_utils.py b/utils/scene/utils/general_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..541c0825229a2d86e84460b765879f86f724a59d
--- /dev/null
+++ b/utils/scene/utils/general_utils.py
@@ -0,0 +1,133 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import sys
+from datetime import datetime
+import numpy as np
+import random
+
+def inverse_sigmoid(x):
+ return torch.log(x/(1-x))
+
+def PILtoTorch(pil_image, resolution):
+ resized_image_PIL = pil_image.resize(resolution)
+ resized_image = torch.from_numpy(np.array(resized_image_PIL)) / 255.0
+ if len(resized_image.shape) == 3:
+ return resized_image.permute(2, 0, 1)
+ else:
+ return resized_image.unsqueeze(dim=-1).permute(2, 0, 1)
+
+def get_expon_lr_func(
+ lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000
+):
+ """
+ Copied from Plenoxels
+
+ Continuous learning rate decay function. Adapted from JaxNeRF
+ The returned rate is lr_init when step=0 and lr_final when step=max_steps, and
+ is log-linearly interpolated elsewhere (equivalent to exponential decay).
+ If lr_delay_steps>0 then the learning rate will be scaled by some smooth
+ function of lr_delay_mult, such that the initial learning rate is
+ lr_init*lr_delay_mult at the beginning of optimization but will be eased back
+ to the normal learning rate when steps>lr_delay_steps.
+ :param conf: config subtree 'lr' or similar
+ :param max_steps: int, the number of steps during optimization.
+ :return HoF which takes step as input
+ """
+
+ def helper(step):
+ if step < 0 or (lr_init == 0.0 and lr_final == 0.0):
+ # Disable this parameter
+ return 0.0
+ if lr_delay_steps > 0:
+ # A kind of reverse cosine decay.
+ delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
+ 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)
+ )
+ else:
+ delay_rate = 1.0
+ t = np.clip(step / max_steps, 0, 1)
+ log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
+ return delay_rate * log_lerp
+
+ return helper
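+
+# Endpoint sketch (illustrative values): with no warm-up delay the schedule is exactly
+# lr_init at step 0 and lr_final at max_steps, log-linear in between.
+#   >>> sched = get_expon_lr_func(lr_init=1e-2, lr_final=1e-4, max_steps=100)
+#   >>> round(sched(0), 8), round(sched(100), 8)
+#   (0.01, 0.0001)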
+
+def strip_lowerdiag(L):
+ uncertainty = torch.zeros((L.shape[0], 6), dtype=torch.float, device="cuda")
+
+ uncertainty[:, 0] = L[:, 0, 0]
+ uncertainty[:, 1] = L[:, 0, 1]
+ uncertainty[:, 2] = L[:, 0, 2]
+ uncertainty[:, 3] = L[:, 1, 1]
+ uncertainty[:, 4] = L[:, 1, 2]
+ uncertainty[:, 5] = L[:, 2, 2]
+ return uncertainty
+
+def strip_symmetric(sym):
+ return strip_lowerdiag(sym)
+
+def build_rotation(r):
+ norm = torch.sqrt(r[:,0]*r[:,0] + r[:,1]*r[:,1] + r[:,2]*r[:,2] + r[:,3]*r[:,3])
+
+ q = r / norm[:, None]
+
+ R = torch.zeros((q.size(0), 3, 3), device='cuda')
+
+ r = q[:, 0]
+ x = q[:, 1]
+ y = q[:, 2]
+ z = q[:, 3]
+
+ R[:, 0, 0] = 1 - 2 * (y*y + z*z)
+ R[:, 0, 1] = 2 * (x*y - r*z)
+ R[:, 0, 2] = 2 * (x*z + r*y)
+ R[:, 1, 0] = 2 * (x*y + r*z)
+ R[:, 1, 1] = 1 - 2 * (x*x + z*z)
+ R[:, 1, 2] = 2 * (y*z - r*x)
+ R[:, 2, 0] = 2 * (x*z - r*y)
+ R[:, 2, 1] = 2 * (y*z + r*x)
+ R[:, 2, 2] = 1 - 2 * (x*x + y*y)
+ return R
+
+def build_scaling_rotation(s, r):
+ L = torch.zeros((s.shape[0], 3, 3), dtype=torch.float, device="cuda")
+ R = build_rotation(r)
+
+ L[:,0,0] = s[:,0]
+ L[:,1,1] = s[:,1]
+ L[:,2,2] = s[:,2]
+
+ L = R @ L
+ return L
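+
+# Note: L is R @ diag(s); the Gaussian covariance used elsewhere in this diff is then
+# recovered as L @ L^T (= R S S^T R^T) in GaussianModel.setup_functions.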
+
+def safe_state(silent):
+ old_f = sys.stdout
+ class F:
+ def __init__(self, silent):
+ self.silent = silent
+
+ def write(self, x):
+ if not self.silent:
+ if x.endswith("\n"):
+ old_f.write(x.replace("\n", " [{}]\n".format(str(datetime.now().strftime("%d/%m %H:%M:%S")))))
+ else:
+ old_f.write(x)
+
+ def flush(self):
+ old_f.flush()
+
+ sys.stdout = F(silent)
+
+ random.seed(0)
+ np.random.seed(0)
+ torch.manual_seed(0)
+ torch.cuda.set_device(torch.device("cuda:0"))
diff --git a/utils/scene/utils/graphics_utils.py b/utils/scene/utils/graphics_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b31312c1c9ce62da31ed6eac6555f9d284a7528
--- /dev/null
+++ b/utils/scene/utils/graphics_utils.py
@@ -0,0 +1,88 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import math
+import numpy as np
+from typing import NamedTuple
+
+class BasicPointCloud(NamedTuple):
+ points : np.array
+ colors : np.array
+ normals : np.array
+
+def geom_transform_points(points, transf_matrix):
+ P, _ = points.shape
+ ones = torch.ones(P, 1, dtype=points.dtype, device=points.device)
+ points_hom = torch.cat([points, ones], dim=1)
+ points_out = torch.matmul(points_hom, transf_matrix.unsqueeze(0))
+
+ denom = points_out[..., 3:] + 0.0000001
+ return (points_out[..., :3] / denom).squeeze(dim=0)
+
+def getWorld2View(R, t):
+ Rt = np.zeros((4, 4))
+ Rt[:3, :3] = R.transpose()
+ Rt[:3, 3] = t
+ Rt[3, 3] = 1.0
+ return np.float32(Rt)
+
+def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):
+ Rt = np.zeros((4, 4))
+ Rt[:3, :3] = R.transpose()
+ Rt[:3, 3] = t
+ Rt[3, 3] = 1.0
+
+ C2W = np.linalg.inv(Rt)
+ cam_center = C2W[:3, 3]
+ cam_center = (cam_center + translate) * scale
+ C2W[:3, 3] = cam_center
+ Rt = np.linalg.inv(C2W)
+ return np.float32(Rt)
+
+def getProjectionMatrix(znear, zfar, fovX, fovY, crop_box=None, width=None, height=None):
+ tanHalfFovY = math.tan((fovY / 2))
+ tanHalfFovX = math.tan((fovX / 2))
+
+ top = tanHalfFovY * znear
+ bottom = -top
+ right = tanHalfFovX * znear
+ left = -right
+
+ frustum_width = right - left
+ frustum_height = top - bottom
+
+ if crop_box is not None:
+ assert width is not None and height is not None
+ x, y, w, h = crop_box
+ left = left + x / width * frustum_width
+ right = left + w / width * frustum_width
+ top = top - y / height * frustum_height
+ bottom = top - h / height * frustum_height
+
+ P = torch.zeros(4, 4)
+
+ z_sign = 1.0
+
+ P[0, 0] = 2.0 * znear / (right - left)
+ P[1, 1] = 2.0 * znear / (top - bottom)
+ P[0, 2] = (right + left) / (right - left)
+ P[1, 2] = (top + bottom) / (top - bottom)
+ P[3, 2] = z_sign
+ P[2, 2] = z_sign * zfar / (zfar - znear)
+ P[2, 3] = -(zfar * znear) / (zfar - znear)
+ return P
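+
+# Notes (added for clarity, not from upstream): depth is mapped to [0, 1], and
+# crop_box = (x, y, w, h) in pixels narrows the frustum to that sub-rectangle of the
+# full image (width and height must then be supplied). Quick sanity check:
+#   >>> P = getProjectionMatrix(znear=0.01, zfar=100.0, fovX=math.pi / 2, fovY=math.pi / 2)
+#   >>> torch.allclose(P[0, 0], torch.tensor(1.0)) and torch.allclose(P[1, 1], torch.tensor(1.0))
+#   True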
+
+def fov2focal(fov, pixels):
+ return pixels / (2 * math.tan(fov / 2))
+
+def focal2fov(focal, pixels):
+ return 2*math.atan(pixels/(2*focal))
\ No newline at end of file
diff --git a/utils/scene/utils/image_utils.py b/utils/scene/utils/image_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdeaa1b6d250e549181ab165070f82ccd31b3eb9
--- /dev/null
+++ b/utils/scene/utils/image_utils.py
@@ -0,0 +1,19 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+
+def mse(img1, img2):
+    return ((img1 - img2) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
+
+def psnr(img1, img2):
+    mse = ((img1 - img2) ** 2).view(img1.shape[0], -1).mean(1, keepdim=True)
+    return 20 * torch.log10(1.0 / torch.sqrt(mse))
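+
+# Sanity sketch (illustrative): a uniform error of 0.1 on images in [0, 1] gives ~20 dB.
+#   >>> a = torch.zeros(1, 3, 4, 4)
+#   >>> round(float(psnr(a, a + 0.1)), 4)
+#   20.0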
diff --git a/utils/scene/utils/loss_utils.py b/utils/scene/utils/loss_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..08a060098c3f24950c0710fa0bb4b1fb2ca46fa3
--- /dev/null
+++ b/utils/scene/utils/loss_utils.py
@@ -0,0 +1,65 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+import torch
+import torch.nn.functional as F
+from torch.autograd import Variable
+from math import exp
+
+def l1_loss(network_output, gt, reduce=True):
+ l1_loss = torch.abs((network_output - gt))
+ return l1_loss.mean() if reduce else l1_loss
+
+def l2_loss(network_output, gt):
+ return ((network_output - gt) ** 2).mean()
+
+def gaussian(window_size, sigma):
+ gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
+ return gauss / gauss.sum()
+
+def create_window(window_size, channel):
+ _1D_window = gaussian(window_size, 1.5).unsqueeze(1)
+ _2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
+ window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
+ return window
+
+def ssim(img1, img2, window_size=11, size_average=True):
+ channel = img1.size(-3)
+ window = create_window(window_size, channel)
+
+ if img1.is_cuda:
+ window = window.cuda(img1.get_device())
+ window = window.type_as(img1)
+
+ return _ssim(img1, img2, window, window_size, channel, size_average)
+
+def _ssim(img1, img2, window, window_size, channel, size_average=True):
+ mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
+ mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
+
+ mu1_sq = mu1.pow(2)
+ mu2_sq = mu2.pow(2)
+ mu1_mu2 = mu1 * mu2
+
+ sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
+ sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
+ sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
+
+ C1 = 0.01 ** 2
+ C2 = 0.03 ** 2
+
+ ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
+
+ if size_average:
+ return ssim_map.mean()
+ else:
+ return ssim_map.mean(1).mean(1).mean(1)
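+
+# Sketch: SSIM of an image with itself is 1 up to the stabilising constants C1/C2.
+#   >>> img = torch.rand(1, 3, 32, 32)
+#   >>> float(ssim(img, img)) > 0.99
+#   True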
+
diff --git a/utils/scene/utils/sh_utils.py b/utils/scene/utils/sh_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbca7d192aa3a7edf8c5b2d24dee535eac765785
--- /dev/null
+++ b/utils/scene/utils/sh_utils.py
@@ -0,0 +1,118 @@
+# Copyright 2021 The PlenOctree Authors.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import torch
+
+C0 = 0.28209479177387814
+C1 = 0.4886025119029199
+C2 = [
+ 1.0925484305920792,
+ -1.0925484305920792,
+ 0.31539156525252005,
+ -1.0925484305920792,
+ 0.5462742152960396
+]
+C3 = [
+ -0.5900435899266435,
+ 2.890611442640554,
+ -0.4570457994644658,
+ 0.3731763325901154,
+ -0.4570457994644658,
+ 1.445305721320277,
+ -0.5900435899266435
+]
+C4 = [
+ 2.5033429417967046,
+ -1.7701307697799304,
+ 0.9461746957575601,
+ -0.6690465435572892,
+ 0.10578554691520431,
+ -0.6690465435572892,
+ 0.47308734787878004,
+ -1.7701307697799304,
+ 0.6258357354491761,
+]
+
+
+def eval_sh(deg, sh, dirs):
+ """
+ Evaluate spherical harmonics at unit directions
+ using hardcoded SH polynomials.
+ Works with torch/np/jnp.
+ ... Can be 0 or more batch dimensions.
+ Args:
+        deg: int SH deg. Currently, 0-4 supported
+ sh: jnp.ndarray SH coeffs [..., C, (deg + 1) ** 2]
+ dirs: jnp.ndarray unit directions [..., 3]
+ Returns:
+ [..., C]
+ """
+ assert deg <= 4 and deg >= 0
+ coeff = (deg + 1) ** 2
+ assert sh.shape[-1] >= coeff
+
+ result = C0 * sh[..., 0]
+ if deg > 0:
+ x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
+ result = (result -
+ C1 * y * sh[..., 1] +
+ C1 * z * sh[..., 2] -
+ C1 * x * sh[..., 3])
+
+ if deg > 1:
+ xx, yy, zz = x * x, y * y, z * z
+ xy, yz, xz = x * y, y * z, x * z
+ result = (result +
+ C2[0] * xy * sh[..., 4] +
+ C2[1] * yz * sh[..., 5] +
+ C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
+ C2[3] * xz * sh[..., 7] +
+ C2[4] * (xx - yy) * sh[..., 8])
+
+ if deg > 2:
+ result = (result +
+ C3[0] * y * (3 * xx - yy) * sh[..., 9] +
+ C3[1] * xy * z * sh[..., 10] +
+ C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
+ C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
+ C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
+ C3[5] * z * (xx - yy) * sh[..., 14] +
+ C3[6] * x * (xx - 3 * yy) * sh[..., 15])
+
+ if deg > 3:
+ result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
+ C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
+ C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
+ C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
+ C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
+ C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
+ C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
+ C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
+ C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
+ return result
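+
+# Sketch: at degree 0 the evaluation reduces to the DC term scaled by C0, and the
+# RGB2SH / SH2RGB helpers below are exact inverses for that DC coefficient.
+#   >>> rgb = torch.tensor([0.2, 0.5, 0.8])
+#   >>> torch.allclose(SH2RGB(RGB2SH(rgb)), rgb)
+#   True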
+
+def RGB2SH(rgb):
+ return (rgb - 0.5) / C0
+
+def SH2RGB(sh):
+ return sh * C0 + 0.5
\ No newline at end of file
diff --git a/utils/scene/utils/system_utils.py b/utils/scene/utils/system_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..90ca6d7f77610c967affe313398777cd86920e8e
--- /dev/null
+++ b/utils/scene/utils/system_utils.py
@@ -0,0 +1,28 @@
+#
+# Copyright (C) 2023, Inria
+# GRAPHDECO research group, https://team.inria.fr/graphdeco
+# All rights reserved.
+#
+# This software is free for non-commercial, research and evaluation use
+# under the terms of the LICENSE.md file.
+#
+# For inquiries contact george.drettakis@inria.fr
+#
+
+from errno import EEXIST
+from os import makedirs, path
+import os
+
+def mkdir_p(folder_path):
+ # Creates a directory. equivalent to using mkdir -p on the command line
+ try:
+ makedirs(folder_path)
+ except OSError as exc: # Python >2.5
+ if exc.errno == EEXIST and path.isdir(folder_path):
+ pass
+ else:
+ raise
+
+def searchForMaxIteration(folder):
+ saved_iters = [int(fname.split("_")[-1]) for fname in os.listdir(folder)]
+ return max(saved_iters)
diff --git a/zoedepth/LICENSE b/zoedepth/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..70a2068262a774e0e179b9c898ff38a665f59884
--- /dev/null
+++ b/zoedepth/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Intelligent Systems Lab Org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/zoedepth/data/__init__.py b/zoedepth/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9
--- /dev/null
+++ b/zoedepth/data/__init__.py
@@ -0,0 +1,24 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
diff --git a/zoedepth/data/data_mono.py b/zoedepth/data/data_mono.py
new file mode 100644
index 0000000000000000000000000000000000000000..4027425a5dfa41d138eaf78f35b04aa8d01ab638
--- /dev/null
+++ b/zoedepth/data/data_mono.py
@@ -0,0 +1,697 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+# This file is partly inspired from BTS (https://github.com/cleinc/bts/blob/master/pytorch/bts_dataloader.py); author: Jin Han Lee
+
+import itertools
+import os
+import random
+from random import choice
+
+import numpy as np
+import cv2
+import torch
+import torch.nn as nn
+import torch.utils.data.distributed
+from zoedepth.utils.easydict import EasyDict as edict
+from PIL import Image, ImageOps
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+from zoedepth.utils.config import change_dataset
+
+from .ddad import get_ddad_loader
+from .diml_indoor_test import get_diml_indoor_loader
+from .diml_outdoor_test import get_diml_outdoor_loader
+from .diode import get_diode_loader
+from .hypersim import get_hypersim_loader
+from .ibims import get_ibims_loader
+from .sun_rgbd_loader import get_sunrgbd_loader
+from .vkitti import get_vkitti_loader
+from .vkitti2 import get_vkitti2_loader
+from .places365 import get_places365_loader, Places365
+from .marigold_nyu import get_marigold_nyu_loader, MarigoldNYU
+
+from .preprocess import CropParams, get_white_border, get_black_border
+
+
+def _is_pil_image(img):
+ return isinstance(img, Image.Image)
+
+
+def _is_numpy_image(img):
+ return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
+
+
+def preprocessing_transforms(mode, **kwargs):
+ return transforms.Compose([
+ ToTensor(mode=mode, **kwargs)
+ ])
+
+
+class DepthDataLoader(object):
+ def __init__(self, config, mode, device='cpu', transform=None, **kwargs):
+ """
+ Data loader for depth datasets
+
+ Args:
+ config (dict): Config dictionary. Refer to utils/config.py
+ mode (str): "train" or "online_eval"
+ device (str, optional): Device to load the data on. Defaults to 'cpu'.
+ transform (torchvision.transforms, optional): Transform to apply to the data. Defaults to None.
+ """
+
+ self.config = config
+
+ if config.dataset == 'ibims':
+ self.data = get_ibims_loader(config, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'sunrgbd':
+ self.data = get_sunrgbd_loader(
+ data_dir_root=config.sunrgbd_root, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'diml_indoor':
+ self.data = get_diml_indoor_loader(
+ data_dir_root=config.diml_indoor_root, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'diml_outdoor':
+ self.data = get_diml_outdoor_loader(
+ data_dir_root=config.diml_outdoor_root, batch_size=1, num_workers=1)
+ return
+
+ if "diode" in config.dataset:
+ self.data = get_diode_loader(
+ config[config.dataset+"_root"], batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'hypersim_test':
+ self.data = get_hypersim_loader(
+ config.hypersim_test_root, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'vkitti':
+ self.data = get_vkitti_loader(
+ config.vkitti_root, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'vkitti2':
+ self.data = get_vkitti2_loader(
+ config.vkitti2_root, batch_size=1, num_workers=1)
+ return
+
+ if config.dataset == 'ddad':
+ self.data = get_ddad_loader(config.ddad_root, resize_shape=(
+ 352, 1216), batch_size=1, num_workers=1)
+ return
+
+ img_size = self.config.get("img_size", None)
+ img_size = img_size if self.config.get(
+ "do_input_resize", False) else None
+
+ if transform is None:
+ transform = preprocessing_transforms(mode, size=img_size)
+
+ if mode == 'train':
+
+ Dataset = DataLoadPreprocess
+ self.training_samples = Dataset(
+ config, mode, transform=transform, device=device)
+
+ if config.distributed and not config.debug_mode:
+ self.train_sampler = torch.utils.data.distributed.DistributedSampler(
+ self.training_samples)
+ else:
+ self.train_sampler = None
+
+ if not config.debug_mode:
+ self.data = DataLoader(self.training_samples,
+ batch_size=config.batch_size,
+ shuffle=(self.train_sampler is None),
+ num_workers=config.workers,
+ pin_memory=True,
+ persistent_workers=True,
+ # prefetch_factor=2,
+ sampler=self.train_sampler)
+ else:
+ self.data = DataLoader(self.training_samples,
+ batch_size=config.batch_size,
+ shuffle=(self.train_sampler is None),
+ num_workers=0,
+ pin_memory=True,
+ # prefetch_factor=2,
+ sampler=self.train_sampler)
+
+ elif mode == 'online_eval':
+ self.testing_samples = DataLoadPreprocess(
+ config, mode, transform=transform)
+ if config.distributed: # redundant. here only for readability and to be more explicit
+ # Give whole test set to all processes (and report evaluation only on one) regardless
+ self.eval_sampler = None
+ else:
+ self.eval_sampler = None
+ self.data = DataLoader(self.testing_samples, 1,
+ shuffle=kwargs.get("shuffle_test", False),
+ num_workers=1,
+ pin_memory=False,
+ sampler=self.eval_sampler)
+
+ elif mode == 'test':
+ self.testing_samples = DataLoadPreprocess(
+ config, mode, transform=transform)
+ self.data = DataLoader(self.testing_samples,
+ 1, shuffle=False, num_workers=1)
+
+ else:
+ print(
+ 'mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))
+
+
+def repetitive_roundrobin(*iterables):
+ """
+ cycles through iterables but sample wise
+ first yield first sample from first iterable then first sample from second iterable and so on
+ then second sample from first iterable then second sample from second iterable and so on
+
+ If one iterable is shorter than the others, it is repeated until all iterables are exhausted
+ repetitive_roundrobin('ABC', 'D', 'EF') --> A D E B D F C D E
+ """
+ # Repetitive roundrobin
+ iterables_ = [iter(it) for it in iterables]
+ exhausted = [False] * len(iterables)
+ while not all(exhausted):
+ for i, it in enumerate(iterables_):
+ try:
+ yield next(it)
+ except StopIteration:
+ exhausted[i] = True
+ iterables_[i] = itertools.cycle(iterables[i])
+ # First elements may get repeated if one iterable is shorter than the others
+ yield next(iterables_[i])
+
+
+class RepetitiveRoundRobinDataLoader(object):
+ def __init__(self, *dataloaders):
+ self.dataloaders = dataloaders
+
+ def __iter__(self):
+ return repetitive_roundrobin(*self.dataloaders)
+
+ def __len__(self):
+        # First samples get repeated, that's why the plus one
+ return len(self.dataloaders) * (max(len(dl) for dl in self.dataloaders) + 1)
+
+
+class MixedNYUKITTI(object):
+ def __init__(self, config, mode, device='cpu', **kwargs):
+ config = edict(config)
+ config.workers = config.workers // 2
+ self.config = config
+ nyu_conf = change_dataset(edict(config), 'nyu')
+ kitti_conf = change_dataset(edict(config), 'kitti')
+
+ # make nyu default for testing
+ self.config = config = nyu_conf
+ img_size = self.config.get("img_size", None)
+ img_size = img_size if self.config.get(
+ "do_input_resize", False) else None
+ if mode == 'train':
+ nyu_loader = DepthDataLoader(
+ nyu_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data
+ kitti_loader = DepthDataLoader(
+ kitti_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data
+ # It has been changed to repetitive roundrobin
+ self.data = RepetitiveRoundRobinDataLoader(
+ nyu_loader, kitti_loader)
+ else:
+ self.data = DepthDataLoader(nyu_conf, mode, device=device).data
+
+class MixedNYUPlaces365(object):
+ def __init__(self, config, mode, device='cpu', **kwargs):
+ config = edict(config)
+ config.workers = config.workers // 2
+ self.config = config
+ nyu_conf = change_dataset(edict(config), 'nyu')
+ places365_conf = change_dataset(edict(config), 'places365')
+
+ # make nyu default for testing
+ self.config = config = nyu_conf
+ img_size = self.config.get("img_size", None)
+ img_size = img_size if self.config.get(
+ "do_input_resize", False) else None
+ if mode == 'train':
+ nyu_loader = DepthDataLoader(
+ nyu_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data
+ places365_loader = DepthDataLoader(
+ places365_conf, mode, device=device, transform=preprocessing_transforms(mode, size=img_size)).data
+ # It has been changed to repetitive roundrobin
+ self.data = RepetitiveRoundRobinDataLoader(
+ nyu_loader, places365_loader)
+ else:
+ self.data = DepthDataLoader(nyu_conf, mode, device=device).data
+
+def remove_leading_slash(s):
+ if s[0] == '/' or s[0] == '\\':
+ return s[1:]
+ return s
+
+
+class CachedReader:
+ def __init__(self, shared_dict=None):
+ if shared_dict:
+ self._cache = shared_dict
+ else:
+ self._cache = {}
+
+ def open(self, fpath):
+ im = self._cache.get(fpath, None)
+ if im is None:
+ im = self._cache[fpath] = Image.open(fpath)
+ return im
+
+
+class ImReader:
+ def __init__(self):
+ pass
+
+ # @cache
+ def open(self, fpath):
+ return Image.open(fpath)
+
+
+class DataLoadPreprocess(Dataset):
+ def __init__(self, config, mode, transform=None, is_for_online_eval=False, device="cpu", **kwargs):
+ self.config = config
+ if mode == 'online_eval':
+ with open(config.filenames_file_eval, 'r') as f:
+ self.filenames = f.readlines()
+ else:
+ with open(config.filenames_file, 'r') as f:
+ self.filenames = f.readlines()
+
+ self.device = torch.device(device)
+ self.mode = mode
+ self.transform = transform
+ self.to_tensor = ToTensor(mode)
+ self.is_for_online_eval = is_for_online_eval
+ if config.use_shared_dict:
+ self.reader = CachedReader(config.shared_dict)
+ else:
+ self.reader = ImReader()
+
+ if config.dataset == "places365" or config.inpaint_task_probability > 0:
+ places365_conf = change_dataset(edict(config), 'places365')
+            self.places365_data = self.data = Places365(
+                places365_conf.places365_root,
+                places365_conf.places365_depth_root,
+                places365_conf.places365_depth_masks_root,
+                randomize_masks=places365_conf.get("randomize_masks", True),
+                debug_mode=self.config.debug_mode)
+
+ if config.dataset == "marigold_nyu":
+            self.marigold_data = self.data = MarigoldNYU(
+                config.nyu_dir_root, config.marigold_depth_root,
+                debug_mode=self.config.debug_mode)
+ self.config.avoid_boundary = True
+
+ def postprocess(self, sample):
+ return sample
+
+ def __getitem__(self, idx):
+ sample_path = self.filenames[idx] if self.config.dataset not in ('places365', "marigold_nyu") else self.filenames[0]
+ focal = float(sample_path.split()[2])
+ sample = {}
+
+ if self.mode == 'train':
+ depth_mask = None
+ if self.config.dataset == 'kitti' and self.config.use_right and random.random() > 0.5:
+ image_path = os.path.join(
+ self.config.data_path, remove_leading_slash(sample_path.split()[3]))
+ depth_path = os.path.join(
+ self.config.gt_path, remove_leading_slash(sample_path.split()[4]))
+
+ image = self.reader.open(image_path)
+ depth_gt = self.reader.open(depth_path)
+ w, h = image.size
+
+ elif self.config.dataset == 'places365':
+ image, depth_gt, depth_mask, image_path, depth_path, _ = self.places365_data[idx]
+ h, w = image.shape[:2]
+
+ if image.ndim == 2:
+ image = image.reshape(image.shape[0], image.shape[1], 1)
+ image = np.repeat(image, 3, axis=-1)
+
+ elif self.config.dataset == 'marigold_nyu':
+ image, depth_gt, marigold_gt, image_path, depth_path = self.marigold_data[idx]
+
+ h, w = image.shape[:2]
+
+ if image.ndim == 2:
+ image = image.reshape(image.shape[0], image.shape[1], 1)
+ image = np.repeat(image, 3, axis=-1)
+
+ else:
+ image_path = os.path.join(
+ self.config.data_path, remove_leading_slash(sample_path.split()[0]))
+ depth_path = os.path.join(
+ self.config.gt_path, remove_leading_slash(sample_path.split()[1]))
+
+ image = self.reader.open(image_path)
+ depth_gt = self.reader.open(depth_path)
+ w, h = image.size
+
+ if self.config.inpaint_task_probability > 0:
+ _, _, depth_mask, _, _, _ = self.places365_data[idx]
+
+ if self.config.do_kb_crop:
+ height = image.height
+ width = image.width
+ top_margin = int(height - 352)
+ left_margin = int((width - 1216) / 2)
+ depth_gt = depth_gt.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+ image = image.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+
+ # Avoid blank boundaries due to pixel registration?
+ # Train images have white border. Test images have black border.
+ if self.config.dataset in ('nyu', 'marigold_nyu') and self.config.avoid_boundary:
+ # print("Avoiding Blank Boundaries!")
+ # We just crop and pad again with reflect padding to original size
+ # original_size = image.size
+ #crop_params = get_white_border(np.array(255*image, dtype=np.uint8))
+ # crop image down from 640x480 to 624x464
+ crop_params = CropParams(8, 472, 8, 632)
+
+ image = image[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right]
+ depth_gt = depth_gt[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right]
+
+ # Use reflect padding to fill the blank
+ #image = np.pad(image, ((crop_params.top, h - crop_params.bottom), (crop_params.left, w - crop_params.right), (0, 0)), mode='reflect')
+ #image = Image.fromarray(image)
+
+ #depth_gt = np.pad(depth_gt, ((crop_params.top, h - crop_params.bottom), (crop_params.left, w - crop_params.right), (0, 0)), 'constant', constant_values=0)
+ #depth_gt = Image.fromarray(depth_gt)
+
+ if self.config.dataset == "marigold_nyu":
+ marigold_gt = marigold_gt[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right]
+
+ if self.config.do_random_rotate and (self.config.aug) and self.config.dataset not in ('places365', "marigold_nyu"):
+ random_angle = (random.random() - 0.5) * 2 * self.config.degree
+ image = self.rotate_image(image, random_angle)
+ depth_gt = self.rotate_image(
+ depth_gt, random_angle, flag=Image.NEAREST)
+
+ if self.config.dataset not in ('places365', "marigold_nyu"):
+ image = np.asarray(image, dtype=np.float32) / 255.0
+ depth_gt = np.asarray(depth_gt, dtype=np.float32)
+ depth_gt = np.expand_dims(depth_gt, axis=2)
+
+ if self.config.dataset in ('nyu', 'marigold_nyu'):
+ depth_gt = depth_gt / 1000.0
+ elif self.config.dataset != 'places365':
+ depth_gt = depth_gt / 256.0
+
+ if self.config.aug and (self.config.random_crop) and self.config.dataset not in ('places365', "marigold_nyu"):
+ image, depth_gt = self.random_crop(
+ image, depth_gt, self.config.input_height, self.config.input_width)
+
+ if self.config.aug and self.config.random_translate and self.config.dataset not in ('places365', "marigold_nyu"):
+ # print("Random Translation!")
+ image, depth_gt = self.random_translate(image, depth_gt, self.config.max_translation)
+
+ mask = np.logical_and(depth_gt > self.config.min_depth,
+ depth_gt < self.config.max_depth).squeeze()[None, ...]
+
+ is_inpainting_sample = self.config.inpaint_task_probability > 0 and (torch.rand(1).item() < self.config.inpaint_task_probability)
+
+ def randomly_scale_depth(depth_to_scale):
+                # randomly rescale the depth so it stays within [min_depth, max_depth]
+ max_scale_factor = self.config.max_depth / depth_to_scale.max()
+ min_scale_factor = self.config.min_depth / depth_to_scale.min()
+
+ scale_factor = torch.rand(1).item() * (max_scale_factor - min_scale_factor) + min_scale_factor
+ scaled_depth = depth_to_scale * scale_factor
+
+ scaled_depth = scaled_depth.clip(self.config.min_depth, self.config.max_depth)
+
+ return scaled_depth
+
+            if self.config.dataset == "marigold_nyu":
+ marigold_mask = (marigold_gt > -1).squeeze()[None, ...]
+
+ if is_inpainting_sample and self.config.random_inpainting_scaling:
+ marigold_gt = randomly_scale_depth(marigold_gt)
+
+ marigold_gt[~marigold_mask[0]] = 0
+
+ depth_gt = marigold_gt
+ mask = marigold_mask
+
+ image, depth_gt, mask = self.train_preprocess(image, depth_gt, mask)
+
+ sample = {'image': image, 'depth': depth_gt, 'focal': focal,
+ 'mask': mask, **sample}
+
+ if self.config["depth_channel_mask_augment"]:
+ if self.config.dataset in ("marigold_nyu",):
+ if (not self.config.inpaint_task_probability > 0) and depth_mask is None:
+ depth_mask = np.zeros_like(depth_gt)
+ elif self.config.inpaint_task_probability > 0:
+ # we randomly mask with places365, or provide no sparse input at all
+ if is_inpainting_sample:
+ # upsample depth_mask to match depth_gt
+                        depth_mask = torch.nn.functional.interpolate(
+                            torch.from_numpy(depth_mask).permute(2, 0, 1).unsqueeze(0),
+                            size=depth_gt.shape[:2], mode='nearest'
+                        ).squeeze(0).permute(1, 2, 0).numpy()
+ else:
+ depth_mask = np.zeros_like(depth_gt)
+
+ sample["masked_depth"] = depth_gt * depth_mask
+
+ else:
+ if self.mode == 'online_eval':
+ data_path = self.config.data_path_eval
+ else:
+ data_path = self.config.data_path
+
+ image_path = os.path.join(
+ data_path, remove_leading_slash(sample_path.split()[0]))
+ image = np.asarray(self.reader.open(image_path),
+ dtype=np.float32) / 255.0
+
+ if self.mode == 'online_eval':
+ gt_path = self.config.gt_path_eval
+ depth_path = os.path.join(
+ gt_path, remove_leading_slash(sample_path.split()[1]))
+ has_valid_depth = False
+ try:
+ depth_gt = self.reader.open(depth_path)
+ has_valid_depth = True
+ except IOError:
+ depth_gt = False
+ # print('Missing gt for {}'.format(image_path))
+
+ if has_valid_depth:
+ depth_gt = np.asarray(depth_gt, dtype=np.float32)
+ depth_gt = np.expand_dims(depth_gt, axis=2)
+ if self.config.dataset == 'nyu':
+ depth_gt = depth_gt / 1000.0
+ elif self.config.dataset != 'places365':
+ depth_gt = depth_gt / 256.0
+
+ mask = np.logical_and(
+ depth_gt >= self.config.min_depth, depth_gt <= self.config.max_depth).squeeze()[None, ...]
+ else:
+ mask = False
+
+ if self.config.do_kb_crop:
+ height = image.shape[0]
+ width = image.shape[1]
+ top_margin = int(height - 352)
+ left_margin = int((width - 1216) / 2)
+ image = image[top_margin:top_margin + 352,
+ left_margin:left_margin + 1216, :]
+ if self.mode == 'online_eval' and has_valid_depth:
+ depth_gt = depth_gt[top_margin:top_margin +
+ 352, left_margin:left_margin + 1216, :]
+
+ if self.mode == 'online_eval':
+ sample = {'image': image, 'depth': depth_gt, 'focal': focal, 'has_valid_depth': has_valid_depth,
+ 'image_path': sample_path.split()[0], 'depth_path': sample_path.split()[1],
+ 'mask': mask}
+ else:
+ sample = {'image': image, 'focal': focal}
+
+ if (self.mode == 'train') or ('has_valid_depth' in sample and sample['has_valid_depth']):
+ if (self.config.dataset not in ('places365', "marigold_nyu")):
+ mask = np.logical_and(depth_gt > self.config.min_depth,
+ depth_gt < self.config.max_depth).squeeze()[None, ...]
+ sample['mask'] = mask
+
+ if self.transform:
+ sample = self.transform(sample)
+
+ sample = self.postprocess(sample)
+ sample['dataset'] = self.config.dataset
+
+ if self.config.dataset != 'places365':
+ sample = {**sample, 'image_path': sample_path.split()[0], 'depth_path': sample_path.split()[1]}
+ else:
+ sample = {**sample, 'image_path': image_path, 'depth_path': depth_path}
+
+ return sample
+
+ def rotate_image(self, image, angle, flag=Image.BILINEAR):
+ result = image.rotate(angle, resample=flag)
+ return result
+
+ def random_crop(self, img, depth, height, width):
+ assert img.shape[0] >= height
+ assert img.shape[1] >= width
+ assert img.shape[0] == depth.shape[0]
+ assert img.shape[1] == depth.shape[1]
+ x = random.randint(0, img.shape[1] - width)
+ y = random.randint(0, img.shape[0] - height)
+ img = img[y:y + height, x:x + width, :]
+ depth = depth[y:y + height, x:x + width, :]
+
+ return img, depth
+
+ def random_translate(self, img, depth, max_t=20):
+ assert img.shape[0] == depth.shape[0]
+ assert img.shape[1] == depth.shape[1]
+ p = self.config.translate_prob
+ do_translate = random.random()
+ if do_translate > p:
+ return img, depth
+ x = random.randint(-max_t, max_t)
+ y = random.randint(-max_t, max_t)
+ M = np.float32([[1, 0, x], [0, 1, y]])
+ # print(img.shape, depth.shape)
+ img = cv2.warpAffine(img, M, (img.shape[1], img.shape[0]))
+ depth = cv2.warpAffine(depth, M, (depth.shape[1], depth.shape[0]))
+ depth = depth.squeeze()[..., None] # add channel dim back. Affine warp removes it
+ # print("after", img.shape, depth.shape)
+ return img, depth
+
+ def train_preprocess(self, image, depth_gt, mask):
+ if self.config.aug:
+ # Random flipping
+ do_flip = random.random()
+ if do_flip > 0.5:
+ # image is H x W x 3
+ image = (image[:, ::-1, :]).copy()
+ # depth_gt is H x W x 1
+ depth_gt = (depth_gt[:, ::-1, :]).copy()
+                # mask is 1 x H x W
+ mask = (mask[:, :, ::-1]).copy()
+
+ # Random gamma, brightness, color augmentation
+ do_augment = random.random()
+ if do_augment > 0.5:
+ image = self.augment_image(image)
+
+ return image, depth_gt, mask
+
+ def augment_image(self, image):
+ # gamma augmentation
+ gamma = random.uniform(0.9, 1.1)
+ image_aug = image ** gamma
+
+ # brightness augmentation
+ if self.config.dataset == 'nyu':
+ brightness = random.uniform(0.75, 1.25)
+ else:
+ brightness = random.uniform(0.9, 1.1)
+ image_aug = image_aug * brightness
+
+ # color augmentation
+ colors = np.random.uniform(0.9, 1.1, size=3)
+ white = np.ones((image.shape[0], image.shape[1]))
+ color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
+ image_aug *= color_image
+ image_aug = np.clip(image_aug, 0, 1)
+
+ return image_aug
+
+ def __len__(self):
+ return len(self.data) if (self.config.dataset in ('places365', "marigold_nyu") and self.mode != 'online_eval') else len(self.filenames)
+
+
+class ToTensor(object):
+ def __init__(self, mode, do_normalize=False, size=None):
+ self.mode = mode
+ self.normalize = transforms.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) if do_normalize else nn.Identity()
+ self.size = size
+ if size is not None:
+ self.resize = transforms.Resize(size=size)
+ else:
+ self.resize = nn.Identity()
+
+ def __call__(self, sample):
+ image, focal = sample['image'], sample['focal']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ image = self.resize(image)
+
+ if self.mode == 'test':
+ return {'image': image, 'focal': focal}
+
+ depth = sample['depth']
+ if self.mode == 'train':
+ depth = self.to_tensor(depth)
+ return {**sample, 'image': image, 'depth': depth, 'focal': focal}
+ else:
+ has_valid_depth = sample['has_valid_depth']
+ image = self.resize(image)
+ return {**sample, 'image': image, 'depth': depth, 'focal': focal, 'has_valid_depth': has_valid_depth,
+ 'image_path': sample['image_path'], 'depth_path': sample['depth_path']}
+
+ def to_tensor(self, pic):
+ if not (_is_pil_image(pic) or _is_numpy_image(pic)):
+ raise TypeError(
+ 'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
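The round-robin mixing used by RepetitiveRoundRobinDataLoader (and hence by MixedNYUKITTI and MixedNYUPlaces365) can be sanity-checked in isolation; a minimal sketch, using plain strings in place of dataloaders:

```python
from zoedepth.data.data_mono import repetitive_roundrobin

# Shorter iterables are cycled from the start until the longest one is exhausted.
mixed = list(repetitive_roundrobin('ABC', 'D', 'EF'))
print(mixed)  # ['A', 'D', 'E', 'B', 'D', 'F', 'C', 'D', 'E']
```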
diff --git a/zoedepth/data/ddad.py b/zoedepth/data/ddad.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bd0492bdec767685d3a21992b4a26e62d002d97
--- /dev/null
+++ b/zoedepth/data/ddad.py
@@ -0,0 +1,117 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self, resize_shape):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+ self.resize = transforms.Resize(resize_shape)
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "ddad"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class DDAD(Dataset):
+ def __init__(self, data_dir_root, resize_shape):
+ import glob
+
+        # image paths are of the form <data_dir_root>/*_rgb.png, with matching depth maps in <data_dir_root>/*_depth.npy
+ self.image_files = glob.glob(os.path.join(data_dir_root, '*.png'))
+ self.depth_files = [r.replace("_rgb.png", "_depth.npy")
+ for r in self.image_files]
+ self.transform = ToTensor(resize_shape)
+
+ def __getitem__(self, idx):
+
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.load(depth_path) # meters
+
+ # depth[depth > 8] = -1
+ depth = depth[..., None]
+
+ sample = dict(image=image, depth=depth)
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_ddad_loader(data_dir_root, resize_shape, batch_size=1, **kwargs):
+ dataset = DDAD(data_dir_root, resize_shape)
+ return DataLoader(dataset, batch_size, **kwargs)
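data_mono.py builds this loader with resize_shape=(352, 1216); a minimal standalone sketch, where "datasets/ddad" is a placeholder path:

```python
from zoedepth.data.ddad import get_ddad_loader

loader = get_ddad_loader("datasets/ddad", resize_shape=(352, 1216),
                         batch_size=1, num_workers=1)
sample = next(iter(loader))
# ToTensor resizes only the image; the depth keeps its original resolution.
print(sample["image"].shape, sample["depth"].shape)
```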
diff --git a/zoedepth/data/diml_indoor_test.py b/zoedepth/data/diml_indoor_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..f720ad9aefaee78ef4ec363dfef0f82ace850a6d
--- /dev/null
+++ b/zoedepth/data/diml_indoor_test.py
@@ -0,0 +1,125 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+ self.resize = transforms.Resize((480, 640))
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "diml_indoor"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class DIML_Indoor(Dataset):
+ def __init__(self, data_dir_root):
+ import glob
+
+        # image paths are of the form <data_dir_root>/{HR, LR}/<scene>/{color, depth_filled}/*.png
+ self.image_files = glob.glob(os.path.join(
+ data_dir_root, "LR", '*', 'color', '*.png'))
+ self.depth_files = [r.replace("color", "depth_filled").replace(
+ "_c.png", "_depth_filled.png") for r in self.image_files]
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.asarray(Image.open(depth_path),
+ dtype='uint16') / 1000.0 # mm to meters
+
+ # print(np.shape(image))
+ # print(np.shape(depth))
+
+ # depth[depth > 8] = -1
+ depth = depth[..., None]
+
+ sample = dict(image=image, depth=depth)
+
+ # return sample
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_diml_indoor_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = DIML_Indoor(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
+
+# get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/HR")
+# get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/LR")
diff --git a/zoedepth/data/diml_outdoor_test.py b/zoedepth/data/diml_outdoor_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..8670b48f5febafb819dac22848ad79ccb5dd5ae4
--- /dev/null
+++ b/zoedepth/data/diml_outdoor_test.py
@@ -0,0 +1,114 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ return {'image': image, 'depth': depth, 'dataset': "diml_outdoor"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class DIML_Outdoor(Dataset):
+ def __init__(self, data_dir_root):
+ import glob
+
+        # image paths are of the form <data_dir_root>/<scene>/{outleft, depthmap}/*.png
+ self.image_files = glob.glob(os.path.join(
+ data_dir_root, "*", 'outleft', '*.png'))
+ self.depth_files = [r.replace("outleft", "depthmap")
+ for r in self.image_files]
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.asarray(Image.open(depth_path),
+ dtype='uint16') / 1000.0 # mm to meters
+
+ # depth[depth > 8] = -1
+ depth = depth[..., None]
+
+ sample = dict(image=image, depth=depth, dataset="diml_outdoor")
+
+ # return sample
+ return self.transform(sample)
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_diml_outdoor_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = DIML_Outdoor(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
+
+# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/HR")
+# get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/LR")
diff --git a/zoedepth/data/diode.py b/zoedepth/data/diode.py
new file mode 100644
index 0000000000000000000000000000000000000000..1510c87116b8f70ce2e1428873a8e4da042bee23
--- /dev/null
+++ b/zoedepth/data/diode.py
@@ -0,0 +1,125 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+ self.resize = transforms.Resize(480)
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "diode"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class DIODE(Dataset):
+ def __init__(self, data_dir_root):
+ import glob
+
+        # image paths are of the form <data_dir_root>/scene_#/scan_#/*.png
+ self.image_files = glob.glob(
+ os.path.join(data_dir_root, '*', '*', '*.png'))
+ self.depth_files = [r.replace(".png", "_depth.npy")
+ for r in self.image_files]
+ self.depth_mask_files = [
+ r.replace(".png", "_depth_mask.npy") for r in self.image_files]
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+ depth_mask_path = self.depth_mask_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.load(depth_path) # in meters
+ valid = np.load(depth_mask_path) # binary
+
+ # depth[depth > 8] = -1
+ # depth = depth[..., None]
+
+ sample = dict(image=image, depth=depth, valid=valid)
+
+ # return sample
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_diode_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = DIODE(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
+
+# get_diode_loader(data_dir_root="datasets/diode/val/outdoor")
diff --git a/zoedepth/data/hypersim.py b/zoedepth/data/hypersim.py
new file mode 100644
index 0000000000000000000000000000000000000000..4334198971830200f72ea2910d03f4c7d6a43334
--- /dev/null
+++ b/zoedepth/data/hypersim.py
@@ -0,0 +1,138 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import glob
+import os
+
+import h5py
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+def hypersim_distance_to_depth(npyDistance):
+ intWidth, intHeight, fltFocal = 1024, 768, 886.81
+
+ npyImageplaneX = np.linspace((-0.5 * intWidth) + 0.5, (0.5 * intWidth) - 0.5, intWidth).reshape(
+ 1, intWidth).repeat(intHeight, 0).astype(np.float32)[:, :, None]
+ npyImageplaneY = np.linspace((-0.5 * intHeight) + 0.5, (0.5 * intHeight) - 0.5,
+ intHeight).reshape(intHeight, 1).repeat(intWidth, 1).astype(np.float32)[:, :, None]
+ npyImageplaneZ = np.full([intHeight, intWidth, 1], fltFocal, np.float32)
+ npyImageplane = np.concatenate(
+ [npyImageplaneX, npyImageplaneY, npyImageplaneZ], 2)
+
+ npyDepth = npyDistance / np.linalg.norm(npyImageplane, 2, 2) * fltFocal
+ return npyDepth
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x: x
+ self.resize = transforms.Resize((480, 640))
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "hypersim"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class HyperSim(Dataset):
+ def __init__(self, data_dir_root):
+        # image paths are of the form <data_dir_root>/<scene>/images/scene_cam_#_final_preview/*.tonemap.jpg
+        # depth paths are of the form <data_dir_root>/<scene>/images/scene_cam_#_geometry_hdf5/*.depth_meters.hdf5
+ self.image_files = glob.glob(os.path.join(
+ data_dir_root, '*', 'images', 'scene_cam_*_final_preview', '*.tonemap.jpg'))
+ self.depth_files = [r.replace("_final_preview", "_geometry_hdf5").replace(
+ ".tonemap.jpg", ".depth_meters.hdf5") for r in self.image_files]
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+
+ # depth from hdf5
+ depth_fd = h5py.File(depth_path, "r")
+ # in meters (Euclidean distance)
+ distance_meters = np.array(depth_fd['dataset'])
+ depth = hypersim_distance_to_depth(
+ distance_meters) # in meters (planar depth)
+
+ # depth[depth > 8] = -1
+ depth = depth[..., None]
+
+ sample = dict(image=image, depth=depth)
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_hypersim_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = HyperSim(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
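hypersim_distance_to_depth converts Hypersim's per-pixel ray distances into planar depth by projecting each ray onto the optical axis (depth = distance * focal / ||(x, y, focal)||). A small illustrative sketch on a constant 5 m distance map:

```python
import numpy as np
from zoedepth.data.hypersim import hypersim_distance_to_depth

distance = np.full((768, 1024), 5.0, dtype=np.float32)  # ray distance in meters
depth = hypersim_distance_to_depth(distance)
print(depth[384, 512])  # ~5.0 near the principal point
print(depth[0, 0])      # ~4.06 in the corner, where rays are longer than the planar depth
```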
diff --git a/zoedepth/data/ibims.py b/zoedepth/data/ibims.py
new file mode 100644
index 0000000000000000000000000000000000000000..b66abfabcf4cfc617d4a60ec818780c3548d9920
--- /dev/null
+++ b/zoedepth/data/ibims.py
@@ -0,0 +1,81 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms as T
+
+
+class iBims(Dataset):
+ def __init__(self, config):
+ root_folder = config.ibims_root
+ with open(os.path.join(root_folder, "imagelist.txt"), 'r') as f:
+ imglist = f.read().split()
+
+ samples = []
+ for basename in imglist:
+ img_path = os.path.join(root_folder, 'rgb', basename + ".png")
+ depth_path = os.path.join(root_folder, 'depth', basename + ".png")
+ valid_mask_path = os.path.join(
+ root_folder, 'mask_invalid', basename+".png")
+ transp_mask_path = os.path.join(
+ root_folder, 'mask_transp', basename+".png")
+
+ samples.append(
+ (img_path, depth_path, valid_mask_path, transp_mask_path))
+
+ self.samples = samples
+ # self.normalize = T.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+
+ def __getitem__(self, idx):
+ img_path, depth_path, valid_mask_path, transp_mask_path = self.samples[idx]
+
+ img = np.asarray(Image.open(img_path), dtype=np.float32) / 255.0
+ depth = np.asarray(Image.open(depth_path),
+ dtype=np.uint16).astype('float')*50.0/65535
+
+ mask_valid = np.asarray(Image.open(valid_mask_path))
+ mask_transp = np.asarray(Image.open(transp_mask_path))
+
+ # depth = depth * mask_valid * mask_transp
+ depth = np.where(mask_valid * mask_transp, depth, -1)
+
+ img = torch.from_numpy(img).permute(2, 0, 1)
+ img = self.normalize(img)
+ depth = torch.from_numpy(depth).unsqueeze(0)
+ return dict(image=img, depth=depth, image_path=img_path, depth_path=depth_path, dataset='ibims')
+
+ def __len__(self):
+ return len(self.samples)
+
+
+def get_ibims_loader(config, batch_size=1, **kwargs):
+ dataloader = DataLoader(iBims(config), batch_size=batch_size, **kwargs)
+ return dataloader
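A minimal sketch of standalone use; iBims only reads config.ibims_root, so a simple namespace stands in for the full config, and "datasets/ibims" is a placeholder path:

```python
from types import SimpleNamespace

from zoedepth.data.ibims import get_ibims_loader

config = SimpleNamespace(ibims_root="datasets/ibims")
loader = get_ibims_loader(config, batch_size=1, num_workers=1)
sample = next(iter(loader))
print(sample["image"].shape, sample["depth"].shape)  # [1, 3, H, W], [1, 1, H, W]
```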
diff --git a/zoedepth/data/marigold_nyu.py b/zoedepth/data/marigold_nyu.py
new file mode 100644
index 0000000000000000000000000000000000000000..f33d6a02d33f8bea9d8bb1526d6a1fae8de4db1c
--- /dev/null
+++ b/zoedepth/data/marigold_nyu.py
@@ -0,0 +1,112 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+from random import choice
+
+
+class ToTensor(object):
+ def __init__(self):
+ self.normalize = transforms.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ #self.normalize = lambda x : x
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ return {'image': image, 'depth': depth, 'dataset': "marigold_nyu"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class MarigoldNYU(Dataset):
+ def __init__(self, nyu_dir_root, marigold_depth_root, debug_mode=False):
+ import glob
+ import os
+ import itertools
+
+ categories = os.listdir(os.path.join(nyu_dir_root))
+ if debug_mode:
+ categories = categories[:2]
+
+ self.image_files = list(itertools.chain(*[glob.glob(os.path.join(nyu_dir_root, c, "rgb_*.jpg")) for c in categories]))
+ self.nyu_depth_files = [os.path.join(nyu_dir_root, os.path.join(*r.split("/")[-2:])).replace("jpg", "png").replace("rgb", "sync_depth") for r in self.image_files]
+ self.marigold_depth_files = [os.path.join(marigold_depth_root, os.path.join(*r.split("/")[-2:])).replace("jpg", "npy") for r in self.image_files]
+
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ nyu_depth_path = self.nyu_depth_files[idx]
+ marigold_depth_path = self.marigold_depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ nyu_depth = np.asarray(Image.open(nyu_depth_path), dtype=np.float32)
+ marigold_depth = np.load(marigold_depth_path)
+
+ return image, nyu_depth[..., np.newaxis], marigold_depth[..., np.newaxis], image_path, nyu_depth_path
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_marigold_nyu_loader(nyu_dir_root, marigold_depth_root, batch_size=1, **kwargs):
+ dataset = MarigoldNYU(nyu_dir_root, marigold_depth_root)
+ return DataLoader(dataset, batch_size, **kwargs)
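Unlike the evaluation datasets above, MarigoldNYU.__getitem__ returns raw NumPy arrays (its ToTensor transform is constructed but never applied); DataLoadPreprocess in data_mono.py handles cropping, scaling, and tensor conversion. A minimal sketch, with placeholder paths:

```python
from zoedepth.data.marigold_nyu import MarigoldNYU

ds = MarigoldNYU("datasets/nyu/train", "outputs/marigold_depth")
image, nyu_depth, marigold_depth, image_path, nyu_depth_path = ds[0]
# image is H x W x 3 float32 in [0, 1]; a trailing channel dimension is
# appended to both depth maps.
print(image.shape, nyu_depth.shape, marigold_depth.shape)
```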
diff --git a/zoedepth/data/places365.py b/zoedepth/data/places365.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a4f4ae20b6d2dc980e4fbf00358133f8378ceb6
--- /dev/null
+++ b/zoedepth/data/places365.py
@@ -0,0 +1,118 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+from random import choice
+
+
+class ToTensor(object):
+ def __init__(self):
+ self.normalize = transforms.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ #self.normalize = lambda x : x
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ return {'image': image, 'depth': depth, 'dataset': "places365"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class Places365(Dataset):
+ def __init__(self, data_dir_root, depth_dir_root, depth_masks_dir_root, randomize_masks=True, debug_mode=False):
+ import glob
+ import os
+ import itertools
+
+ categories = os.listdir(os.path.join(data_dir_root))
+ if debug_mode:
+ categories = categories[:2]
+
+ self.image_files = list(itertools.chain(*[glob.glob(os.path.join(data_dir_root, c, "*.jpg")) for c in categories]))
+ self.depth_files = [os.path.join(depth_dir_root, os.path.join(*r.split("/")[-2:])).replace("jpg", "npy") for r in self.image_files]
+ self.depth_masks_files = [os.path.join(depth_masks_dir_root, os.path.join(*r.split("/")[-2:])).replace("jpg", "npy") for r in self.image_files]
+
+ self.randomize_masks = randomize_masks
+
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ if not self.randomize_masks:
+ depth_masks_path = self.depth_masks_files[idx]
+ else:
+ depth_masks_path = choice(self.depth_masks_files)
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.load(depth_path)
+ depth_mask = 1 - np.load(depth_masks_path)
+
+ return image, depth[..., np.newaxis], depth_mask[..., np.newaxis], image_path, depth_path, depth_masks_path
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_places365_loader(data_dir_root, depth_dir_root, depth_masks_dir_root, batch_size=1, **kwargs):
+ dataset = Places365(data_dir_root, depth_dir_root, depth_masks_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
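Places365 likewise returns raw arrays and leaves tensor conversion to DataLoadPreprocess; the stored mask is inverted on load (1 - mask), and data_mono.py multiplies it with the depth to form "masked_depth" when depth_channel_mask_augment is set. A minimal sketch, with placeholder paths:

```python
from zoedepth.data.places365 import Places365

ds = Places365("datasets/places365/rgb",
               "datasets/places365/depth",
               "datasets/places365/depth_masks",
               randomize_masks=True)
image, depth, depth_mask, image_path, depth_path, mask_path = ds[0]
# image is H x W x 3 float32 in [0, 1]; a trailing channel dimension is
# appended to depth and depth_mask.
print(image.shape, depth.shape, depth_mask.shape)
```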
diff --git a/zoedepth/data/preprocess.py b/zoedepth/data/preprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..e08cc309dc823ae6efd7cda8db9eb37130dc5499
--- /dev/null
+++ b/zoedepth/data/preprocess.py
@@ -0,0 +1,154 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import numpy as np
+from dataclasses import dataclass
+from typing import Tuple, List
+
+# dataclass to store the crop parameters
+@dataclass
+class CropParams:
+ top: int
+ bottom: int
+ left: int
+ right: int
+
+
+
+def get_border_params(rgb_image, tolerance=0.1, cut_off=20, value=0, level_diff_threshold=5, channel_axis=-1, min_border=5) -> CropParams:
+ gray_image = np.mean(rgb_image, axis=channel_axis)
+ h, w = gray_image.shape
+
+
+ def num_value_pixels(arr):
+ return np.sum(np.abs(arr - value) < level_diff_threshold)
+
+ def is_above_tolerance(arr, total_pixels):
+ return (num_value_pixels(arr) / total_pixels) > tolerance
+
+ # Crop the top border until the fraction of value pixels falls below the tolerance
+ top = min_border
+ while is_above_tolerance(gray_image[top, :], w) and top < h-1:
+ top += 1
+ if top > cut_off:
+ break
+
+ # Crop the bottom border until the fraction of value pixels falls below the tolerance
+ bottom = h - min_border
+ while is_above_tolerance(gray_image[bottom, :], w) and bottom > 0:
+ bottom -= 1
+ if h - bottom > cut_off:
+ break
+
+ # Crop the left border until the fraction of value pixels falls below the tolerance
+ left = min_border
+ while is_above_tolerance(gray_image[:, left], h) and left < w-1:
+ left += 1
+ if left > cut_off:
+ break
+
+ # Crop the right border until the fraction of value pixels falls below the tolerance
+ right = w - min_border
+ while is_above_tolerance(gray_image[:, right], h) and right > 0:
+ right -= 1
+ if w - right > cut_off:
+ break
+
+
+ return CropParams(top, bottom, left, right)
+
+
+def get_white_border(rgb_image, value=255, **kwargs) -> CropParams:
+ """Crops the white border of the RGB.
+
+ Args:
+ rgb: RGB image, shape (H, W, 3).
+ Returns:
+ Crop parameters.
+ """
+ if value == 255:
+ # assert range of values in rgb image is [0, 255]
+ assert np.max(rgb_image) <= 255 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 255]."
+ assert rgb_image.max() > 1, "RGB image values appear to be in [0, 1]; expected range [0, 255]."
+ elif value == 1:
+ # assert range of values in rgb image is [0, 1]
+ assert np.max(rgb_image) <= 1 and np.min(rgb_image) >= 0, "RGB image values are not in range [0, 1]."
+
+ return get_border_params(rgb_image, value=value, **kwargs)
+
+def get_black_border(rgb_image, **kwargs) -> CropParams:
+ """Crops the black border of the RGB.
+
+ Args:
+ rgb: RGB image, shape (H, W, 3).
+
+ Returns:
+ Crop parameters.
+ """
+
+ return get_border_params(rgb_image, value=0, **kwargs)
+
+def crop_image(image: np.ndarray, crop_params: CropParams) -> np.ndarray:
+ """Crops the image according to the crop parameters.
+
+ Args:
+ image: RGB or depth image, shape (H, W, 3) or (H, W).
+ crop_params: Crop parameters.
+
+ Returns:
+ Cropped image.
+ """
+ return image[crop_params.top:crop_params.bottom, crop_params.left:crop_params.right]
+
+def crop_images(*images: np.ndarray, crop_params: CropParams) -> Tuple[np.ndarray]:
+ """Crops the images according to the crop parameters.
+
+ Args:
+ images: RGB or depth images, shape (H, W, 3) or (H, W).
+ crop_params: Crop parameters.
+
+ Returns:
+ Cropped images.
+ """
+ return tuple(crop_image(image, crop_params) for image in images)
+
+def crop_black_or_white_border(rgb_image, *other_images: np.ndarray, tolerance=0.1, cut_off=20, level_diff_threshold=5) -> Tuple[np.ndarray]:
+ """Crops the white and black border of the RGB and depth images.
+
+ Args:
+ rgb: RGB image, shape (H, W, 3). This image is used to determine the border.
+ other_images: The other images to crop according to the border of the RGB image.
+ Returns:
+ Cropped RGB and other images.
+ """
+ # crop black border
+ crop_params = get_black_border(rgb_image, tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold)
+ cropped_images = crop_images(rgb_image, *other_images, crop_params=crop_params)
+
+ # crop white border
+ crop_params = get_white_border(cropped_images[0], tolerance=tolerance, cut_off=cut_off, level_diff_threshold=level_diff_threshold)
+ cropped_images = crop_images(*cropped_images, crop_params=crop_params)
+
+ return cropped_images
+
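+
+if __name__ == "__main__":
+    # Minimal sketch on synthetic data (illustration only): the RGB image alone
+    # determines the border, and every other image passed in is cropped with the
+    # same CropParams so pixel alignment between modalities is preserved.
+    rgb = np.random.randint(0, 256, size=(480, 640, 3)).astype(np.float32)
+    rgb[:30] = 0  # fake black band at the top
+    depth = np.random.rand(480, 640).astype(np.float32)
+    rgb_cropped, depth_cropped = crop_black_or_white_border(rgb, depth)
+    print(rgb_cropped.shape, depth_cropped.shape)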
\ No newline at end of file
diff --git a/zoedepth/data/sun_rgbd_loader.py b/zoedepth/data/sun_rgbd_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e2bdb9aefe68ca4439f41eff3bba722c49fb976
--- /dev/null
+++ b/zoedepth/data/sun_rgbd_loader.py
@@ -0,0 +1,106 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x : x
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ return {'image': image, 'depth': depth, 'dataset': "sunrgbd"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class SunRGBD(Dataset):
+ def __init__(self, data_dir_root):
+ # test_file_dirs = loadmat(train_test_file)['alltest'].squeeze()
+ # all_test = [t[0].replace("/n/fs/sun3d/data/", "") for t in test_file_dirs]
+ # self.all_test = [os.path.join(data_dir_root, t) for t in all_test]
+ import glob
+ self.image_files = glob.glob(
+ os.path.join(data_dir_root, 'rgb', 'rgb', '*'))
+ self.depth_files = [
+ r.replace("rgb/rgb", "gt/gt").replace("jpg", "png") for r in self.image_files]
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
+ depth = np.asarray(Image.open(depth_path), dtype='uint16') / 1000.0
+ depth[depth > 8] = -1
+ depth = depth[..., None]
+ return self.transform(dict(image=image, depth=depth))
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_sunrgbd_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = SunRGBD(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
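+
+
+if __name__ == "__main__":
+    # Usage sketch (illustration only); the dataset root below is a placeholder.
+    # __getitem__ scales depth by 1/1000 (millimeters to meters) and marks values
+    # above 8 m as invalid (-1).
+    loader = get_sunrgbd_loader(data_dir_root="/path/to/sunrgbd", batch_size=1)
+    print("Total files", len(loader.dataset))
+    for i, sample in enumerate(loader):
+        print(sample["image"].shape, sample["depth"].shape)
+        if i > 5:
+            break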
diff --git a/zoedepth/data/transforms.py b/zoedepth/data/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..374416dff24fb4fd55598f3946d6d6b091ddefc9
--- /dev/null
+++ b/zoedepth/data/transforms.py
@@ -0,0 +1,481 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import math
+import random
+
+import cv2
+import numpy as np
+
+
+class RandomFliplr(object):
+ """Horizontal flip of the sample with given probability.
+ """
+
+ def __init__(self, probability=0.5):
+ """Init.
+
+ Args:
+ probability (float, optional): Flip probability. Defaults to 0.5.
+ """
+ self.__probability = probability
+
+ def __call__(self, sample):
+ prob = random.random()
+
+ if prob < self.__probability:
+ for k, v in sample.items():
+ if len(v.shape) >= 2:
+ sample[k] = np.fliplr(v).copy()
+
+ return sample
+
+
+def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
+ """Rezise the sample to ensure the given size. Keeps aspect ratio.
+
+ Args:
+ sample (dict): sample
+ size (tuple): image size
+
+ Returns:
+ tuple: new size
+ """
+ shape = list(sample["disparity"].shape)
+
+ if shape[0] >= size[0] and shape[1] >= size[1]:
+ return sample
+
+ scale = [0, 0]
+ scale[0] = size[0] / shape[0]
+ scale[1] = size[1] / shape[1]
+
+ scale = max(scale)
+
+ shape[0] = math.ceil(scale * shape[0])
+ shape[1] = math.ceil(scale * shape[1])
+
+ # resize
+ sample["image"] = cv2.resize(
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
+ )
+
+ sample["disparity"] = cv2.resize(
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
+ )
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ tuple(shape[::-1]),
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return tuple(shape)
+
+
+class RandomCrop(object):
+ """Get a random crop of the sample with the given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_if_needed=False,
+ image_interpolation_method=cv2.INTER_AREA,
+ ):
+ """Init.
+
+ Args:
+ width (int): output width
+ height (int): output height
+ resize_if_needed (bool, optional): If True, sample might be upsampled to ensure
+ that a crop of size (width, height) is possible. Defaults to False.
+ """
+ self.__size = (height, width)
+ self.__resize_if_needed = resize_if_needed
+ self.__image_interpolation_method = image_interpolation_method
+
+ def __call__(self, sample):
+
+ shape = sample["disparity"].shape
+
+ if self.__size[0] > shape[0] or self.__size[1] > shape[1]:
+ if self.__resize_if_needed:
+ shape = apply_min_size(
+ sample, self.__size, self.__image_interpolation_method
+ )
+ else:
+ raise Exception(
+ "Output size {} bigger than input size {}.".format(
+ self.__size, shape
+ )
+ )
+
+ offset = (
+ np.random.randint(shape[0] - self.__size[0] + 1),
+ np.random.randint(shape[1] - self.__size[1] + 1),
+ )
+
+ for k, v in sample.items():
+ if k == "code" or k == "basis":
+ continue
+
+ if len(sample[k].shape) >= 2:
+ sample[k] = v[
+ offset[0]: offset[0] + self.__size[0],
+ offset[1]: offset[1] + self.__size[1],
+ ]
+
+ return sample
+
+
+class Resize(object):
+ """Resize sample to given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_target=True,
+ keep_aspect_ratio=False,
+ ensure_multiple_of=1,
+ resize_method="lower_bound",
+ image_interpolation_method=cv2.INTER_AREA,
+ letter_box=False,
+ ):
+ """Init.
+
+ Args:
+ width (int): desired output width
+ height (int): desired output height
+ resize_target (bool, optional):
+ True: Resize the full sample (image, mask, target).
+ False: Resize image only.
+ Defaults to True.
+ keep_aspect_ratio (bool, optional):
+ True: Keep the aspect ratio of the input sample.
+ Output sample might not have the given width and height, and
+ resize behaviour depends on the parameter 'resize_method'.
+ Defaults to False.
+ ensure_multiple_of (int, optional):
+ Output width and height are constrained to be a multiple of this parameter.
+ Defaults to 1.
+ resize_method (str, optional):
+ "lower_bound": Output will be at least as large as the given size.
+ "upper_bound": Output will be at most as large as the given size. (Output size might be smaller than the given size.)
+ "minimal": Scale as little as possible. (Output size might be smaller than the given size.)
+ Defaults to "lower_bound".
+ """
+ self.__width = width
+ self.__height = height
+
+ self.__resize_target = resize_target
+ self.__keep_aspect_ratio = keep_aspect_ratio
+ self.__multiple_of = ensure_multiple_of
+ self.__resize_method = resize_method
+ self.__image_interpolation_method = image_interpolation_method
+ self.__letter_box = letter_box
+
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if max_val is not None and y > max_val:
+ y = (np.floor(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ if y < min_val:
+ y = (np.ceil(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ return y
+
+ def get_size(self, width, height):
+ # determine new height and width
+ scale_height = self.__height / height
+ scale_width = self.__width / width
+
+ if self.__keep_aspect_ratio:
+ if self.__resize_method == "lower_bound":
+ # scale such that output size is lower bound
+ if scale_width > scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "upper_bound":
+ # scale such that output size is upper bound
+ if scale_width < scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "minimal":
+ # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented"
+ )
+
+ if self.__resize_method == "lower_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, min_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, min_val=self.__width
+ )
+ elif self.__resize_method == "upper_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, max_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, max_val=self.__width
+ )
+ elif self.__resize_method == "minimal":
+ new_height = self.constrain_to_multiple_of(scale_height * height)
+ new_width = self.constrain_to_multiple_of(scale_width * width)
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented")
+
+ return (new_width, new_height)
+
+ def make_letter_box(self, sample):
+ top = bottom = (self.__height - sample.shape[0]) // 2
+ left = right = (self.__width - sample.shape[1]) // 2
+ sample = cv2.copyMakeBorder(
+ sample, top, bottom, left, right, cv2.BORDER_CONSTANT, None, 0)
+ return sample
+
+ def __call__(self, sample):
+ width, height = self.get_size(
+ sample["image"].shape[1], sample["image"].shape[0]
+ )
+
+ # resize sample
+ sample["image"] = cv2.resize(
+ sample["image"],
+ (width, height),
+ interpolation=self.__image_interpolation_method,
+ )
+
+ if self.__letter_box:
+ sample["image"] = self.make_letter_box(sample["image"])
+
+ if self.__resize_target:
+ if "disparity" in sample:
+ sample["disparity"] = cv2.resize(
+ sample["disparity"],
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+
+ if self.__letter_box:
+ sample["disparity"] = self.make_letter_box(
+ sample["disparity"])
+
+ if "depth" in sample:
+ sample["depth"] = cv2.resize(
+ sample["depth"], (width,
+ height), interpolation=cv2.INTER_NEAREST
+ )
+
+ if self.__letter_box:
+ sample["depth"] = self.make_letter_box(sample["depth"])
+
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+
+ if self.__letter_box:
+ sample["mask"] = self.make_letter_box(sample["mask"])
+
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return sample
+
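+# Worked example (illustrative): with keep_aspect_ratio=True, ensure_multiple_of=14
+# and resize_method="lower_bound", a 640x480 input is scaled up so its short side
+# reaches the 518 lower bound, and both sides are rounded to multiples of 14:
+#
+#   resize = Resize(518, 518, keep_aspect_ratio=True, ensure_multiple_of=14,
+#                   resize_method="lower_bound")
+#   resize.get_size(640, 480)   # -> (686, 518), i.e. (new_width, new_height)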
+
+class ResizeFixed(object):
+ def __init__(self, size):
+ self.__size = size
+
+ def __call__(self, sample):
+ sample["image"] = cv2.resize(
+ sample["image"], self.__size[::-1], interpolation=cv2.INTER_LINEAR
+ )
+
+ sample["disparity"] = cv2.resize(
+ sample["disparity"], self.__size[::-
+ 1], interpolation=cv2.INTER_NEAREST
+ )
+
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ self.__size[::-1],
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return sample
+
+
+class Rescale(object):
+ """Rescale target values to the interval [0, max_val].
+ If input is constant, values are set to max_val / 2.
+ """
+
+ def __init__(self, max_val=1.0, use_mask=True):
+ """Init.
+
+ Args:
+ max_val (float, optional): Max output value. Defaults to 1.0.
+ use_mask (bool, optional): Only operate on valid pixels (mask == True). Defaults to True.
+ """
+ self.__max_val = max_val
+ self.__use_mask = use_mask
+
+ def __call__(self, sample):
+ disp = sample["disparity"]
+
+ if self.__use_mask:
+ mask = sample["mask"]
+ else:
+ mask = np.ones_like(disp, dtype=bool)  # np.bool was removed in NumPy 1.24+
+
+ if np.sum(mask) == 0:
+ return sample
+
+ min_val = np.min(disp[mask])
+ max_val = np.max(disp[mask])
+
+ if max_val > min_val:
+ sample["disparity"][mask] = (
+ (disp[mask] - min_val) / (max_val - min_val) * self.__max_val
+ )
+ else:
+ sample["disparity"][mask] = np.ones_like(
+ disp[mask]) * self.__max_val / 2.0
+
+ return sample
+
+
+# mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
+class NormalizeImage(object):
+ """Normlize image by given mean and std.
+ """
+
+ def __init__(self, mean, std):
+ self.__mean = mean
+ self.__std = std
+
+ def __call__(self, sample):
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
+
+ return sample
+
+
+class DepthToDisparity(object):
+ """Convert depth to disparity. Removes depth from sample.
+ """
+
+ def __init__(self, eps=1e-4):
+ self.__eps = eps
+
+ def __call__(self, sample):
+ assert "depth" in sample
+
+ sample["mask"][sample["depth"] < self.__eps] = False
+
+ sample["disparity"] = np.zeros_like(sample["depth"])
+ sample["disparity"][sample["depth"] >= self.__eps] = (
+ 1.0 / sample["depth"][sample["depth"] >= self.__eps]
+ )
+
+ del sample["depth"]
+
+ return sample
+
+
+class DisparityToDepth(object):
+ """Convert disparity to depth. Removes disparity from sample.
+ """
+
+ def __init__(self, eps=1e-4):
+ self.__eps = eps
+
+ def __call__(self, sample):
+ assert "disparity" in sample
+
+ disp = np.abs(sample["disparity"])
+ sample["mask"][disp < self.__eps] = False
+
+ # print(sample["disparity"])
+ # print(sample["mask"].sum())
+ # exit()
+
+ sample["depth"] = np.zeros_like(disp)
+ sample["depth"][disp >= self.__eps] = (
+ 1.0 / disp[disp >= self.__eps]
+ )
+
+ del sample["disparity"]
+
+ return sample
+
+
+class PrepareForNet(object):
+ """Prepare sample for usage as network input.
+ """
+
+ def __init__(self):
+ pass
+
+ def __call__(self, sample):
+ image = np.transpose(sample["image"], (2, 0, 1))
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
+
+ if "mask" in sample:
+ sample["mask"] = sample["mask"].astype(np.float32)
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
+
+ if "disparity" in sample:
+ disparity = sample["disparity"].astype(np.float32)
+ sample["disparity"] = np.ascontiguousarray(disparity)
+
+ if "depth" in sample:
+ depth = sample["depth"].astype(np.float32)
+ sample["depth"] = np.ascontiguousarray(depth)
+
+ return sample
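+
+
+if __name__ == "__main__":
+    # Minimal sketch on a synthetic sample (illustration only). The transforms in
+    # this module operate on dicts carrying "image", "disparity" and "mask" entries;
+    # the values and crop size below are arbitrary.
+    sample = {
+        "image": np.random.rand(480, 640, 3).astype(np.float32),
+        "disparity": np.random.rand(480, 640).astype(np.float32),
+        "mask": np.ones((480, 640), dtype=bool),
+    }
+    for t in [RandomCrop(512, 384, resize_if_needed=True), Rescale(), PrepareForNet()]:
+        sample = t(sample)
+    print(sample["image"].shape, sample["disparity"].shape, sample["mask"].dtype)
+    # -> (3, 384, 512) (384, 512) float32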
diff --git a/zoedepth/data/vkitti.py b/zoedepth/data/vkitti.py
new file mode 100644
index 0000000000000000000000000000000000000000..72a2e5a8346f6e630ede0e28d6959725af8d7e72
--- /dev/null
+++ b/zoedepth/data/vkitti.py
@@ -0,0 +1,151 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms
+import os
+
+from PIL import Image
+import numpy as np
+import cv2
+
+
+class ToTensor(object):
+ def __init__(self):
+ self.normalize = transforms.Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ # self.resize = transforms.Resize((375, 1242))
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ # image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "vkitti"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class VKITTI(Dataset):
+ def __init__(self, data_dir_root, do_kb_crop=True):
+ import glob
+ # image paths are of the form <data_dir_root>/{HR, LR}/<scene>/{color, depth_filled}/*.png
+ self.image_files = glob.glob(os.path.join(
+ data_dir_root, "test_color", '*.png'))
+ self.depth_files = [r.replace("test_color", "test_depth")
+ for r in self.image_files]
+ self.do_kb_crop = True
+ self.transform = ToTensor()
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = Image.open(image_path)
+ depth = Image.open(depth_path)
+ depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR |
+ cv2.IMREAD_ANYDEPTH)
+ print("dpeth min max", depth.min(), depth.max())
+
+ # print(np.shape(image))
+ # print(np.shape(depth))
+
+ # depth[depth > 8] = -1
+
+ if self.do_kb_crop and False:
+ height = image.height
+ width = image.width
+ top_margin = int(height - 352)
+ left_margin = int((width - 1216) / 2)
+ depth = depth.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+ image = image.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+ # uv = uv[:, top_margin:top_margin + 352, left_margin:left_margin + 1216]
+
+ image = np.asarray(image, dtype=np.float32) / 255.0
+ # depth = np.asarray(depth, dtype=np.uint16) /1.
+ depth = depth[..., None]
+ sample = dict(image=image, depth=depth)
+
+ # return sample
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_vkitti_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = VKITTI(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
+
+
+if __name__ == "__main__":
+ loader = get_vkitti_loader(
+ data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti_test")
+ print("Total files", len(loader.dataset))
+ for i, sample in enumerate(loader):
+ print(sample["image"].shape)
+ print(sample["depth"].shape)
+ print(sample["dataset"])
+ print(sample['depth'].min(), sample['depth'].max())
+ if i > 5:
+ break
diff --git a/zoedepth/data/vkitti2.py b/zoedepth/data/vkitti2.py
new file mode 100644
index 0000000000000000000000000000000000000000..9bcfb0414b7f3f21859f30ae34bd71689516a3e7
--- /dev/null
+++ b/zoedepth/data/vkitti2.py
@@ -0,0 +1,187 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+
+import cv2
+import numpy as np
+import torch
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from torchvision import transforms
+
+
+class ToTensor(object):
+ def __init__(self):
+ # self.normalize = transforms.Normalize(
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.normalize = lambda x: x
+ # self.resize = transforms.Resize((375, 1242))
+
+ def __call__(self, sample):
+ image, depth = sample['image'], sample['depth']
+
+ image = self.to_tensor(image)
+ image = self.normalize(image)
+ depth = self.to_tensor(depth)
+
+ # image = self.resize(image)
+
+ return {'image': image, 'depth': depth, 'dataset': "vkitti"}
+
+ def to_tensor(self, pic):
+
+ if isinstance(pic, np.ndarray):
+ img = torch.from_numpy(pic.transpose((2, 0, 1)))
+ return img
+
+ # # handle PIL Image
+ if pic.mode == 'I':
+ img = torch.from_numpy(np.array(pic, np.int32, copy=False))
+ elif pic.mode == 'I;16':
+ img = torch.from_numpy(np.array(pic, np.int16, copy=False))
+ else:
+ img = torch.ByteTensor(
+ torch.ByteStorage.from_buffer(pic.tobytes()))
+ # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
+ if pic.mode == 'YCbCr':
+ nchannel = 3
+ elif pic.mode == 'I;16':
+ nchannel = 1
+ else:
+ nchannel = len(pic.mode)
+ img = img.view(pic.size[1], pic.size[0], nchannel)
+
+ img = img.transpose(0, 1).transpose(0, 2).contiguous()
+ if isinstance(img, torch.ByteTensor):
+ return img.float()
+ else:
+ return img
+
+
+class VKITTI2(Dataset):
+ def __init__(self, data_dir_root, do_kb_crop=True, split="test"):
+ import glob
+
+ # image paths are of the form <data_dir_root>/rgb/<scene>/<variation>/frames/<rgb, depth>/Camera<0,1>/rgb_{}.jpg
+ self.image_files = glob.glob(os.path.join(
+ data_dir_root, "rgb", "**", "frames", "rgb", "Camera_0", '*.jpg'), recursive=True)
+ self.depth_files = [r.replace("/rgb/", "/depth/").replace(
+ "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files]
+ self.do_kb_crop = True
+ self.transform = ToTensor()
+
+ # If train test split is not created, then create one.
+ # Split is such that 8% of the frames from each scene are used for testing.
+ if not os.path.exists(os.path.join(data_dir_root, "train.txt")):
+ import random
+ scenes = set([os.path.basename(os.path.dirname(
+ os.path.dirname(os.path.dirname(f)))) for f in self.image_files])
+ train_files = []
+ test_files = []
+ for scene in scenes:
+ scene_files = [f for f in self.image_files if os.path.basename(
+ os.path.dirname(os.path.dirname(os.path.dirname(f)))) == scene]
+ random.shuffle(scene_files)
+ train_files.extend(scene_files[:int(len(scene_files) * 0.92)])
+ test_files.extend(scene_files[int(len(scene_files) * 0.92):])
+ with open(os.path.join(data_dir_root, "train.txt"), "w") as f:
+ f.write("\n".join(train_files))
+ with open(os.path.join(data_dir_root, "test.txt"), "w") as f:
+ f.write("\n".join(test_files))
+
+ if split == "train":
+ with open(os.path.join(data_dir_root, "train.txt"), "r") as f:
+ self.image_files = f.read().splitlines()
+ self.depth_files = [r.replace("/rgb/", "/depth/").replace(
+ "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files]
+ elif split == "test":
+ with open(os.path.join(data_dir_root, "test.txt"), "r") as f:
+ self.image_files = f.read().splitlines()
+ self.depth_files = [r.replace("/rgb/", "/depth/").replace(
+ "rgb_", "depth_").replace(".jpg", ".png") for r in self.image_files]
+
+ def __getitem__(self, idx):
+ image_path = self.image_files[idx]
+ depth_path = self.depth_files[idx]
+
+ image = Image.open(image_path)
+ # depth = Image.open(depth_path)
+ depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR |
+ cv2.IMREAD_ANYDEPTH) / 100.0 # cm to m
+ depth = Image.fromarray(depth)
+ # print("dpeth min max", depth.min(), depth.max())
+
+ # print(np.shape(image))
+ # print(np.shape(depth))
+
+ if self.do_kb_crop:
+ if idx == 0:
+ print("Using KB input crop")
+ height = image.height
+ width = image.width
+ top_margin = int(height - 352)
+ left_margin = int((width - 1216) / 2)
+ depth = depth.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+ image = image.crop(
+ (left_margin, top_margin, left_margin + 1216, top_margin + 352))
+ # uv = uv[:, top_margin:top_margin + 352, left_margin:left_margin + 1216]
+
+ image = np.asarray(image, dtype=np.float32) / 255.0
+ # depth = np.asarray(depth, dtype=np.uint16) /1.
+ depth = np.asarray(depth, dtype=np.float32) / 1.
+ depth[depth > 80] = -1
+
+ depth = depth[..., None]
+ sample = dict(image=image, depth=depth)
+
+ # return sample
+ sample = self.transform(sample)
+
+ if idx == 0:
+ print(sample["image"].shape)
+
+ return sample
+
+ def __len__(self):
+ return len(self.image_files)
+
+
+def get_vkitti2_loader(data_dir_root, batch_size=1, **kwargs):
+ dataset = VKITTI2(data_dir_root)
+ return DataLoader(dataset, batch_size, **kwargs)
+
+
+if __name__ == "__main__":
+ loader = get_vkitti2_loader(
+ data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti2")
+ print("Total files", len(loader.dataset))
+ for i, sample in enumerate(loader):
+ print(sample["image"].shape)
+ print(sample["depth"].shape)
+ print(sample["dataset"])
+ print(sample['depth'].min(), sample['depth'].max())
+ if i > 5:
+ break
diff --git a/zoedepth/models/__init__.py b/zoedepth/models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9
--- /dev/null
+++ b/zoedepth/models/__init__.py
@@ -0,0 +1,24 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
diff --git a/zoedepth/models/base_models/__init__.py b/zoedepth/models/base_models/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9
--- /dev/null
+++ b/zoedepth/models/base_models/__init__.py
@@ -0,0 +1,24 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
diff --git a/zoedepth/models/base_models/depth_anything.py b/zoedepth/models/base_models/depth_anything.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b20f8b3854eff55dceeb24406a8f53e716c4769
--- /dev/null
+++ b/zoedepth/models/base_models/depth_anything.py
@@ -0,0 +1,397 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+import numpy as np
+from torchvision.transforms import Normalize
+from zoedepth.models.base_models.dpt_dinov2.dpt import DPT_DINOv2
+
+
+def denormalize(x):
+ """Reverses the imagenet normalization applied to the input.
+
+ Args:
+ x (torch.Tensor - shape(N,3,H,W)): input tensor
+
+ Returns:
+ torch.Tensor - shape(N,3,H,W): Denormalized input
+ """
+ mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
+ std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
+ return x * std + mean
+
+def get_activation(name, bank):
+ def hook(model, input, output):
+ bank[name] = output
+ return hook
+
+
+class Resize(object):
+ """Resize sample to given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_target=True,
+ keep_aspect_ratio=False,
+ ensure_multiple_of=1,
+ resize_method="lower_bound",
+ ):
+ """Init.
+ Args:
+ width (int): desired output width
+ height (int): desired output height
+ resize_target (bool, optional):
+ True: Resize the full sample (image, mask, target).
+ False: Resize image only.
+ Defaults to True.
+ keep_aspect_ratio (bool, optional):
+ True: Keep the aspect ratio of the input sample.
+ Output sample might not have the given width and height, and
+ resize behaviour depends on the parameter 'resize_method'.
+ Defaults to False.
+ ensure_multiple_of (int, optional):
+ Output width and height are constrained to be a multiple of this parameter.
+ Defaults to 1.
+ resize_method (str, optional):
+ "lower_bound": Output will be at least as large as the given size.
+ "upper_bound": Output will be at most as large as the given size. (Output size might be smaller than the given size.)
+ "minimal": Scale as little as possible. (Output size might be smaller than the given size.)
+ Defaults to "lower_bound".
+ """
+ print("Params passed to Resize transform:")
+ print("\twidth: ", width)
+ print("\theight: ", height)
+ print("\tresize_target: ", resize_target)
+ print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
+ print("\tensure_multiple_of: ", ensure_multiple_of)
+ print("\tresize_method: ", resize_method)
+
+ self.__width = width
+ self.__height = height
+
+ self.__keep_aspect_ratio = keep_aspect_ratio
+ self.__multiple_of = ensure_multiple_of
+ self.__resize_method = resize_method
+
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if max_val is not None and y > max_val:
+ y = (np.floor(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ if y < min_val:
+ y = (np.ceil(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ return y
+
+ def get_size(self, width, height):
+ # determine new height and width
+ scale_height = self.__height / height
+ scale_width = self.__width / width
+
+ if self.__keep_aspect_ratio:
+ if self.__resize_method == "lower_bound":
+ # scale such that output size is lower bound
+ if scale_width > scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "upper_bound":
+ # scale such that output size is upper bound
+ if scale_width < scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "minimal":
+ # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented"
+ )
+
+ if self.__resize_method == "lower_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, min_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, min_val=self.__width
+ )
+ elif self.__resize_method == "upper_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, max_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, max_val=self.__width
+ )
+ elif self.__resize_method == "minimal":
+ new_height = self.constrain_to_multiple_of(scale_height * height)
+ new_width = self.constrain_to_multiple_of(scale_width * width)
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented")
+
+ return (new_width, new_height)
+
+ def __call__(self, x):
+ width, height = self.get_size(*x.shape[-2:][::-1])
+ return nn.functional.interpolate(x, (int(height), int(width)), mode='bilinear', align_corners=True)
+
+class PrepForMidas(object):
+ def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
+ if isinstance(img_size, int):
+ img_size = (img_size, img_size)
+ net_h, net_w = img_size
+ # self.normalization = Normalize(
+ # mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+ self.normalization = Normalize(
+ mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
+ self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=14, resize_method=resize_mode) \
+ if do_resize else nn.Identity()
+
+ def __call__(self, x):
+ return self.normalization(self.resizer(x))
+
+class PrepForMidasNonRGB(object):
+ def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
+ if isinstance(img_size, int):
+ img_size = (img_size, img_size)
+ net_h, net_w = img_size
+ # self.normalization = Normalize(
+ # mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+ self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=14, resize_method=resize_mode) \
+ if do_resize else nn.Identity()
+
+ def __call__(self, x):
+ return self.resizer(x)
+
+class DepthAnythingCore(nn.Module):
+ def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,
+ img_size=384, **kwargs):
+ """Midas Base model used for multi-scale feature extraction.
+
+ Args:
+ midas (torch.nn.Module): DPT_DINOv2 backbone to wrap.
+ trainable (bool, optional): Train midas model. Defaults to False.
+ fetch_features (bool, optional): Extract multi-scale features. Defaults to True.
+ layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').
+ freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.
+ keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.
+ img_size (int, tuple, optional): Input resolution. Defaults to 384.
+ """
+ super().__init__()
+ self.core = midas
+ self.output_channels = None
+ self.core_out = {}
+ self.trainable = trainable
+ self.fetch_features = fetch_features
+ # midas.scratch.output_conv = nn.Identity()
+ self.handles = []
+ # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']
+ self.layer_names = layer_names
+
+ self.set_trainable(trainable)
+ self.set_fetch_features(fetch_features)
+
+ self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,
+ img_size=img_size, do_resize=kwargs.get('do_resize', True))
+ self.prep_non_rgb = PrepForMidasNonRGB(keep_aspect_ratio=keep_aspect_ratio,
+ img_size=img_size, do_resize=kwargs.get('do_resize', True))
+
+ if freeze_bn:
+ self.freeze_bn()
+
+ def set_trainable(self, trainable):
+ self.trainable = trainable
+ if trainable:
+ self.unfreeze()
+ else:
+ self.freeze()
+ return self
+
+ def set_fetch_features(self, fetch_features):
+ self.fetch_features = fetch_features
+ if fetch_features:
+ if len(self.handles) == 0:
+ self.attach_hooks(self.core)
+ else:
+ self.remove_hooks()
+ return self
+
+ def freeze(self):
+ for p in self.parameters():
+ p.requires_grad = False
+ self.trainable = False
+ return self
+
+ def unfreeze(self):
+ for p in self.parameters():
+ p.requires_grad = True
+ self.trainable = True
+ return self
+
+ def freeze_bn(self):
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
+ return self
+
+ def forward(self, x, denorm=False, return_rel_depth=False):
+ # print('input to midas:', x.shape)
+ with torch.no_grad():
+ rgb = x[:, :3, ...]
+ non_rgb = x[:, 3:, ...]
+
+ if denorm:
+ rgb = denormalize(rgb)
+
+ rgb = self.prep(rgb)
+ non_rgb = self.prep_non_rgb(non_rgb)
+
+ x = torch.cat((rgb, non_rgb), dim=1)
+
+ with torch.set_grad_enabled(self.trainable):
+
+ rel_depth = self.core(x)
+ if not self.fetch_features:
+ return rel_depth
+ out = [self.core_out[k] for k in self.layer_names]
+
+ if return_rel_depth:
+ return rel_depth, out
+ return out
+
+ def get_rel_pos_params(self):
+ for name, p in self.core.pretrained.named_parameters():
+ if "pos_embed" in name:
+ yield p
+
+ def get_enc_params_except_rel_pos(self):
+ for name, p in self.core.pretrained.named_parameters():
+ if "pos_embed" not in name:
+ yield p
+
+ def freeze_encoder(self, freeze_rel_pos=False):
+ if freeze_rel_pos:
+ for p in self.core.pretrained.parameters():
+ p.requires_grad = False
+ else:
+ for p in self.get_enc_params_except_rel_pos():
+ p.requires_grad = False
+ return self
+
+ def attach_hooks(self, midas):
+ if len(self.handles) > 0:
+ self.remove_hooks()
+ if "out_conv" in self.layer_names:
+ self.handles.append(list(midas.depth_head.scratch.output_conv2.children())[
+ 1].register_forward_hook(get_activation("out_conv", self.core_out)))
+ if "r4" in self.layer_names:
+ self.handles.append(midas.depth_head.scratch.refinenet4.register_forward_hook(
+ get_activation("r4", self.core_out)))
+ if "r3" in self.layer_names:
+ self.handles.append(midas.depth_head.scratch.refinenet3.register_forward_hook(
+ get_activation("r3", self.core_out)))
+ if "r2" in self.layer_names:
+ self.handles.append(midas.depth_head.scratch.refinenet2.register_forward_hook(
+ get_activation("r2", self.core_out)))
+ if "r1" in self.layer_names:
+ self.handles.append(midas.depth_head.scratch.refinenet1.register_forward_hook(
+ get_activation("r1", self.core_out)))
+ if "l4_rn" in self.layer_names:
+ self.handles.append(midas.depth_head.scratch.layer4_rn.register_forward_hook(
+ get_activation("l4_rn", self.core_out)))
+
+ return self
+
+ def remove_hooks(self):
+ for h in self.handles:
+ h.remove()
+ return self
+
+ def __del__(self):
+ self.remove_hooks()
+
+ def set_output_channels(self):
+ self.output_channels = [256, 256, 256, 256, 256]
+
+ @staticmethod
+ def build(midas_model_type="dinov2_large", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):
+ if "img_size" in kwargs:
+ kwargs = DepthAnythingCore.parse_img_size(kwargs)
+ img_size = kwargs.pop("img_size", [384, 384])
+
+ depth_anything = DPT_DINOv2(out_channels=[256, 512, 1024, 1024], use_clstoken=False)
+
+ state_dict = torch.load('/scratch/shared/beegfs/paule/models/depth_anything_vitl14.pth', map_location='cpu')
+ depth_anything.load_state_dict(state_dict)
+
+ kwargs.update({'keep_aspect_ratio': force_keep_ar})
+
+ depth_anything_core = DepthAnythingCore(depth_anything, trainable=train_midas, fetch_features=fetch_features,
+ freeze_bn=freeze_bn, img_size=img_size, **kwargs)
+
+ depth_anything_core.set_output_channels()
+ return depth_anything_core
+
+ @staticmethod
+ def parse_img_size(config):
+ assert 'img_size' in config
+ if isinstance(config['img_size'], str):
+ assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W"
+ config['img_size'] = list(map(int, config['img_size'].split(",")))
+ assert len(
+ config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W"
+ elif isinstance(config['img_size'], int):
+ config['img_size'] = [config['img_size'], config['img_size']]
+ else:
+ assert isinstance(config['img_size'], list) and len(
+ config['img_size']) == 2, "img_size should be a list of H,W"
+ return config
+
+
+nchannels2models = {
+ tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"],
+ (512, 256, 128, 64, 64): ["MiDaS_small"]
+}
+
+# Model name to number of output channels
+MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items()
+ for m in v
+ }
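+
+
+if __name__ == "__main__":
+    # Usage sketch (illustration only). DepthAnythingCore.build() loads the DINOv2
+    # torch.hub checkout and the depth_anything_vitl14.pth checkpoint from the
+    # hard-coded local paths above, so running this requires those files. Shown here
+    # with a plain 3-channel RGB batch; any extra (non-RGB) channels would bypass
+    # the ImageNet normalization in PrepForMidasNonRGB.
+    core = DepthAnythingCore.build(midas_model_type="dinov2_large", train_midas=False,
+                                   fetch_features=True, img_size=[392, 518])
+    x = torch.rand(1, 3, 392, 518)  # RGB batch in [0, 1]; normalization is applied by prep
+    rel_depth, out = core(x, return_rel_depth=True)
+    print("rel_depth:", rel_depth.shape)
+    for name, feat in zip(core.layer_names, out):
+        print(name, feat.shape)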
\ No newline at end of file
diff --git a/zoedepth/models/base_models/dpt_dinov2/blocks.py b/zoedepth/models/base_models/dpt_dinov2/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..3dc585ea8ca43a3ecca8e82200b0156599694c3b
--- /dev/null
+++ b/zoedepth/models/base_models/dpt_dinov2/blocks.py
@@ -0,0 +1,153 @@
+import torch.nn as nn
+
+
+def _make_scratch(in_shape, out_shape, groups=1, expand=False):
+ scratch = nn.Module()
+
+ out_shape1 = out_shape
+ out_shape2 = out_shape
+ out_shape3 = out_shape
+ if len(in_shape) >= 4:
+ out_shape4 = out_shape
+
+ if expand:
+ out_shape1 = out_shape
+ out_shape2 = out_shape*2
+ out_shape3 = out_shape*4
+ if len(in_shape) >= 4:
+ out_shape4 = out_shape*8
+
+ scratch.layer1_rn = nn.Conv2d(
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer2_rn = nn.Conv2d(
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer3_rn = nn.Conv2d(
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ if len(in_shape) >= 4:
+ scratch.layer4_rn = nn.Conv2d(
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+
+ return scratch
+
+
+class ResidualConvUnit(nn.Module):
+ """Residual convolution module.
+ """
+
+ def __init__(self, features, activation, bn):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super().__init__()
+
+ self.bn = bn
+
+ self.groups=1
+
+ self.conv1 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+ )
+
+ self.conv2 = nn.Conv2d(
+ features, features, kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
+ )
+
+ if self.bn==True:
+ self.bn1 = nn.BatchNorm2d(features)
+ self.bn2 = nn.BatchNorm2d(features)
+
+ self.activation = activation
+
+ self.skip_add = nn.quantized.FloatFunctional()
+
+ def forward(self, x):
+ """Forward pass.
+
+ Args:
+ x (tensor): input
+
+ Returns:
+ tensor: output
+ """
+
+ out = self.activation(x)
+ out = self.conv1(out)
+ if self.bn==True:
+ out = self.bn1(out)
+
+ out = self.activation(out)
+ out = self.conv2(out)
+ if self.bn==True:
+ out = self.bn2(out)
+
+ if self.groups > 1:
+ out = self.conv_merge(out)
+
+ return self.skip_add.add(out, x)
+
+
+class FeatureFusionBlock(nn.Module):
+ """Feature fusion block.
+ """
+
+ def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
+ """Init.
+
+ Args:
+ features (int): number of features
+ """
+ super(FeatureFusionBlock, self).__init__()
+
+ self.deconv = deconv
+ self.align_corners = align_corners
+
+ self.groups=1
+
+ self.expand = expand
+ out_features = features
+ if self.expand==True:
+ out_features = features//2
+
+ self.out_conv = nn.Conv2d(features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1)
+
+ self.resConfUnit1 = ResidualConvUnit(features, activation, bn)
+ self.resConfUnit2 = ResidualConvUnit(features, activation, bn)
+
+ self.skip_add = nn.quantized.FloatFunctional()
+
+ self.size=size
+
+ def forward(self, *xs, size=None):
+ """Forward pass.
+
+ Returns:
+ tensor: output
+ """
+ output = xs[0]
+
+ if len(xs) == 2:
+ res = self.resConfUnit1(xs[1])
+ output = self.skip_add.add(output, res)
+
+ output = self.resConfUnit2(output)
+
+ if (size is None) and (self.size is None):
+ modifier = {"scale_factor": 2}
+ elif size is None:
+ modifier = {"size": self.size}
+ else:
+ modifier = {"size": size}
+
+ output = nn.functional.interpolate(
+ output, **modifier, mode="bilinear", align_corners=self.align_corners
+ )
+
+ output = self.out_conv(output)
+
+ return output
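+
+
+if __name__ == "__main__":
+    # Minimal shape check (illustrative, not part of the original module): fuse a
+    # lateral skip connection into the decoder path and upsample 2x, as the DPT
+    # refinenet blocks do.
+    import torch
+
+    fuse = FeatureFusionBlock(256, nn.ReLU(False), bn=False, align_corners=True)
+    coarse = torch.rand(1, 256, 12, 16)   # current decoder path
+    skip = torch.rand(1, 256, 12, 16)     # lateral (skip) features
+    out = fuse(coarse, skip, size=(24, 32))
+    print(out.shape)  # -> torch.Size([1, 256, 24, 32])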
\ No newline at end of file
diff --git a/zoedepth/models/base_models/dpt_dinov2/dpt.py b/zoedepth/models/base_models/dpt_dinov2/dpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec6e12fb8c8ee0659183d8e6f0ae44af297b6583
--- /dev/null
+++ b/zoedepth/models/base_models/dpt_dinov2/dpt.py
@@ -0,0 +1,157 @@
+import torch
+import torch.nn as nn
+
+from .blocks import FeatureFusionBlock, _make_scratch
+import torch.nn.functional as F
+
+
+def _make_fusion_block(features, use_bn, size = None):
+ return FeatureFusionBlock(
+ features,
+ nn.ReLU(False),
+ deconv=False,
+ bn=use_bn,
+ expand=False,
+ align_corners=True,
+ size=size,
+ )
+
+
+class DPTHead(nn.Module):
+ def __init__(self, in_channels, features=256, use_bn=False, out_channels=[256, 512, 1024, 1024], use_clstoken=False):
+ super(DPTHead, self).__init__()
+
+ self.use_clstoken = use_clstoken
+
+ # out_channels = [in_channels // 8, in_channels // 4, in_channels // 2, in_channels]
+ # out_channels = [in_channels // 4, in_channels // 2, in_channels, in_channels]
+ # out_channels = [in_channels, in_channels, in_channels, in_channels]
+
+ self.projects = nn.ModuleList([
+ nn.Conv2d(
+ in_channels=in_channels,
+ out_channels=out_channel,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ) for out_channel in out_channels
+ ])
+
+ self.resize_layers = nn.ModuleList([
+ nn.ConvTranspose2d(
+ in_channels=out_channels[0],
+ out_channels=out_channels[0],
+ kernel_size=4,
+ stride=4,
+ padding=0),
+ nn.ConvTranspose2d(
+ in_channels=out_channels[1],
+ out_channels=out_channels[1],
+ kernel_size=2,
+ stride=2,
+ padding=0),
+ nn.Identity(),
+ nn.Conv2d(
+ in_channels=out_channels[3],
+ out_channels=out_channels[3],
+ kernel_size=3,
+ stride=2,
+ padding=1)
+ ])
+
+ if use_clstoken:
+ self.readout_projects = nn.ModuleList()
+ for _ in range(len(self.projects)):
+ self.readout_projects.append(
+ nn.Sequential(
+ nn.Linear(2 * in_channels, in_channels),
+ nn.GELU()))
+
+ self.scratch = _make_scratch(
+ out_channels,
+ features,
+ groups=1,
+ expand=False,
+ )
+
+ self.scratch.stem_transpose = None
+
+ self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet3 = _make_fusion_block(features, use_bn)
+ self.scratch.refinenet4 = _make_fusion_block(features, use_bn)
+
+ head_features_1 = features
+ head_features_2 = 32
+
+ self.scratch.output_conv1 = nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1)
+
+ self.scratch.output_conv2 = nn.Sequential(
+ nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
+ nn.ReLU(True),
+ nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
+ nn.ReLU(True),
+ nn.Identity(),
+ )
+
+ def forward(self, out_features, patch_h, patch_w):
+ out = []
+ for i, x in enumerate(out_features):
+ if self.use_clstoken:
+ x, cls_token = x[0], x[1]
+ readout = cls_token.unsqueeze(1).expand_as(x)
+ x = self.readout_projects[i](torch.cat((x, readout), -1))
+ else:
+ x = x[0]
+
+ x = x.permute(0, 2, 1).reshape((x.shape[0], x.shape[-1], patch_h, patch_w))
+
+ x = self.projects[i](x)
+ x = self.resize_layers[i](x)
+
+ out.append(x)
+
+ layer_1, layer_2, layer_3, layer_4 = out
+
+ layer_1_rn = self.scratch.layer1_rn(layer_1)
+ layer_2_rn = self.scratch.layer2_rn(layer_2)
+ layer_3_rn = self.scratch.layer3_rn(layer_3)
+ layer_4_rn = self.scratch.layer4_rn(layer_4)
+
+ path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
+ path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
+ path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
+ path_1 = self.scratch.refinenet1(path_2, layer_1_rn)
+
+ out = self.scratch.output_conv1(path_1)
+ out = F.interpolate(out, (int(patch_h * 14), int(patch_w * 14)), mode="bilinear", align_corners=True)
+ out = self.scratch.output_conv2(out)
+
+ return out
+
+
+class DPT_DINOv2(nn.Module):
+ def __init__(self, encoder='vitl', features=256, use_bn=False, out_channels=[256, 512, 1024, 1024], use_clstoken=False):
+
+ super(DPT_DINOv2, self).__init__()
+
+ torch.manual_seed(1)
+
+ self.pretrained = torch.hub.load('/work/paule/Depth-Anything/torchhub/facebookresearch_dinov2_main', 'dinov2_{:}14'.format(encoder), source='local', pretrained=False)
+
+ dim = self.pretrained.blocks[0].attn.qkv.in_features
+
+ self.depth_head = DPTHead(dim, features, use_bn, out_channels=out_channels, use_clstoken=use_clstoken)
+
+ def forward(self, x):
+ h, w = x.shape[-2:]
+
+ features = self.pretrained.get_intermediate_layers(x, 4, return_class_token=True)
+
+ patch_h, patch_w = h // 14, w // 14
+
+ depth = self.depth_head(features, patch_h, patch_w)
+ depth = F.interpolate(depth, size=(h, w), mode="bilinear", align_corners=True)
+ depth = F.relu(depth)
+
+ return depth.squeeze(1)
\ No newline at end of file
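A quick shape sketch of how DPTHead folds ViT patch tokens back into a 2D feature map may help here; the embedding width and resolution below are illustrative assumptions (ViT-L with 14-pixel patches), and since the torch.hub path in DPT_DINOv2 points at a local checkout, the sketch stays at the tensor-shape level.

import torch

b, embed_dim = 1, 1024               # assumed batch size and ViT-L embedding width
h, w = 518, 518                      # assumed input resolution (multiple of 14)
patch_h, patch_w = h // 14, w // 14  # 37 x 37 token grid
tokens = torch.randn(b, patch_h * patch_w, embed_dim)   # stand-in for DINOv2 patch tokens

# DPTHead.forward transposes the token sequence and reshapes it into a grid
# before the per-scale 1x1 projections and resize layers are applied:
fmap = tokens.permute(0, 2, 1).reshape(b, embed_dim, patch_h, patch_w)
print(fmap.shape)   # torch.Size([1, 1024, 37, 37])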
diff --git a/zoedepth/models/base_models/midas.py b/zoedepth/models/base_models/midas.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bd7a461485c741880bb114d7362b82bc22aa306
--- /dev/null
+++ b/zoedepth/models/base_models/midas.py
@@ -0,0 +1,382 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+import numpy as np
+from torchvision.transforms import Normalize
+
+
+def denormalize(x):
+ """Reverses the imagenet normalization applied to the input.
+
+ Args:
+ x (torch.Tensor - shape(N,3,H,W)): input tensor
+
+ Returns:
+ torch.Tensor - shape(N,3,H,W): Denormalized input
+ """
+ mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
+ std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
+ return x * std + mean
+
+def get_activation(name, bank):
+ def hook(model, input, output):
+ bank[name] = output
+ return hook
+
+
+class Resize(object):
+ """Resize sample to given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_target=True,
+ keep_aspect_ratio=False,
+ ensure_multiple_of=1,
+ resize_method="lower_bound",
+ ):
+ """Init.
+ Args:
+ width (int): desired output width
+ height (int): desired output height
+ resize_target (bool, optional):
+ True: Resize the full sample (image, mask, target).
+ False: Resize image only.
+ Defaults to True.
+ keep_aspect_ratio (bool, optional):
+ True: Keep the aspect ratio of the input sample.
+ Output sample might not have the given width and height, and
+ resize behaviour depends on the parameter 'resize_method'.
+ Defaults to False.
+ ensure_multiple_of (int, optional):
+ Output width and height is constrained to be multiple of this parameter.
+ Defaults to 1.
+ resize_method (str, optional):
+ "lower_bound": Output will be at least as large as the given size.
+ "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
+ "minimal": Scale as least as possible. (Output size might be smaller than given size.)
+ Defaults to "lower_bound".
+ """
+ print("Params passed to Resize transform:")
+ print("\twidth: ", width)
+ print("\theight: ", height)
+ print("\tresize_target: ", resize_target)
+ print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
+ print("\tensure_multiple_of: ", ensure_multiple_of)
+ print("\tresize_method: ", resize_method)
+
+ self.__width = width
+ self.__height = height
+
+ self.__keep_aspect_ratio = keep_aspect_ratio
+ self.__multiple_of = ensure_multiple_of
+ self.__resize_method = resize_method
+
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if max_val is not None and y > max_val:
+ y = (np.floor(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ if y < min_val:
+ y = (np.ceil(x / self.__multiple_of)
+ * self.__multiple_of).astype(int)
+
+ return y
+
+ def get_size(self, width, height):
+ # determine new height and width
+ scale_height = self.__height / height
+ scale_width = self.__width / width
+
+ if self.__keep_aspect_ratio:
+ if self.__resize_method == "lower_bound":
+ # scale such that output size is lower bound
+ if scale_width > scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "upper_bound":
+ # scale such that output size is upper bound
+ if scale_width < scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "minimal":
+ # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented"
+ )
+
+ if self.__resize_method == "lower_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, min_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, min_val=self.__width
+ )
+ elif self.__resize_method == "upper_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, max_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, max_val=self.__width
+ )
+ elif self.__resize_method == "minimal":
+ new_height = self.constrain_to_multiple_of(scale_height * height)
+ new_width = self.constrain_to_multiple_of(scale_width * width)
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented")
+
+ return (new_width, new_height)
+
+ def __call__(self, x):
+ width, height = self.get_size(*x.shape[-2:][::-1])
+ return nn.functional.interpolate(x, (int(height), int(width)), mode='bilinear', align_corners=True)
+
+class PrepForMidas(object):
+ def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
+ if isinstance(img_size, int):
+ img_size = (img_size, img_size)
+ net_h, net_w = img_size
+ self.normalization = Normalize(
+ mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+ self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio, ensure_multiple_of=32, resize_method=resize_mode) \
+ if do_resize else nn.Identity()
+
+ def __call__(self, x):
+ if x.shape[1] > 3:
+ resized_x = self.resizer(x)
+ partial_normalized_x = self.normalization(resized_x[:, :3, :, :])
+ return torch.cat([partial_normalized_x, resized_x[:, 3:, :, :]], dim=1)
+ else:
+ return self.normalization(self.resizer(x))
+
+
+class MidasCore(nn.Module):
+ def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,
+ img_size=384, **kwargs):
+ """Midas Base model used for multi-scale feature extraction.
+
+ Args:
+ midas (torch.nn.Module): Midas model.
+ trainable (bool, optional): Train midas model. Defaults to False.
+ fetch_features (bool, optional): Extract multi-scale features. Defaults to True.
+ layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').
+ freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.
+ keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.
+ img_size (int, tuple, optional): Input resolution. Defaults to 384.
+ """
+ super().__init__()
+ self.core = midas
+ self.output_channels = None
+ self.core_out = {}
+ self.trainable = trainable
+ self.fetch_features = fetch_features
+ # midas.scratch.output_conv = nn.Identity()
+ self.handles = []
+ # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']
+ self.layer_names = layer_names
+
+ self.set_trainable(trainable)
+ self.set_fetch_features(fetch_features)
+
+ self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,
+ img_size=img_size, do_resize=kwargs.get('do_resize', True))
+
+ if freeze_bn:
+ self.freeze_bn()
+
+ def set_trainable(self, trainable):
+ self.trainable = trainable
+ if trainable:
+ self.unfreeze()
+ else:
+ self.freeze()
+ return self
+
+ def set_fetch_features(self, fetch_features):
+ self.fetch_features = fetch_features
+ if fetch_features:
+ if len(self.handles) == 0:
+ self.attach_hooks(self.core)
+ else:
+ self.remove_hooks()
+ return self
+
+ def freeze(self):
+ for p in self.parameters():
+ p.requires_grad = False
+ self.trainable = False
+ return self
+
+ def unfreeze(self):
+ for p in self.parameters():
+ p.requires_grad = True
+ self.trainable = True
+ return self
+
+ def freeze_bn(self):
+ for m in self.modules():
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval()
+ return self
+
+ def forward(self, x, denorm=False, return_rel_depth=False):
+ with torch.no_grad():
+ if denorm:
+ x = denormalize(x)
+ x = self.prep(x)
+ # print("Shape after prep: ", x.shape)
+
+ with torch.set_grad_enabled(self.trainable):
+
+ # print("Input size to Midascore", x.shape)
+ rel_depth = self.core(x)
+ # print("Output from midas shape", rel_depth.shape)
+ if not self.fetch_features:
+ return rel_depth
+ out = [self.core_out[k] for k in self.layer_names]
+
+ if return_rel_depth:
+ return rel_depth, out
+ return out
+
+ def get_rel_pos_params(self):
+ for name, p in self.core.pretrained.named_parameters():
+ if "relative_position" in name:
+ yield p
+
+ def get_enc_params_except_rel_pos(self):
+ for name, p in self.core.pretrained.named_parameters():
+ if "relative_position" not in name:
+ yield p
+
+ def freeze_encoder(self, freeze_rel_pos=False):
+ if freeze_rel_pos:
+ for p in self.core.pretrained.parameters():
+ p.requires_grad = False
+ else:
+ for p in self.get_enc_params_except_rel_pos():
+ p.requires_grad = False
+ return self
+
+ def attach_hooks(self, midas):
+ if len(self.handles) > 0:
+ self.remove_hooks()
+ if "out_conv" in self.layer_names:
+ self.handles.append(list(midas.scratch.output_conv.children())[
+ 3].register_forward_hook(get_activation("out_conv", self.core_out)))
+ if "r4" in self.layer_names:
+ self.handles.append(midas.scratch.refinenet4.register_forward_hook(
+ get_activation("r4", self.core_out)))
+ if "r3" in self.layer_names:
+ self.handles.append(midas.scratch.refinenet3.register_forward_hook(
+ get_activation("r3", self.core_out)))
+ if "r2" in self.layer_names:
+ self.handles.append(midas.scratch.refinenet2.register_forward_hook(
+ get_activation("r2", self.core_out)))
+ if "r1" in self.layer_names:
+ self.handles.append(midas.scratch.refinenet1.register_forward_hook(
+ get_activation("r1", self.core_out)))
+ if "l4_rn" in self.layer_names:
+ self.handles.append(midas.scratch.layer4_rn.register_forward_hook(
+ get_activation("l4_rn", self.core_out)))
+
+ return self
+
+ def remove_hooks(self):
+ for h in self.handles:
+ h.remove()
+ return self
+
+ def __del__(self):
+ self.remove_hooks()
+
+ def set_output_channels(self, model_type):
+ self.output_channels = MIDAS_SETTINGS[model_type]
+
+ @staticmethod
+ def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):
+ if midas_model_type not in MIDAS_SETTINGS:
+ raise ValueError(
+ f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}")
+ if "img_size" in kwargs:
+ kwargs = MidasCore.parse_img_size(kwargs)
+ img_size = kwargs.pop("img_size", [384, 384])
+ print("img_size", img_size)
+ midas = torch.hub.load("intel-isl/MiDaS", midas_model_type,
+ pretrained=use_pretrained_midas, force_reload=force_reload)
+ kwargs.update({'keep_aspect_ratio': force_keep_ar})
+ midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,
+ freeze_bn=freeze_bn, img_size=img_size, **kwargs)
+ midas_core.set_output_channels(midas_model_type)
+ return midas_core
+
+ @staticmethod
+ def build_from_config(config):
+ return MidasCore.build(**config)
+
+ @staticmethod
+ def parse_img_size(config):
+ assert 'img_size' in config
+ if isinstance(config['img_size'], str):
+ assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W"
+ config['img_size'] = list(map(int, config['img_size'].split(",")))
+ assert len(
+ config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W"
+ elif isinstance(config['img_size'], int):
+ config['img_size'] = [config['img_size'], config['img_size']]
+ else:
+ assert isinstance(config['img_size'], list) and len(
+ config['img_size']) == 2, "img_size should be a list of H,W"
+ return config
+
+
+nchannels2models = {
+ tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"],
+ (512, 256, 128, 64, 64): ["MiDaS_small"]
+}
+
+# Model name to number of output channels
+MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items()
+ for m in v
+ }
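As a sanity check (illustrative only, assuming the zoedepth package added by this patch is importable and torch.hub can reach intel-isl/MiDaS over the network), the settings table and the build helper can be exercised like this:

from zoedepth.models.base_models.midas import MidasCore, MIDAS_SETTINGS

# Model name -> decoder channel counts exposed through set_output_channels()
print(MIDAS_SETTINGS["DPT_BEiT_L_384"])   # (256, 256, 256, 256, 256)
print(MIDAS_SETTINGS["MiDaS_small"])      # (512, 256, 128, 64, 64)

# Downloads the MiDaS backbone via torch.hub; network access is assumed here.
core = MidasCore.build(midas_model_type="DPT_BEiT_L_384", train_midas=False,
                       use_pretrained_midas=True, fetch_features=True,
                       freeze_bn=True, img_size=[384, 512])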
diff --git a/zoedepth/models/builder.py b/zoedepth/models/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..4363d59689158912a412feb5c296b4a72bc2c608
--- /dev/null
+++ b/zoedepth/models/builder.py
@@ -0,0 +1,51 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from importlib import import_module
+from zoedepth.models.depth_model import DepthModel
+
+def build_model(config) -> DepthModel:
+ """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
+ This function should be used to construct models for training and evaluation.
+
+ Args:
+ config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.
+
+ Returns:
+ torch.nn.Module: Model corresponding to name and version as specified in config
+ """
+ module_name = f"zoedepth.models.{config.model}"
+ try:
+ module = import_module(module_name)
+ except ModuleNotFoundError as e:
+ # print the original error message
+ print(e)
+ raise ValueError(
+ f"Model {config.model} not found. Refer above error for details.") from e
+ try:
+ get_version = getattr(module, "get_version")
+ except AttributeError as e:
+ raise ValueError(
+ f"Model {config.model} has no get_version function.") from e
+ return get_version(config.version_name).build_from_config(config)
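The dynamic dispatch above can be traced by hand. The sketch below uses a plain SimpleNamespace in place of the project's config object (the real config comes from utils/config.py, which is not part of this hunk), so it only demonstrates the module/version lookup:

from importlib import import_module
from types import SimpleNamespace

cfg = SimpleNamespace(model="zoedepth", version_name="v1")   # stand-in config
module = import_module(f"zoedepth.models.{cfg.model}")       # -> zoedepth/models/zoedepth/__init__.py
model_cls = module.get_version(cfg.version_name)             # -> the class registered as "v1"
print(model_cls.__name__)                                    # "ZoeDepth"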
diff --git a/zoedepth/models/depth_model.py b/zoedepth/models/depth_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a4b6eb875113bbd1cfeb61c1334c33e12fe5475
--- /dev/null
+++ b/zoedepth/models/depth_model.py
@@ -0,0 +1,157 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+import PIL.Image
+from PIL import Image
+from typing import Union
+
+
+class DepthModel(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.device = 'cpu'
+
+ def to(self, device) -> nn.Module:
+ self.device = device
+ return super().to(device)
+
+ def forward(self, x, *args, **kwargs):
+ raise NotImplementedError
+
+ def _infer(self, x: torch.Tensor):
+ """
+ Inference interface for the model
+ Args:
+ x (torch.Tensor): input tensor of shape (b, c, h, w)
+ Returns:
+ torch.Tensor: output tensor of shape (b, 1, h, w)
+ """
+ return self(x)['metric_depth']
+
+ def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
+ """
+ Inference interface for the model with padding augmentation
+ Padding augmentation fixes the boundary artifacts in the output depth map.
+ Boundary artifacts are sometimes caused by the fact that the model is trained on the NYU raw dataset, which has a black or white border around the images.
+ This augmentation pads the input image and crops the prediction back to the original size / view.
+
+ Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
+ Args:
+ x (torch.Tensor): input tensor of shape (b, c, h, w)
+ pad_input (bool, optional): whether to pad the input or not. Defaults to True.
+ fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
+ fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
+ upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
+ padding_mode (str, optional): padding mode. Defaults to "reflect".
+ Returns:
+ torch.Tensor: output tensor of shape (b, 1, h, w)
+ """
+ # assert x is nchw and c = 3
+ assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
+ #assert (x.shape[1] == 3 or x.shape[1] == 4), "x must have 3 or 4 channels, got {}".format(x.shape[1])
+
+ if pad_input:
+ assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0"
+ pad_h = int(np.sqrt(x.shape[2]/2) * fh)
+ pad_w = int(np.sqrt(x.shape[3]/2) * fw)
+ padding = [pad_w, pad_w]
+ if pad_h > 0:
+ padding += [pad_h, pad_h]
+
+ x = F.pad(x, padding, mode=padding_mode, **kwargs)
+ out = self._infer(x)
+ if out.shape[-2:] != x.shape[-2:]:
+ out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
+ if pad_input:
+ # crop to the original size, handling the case where pad_h or pad_w is 0
+ if pad_h > 0:
+ out = out[:, :, pad_h:-pad_h,:]
+ if pad_w > 0:
+ out = out[:, :, :, pad_w:-pad_w]
+ return out
+
+ def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
+ """
+ Inference interface for the model with horizontal flip augmentation
+ Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
+ Args:
+ x (torch.Tensor): input tensor of shape (b, c, h, w)
+ pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+ Returns:
+ torch.Tensor: output tensor of shape (b, 1, h, w)
+ """
+ # infer with horizontal flip and average
+ out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
+ out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
+ out = (out + torch.flip(out_flip, dims=[3])) / 2
+ return out
+
+ def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:
+ """
+ Inference interface for the model
+ Args:
+ x (torch.Tensor): input tensor of shape (b, c, h, w)
+ pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+ with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
+ Returns:
+ torch.Tensor: output tensor of shape (b, 1, h, w)
+ """
+ if with_flip_aug:
+ return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
+ else:
+ return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
+
+ @torch.no_grad()
+ def infer_pil(self, pil_img, sparse_depth = None, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:
+ """
+ Inference interface for the model for PIL image
+ Args:
+ pil_img (PIL.Image.Image): input PIL image
+ pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+ with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
+ output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".
+ """
+ x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)
+ if sparse_depth is not None:
+ sparse_depth = sparse_depth[None, None, ...].to(self.device)
+ sparse_depth_mask = (sparse_depth > 0).float()
+ x = torch.cat([x, sparse_depth / 10.0, sparse_depth_mask], dim=1)
+
+ out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)
+ if output_type == "numpy":
+ return out_tensor.squeeze().cpu().numpy()
+ elif output_type == "pil":
+ # uint16 is required for depth pil image
+ out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)
+ return Image.fromarray(out_16bit_numpy)
+ elif output_type == "tensor":
+ return out_tensor.squeeze().cpu()
+ else:
+ raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'")
+
\ No newline at end of file
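For intuition, here is a small numeric check of the reflection-padding margins computed in _infer_with_pad_aug; the 480x640 resolution is just an example, the values follow directly from the formula in the code:

import numpy as np

h, w, fh, fw = 480, 640, 3.0, 3.0     # example resolution and the default padding factors
pad_h = int(np.sqrt(h / 2) * fh)      # 46 px added on top and bottom
pad_w = int(np.sqrt(w / 2) * fw)      # 53 px added on left and right
print(pad_h, pad_w)                   # 46 53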
diff --git a/zoedepth/models/layers/attractor.py b/zoedepth/models/layers/attractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a8efe645adea1d88a12e2ac5cc6bb2a251eef9d
--- /dev/null
+++ b/zoedepth/models/layers/attractor.py
@@ -0,0 +1,208 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+@torch.jit.script
+def exp_attractor(dx, alpha: float = 300, gamma: int = 2):
+ """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin centermmary for exp_attractor
+
+ Args:
+ dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+ alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+ gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and, indirectly, the number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
+
+ Returns:
+ torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc
+ """
+ return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx)
+
+
+@torch.jit.script
+def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
+ """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+ This is the default one according to the accompanying paper.
+
+ Args:
+ dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+ alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+ gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and, indirectly, the number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
+
+ Returns:
+ torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
+ """
+ return dx.div(1+alpha*dx.pow(gamma))
+
+
+class AttractorLayer(nn.Module):
+ def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
+ alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
+ """
+ Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
+ """
+ super().__init__()
+
+ self.n_attractors = n_attractors
+ self.n_bins = n_bins
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.alpha = alpha
+ self.gamma = gamma
+ self.kind = kind
+ self.attractor_type = attractor_type
+ self.memory_efficient = memory_efficient
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm
+ nn.ReLU(inplace=True)
+ )
+
+ def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+ """
+ Args:
+ x (torch.Tensor) : feature block; shape - n, c, h, w
+ b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
+
+ Returns:
+ tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w
+ """
+ if prev_b_embedding is not None:
+ if interpolate:
+ prev_b_embedding = nn.functional.interpolate(
+ prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+ x = x + prev_b_embedding
+
+ A = self._net(x)
+ eps = 1e-3
+ A = A + eps
+ n, c, h, w = A.shape
+ A = A.view(n, self.n_attractors, 2, h, w)
+ A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w
+ A_normed = A[:, :, 0, ...] # n, na, h, w (note: overwrites the normed tensor above; only the first of the two channels is used, as in the reference implementation)
+
+ b_prev = nn.functional.interpolate(
+ b_prev, (h, w), mode='bilinear', align_corners=True)
+ b_centers = b_prev
+
+ if self.attractor_type == 'exp':
+ dist = exp_attractor
+ else:
+ dist = inv_attractor
+
+ if not self.memory_efficient:
+ func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
+ # .shape N, nbins, h, w
+ delta_c = func(dist(A_normed.unsqueeze(
+ 2) - b_centers.unsqueeze(1)), dim=1)
+ else:
+ delta_c = torch.zeros_like(b_centers, device=b_centers.device)
+ for i in range(self.n_attractors):
+ # .shape N, nbins, h, w
+ delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)
+
+ if self.kind == 'mean':
+ delta_c = delta_c / self.n_attractors
+
+ b_new_centers = b_centers + delta_c
+ B_centers = (self.max_depth - self.min_depth) * \
+ b_new_centers + self.min_depth
+ B_centers, _ = torch.sort(B_centers, dim=1)
+ B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)
+ return b_new_centers, B_centers
+
+
+class AttractorLayerUnnormed(nn.Module):
+ def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
+ alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
+ """
+ Attractor layer for bin centers. Bin centers are unbounded
+ """
+ super().__init__()
+
+ self.n_attractors = n_attractors
+ self.n_bins = n_bins
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.alpha = alpha
+ self.gamma = gamma
+ self.kind = kind
+ self.attractor_type = attractor_type
+ self.memory_efficient = memory_efficient
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),
+ nn.Softplus()
+ )
+
+ def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+ """
+ Args:
+ x (torch.Tensor) : feature block; shape - n, c, h, w
+ b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
+
+ Returns:
+ tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version
+ """
+ if prev_b_embedding is not None:
+ if interpolate:
+ prev_b_embedding = nn.functional.interpolate(
+ prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+ x = x + prev_b_embedding
+
+ A = self._net(x)
+ n, c, h, w = A.shape
+
+ b_prev = nn.functional.interpolate(
+ b_prev, (h, w), mode='bilinear', align_corners=True)
+ b_centers = b_prev
+
+ if self.attractor_type == 'exp':
+ dist = exp_attractor
+ else:
+ dist = inv_attractor
+
+ if not self.memory_efficient:
+ func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
+ # .shape N, nbins, h, w
+ delta_c = func(
+ dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)
+ else:
+ delta_c = torch.zeros_like(b_centers, device=b_centers.device)
+ for i in range(self.n_attractors):
+ delta_c += dist(A[:, i, ...].unsqueeze(1) -
+ b_centers) # .shape N, nbins, h, w
+
+ if self.kind == 'mean':
+ delta_c = delta_c / self.n_attractors
+
+ b_new_centers = b_centers + delta_c
+ B_centers = b_new_centers
+
+ return b_new_centers, B_centers
diff --git a/zoedepth/models/layers/dist_layers.py b/zoedepth/models/layers/dist_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..3208405dfb78fdfc28d5765e5a6d5dbe31967a23
--- /dev/null
+++ b/zoedepth/models/layers/dist_layers.py
@@ -0,0 +1,121 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+def log_binom(n, k, eps=1e-7):
+ """ log(nCk) using stirling approximation """
+ n = n + eps
+ k = k + eps
+ return n * torch.log(n) - k * torch.log(k) - (n-k) * torch.log(n-k+eps)
+
+
+class LogBinomial(nn.Module):
+ def __init__(self, n_classes=256, act=torch.softmax):
+ """Compute log binomial distribution for n_classes
+
+ Args:
+ n_classes (int, optional): number of output classes. Defaults to 256.
+ """
+ super().__init__()
+ self.K = n_classes
+ self.act = act
+ self.register_buffer('k_idx', torch.arange(
+ 0, n_classes).view(1, -1, 1, 1))
+ self.register_buffer('K_minus_1', torch.Tensor(
+ [self.K-1]).view(1, -1, 1, 1))
+
+ def forward(self, x, t=1., eps=1e-4):
+ """Compute log binomial distribution for x
+
+ Args:
+ x (torch.Tensor - NCHW): probabilities
+ t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1..
+ eps (float, optional): Small number for numerical stability. Defaults to 1e-4.
+
+ Returns:
+ torch.Tensor -NCHW: log binomial distribution logbinomial(p;t)
+ """
+ if x.ndim == 3:
+ x = x.unsqueeze(1) # make it nchw
+
+ one_minus_x = torch.clamp(1 - x, eps, 1)
+ x = torch.clamp(x, eps, 1)
+ y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * \
+ torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x)
+ return self.act(y/t, dim=1)
+
+
+class ConditionalLogBinomial(nn.Module):
+ def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):
+ """Conditional Log Binomial distribution
+
+ Args:
+ in_features (int): number of input channels in main feature
+ condition_dim (int): number of input channels in condition feature
+ n_classes (int, optional): Number of classes. Defaults to 256.
+ bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.
+ p_eps (float, optional): small eps value. Defaults to 1e-4.
+ max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.
+ min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.
+ """
+ super().__init__()
+ self.p_eps = p_eps
+ self.max_temp = max_temp
+ self.min_temp = min_temp
+ self.log_binomial_transform = LogBinomial(n_classes, act=act)
+ bottleneck = (in_features + condition_dim) // bottleneck_factor
+ self.mlp = nn.Sequential(
+ nn.Conv2d(in_features + condition_dim, bottleneck,
+ kernel_size=1, stride=1, padding=0),
+ nn.GELU(),
+ # 2 for p linear norm, 2 for t linear norm
+ nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),
+ nn.Softplus()
+ )
+
+ def forward(self, x, cond):
+ """Forward pass
+
+ Args:
+ x (torch.Tensor - NCHW): Main feature
+ cond (torch.Tensor - NCHW): condition feature
+
+ Returns:
+ torch.Tensor: Output log binomial distribution
+ """
+ pt = self.mlp(torch.concat((x, cond), dim=1))
+ p, t = pt[:, :2, ...], pt[:, 2:, ...]
+
+ p = p + self.p_eps
+ p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])
+
+ t = t + self.p_eps
+ t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])
+ t = t.unsqueeze(1)
+ t = (self.max_temp - self.min_temp) * t + self.min_temp
+
+ return self.log_binomial_transform(p, t)
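A small usage sketch of the plain LogBinomial head (again assuming the package is importable): a per-pixel probability p is expanded into a 256-way ordinal distribution whose mode sits near p*(K-1).

import torch
from zoedepth.models.layers.dist_layers import LogBinomial

lb = LogBinomial(n_classes=256)
p = torch.full((1, 1, 4, 4), 0.3)            # NCHW probabilities
dist = lb(p, t=1.0)                          # (1, 256, 4, 4); softmax over the class dim
print(dist.sum(dim=1)[0, 0, 0].item())       # ~1.0
print(dist.argmax(dim=1)[0, 0, 0].item())    # mode near 0.3 * 255 = 76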
diff --git a/zoedepth/models/layers/localbins_layers.py b/zoedepth/models/layers/localbins_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..f94481605c3e6958ce50e73b2eb31d9f0c07dc67
--- /dev/null
+++ b/zoedepth/models/layers/localbins_layers.py
@@ -0,0 +1,169 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+class SeedBinRegressor(nn.Module):
+ def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
+ """Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.
+
+ Args:
+ in_features (int): input channels
+ n_bins (int, optional): Number of bin centers. Defaults to 16.
+ mlp_dim (int, optional): Hidden dimension. Defaults to 256.
+ min_depth (float, optional): Min depth value. Defaults to 1e-3.
+ max_depth (float, optional): Max depth value. Defaults to 10.
+ """
+ super().__init__()
+ self.version = "1_1"
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
+ nn.ReLU(inplace=True)
+ )
+
+ def forward(self, x):
+ """
+ Returns tensor of bin_width vectors (centers). One vector b for every pixel
+ """
+ B = self._net(x)
+ eps = 1e-3
+ B = B + eps
+ B_widths_normed = B / B.sum(dim=1, keepdim=True)
+ B_widths = (self.max_depth - self.min_depth) * \
+ B_widths_normed # .shape NCHW
+ # pad has the form (left, right, top, bottom, front, back)
+ B_widths = nn.functional.pad(
+ B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)
+ B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
+
+ B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])
+ return B_widths_normed, B_centers
+
+
+class SeedBinRegressorUnnormed(nn.Module):
+ def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
+ """Bin center regressor network. Bin centers are unbounded
+
+ Args:
+ in_features (int): input channels
+ n_bins (int, optional): Number of bin centers. Defaults to 16.
+ mlp_dim (int, optional): Hidden dimension. Defaults to 256.
+ min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+ max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+ """
+ super().__init__()
+ self.version = "1_1"
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
+ nn.Softplus()
+ )
+
+ def forward(self, x):
+ """
+ Returns tensor of bin_width vectors (centers). One vector b for every pixel
+ """
+ B_centers = self._net(x)
+ return B_centers, B_centers
+
+
+class Projector(nn.Module):
+ def __init__(self, in_features, out_features, mlp_dim=128):
+ """Projector MLP
+
+ Args:
+ in_features (int): input channels
+ out_features (int): output channels
+ mlp_dim (int, optional): hidden dimension. Defaults to 128.
+ """
+ super().__init__()
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, out_features, 1, 1, 0),
+ )
+
+ def forward(self, x):
+ return self._net(x)
+
+
+
+class LinearSplitter(nn.Module):
+ def __init__(self, in_features, prev_nbins, split_factor=2, mlp_dim=128, min_depth=1e-3, max_depth=10):
+ super().__init__()
+
+ self.prev_nbins = prev_nbins
+ self.split_factor = split_factor
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.GELU(),
+ nn.Conv2d(mlp_dim, prev_nbins * split_factor, 1, 1, 0),
+ nn.ReLU()
+ )
+
+ def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+ """
+ x : feature block; shape - n, c, h, w
+ b_prev : previous bin widths normed; shape - n, prev_nbins, h, w
+ """
+ if prev_b_embedding is not None:
+ if interpolate:
+ prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+ x = x + prev_b_embedding
+ S = self._net(x)
+ eps = 1e-3
+ S = S + eps
+ n, c, h, w = S.shape
+ S = S.view(n, self.prev_nbins, self.split_factor, h, w)
+ S_normed = S / S.sum(dim=2, keepdim=True) # fractional splits
+
+ b_prev = nn.functional.interpolate(b_prev, (h,w), mode='bilinear', align_corners=True)
+
+
+ b_prev = b_prev / b_prev.sum(dim=1, keepdim=True) # renormalize for guarantees
+ # print(b_prev.shape, S_normed.shape)
+ # if is_for_query:(1).expand(-1, b_prev.size(0)//n, -1, -1, -1, -1).flatten(0,1) # TODO ? can replace all this with a single torch.repeat?
+ b = b_prev.unsqueeze(2) * S_normed
+ b = b.flatten(1,2) # .shape n, prev_nbins * split_factor, h, w
+
+ # calculate bin centers for loss calculation
+ B_widths = (self.max_depth - self.min_depth) * b # .shape N, nprev * splitfactor, H, W
+ # pad has the form (left, right, top, bottom, front, back)
+ B_widths = nn.functional.pad(B_widths, (0,0,0,0,1,0), mode='constant', value=self.min_depth)
+ B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
+
+ B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:,1:,...])
+ return b, B_centers
\ No newline at end of file
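A shape sketch for the seed regressor (package import assumed): a bottleneck feature map yields per-pixel normalised bin widths that sum to one, plus metric bin centers inside (min_depth, max_depth).

import torch
from zoedepth.models.layers.localbins_layers import SeedBinRegressor

reg = SeedBinRegressor(in_features=256, n_bins=64, min_depth=1e-3, max_depth=10)
feat = torch.randn(2, 256, 12, 16)                     # N, C, H, W bottleneck features
widths_normed, centers = reg(feat)
print(widths_normed.shape, centers.shape)              # both torch.Size([2, 64, 12, 16])
print(widths_normed.sum(dim=1).allclose(torch.ones(2, 12, 16)))   # True: widths sum to 1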
diff --git a/zoedepth/models/layers/patch_transformer.py b/zoedepth/models/layers/patch_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..99d9e51a06b981bae45ce7dd64eaef19a4121991
--- /dev/null
+++ b/zoedepth/models/layers/patch_transformer.py
@@ -0,0 +1,91 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+class PatchTransformerEncoder(nn.Module):
+ def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False):
+ """ViT-like transformer block
+
+ Args:
+ in_channels (int): Input channels
+ patch_size (int, optional): patch size. Defaults to 10.
+ embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.
+ num_heads (int, optional): number of attention heads. Defaults to 4.
+ use_class_token (bool, optional): Whether to use an extra token at the start for global accumulation (the "class token"). Defaults to False.
+ """
+ super(PatchTransformerEncoder, self).__init__()
+ self.use_class_token = use_class_token
+ encoder_layers = nn.TransformerEncoderLayer(
+ embedding_dim, num_heads, dim_feedforward=1024)
+ self.transformer_encoder = nn.TransformerEncoder(
+ encoder_layers, num_layers=4) # takes shape S,N,E
+
+ self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,
+ kernel_size=patch_size, stride=patch_size, padding=0)
+
+ def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'):
+ """Generate positional encodings
+
+ Args:
+ sequence_length (int): Sequence length
+ batch_size (int): Batch size
+ embedding_dim (int): Embedding dimension
+ device (str, optional): Device on which to create the encodings. Defaults to 'cpu'.
+
+ Returns:
+ torch.Tensor SBE: Positional encodings
+ """
+ position = torch.arange(
+ 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1)
+ index = torch.arange(
+ 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0)
+ div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
+ pos_encoding = position * div_term
+ pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
+ pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1)
+ return pos_encoding
+
+
+ def forward(self, x):
+ """Forward pass
+
+ Args:
+ x (torch.Tensor - NCHW): Input feature tensor
+
+ Returns:
+ torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim
+ """
+ embeddings = self.embedding_convPxP(x).flatten(
+ 2) # .shape = n,c,s = n, embedding_dim, s
+ if self.use_class_token:
+ # extra special token at start ?
+ embeddings = nn.functional.pad(embeddings, (1, 0))
+
+ # change to S,N,E format required by transformer
+ embeddings = embeddings.permute(2, 0, 1)
+ S, N, E = embeddings.shape
+ embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device)
+ x = self.transformer_encoder(embeddings) # .shape = S, N, E
+ return x
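For reference, a shape sketch of the patch transformer (package import assumed): 10x10 patches of a 128-channel feature map become a sequence in the S,N,E layout expected by nn.TransformerEncoder.

import torch
from zoedepth.models.layers.patch_transformer import PatchTransformerEncoder

enc = PatchTransformerEncoder(in_channels=128, patch_size=10, embedding_dim=128, num_heads=4)
x = torch.randn(2, 128, 120, 160)        # N, C, H, W feature block
out = enc(x)
print(out.shape)                         # torch.Size([192, 2, 128]); 192 = (120/10) * (160/10)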
diff --git a/zoedepth/models/model_io.py b/zoedepth/models/model_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..78b6579631dd847ac76651238cb5a948b5a66286
--- /dev/null
+++ b/zoedepth/models/model_io.py
@@ -0,0 +1,92 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+
+def load_state_dict(model, state_dict):
+ """Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
+
+ DataParallel prefixes state_dict keys with 'module.' when saving.
+ If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
+ If the model is a DataParallel model but the state_dict is not, then prefixes are added.
+ """
+ state_dict = state_dict.get('model', state_dict)
+ # if model is a DataParallel model, then state_dict keys are prefixed with 'module.'
+
+ do_prefix = isinstance(
+ model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))
+ state = {}
+ for k, v in state_dict.items():
+ if k.startswith('module.') and not do_prefix:
+ k = k[7:]
+
+ if not k.startswith('module.') and do_prefix:
+ k = 'module.' + k
+
+ state[k] = v
+
+ model.load_state_dict(state)
+ print("Loaded successfully")
+ return model
+
+
+def load_wts(model, checkpoint_path):
+ ckpt = torch.load(checkpoint_path, map_location='cpu')
+ return load_state_dict(model, ckpt)
+
+
+def load_state_dict_from_url(model, url, **kwargs):
+ state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs)
+ return load_state_dict(model, state_dict)
+
+
+def load_state_from_resource(model, resource: str):
+ """Loads weights to the model from a given resource. A resource can be of following types:
+ 1. URL. Prefixed with "url::"
+ e.g. url::http(s)://url.resource.com/ckpt.pt
+
+ 2. Local path. Prefixed with "local::"
+ e.g. local::/path/to/ckpt.pt
+
+
+ Args:
+ model (torch.nn.Module): Model
+ resource (str): resource string
+
+ Returns:
+ torch.nn.Module: Model with loaded weights
+ """
+ print(f"Using pretrained resource {resource}")
+
+ if resource.startswith('url::'):
+ url = resource.split('url::')[1]
+ return load_state_dict_from_url(model, url, progress=True)
+
+ elif resource.startswith('local::'):
+ path = resource.split('local::')[1]
+ return load_wts(model, path)
+
+ else:
+ raise ValueError("Invalid resource type, only url:: and local:: are supported")
+
\ No newline at end of file
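A tiny, file-free check of the 'module.' prefix handling in load_state_dict; the checkpoint dict below is synthetic, and the url:: / local:: resource strings go through the same code path once a real checkpoint is available.

import torch.nn as nn
from zoedepth.models.model_io import load_state_dict

net = nn.Linear(4, 2)                                               # plain, non-DataParallel model
wrapped = {"module." + k: v for k, v in net.state_dict().items()}   # keys as DataParallel would save them
load_state_dict(net, {"model": wrapped})                            # prefixes are stripped before loading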
diff --git a/zoedepth/models/zoedepth/__init__.py b/zoedepth/models/zoedepth/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc33f737d238766559f0e3a8def3c0b568f23b7f
--- /dev/null
+++ b/zoedepth/models/zoedepth/__init__.py
@@ -0,0 +1,31 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from .zoedepth_v1 import ZoeDepth
+
+all_versions = {
+ "v1": ZoeDepth,
+}
+
+get_version = lambda v : all_versions[v]
\ No newline at end of file
diff --git a/zoedepth/models/zoedepth/config_zoedepth.json b/zoedepth/models/zoedepth/config_zoedepth.json
new file mode 100644
index 0000000000000000000000000000000000000000..762641b2a98ea3ed6ce68067de3e77701e4b99a3
--- /dev/null
+++ b/zoedepth/models/zoedepth/config_zoedepth.json
@@ -0,0 +1,59 @@
+{
+ "model": {
+ "name": "ZoeDepth",
+ "version_name": "v1",
+ "n_bins": 64,
+ "bin_embedding_dim": 128,
+ "bin_centers_type": "softplus",
+ "n_attractors":[16, 8, 4, 1],
+ "attractor_alpha": 1000,
+ "attractor_gamma": 2,
+ "attractor_kind" : "mean",
+ "attractor_type" : "inv",
+ "midas_model_type" : "DPT_BEiT_L_384",
+ "min_temp": 0.0212,
+ "max_temp": 50.0,
+ "output_distribution": "logbinomial",
+ "memory_efficient": true,
+ "inverse_midas": false,
+ "img_size": [384, 512]
+ },
+
+ "train": {
+ "train_midas": true,
+ "use_pretrained_midas": true,
+ "trainer": "zoedepth",
+ "epochs": 5,
+ "bs": 16,
+ "optim_kwargs": {"lr": 0.000161, "wd": 0.01},
+ "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true},
+ "same_lr": false,
+ "w_si": 1,
+ "w_domain": 0.2,
+ "w_reg": 0,
+ "w_grad": 0,
+ "w_sd": 1,
+ "avoid_boundary": false,
+ "random_crop": false,
+ "input_width": 640,
+ "input_height": 480,
+ "midas_lr_factor": 1,
+ "encoder_lr_factor":10,
+ "pos_enc_lr_factor":10,
+ "freeze_midas_bn": true
+
+ },
+
+ "infer":{
+ "train_midas": false,
+ "use_pretrained_midas": false,
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt",
+ "force_keep_ar": true
+ },
+
+ "eval":{
+ "train_midas": false,
+ "use_pretrained_midas": false,
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_N.pt"
+ }
+}
\ No newline at end of file
diff --git a/zoedepth/models/zoedepth/config_zoedepth_kitti.json b/zoedepth/models/zoedepth/config_zoedepth_kitti.json
new file mode 100644
index 0000000000000000000000000000000000000000..b51802aa44b91c39e15aacaac4b5ab6bec884414
--- /dev/null
+++ b/zoedepth/models/zoedepth/config_zoedepth_kitti.json
@@ -0,0 +1,22 @@
+{
+ "model": {
+ "bin_centers_type": "normed",
+ "img_size": [384, 768]
+ },
+
+ "train": {
+ },
+
+ "infer":{
+ "train_midas": false,
+ "use_pretrained_midas": false,
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt",
+ "force_keep_ar": true
+ },
+
+ "eval":{
+ "train_midas": false,
+ "use_pretrained_midas": false,
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt"
+ }
+}
\ No newline at end of file
diff --git a/zoedepth/models/zoedepth/zoedepth_v1.py b/zoedepth/models/zoedepth/zoedepth_v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..6228b3a7bfc117744bb8756a8981294461229194
--- /dev/null
+++ b/zoedepth/models/zoedepth/zoedepth_v1.py
@@ -0,0 +1,262 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import itertools
+
+import torch
+import torch.nn as nn
+from zoedepth.models.depth_model import DepthModel
+from zoedepth.models.base_models.midas import MidasCore
+from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed
+from zoedepth.models.layers.dist_layers import ConditionalLogBinomial
+from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor,
+ SeedBinRegressorUnnormed)
+from zoedepth.models.model_io import load_state_from_resource
+
+
+class ZoeDepth(DepthModel):
+ def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10,
+ n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True,
+ midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, add_depth_channel=False, **kwargs):
+ """ZoeDepth model. This is the version of ZoeDepth that has a single metric head
+
+ Args:
+ core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
+ n_bins (int, optional): Number of bin centers. Defaults to 64.
+            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, a linear normalization trick is applied, which results in bounded bin centers.
+                                              For "softplus", a softplus activation is used and the bin centers are therefore unbounded. Defaults to "softplus".
+ bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
+ min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
+ max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
+ n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
+ attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
+ attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
+ attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
+ attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
+ min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
+ max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
+ train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
+ midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10.
+ encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
+ pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
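+            inverse_midas (bool, optional): If True, the relative depth from midas is inverted and min-max normalized before being used as conditioning (see forward). Defaults to False.
+            add_depth_channel (bool, optional): Currently has no effect; the related patch-embedding modification in __init__ is disabled. Defaults to False.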
+ """
+ super().__init__()
+
+ self.core = core
+ self.max_depth = max_depth
+ self.min_depth = min_depth
+ self.min_temp = min_temp
+ self.bin_centers_type = bin_centers_type
+
+ self.midas_lr_factor = midas_lr_factor
+ self.encoder_lr_factor = encoder_lr_factor
+ self.pos_enc_lr_factor = pos_enc_lr_factor
+ self.train_midas = train_midas
+ self.inverse_midas = inverse_midas
+
+ if self.encoder_lr_factor <= 0:
+ self.core.freeze_encoder(
+ freeze_rel_pos=self.pos_enc_lr_factor <= 0)
+
+ N_MIDAS_OUT = 32
+ btlnck_features = self.core.output_channels[0]
+ num_out_features = self.core.output_channels[1:]
+
+ self.conv2 = nn.Conv2d(btlnck_features, btlnck_features,
+ kernel_size=1, stride=1, padding=0) # btlnck conv
+
+ if bin_centers_type == "normed":
+ SeedBinRegressorLayer = SeedBinRegressor
+ Attractor = AttractorLayer
+ elif bin_centers_type == "softplus":
+ SeedBinRegressorLayer = SeedBinRegressorUnnormed
+ Attractor = AttractorLayerUnnormed
+ elif bin_centers_type == "hybrid1":
+ SeedBinRegressorLayer = SeedBinRegressor
+ Attractor = AttractorLayerUnnormed
+ elif bin_centers_type == "hybrid2":
+ SeedBinRegressorLayer = SeedBinRegressorUnnormed
+ Attractor = AttractorLayer
+ else:
+ raise ValueError(
+ "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
+
+ self.seed_bin_regressor = SeedBinRegressorLayer(
+ btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
+ self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
+ self.projectors = nn.ModuleList([
+ Projector(num_out, bin_embedding_dim)
+ for num_out in num_out_features
+ ])
+ self.attractors = nn.ModuleList([
+ Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth,
+ alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type)
+ for i in range(len(num_out_features))
+ ])
+
+ last_in = N_MIDAS_OUT + 1 # +1 for relative depth
+
+ # use log binomial instead of softmax
+ self.conditional_log_binomial = ConditionalLogBinomial(
+ last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp)
+
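+        # NOTE: the block below is a bare string literal (effectively disabled), so add_depth_channel currently has no effect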
+ """
+ if add_depth_channel:
+ additional_depth_channels = 2
+ self.core.core.pretrained.model.patch_embed.proj = torch.nn.Conv2d(
+ self.core.core.pretrained.model.patch_embed.proj.in_channels + additional_depth_channels,
+ self.core.core.pretrained.model.patch_embed.proj.out_channels,
+ kernel_size=self.core.core.pretrained.model.patch_embed.proj.kernel_size,
+ stride=self.core.core.pretrained.model.patch_embed.proj.stride,
+ padding=self.core.core.pretrained.model.patch_embed.proj.padding,
+ bias=True)
+ """
+
+ def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
+ """
+ Args:
+ x (torch.Tensor): Input image tensor of shape (B, C, H, W)
+ return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
+ denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False.
+ return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.
+
+ Returns:
+ dict: Dictionary containing the following keys:
+ - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
+ - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
+ - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True
+ - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True
+
+ """
+ b, c, h, w = x.shape
+ # print("input shape ", x.shape)
+ self.orig_input_width = w
+ self.orig_input_height = h
+ rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
+ # print("output shapes", rel_depth.shape, out.shape)
+
+ outconv_activation = out[0]
+ btlnck = out[1]
+ x_blocks = out[2:]
+
+ x_d0 = self.conv2(btlnck)
+ x = x_d0
+ _, seed_b_centers = self.seed_bin_regressor(x)
+
+ if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
+ b_prev = (seed_b_centers - self.min_depth) / \
+ (self.max_depth - self.min_depth)
+ else:
+ b_prev = seed_b_centers
+
+ prev_b_embedding = self.seed_projector(x)
+
+ # unroll this loop for better performance
+ for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
+ b_embedding = projector(x)
+ b, b_centers = attractor(
+ b_embedding, b_prev, prev_b_embedding, interpolate=True)
+ b_prev = b.clone()
+ prev_b_embedding = b_embedding.clone()
+
+ last = outconv_activation
+
+ if self.inverse_midas:
+ # invert depth followed by normalization
+ rel_depth = 1.0 / (rel_depth + 1e-6)
+ rel_depth = (rel_depth - rel_depth.min()) / \
+ (rel_depth.max() - rel_depth.min())
+ # concat rel depth with last. First interpolate rel depth to last size
+ rel_cond = rel_depth.unsqueeze(1)
+ rel_cond = nn.functional.interpolate(
+ rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True)
+ last = torch.cat([last, rel_cond], dim=1)
+
+ b_embedding = nn.functional.interpolate(
+ b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
+ x = self.conditional_log_binomial(last, b_embedding)
+
+        # The depth value is now sum(p_x * c_x), where c_x are the bin centers from the last bin tensor
+ # print(x.shape, b_centers.shape)
+ b_centers = nn.functional.interpolate(
+ b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
+ out = torch.sum(x * b_centers, dim=1, keepdim=True)
+
+ # Structure output dict
+ output = dict(metric_depth=out)
+ if return_final_centers or return_probs:
+ output['bin_centers'] = b_centers
+
+ if return_probs:
+ output['probs'] = x
+
+ return output
+
+ def get_lr_params(self, lr):
+ """
+ Learning rate configuration for different layers of the model
+ Args:
+ lr (float) : Base learning rate
+ Returns:
+ list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
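+                   e.g. [{"params": <encoder params>, "lr": lr / encoder_lr_factor}, ..., {"params": <remaining params>, "lr": lr}]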
+ """
+ param_conf = []
+ if self.train_midas:
+ if self.encoder_lr_factor > 0:
+ param_conf.append({'params': self.core.get_enc_params_except_rel_pos(
+ ), 'lr': lr / self.encoder_lr_factor})
+
+ if self.pos_enc_lr_factor > 0:
+ param_conf.append(
+ {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor})
+
+ midas_params = self.core.core.scratch.parameters()
+ midas_lr_factor = self.midas_lr_factor
+ param_conf.append(
+ {'params': midas_params, 'lr': lr / midas_lr_factor})
+
+ remaining_modules = []
+ for name, child in self.named_children():
+ if name != 'core':
+ remaining_modules.append(child)
+ remaining_params = itertools.chain(
+ *[child.parameters() for child in remaining_modules])
+
+ param_conf.append({'params': remaining_params, 'lr': lr})
+
+ return param_conf
+
+ @staticmethod
+ def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
+ core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
+ train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
+ model = ZoeDepth(core, **kwargs)
+ if pretrained_resource:
+ assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
+ model = load_state_from_resource(model, pretrained_resource)
+ return model
+
+ @staticmethod
+ def build_from_config(config):
+ return ZoeDepth.build(**config)
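+
+
+# Usage sketch (assumed keys, mirroring the config files above; not exhaustive):
+#   model = ZoeDepth.build(midas_model_type="DPT_BEiT_L_384", use_pretrained_midas=True,
+#                          n_bins=64, min_depth=1e-3, max_depth=10)
+#   pred = model(torch.rand(1, 3, 384, 512))["metric_depth"]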
diff --git a/zoedepth/models/zoedepth_nk/__init__.py b/zoedepth/models/zoedepth_nk/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..513a278b939c10c010e3c0250ec73544d5663886
--- /dev/null
+++ b/zoedepth/models/zoedepth_nk/__init__.py
@@ -0,0 +1,31 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from .zoedepth_nk_v1 import ZoeDepthNK
+
+all_versions = {
+ "v1": ZoeDepthNK,
+}
+
+get_version = lambda v : all_versions[v]
\ No newline at end of file
diff --git a/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json b/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json
new file mode 100644
index 0000000000000000000000000000000000000000..42bab2a3ad159a09599a5aba270c491021a3cf1a
--- /dev/null
+++ b/zoedepth/models/zoedepth_nk/config_zoedepth_nk.json
@@ -0,0 +1,67 @@
+{
+ "model": {
+ "name": "ZoeDepthNK",
+ "version_name": "v1",
+ "bin_conf" : [
+ {
+ "name": "nyu",
+ "n_bins": 64,
+ "min_depth": 1e-3,
+ "max_depth": 10.0
+ },
+ {
+ "name": "kitti",
+ "n_bins": 64,
+ "min_depth": 1e-3,
+ "max_depth": 80.0
+ }
+ ],
+ "bin_embedding_dim": 128,
+ "bin_centers_type": "softplus",
+ "n_attractors":[16, 8, 4, 1],
+ "attractor_alpha": 1000,
+ "attractor_gamma": 2,
+ "attractor_kind" : "mean",
+ "attractor_type" : "inv",
+ "min_temp": 0.0212,
+ "max_temp": 50.0,
+ "memory_efficient": true,
+ "midas_model_type" : "DPT_BEiT_L_384",
+ "img_size": [384, 512]
+ },
+
+ "train": {
+ "train_midas": true,
+ "use_pretrained_midas": true,
+ "trainer": "zoedepth_nk",
+ "epochs": 5,
+ "bs": 16,
+ "optim_kwargs": {"lr": 0.0002512, "wd": 0.01},
+ "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true},
+ "same_lr": false,
+ "w_si": 1,
+ "w_domain": 100,
+ "avoid_boundary": false,
+ "random_crop": false,
+ "input_width": 640,
+ "input_height": 480,
+ "w_grad": 0,
+ "w_reg": 0,
+ "midas_lr_factor": 10,
+ "encoder_lr_factor":10,
+ "pos_enc_lr_factor":10
+ },
+
+ "infer": {
+ "train_midas": false,
+ "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt",
+ "use_pretrained_midas": false,
+ "force_keep_ar": true
+ },
+
+ "eval": {
+ "train_midas": false,
+ "pretrained_resource": "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_NK.pt",
+ "use_pretrained_midas": false
+ }
+}
\ No newline at end of file
diff --git a/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py b/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..7368ae8031188a9f946d9d3f29633c96e791e68e
--- /dev/null
+++ b/zoedepth/models/zoedepth_nk/zoedepth_nk_v1.py
@@ -0,0 +1,333 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import itertools
+
+import torch
+import torch.nn as nn
+
+from zoedepth.models.depth_model import DepthModel
+from zoedepth.models.base_models.midas import MidasCore
+from zoedepth.models.layers.attractor import AttractorLayer, AttractorLayerUnnormed
+from zoedepth.models.layers.dist_layers import ConditionalLogBinomial
+from zoedepth.models.layers.localbins_layers import (Projector, SeedBinRegressor,
+ SeedBinRegressorUnnormed)
+from zoedepth.models.layers.patch_transformer import PatchTransformerEncoder
+from zoedepth.models.model_io import load_state_from_resource
+
+
+class ZoeDepthNK(DepthModel):
+ def __init__(self, core, bin_conf, bin_centers_type="softplus", bin_embedding_dim=128,
+ n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp',
+ min_temp=5, max_temp=50,
+ memory_efficient=False, train_midas=True,
+ is_midas_pretrained=True, midas_lr_factor=1, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
+ """ZoeDepthNK model. This is the version of ZoeDepth that has two metric heads and uses a learned router to route to experts.
+
+ Args:
+ core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
+
+ bin_conf (List[dict]): A list of dictionaries that contain the bin configuration for each metric head. Each dictionary should contain the following keys:
+ "name" (str, typically same as the dataset name), "n_bins" (int), "min_depth" (float), "max_depth" (float)
+
+ The length of this list determines the number of metric heads.
+            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, a linear normalization trick is applied, which results in bounded bin centers.
+                                              For "softplus", a softplus activation is used and the bin centers are therefore unbounded. Defaults to "softplus".
+ bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
+
+ n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
+ attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
+ attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
+ attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
+ attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
+
+ min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
+ max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
+
+            memory_efficient (bool, optional): Whether to use the memory-efficient version of the attractor layers. It is slower but recommended when using multiple metric heads, in order to save GPU memory. Defaults to False.
+
+ train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
+ is_midas_pretrained (bool, optional): Is "core" pretrained? Defaults to True.
+            midas_lr_factor (int, optional): Learning rate reduction factor for the base midas model, excluding its encoder and positional encodings; only applied when is_midas_pretrained is True. Defaults to 1.
+ encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
+ pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
+
+ """
+
+ super().__init__()
+
+ self.core = core
+ self.bin_conf = bin_conf
+ self.min_temp = min_temp
+ self.max_temp = max_temp
+ self.memory_efficient = memory_efficient
+ self.train_midas = train_midas
+ self.is_midas_pretrained = is_midas_pretrained
+ self.midas_lr_factor = midas_lr_factor
+ self.encoder_lr_factor = encoder_lr_factor
+ self.pos_enc_lr_factor = pos_enc_lr_factor
+ self.inverse_midas = inverse_midas
+
+ N_MIDAS_OUT = 32
+ btlnck_features = self.core.output_channels[0]
+ num_out_features = self.core.output_channels[1:]
+ # self.scales = [16, 8, 4, 2] # spatial scale factors
+
+ self.conv2 = nn.Conv2d(
+ btlnck_features, btlnck_features, kernel_size=1, stride=1, padding=0)
+
+ # Transformer classifier on the bottleneck
+ self.patch_transformer = PatchTransformerEncoder(
+ btlnck_features, 1, 128, use_class_token=True)
+ self.mlp_classifier = nn.Sequential(
+ nn.Linear(128, 128),
+ nn.ReLU(),
+ nn.Linear(128, 2)
+ )
+
+ if bin_centers_type == "normed":
+ SeedBinRegressorLayer = SeedBinRegressor
+ Attractor = AttractorLayer
+ elif bin_centers_type == "softplus":
+ SeedBinRegressorLayer = SeedBinRegressorUnnormed
+ Attractor = AttractorLayerUnnormed
+ elif bin_centers_type == "hybrid1":
+ SeedBinRegressorLayer = SeedBinRegressor
+ Attractor = AttractorLayerUnnormed
+ elif bin_centers_type == "hybrid2":
+ SeedBinRegressorLayer = SeedBinRegressorUnnormed
+ Attractor = AttractorLayer
+ else:
+ raise ValueError(
+ "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")
+ self.bin_centers_type = bin_centers_type
+ # We have bins for each bin conf.
+ # Create a map (ModuleDict) of 'name' -> seed_bin_regressor
+ self.seed_bin_regressors = nn.ModuleDict(
+ {conf['name']: SeedBinRegressorLayer(btlnck_features, conf["n_bins"], mlp_dim=bin_embedding_dim//2, min_depth=conf["min_depth"], max_depth=conf["max_depth"])
+ for conf in bin_conf}
+ )
+
+ self.seed_projector = Projector(
+ btlnck_features, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
+ self.projectors = nn.ModuleList([
+ Projector(num_out, bin_embedding_dim, mlp_dim=bin_embedding_dim//2)
+ for num_out in num_out_features
+ ])
+
+ # Create a map (ModuleDict) of 'name' -> attractors (ModuleList)
+ self.attractors = nn.ModuleDict(
+ {conf['name']: nn.ModuleList([
+ Attractor(bin_embedding_dim, n_attractors[i],
+ mlp_dim=bin_embedding_dim, alpha=attractor_alpha,
+ gamma=attractor_gamma, kind=attractor_kind,
+ attractor_type=attractor_type, memory_efficient=memory_efficient,
+ min_depth=conf["min_depth"], max_depth=conf["max_depth"])
+ for i in range(len(n_attractors))
+ ])
+ for conf in bin_conf}
+ )
+
+ last_in = N_MIDAS_OUT
+ # conditional log binomial for each bin conf
+ self.conditional_log_binomial = nn.ModuleDict(
+ {conf['name']: ConditionalLogBinomial(last_in, bin_embedding_dim, conf['n_bins'], bottleneck_factor=4, min_temp=self.min_temp, max_temp=self.max_temp)
+ for conf in bin_conf}
+ )
+
+ def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
+ """
+ Args:
+ x (torch.Tensor): Input image tensor of shape (B, C, H, W). Assumes all images are from the same domain.
+ return_final_centers (bool, optional): Whether to return the final centers of the attractors. Defaults to False.
+ denorm (bool, optional): Whether to denormalize the input image. Defaults to False.
+ return_probs (bool, optional): Whether to return the probabilities of the bins. Defaults to False.
+
+ Returns:
+ dict: Dictionary of outputs with keys:
+ - "rel_depth": Relative depth map of shape (B, 1, H, W)
+ - "metric_depth": Metric depth map of shape (B, 1, H, W)
+ - "domain_logits": Domain logits of shape (B, 2)
+ - "bin_centers": Bin centers of shape (B, N, H, W). Present only if return_final_centers is True
+ - "probs": Bin probabilities of shape (B, N, H, W). Present only if return_probs is True
+ """
+ b, c, h, w = x.shape
+ self.orig_input_width = w
+ self.orig_input_height = h
+ rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
+
+ outconv_activation = out[0]
+ btlnck = out[1]
+ x_blocks = out[2:]
+
+ x_d0 = self.conv2(btlnck)
+ x = x_d0
+
+ # Predict which path to take
+ embedding = self.patch_transformer(x)[0] # N, E
+ domain_logits = self.mlp_classifier(embedding) # N, 2
+ domain_vote = torch.softmax(domain_logits.sum(
+ dim=0, keepdim=True), dim=-1) # 1, 2
+
+ # Get the path
+ bin_conf_name = ["nyu", "kitti"][torch.argmax(
+ domain_vote, dim=-1).squeeze().item()]
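+        # The vote sums logits over the whole batch, so routing assumes every image in the
+        # batch comes from the same domain; classifier index 0 -> "nyu", index 1 -> "kitti".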
+
+ try:
+ conf = [c for c in self.bin_conf if c.name == bin_conf_name][0]
+ except IndexError:
+ raise ValueError(
+ f"bin_conf_name {bin_conf_name} not found in bin_confs")
+
+ min_depth = conf['min_depth']
+ max_depth = conf['max_depth']
+
+ seed_bin_regressor = self.seed_bin_regressors[bin_conf_name]
+ _, seed_b_centers = seed_bin_regressor(x)
+ if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
+ b_prev = (seed_b_centers - min_depth)/(max_depth - min_depth)
+ else:
+ b_prev = seed_b_centers
+ prev_b_embedding = self.seed_projector(x)
+
+ attractors = self.attractors[bin_conf_name]
+ for projector, attractor, x in zip(self.projectors, attractors, x_blocks):
+ b_embedding = projector(x)
+ b, b_centers = attractor(
+ b_embedding, b_prev, prev_b_embedding, interpolate=True)
+ b_prev = b
+ prev_b_embedding = b_embedding
+
+ last = outconv_activation
+
+ b_centers = nn.functional.interpolate(
+ b_centers, last.shape[-2:], mode='bilinear', align_corners=True)
+ b_embedding = nn.functional.interpolate(
+ b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
+
+ clb = self.conditional_log_binomial[bin_conf_name]
+ x = clb(last, b_embedding)
+
+        # The depth value is now sum(p_x * c_x), where c_x are the bin centers from the last bin tensor
+ # print(x.shape, b_centers.shape)
+ # b_centers = nn.functional.interpolate(b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
+ out = torch.sum(x * b_centers, dim=1, keepdim=True)
+
+ output = dict(domain_logits=domain_logits, metric_depth=out)
+ if return_final_centers or return_probs:
+ output['bin_centers'] = b_centers
+
+ if return_probs:
+ output['probs'] = x
+ return output
+
+ def get_lr_params(self, lr):
+ """
+ Learning rate configuration for different layers of the model
+
+ Args:
+ lr (float) : Base learning rate
+ Returns:
+ list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
+ """
+ param_conf = []
+ if self.train_midas:
+ def get_rel_pos_params():
+ for name, p in self.core.core.pretrained.named_parameters():
+ if "relative_position" in name:
+ yield p
+
+ def get_enc_params_except_rel_pos():
+ for name, p in self.core.core.pretrained.named_parameters():
+ if "relative_position" not in name:
+ yield p
+
+ encoder_params = get_enc_params_except_rel_pos()
+ rel_pos_params = get_rel_pos_params()
+ midas_params = self.core.core.scratch.parameters()
+ midas_lr_factor = self.midas_lr_factor if self.is_midas_pretrained else 1.0
+ param_conf.extend([
+ {'params': encoder_params, 'lr': lr / self.encoder_lr_factor},
+ {'params': rel_pos_params, 'lr': lr / self.pos_enc_lr_factor},
+ {'params': midas_params, 'lr': lr / midas_lr_factor}
+ ])
+
+ remaining_modules = []
+ for name, child in self.named_children():
+ if name != 'core':
+ remaining_modules.append(child)
+ remaining_params = itertools.chain(
+ *[child.parameters() for child in remaining_modules])
+ param_conf.append({'params': remaining_params, 'lr': lr})
+ return param_conf
+
+ def get_conf_parameters(self, conf_name):
+ """
+ Returns parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
+ """
+ params = []
+ for name, child in self.named_children():
+ if isinstance(child, nn.ModuleDict):
+ for bin_conf_name, module in child.items():
+ if bin_conf_name == conf_name:
+ params += list(module.parameters())
+ return params
+
+ def freeze_conf(self, conf_name):
+ """
+ Freezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
+ """
+ for p in self.get_conf_parameters(conf_name):
+ p.requires_grad = False
+
+ def unfreeze_conf(self, conf_name):
+ """
+ Unfreezes all the parameters of all the ModuleDicts children that are exclusively used for the given bin configuration
+ """
+ for p in self.get_conf_parameters(conf_name):
+ p.requires_grad = True
+
+ def freeze_all_confs(self):
+ """
+ Freezes all the parameters of all the ModuleDicts children
+ """
+ for name, child in self.named_children():
+ if isinstance(child, nn.ModuleDict):
+ for bin_conf_name, module in child.items():
+ for p in module.parameters():
+ p.requires_grad = False
+
+ @staticmethod
+ def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
+ core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
+ train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
+ model = ZoeDepthNK(core, **kwargs)
+ if pretrained_resource:
+ assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
+ model = load_state_from_resource(model, pretrained_resource)
+ return model
+
+ @staticmethod
+ def build_from_config(config):
+ return ZoeDepthNK.build(**config)
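+
+
+# Usage sketch (assumptions noted): "model_conf" stands for the "model" section of
+# config_zoedepth_nk.json loaded as an attribute-style dict (forward accesses both
+# conf.name and conf["min_depth"] on the bin_conf entries).
+#   model = ZoeDepthNK.build_from_config(model_conf)
+#   out = model(images)  # single-domain batch; out["metric_depth"], out["domain_logits"]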
diff --git a/zoedepth/trainers/base_trainer.py b/zoedepth/trainers/base_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..445a57cf43b566400136cdd45b470a9a68590069
--- /dev/null
+++ b/zoedepth/trainers/base_trainer.py
@@ -0,0 +1,345 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import os
+import uuid
+import warnings
+from datetime import datetime as dt
+from typing import Dict
+
+import matplotlib.pyplot as plt
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import torch.optim as optim
+import wandb
+from tqdm import tqdm
+
+from zoedepth.utils.config import flatten
+from zoedepth.utils.misc import RunningAverageDict, colorize, colors
+
+import sys
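+# NOTE: machine-specific path so that the external scannet_eval helper can be imported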
+sys.path.append("/users/paule/gaussian-splatting/ZoeDepth")
+from scannet_eval import run_scannet_scene
+
+def is_rank_zero(args):
+ return args.rank == 0
+
+
+class BaseTrainer:
+ def __init__(self, config, model, train_loader, test_loader=None, device=None):
+ """ Base Trainer class for training a model."""
+
+ self.config = config
+ self.metric_criterion = "abs_rel"
+ if device is None:
+ device = torch.device(
+ 'cuda') if torch.cuda.is_available() else torch.device('cpu')
+ self.device = device
+ self.model = model
+ self.train_loader = train_loader
+ self.test_loader = test_loader
+ self.optimizer = self.init_optimizer()
+ self.scheduler = self.init_scheduler()
+
+ def resize_to_target(self, prediction, target):
+ if prediction.shape[2:] != target.shape[-2:]:
+ prediction = nn.functional.interpolate(
+ prediction, size=target.shape[-2:], mode="bilinear", align_corners=True
+ )
+ return prediction
+
+ def load_ckpt(self, checkpoint_dir="./checkpoints", ckpt_type="best"):
+ import glob
+ import os
+
+ from zoedepth.models.model_io import load_wts
+
+ if hasattr(self.config, "checkpoint"):
+ checkpoint = self.config.checkpoint
+ elif hasattr(self.config, "ckpt_pattern"):
+ pattern = self.config.ckpt_pattern
+ matches = glob.glob(os.path.join(
+ checkpoint_dir, f"*{pattern}*{ckpt_type}*"))
+            if len(matches) == 0:
+                raise ValueError(f"No matches found for the pattern {pattern}")
+ checkpoint = matches[0]
+ else:
+ return
+ model = load_wts(self.model, checkpoint)
+ # TODO : Resuming training is not properly supported in this repo. Implement loading / saving of optimizer and scheduler to support it.
+ print("Loaded weights from {0}".format(checkpoint))
+ warnings.warn(
+ "Resuming training is not properly supported in this repo. Implement loading / saving of optimizer and scheduler to support it.")
+ self.model = model
+
+ def init_optimizer(self):
+ m = self.model.module if self.config.multigpu else self.model
+
+ if self.config.same_lr:
+ print("Using same LR")
+ if hasattr(m, 'core'):
+ m.core.unfreeze()
+ params = self.model.parameters()
+ else:
+ print("Using diff LR")
+ if not hasattr(m, 'get_lr_params'):
+ raise NotImplementedError(
+ f"Model {m.__class__.__name__} does not implement get_lr_params. Please implement it or use the same LR for all parameters.")
+
+ params = m.get_lr_params(self.config.lr)
+
+ return optim.AdamW(params, lr=self.config.lr, weight_decay=self.config.wd)
+
+ def init_scheduler(self):
+ lrs = [l['lr'] for l in self.optimizer.param_groups]
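+        # OneCycleLR accepts a list of max learning rates, one per optimizer parameter group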
+ return optim.lr_scheduler.OneCycleLR(self.optimizer, lrs, epochs=self.config.epochs, steps_per_epoch=len(self.train_loader),
+ cycle_momentum=self.config.cycle_momentum,
+ base_momentum=0.85, max_momentum=0.95, div_factor=self.config.div_factor, final_div_factor=self.config.final_div_factor, pct_start=self.config.pct_start, three_phase=self.config.three_phase)
+
+ def train_on_batch(self, batch, train_step):
+ raise NotImplementedError
+
+ def validate_on_batch(self, batch, val_step):
+ raise NotImplementedError
+
+ def raise_if_nan(self, losses):
+ for key, value in losses.items():
+ if torch.isnan(value):
+ raise ValueError(f"{key} is NaN, Stopping training")
+
+ @property
+ def iters_per_epoch(self):
+ return len(self.train_loader)
+
+ @property
+ def total_iters(self):
+ return self.config.epochs * self.iters_per_epoch
+
+ def should_early_stop(self):
+ if self.config.get('early_stop', False) and self.step > self.config.early_stop:
+ return True
+
+ def train(self):
+ print(f"Training {self.config.name}")
+ if self.config.uid is None:
+ self.config.uid = str(uuid.uuid4()).split('-')[-1]
+ run_id = f"{dt.now().strftime('%d-%h_%H-%M')}-{self.config.uid}"
+ self.config.run_id = run_id
+ self.config.experiment_id = f"{self.config.name}{self.config.version_name}_{run_id}"
+ self.should_write = ((not self.config.distributed)
+ or self.config.rank == 0)
+ self.should_log = self.should_write # and logging
+ if self.should_log:
+ tags = self.config.tags.split(
+ ',') if self.config.tags != '' else None
+ wandb.init(project=self.config.project, name=self.config.experiment_id, config=flatten(self.config), dir=self.config.root,
+ tags=tags, notes=self.config.notes, settings=wandb.Settings(start_method="fork"))
+
+ self.model.train()
+ self.step = 0
+ best_loss = np.inf
+ validate_every = int(self.config.validate_every * self.iters_per_epoch)
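+        # config.validate_every is given as a fraction of an epoch and converted to a step interval here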
+
+
+        if self.config.prefetch:
+            for i, batch in tqdm(enumerate(self.train_loader), desc="Prefetching...",
+                                 total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader):
+                pass
+
+ losses = {}
+ def stringify_losses(L): return "; ".join(map(
+ lambda kv: f"{colors.fg.purple}{kv[0]}{colors.reset}: {round(kv[1].item(),3):.4e}", L.items()))
+        for epoch in range(250):  # NOTE: hardcoded to 250 epochs, overriding range(self.config.epochs)
+ if self.should_early_stop():
+ break
+
+ self.epoch = epoch
+ ################################# Train loop ##########################################################
+ if self.should_log:
+ wandb.log({"Epoch": epoch}, step=self.step)
+ pbar = tqdm(enumerate(self.train_loader), desc=f"Epoch: {epoch + 1}/{self.config.epochs}. Loop: Train",
+ total=self.iters_per_epoch) if is_rank_zero(self.config) else enumerate(self.train_loader)
+ for i, batch in pbar:
+ if self.should_early_stop():
+ print("Early stopping")
+ break
+ # print(f"Batch {self.step+1} on rank {self.config.rank}")
+ losses = self.train_on_batch(batch, i)
+ # print(f"trained batch {self.step+1} on rank {self.config.rank}")
+
+ self.raise_if_nan(losses)
+ if is_rank_zero(self.config) and self.config.print_losses:
+ pbar.set_description(
+ f"Epoch: {epoch + 1}/{self.config.epochs}. Loop: Train. Losses: {stringify_losses(losses)}")
+ self.scheduler.step()
+
+ if self.should_log and self.step % 50 == 0:
+ wandb.log({f"Train/{name}": loss.item()
+ for name, loss in losses.items()}, step=self.step)
+
+ self.step += 1
+
+ if self.step % 1000 == 0 and self.should_write:
+ self.save_checkpoint(
+ f"{self.config.experiment_id}_latest.pt")
+
+ ########################################################################################################
+
+ if self.test_loader:
+ if (self.step % validate_every) == 0:
+ self.model.eval()
+ if self.should_write:
+ self.save_checkpoint(
+ f"{self.config.experiment_id}_latest.pt")
+
+ ################################# Validation loop ##################################################
+                    # validate on the entire validation set in every process, but save only from rank 0; inefficient, but it avoids divergence between processes
+ metrics, test_losses = self.validate()
+ # print("Validated: {}".format(metrics))
+ if self.should_log:
+ wandb.log(
+ {f"Test/{name}": tloss for name, tloss in test_losses.items()}, step=self.step)
+
+ wandb.log({f"Metrics/{k}": v for k,
+ v in metrics.items()}, step=self.step)
+
+ if (metrics[self.metric_criterion] < best_loss) and self.should_write:
+ self.save_checkpoint(
+ f"{self.config.experiment_id}_best.pt")
+ best_loss = metrics[self.metric_criterion]
+
+ self.model.train()
+
+ if self.config.distributed and not self.config.debug_mode:
+ dist.barrier()
+ # print(f"Validated: {metrics} on device {self.config.rank}")
+
+ # print(f"Finished step {self.step} on device {self.config.rank}")
+ #################################################################################################
+
+ # Save / validate at the end
+ self.step += 1 # log as final point
+ self.model.eval()
+ self.save_checkpoint(f"{self.config.experiment_id}_latest.pt")
+ if self.test_loader:
+
+ ################################# Validation loop ##################################################
+ metrics, test_losses = self.validate()
+ # print("Validated: {}".format(metrics))
+ if self.should_log:
+ wandb.log({f"Test/{name}": tloss for name,
+ tloss in test_losses.items()}, step=self.step)
+ wandb.log({f"Metrics/{k}": v for k,
+ v in metrics.items()}, step=self.step)
+
+ if (metrics[self.metric_criterion] < best_loss) and self.should_write:
+ self.save_checkpoint(
+ f"{self.config.experiment_id}_best.pt")
+ best_loss = metrics[self.metric_criterion]
+
+ self.model.train()
+
+ def validate(self):
+ # scannet validation
+ if is_rank_zero(self.config):
+ m = self.model.module if self.config.multigpu else self.model
+ d = torch.device(self.device)
+
+ for mode in ("pcd",):
+ # alignment is very slow, so only do it after the second epoch once results are somewhat good
+ #if self.epoch < 3 and mode == "zero": continue
+ scannet_seq_error = run_scannet_scene(zoe_dc_model=m.to(d), device=d, mode=mode, align=(mode == "zero"))
+
+ wandb.log({f"Metrics/scannet_seq_error_{mode}": scannet_seq_error}, step=self.step)
+
+ with torch.no_grad():
+ losses_avg = RunningAverageDict()
+ metrics_avg = RunningAverageDict()
+ for i, batch in tqdm(enumerate(self.test_loader), desc=f"Epoch: {self.epoch + 1}/{self.config.epochs}. Loop: Validation", total=len(self.test_loader), disable=not is_rank_zero(self.config)):
+ metrics, losses = self.validate_on_batch(batch, val_step=i)
+
+ if losses:
+ losses_avg.update(losses)
+ if metrics:
+ metrics_avg.update(metrics)
+
+ return metrics_avg.get_value(), losses_avg.get_value()
+
+ def save_checkpoint(self, filename):
+ if not self.should_write:
+ return
+ root = self.config.save_dir
+ if not os.path.isdir(root):
+ os.makedirs(root)
+
+ fpath = os.path.join(root, filename)
+ m = self.model.module if self.config.multigpu else self.model
+ torch.save(
+ {
+ "model": m.state_dict(),
+ "optimizer": None, # TODO : Change to self.optimizer.state_dict() if resume support is needed, currently None to reduce file size
+ "epoch": self.epoch
+ }, fpath)
+
+ def log_images(self, rgb: Dict[str, list] = {}, depth: Dict[str, list] = {}, scalar_field: Dict[str, list] = {}, prefix="", scalar_cmap="jet", min_depth=None, max_depth=None):
+ if not self.should_log:
+ return
+
+ if min_depth is None:
+ try:
+ min_depth = self.config.min_depth
+ max_depth = self.config.max_depth
+ except AttributeError:
+ min_depth = None
+ max_depth = None
+
+ depth = {k: colorize(v, vmin=min_depth, vmax=max_depth)
+ for k, v in depth.items()}
+ scalar_field = {k: colorize(
+ v, vmin=None, vmax=None, cmap=scalar_cmap) for k, v in scalar_field.items()}
+ images = {**rgb, **depth, **scalar_field}
+ wimages = {
+ prefix+"Predictions": [wandb.Image(v, caption=k) for k, v in images.items()]}
+ wandb.log(wimages, step=self.step)
+
+ def log_line_plot(self, data):
+ if not self.should_log:
+ return
+
+ plt.plot(data)
+ plt.ylabel("Scale factors")
+ wandb.log({"Scale factors": wandb.Image(plt)}, step=self.step)
+ plt.close()
+
+ def log_bar_plot(self, title, labels, values):
+ if not self.should_log:
+ return
+
+ data = [[label, val] for (label, val) in zip(labels, values)]
+ table = wandb.Table(data=data, columns=["label", "value"])
+ wandb.log({title: wandb.plot.bar(table, "label",
+ "value", title=title)}, step=self.step)
diff --git a/zoedepth/trainers/builder.py b/zoedepth/trainers/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a663541b08912ebedce21a68c7599ce4c06e85d0
--- /dev/null
+++ b/zoedepth/trainers/builder.py
@@ -0,0 +1,48 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from importlib import import_module
+
+
+def get_trainer(config):
+ """Builds and returns a trainer based on the config.
+
+ Args:
+ config (dict): the config dict (typically constructed using utils.config.get_config)
+ config.trainer (str): the name of the trainer to use. The module named "{config.trainer}_trainer" must exist in trainers root module
+
+ Raises:
+ ValueError: If the specified trainer does not exist under trainers/ folder
+
+ Returns:
+        Trainer (subclass of zoedepth.trainers.base_trainer.BaseTrainer): The Trainer class itself (not an instance); the caller is expected to instantiate it
+ """
+ assert "trainer" in config and config.trainer is not None and config.trainer != '', "Trainer not specified. Config: {0}".format(
+ config)
+ try:
+ Trainer = getattr(import_module(
+ f"zoedepth.trainers.{config.trainer}_trainer"), 'Trainer')
+ except ModuleNotFoundError as e:
+ raise ValueError(f"Trainer {config.trainer}_trainer not found.") from e
+ return Trainer
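+
+
+# Usage sketch (assumed): the returned class is instantiated by the caller, e.g.
+#   Trainer = get_trainer(config)
+#   trainer = Trainer(config, model, train_loader, test_loader=test_loader)
+#   trainer.train()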
diff --git a/zoedepth/trainers/loss.py b/zoedepth/trainers/loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0f65b2de6b21f2ac5424d26ffdb63c6ce22d412
--- /dev/null
+++ b/zoedepth/trainers/loss.py
@@ -0,0 +1,316 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.cuda.amp as amp
+import numpy as np
+
+
+KEY_OUTPUT = 'metric_depth'
+
+
+def extract_key(prediction, key):
+ if isinstance(prediction, dict):
+ return prediction[key]
+ return prediction
+
+
+# Main loss function used for ZoeDepth. Copy/paste from AdaBins repo (https://github.com/shariqfarooq123/AdaBins/blob/0952d91e9e762be310bb4cd055cbfe2448c0ce20/loss.py#L7)
+class SILogLoss(nn.Module):
+ """SILog loss (pixel-wise)"""
+ def __init__(self, beta=0.15):
+ super(SILogLoss, self).__init__()
+ self.name = 'SILog'
+ self.beta = beta
+
+ def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):
+ input = extract_key(input, KEY_OUTPUT)
+ if input.shape[-1] != target.shape[-1] and interpolate:
+ input = nn.functional.interpolate(
+ input, target.shape[-2:], mode='bilinear', align_corners=True)
+ intr_input = input
+ else:
+ intr_input = input
+
+ if target.ndim == 3:
+ target = target.unsqueeze(1)
+
+ if mask is not None:
+ if mask.ndim == 3:
+ mask = mask.unsqueeze(1)
+
+ input = input[mask]
+ target = target[mask]
+
+ with amp.autocast(enabled=False): # amp causes NaNs in this loss function
+ alpha = 1e-7
+ g = torch.log(input + alpha) - torch.log(target + alpha)
+
+ # n, c, h, w = g.shape
+ # norm = 1/(h*w)
+ # Dg = norm * torch.sum(g**2) - (0.85/(norm**2)) * (torch.sum(g))**2
+
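+            # SILog: Dg = var(g) + beta * mean(g)^2 with g = log(pred) - log(gt); the loss below is 10 * sqrt(Dg)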
+ Dg = torch.var(g) + self.beta * torch.pow(torch.mean(g), 2)
+
+ loss = 10 * torch.sqrt(Dg)
+
+ if torch.isnan(loss):
+ print("Nan SILog loss")
+ print("input:", input.shape)
+ print("target:", target.shape)
+ print("G", torch.sum(torch.isnan(g)))
+ print("Input min max", torch.min(input), torch.max(input))
+ print("Target min max", torch.min(target), torch.max(target))
+ print("Dg", torch.isnan(Dg))
+ print("loss", torch.isnan(loss))
+
+ if not return_interpolated:
+ return loss
+
+ return loss, intr_input
+
+
+def grad(x):
+ # x.shape : n, c, h, w
+ diff_x = x[..., 1:, 1:] - x[..., 1:, :-1]
+ diff_y = x[..., 1:, 1:] - x[..., :-1, 1:]
+ mag = diff_x**2 + diff_y**2
+ # angle_ratio
+ angle = torch.atan(diff_y / (diff_x + 1e-10))
+ return mag, angle
+
+
+def grad_mask(mask):
+ return mask[..., 1:, 1:] & mask[..., 1:, :-1] & mask[..., :-1, 1:]
+
+
+class GradL1Loss(nn.Module):
+ """Gradient loss"""
+ def __init__(self):
+ super(GradL1Loss, self).__init__()
+ self.name = 'GradL1'
+
+ def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):
+ input = extract_key(input, KEY_OUTPUT)
+ if input.shape[-1] != target.shape[-1] and interpolate:
+ input = nn.functional.interpolate(
+ input, target.shape[-2:], mode='bilinear', align_corners=True)
+ intr_input = input
+ else:
+ intr_input = input
+
+ grad_gt = grad(target)
+ grad_pred = grad(input)
+ mask_g = grad_mask(mask)
+
+ loss = nn.functional.l1_loss(grad_pred[0][mask_g], grad_gt[0][mask_g])
+ loss = loss + \
+ nn.functional.l1_loss(grad_pred[1][mask_g], grad_gt[1][mask_g])
+ if not return_interpolated:
+ return loss
+ return loss, intr_input
+
+
+class OrdinalRegressionLoss(object):
+
+ def __init__(self, ord_num, beta, discretization="SID"):
+ self.ord_num = ord_num
+ self.beta = beta
+ self.discretization = discretization
+
+ def _create_ord_label(self, gt):
+        N, one, H, W = gt.shape
+ # print("gt shape:", gt.shape)
+
+ ord_c0 = torch.ones(N, self.ord_num, H, W).to(gt.device)
+ if self.discretization == "SID":
+ label = self.ord_num * torch.log(gt) / np.log(self.beta)
+ else:
+ label = self.ord_num * (gt - 1.0) / (self.beta - 1.0)
+ label = label.long()
+ mask = torch.linspace(0, self.ord_num - 1, self.ord_num, requires_grad=False) \
+ .view(1, self.ord_num, 1, 1).to(gt.device)
+ mask = mask.repeat(N, 1, H, W).contiguous().long()
+ mask = (mask > label)
+ ord_c0[mask] = 0
+ ord_c1 = 1 - ord_c0
+ # implementation according to the paper.
+ # ord_label = torch.ones(N, self.ord_num * 2, H, W).to(gt.device)
+ # ord_label[:, 0::2, :, :] = ord_c0
+ # ord_label[:, 1::2, :, :] = ord_c1
+ # reimplementation for fast speed.
+ ord_label = torch.cat((ord_c0, ord_c1), dim=1)
+ return ord_label, mask
+
+ def __call__(self, prob, gt):
+ """
+ :param prob: ordinal regression probability, N x 2*Ord Num x H x W, torch.Tensor
+ :param gt: depth ground truth, NXHxW, torch.Tensor
+ :return: loss: loss value, torch.float
+ """
+ # N, C, H, W = prob.shape
+ valid_mask = gt > 0.
+ ord_label, mask = self._create_ord_label(gt)
+ # print("prob shape: {}, ord label shape: {}".format(prob.shape, ord_label.shape))
+ entropy = -prob * ord_label
+ loss = torch.sum(entropy, dim=1)[valid_mask.squeeze(1)]
+ return loss.mean()
+
+
+class DiscreteNLLLoss(nn.Module):
+ """Cross entropy loss"""
+ def __init__(self, min_depth=1e-3, max_depth=10, depth_bins=64):
+ super(DiscreteNLLLoss, self).__init__()
+ self.name = 'CrossEntropy'
+ self.ignore_index = -(depth_bins + 1)
+ # self._loss_func = nn.NLLLoss(ignore_index=self.ignore_index)
+ self._loss_func = nn.CrossEntropyLoss(ignore_index=self.ignore_index)
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.depth_bins = depth_bins
+ self.alpha = 1
+ self.zeta = 1 - min_depth
+ self.beta = max_depth + self.zeta
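+        # the shift zeta maps the depth range [min_depth, max_depth] onto [1, beta], the interval assumed by quantize_depth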
+
+ def quantize_depth(self, depth):
+ # depth : N1HW
+ # output : NCHW
+
+ # Quantize depth log-uniformly on [1, self.beta] into self.depth_bins bins
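+        # i.e. bin = round((depth_bins - 1) * log(d / alpha) / log(beta / alpha))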
+ depth = torch.log(depth / self.alpha) / np.log(self.beta / self.alpha)
+ depth = depth * (self.depth_bins - 1)
+ depth = torch.round(depth)
+ depth = depth.long()
+ return depth
+
+
+
+    def _dequantize_depth(self, depth):
+        """
+        Inverse of quantization
+        depth : NCHW -> N1HW
+        """
+        # Get the center of the bin. Sketch of the inverse of the log-uniform mapping used in
+        # quantize_depth: bin index q -> alpha * (beta / alpha) ** (q / (depth_bins - 1))
+        depth = depth.float() / (self.depth_bins - 1)
+        return self.alpha * (self.beta / self.alpha) ** depth
+
+
+ def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):
+ input = extract_key(input, KEY_OUTPUT)
+ # assert torch.all(input <= 0), "Input should be negative"
+
+ if input.shape[-1] != target.shape[-1] and interpolate:
+ input = nn.functional.interpolate(
+ input, target.shape[-2:], mode='bilinear', align_corners=True)
+ intr_input = input
+ else:
+ intr_input = input
+
+        # assert torch.all(input <= 1)
+ if target.ndim == 3:
+ target = target.unsqueeze(1)
+
+ target = self.quantize_depth(target)
+ if mask is not None:
+ if mask.ndim == 3:
+ mask = mask.unsqueeze(1)
+
+ # Set the mask to ignore_index
+ mask = mask.long()
+ input = input * mask + (1 - mask) * self.ignore_index
+ target = target * mask + (1 - mask) * self.ignore_index
+
+
+
+ input = input.flatten(2) # N, nbins, H*W
+ target = target.flatten(1) # N, H*W
+ loss = self._loss_func(input, target)
+
+ if not return_interpolated:
+ return loss
+ return loss, intr_input
+
+
+
+
+def compute_scale_and_shift(prediction, target, mask):
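+    # Closed-form least squares: per image, find (scale, shift) minimizing sum(mask * (scale * prediction + shift - target) ** 2)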
+ # system matrix: A = [[a_00, a_01], [a_10, a_11]]
+ a_00 = torch.sum(mask * prediction * prediction, (1, 2))
+ a_01 = torch.sum(mask * prediction, (1, 2))
+ a_11 = torch.sum(mask, (1, 2))
+
+ # right hand side: b = [b_0, b_1]
+ b_0 = torch.sum(mask * prediction * target, (1, 2))
+ b_1 = torch.sum(mask * target, (1, 2))
+
+ # solution: x = A^-1 . b = [[a_11, -a_01], [-a_10, a_00]] / (a_00 * a_11 - a_01 * a_10) . b
+ scale = torch.zeros_like(b_0)
+ shift = torch.zeros_like(b_1)
+
+ det = a_00 * a_11 - a_01 * a_01
+ valid = det.nonzero()
+
+ scale[valid] = (a_11[valid] * b_0[valid] - a_01[valid] * b_1[valid]) / det[valid]
+ shift[valid] = (-a_01[valid] * b_0[valid] + a_00[valid] * b_1[valid]) / det[valid]
+
+ return scale, shift
+
+class ScaleAndShiftInvariantLoss(nn.Module):
+ def __init__(self):
+ super().__init__()
+ self.name = "SSILoss"
+
+ def forward(self, prediction, target, mask, interpolate=True, return_interpolated=False):
+
+ if prediction.shape[-1] != target.shape[-1] and interpolate:
+ prediction = nn.functional.interpolate(prediction, target.shape[-2:], mode='bilinear', align_corners=True)
+ intr_input = prediction
+ else:
+ intr_input = prediction
+
+
+ #prediction, target, mask = prediction.squeeze(), target.squeeze(), mask.squeeze()
+ assert prediction.shape == target.shape, f"Shape mismatch: Expected same shape but got {prediction.shape} and {target.shape}."
+
+ scale, shift = compute_scale_and_shift(prediction, target, mask)
+
+ scaled_prediction = scale.view(-1, 1, 1) * prediction + shift.view(-1, 1, 1)
+
+ loss = nn.functional.l1_loss(scaled_prediction[mask], target[mask])
+ if not return_interpolated:
+ return loss
+ return loss, intr_input
+
+
+
+
+if __name__ == '__main__':
+ # Tests for DiscreteNLLLoss
+ celoss = DiscreteNLLLoss()
+    # target depths kept in [1, 10] so the log-uniform quantization yields valid (non-negative) bin indices
+    print(celoss(torch.rand(4, 64, 26, 32) * 10, torch.rand(4, 1, 26, 32) * 9 + 1))
+
+ d = torch.Tensor([6.59, 3.8, 10.0])
+    print(celoss._dequantize_depth(celoss.quantize_depth(d)))
diff --git a/zoedepth/trainers/zoedepth_nk_trainer.py b/zoedepth/trainers/zoedepth_nk_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..56b0d3cb152dd9d92644983b16bf4c1a3e68684c
--- /dev/null
+++ b/zoedepth/trainers/zoedepth_nk_trainer.py
@@ -0,0 +1,143 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.cuda.amp as amp
+import torch.nn as nn
+
+from zoedepth.trainers.loss import GradL1Loss, SILogLoss
+from zoedepth.utils.config import DATASETS_CONFIG
+from zoedepth.utils.misc import compute_metrics
+
+from .base_trainer import BaseTrainer
+
+
+class Trainer(BaseTrainer):
+ def __init__(self, config, model, train_loader, test_loader=None, device=None):
+ super().__init__(config, model, train_loader,
+ test_loader=test_loader, device=device)
+ #self.device = device
+ self.silog_loss = SILogLoss()
+ self.grad_loss = GradL1Loss()
+ self.domain_classifier_loss = nn.CrossEntropyLoss()
+
+ self.scaler = amp.GradScaler(enabled=self.config.use_amp)
+
+ def train_on_batch(self, batch, train_step):
+ """
+ Expects a batch of images and depth as input
+ batch["image"].shape : batch_size, c, h, w
+ batch["depth"].shape : batch_size, 1, h, w
+
+ Assumes all images in a batch are from the same dataset
+ """
+
+ images, depths_gt = batch['image'].to(
+ self.device), batch['depth'].to(self.device)
+        # batch['dataset'] is a sequence of strings, each either 'nyu' or 'kitti'; domain labels: nyu -> 0, kitti -> 1
+ dataset = batch['dataset'][0]
+ # Convert to 0s or 1s
+ domain_labels = torch.Tensor([dataset == 'kitti' for _ in range(
+ images.size(0))]).to(torch.long).to(self.device)
+
+ # m = self.model.module if self.config.multigpu else self.model
+
+ b, c, h, w = images.size()
+ mask = batch["mask"].to(self.device).to(torch.bool)
+
+ losses = {}
+
+ with amp.autocast(enabled=self.config.use_amp):
+ output = self.model(images)
+ pred_depths = output['metric_depth']
+ domain_logits = output['domain_logits']
+
+ l_si, pred = self.silog_loss(
+ pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True)
+ loss = self.config.w_si * l_si
+ losses[self.silog_loss.name] = l_si
+
+ if self.config.w_grad > 0:
+ l_grad = self.grad_loss(pred, depths_gt, mask=mask)
+ loss = loss + self.config.w_grad * l_grad
+ losses[self.grad_loss.name] = l_grad
+ else:
+ l_grad = torch.Tensor([0])
+
+ if self.config.w_domain > 0:
+ l_domain = self.domain_classifier_loss(
+ domain_logits, domain_labels)
+ loss = loss + self.config.w_domain * l_domain
+ losses["DomainLoss"] = l_domain
+ else:
+ l_domain = torch.Tensor([0.])
+
+ self.scaler.scale(loss).backward()
+
+ if self.config.clip_grad > 0:
+ self.scaler.unscale_(self.optimizer)
+ nn.utils.clip_grad_norm_(
+ self.model.parameters(), self.config.clip_grad)
+
+ self.scaler.step(self.optimizer)
+
+ if self.should_log and self.step > 1 and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0:
+ depths_gt[torch.logical_not(mask)] = -99
+ self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred[0]}, prefix="Train",
+ min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
+
+ self.scaler.update()
+ self.optimizer.zero_grad(set_to_none=True)
+
+ return losses
+
+ def validate_on_batch(self, batch, val_step):
+ images = batch['image'].to(self.device)
+ depths_gt = batch['depth'].to(self.device)
+ dataset = batch['dataset'][0]
+ if 'has_valid_depth' in batch:
+ if not batch['has_valid_depth']:
+ return None, None
+
+ depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0)
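+        # Validation runs with batch size 1; reshape depth to (1, 1, H, W) for the loss and metrics.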
+ with amp.autocast(enabled=self.config.use_amp):
+ m = self.model.module if self.config.multigpu else self.model
+ pred_depths = m(images)["metric_depth"]
+ pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0)
+
+ mask = torch.logical_and(
+ depths_gt > self.config.min_depth, depths_gt < self.config.max_depth)
+ with amp.autocast(enabled=self.config.use_amp):
+ l_depth = self.silog_loss(
+ pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True)
+
+ metrics = compute_metrics(depths_gt, pred_depths, **self.config)
+ losses = {f"{self.silog_loss.name}": l_depth.item()}
+
+ if val_step == 1 and self.should_log:
+ depths_gt[torch.logical_not(mask)] = -99
+ self.log_images(rgb={"Input": images[0]}, depth={"GT": depths_gt[0], "PredictedMono": pred_depths[0]}, prefix="Test",
+ min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
+
+ return metrics, losses
diff --git a/zoedepth/trainers/zoedepth_trainer.py b/zoedepth/trainers/zoedepth_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..df3cf3c17e357e92108987d1bf7c60c707ef4daf
--- /dev/null
+++ b/zoedepth/trainers/zoedepth_trainer.py
@@ -0,0 +1,204 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.cuda.amp as amp
+import torch.nn as nn
+
+from zoedepth.trainers.loss import GradL1Loss, SILogLoss
+from zoedepth.utils.config import DATASETS_CONFIG
+from zoedepth.utils.misc import compute_metrics
+from zoedepth.data.preprocess import get_black_border
+
+from .base_trainer import BaseTrainer
+from torchvision import transforms
+from PIL import Image
+import numpy as np
+
+
+class Trainer(BaseTrainer):
+ def __init__(self, config, model, train_loader, test_loader=None, device=None):
+ super().__init__(config, model, train_loader,
+ test_loader=test_loader, device=device)
+ #self.device = device
+ self.silog_loss = SILogLoss()
+ self.grad_loss = GradL1Loss()
+ self.scaler = amp.GradScaler(enabled=self.config.use_amp)
+
+ def train_on_batch(self, batch, train_step):
+ """
+ Expects a batch of images and depth as input
+ batch["image"].shape : batch_size, c, h, w
+ batch["depth"].shape : batch_size, 1, h, w
+ """
+
+ images, depths_gt = batch['image'].to(
+ self.device), batch['depth'].to(self.device)
+ if "masked_depth" in batch.keys():
+            # FIXME: fix the permutation here; this was missed somewhere upstream
+ masked_depth = batch["masked_depth"].to(self.device).permute(0, 3, 1, 2)
+ depth_mask = (masked_depth != 0).float()
+
+ dataset = batch['dataset'][0]
+
+ max_depth = self.config.max_depth
+
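+        # With add_depth_channel the network consumes 5 input channels:
+        # RGB, a depth prior divided by max_depth, and a binary validity mask.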
+ if self.config["add_depth_channel"] and "masked_depth" in batch.keys():
+ images = torch.cat([images, masked_depth / max_depth, depth_mask], dim=1)
+        elif self.config["add_depth_channel"]:
+            # No masked depth in this batch: derive the validity mask from the ground-truth depth
+            # itself (non-zero = valid) so depth_mask is always defined on this path.
+            depth_mask = (depths_gt != 0).float()
+            images = torch.cat([images, depths_gt / max_depth, depth_mask], dim=1)
+
+ b, c, h, w = images.size()
+ mask = batch["mask"].to(self.device).to(torch.bool)
+
+ losses = {}
+
+ with amp.autocast(enabled=self.config.use_amp):
+
+ output = self.model(images)
+ pred_depths = output['metric_depth']
+
+ l_si, pred = self.silog_loss(
+ pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True)
+ loss = self.config.w_si * l_si
+ losses[self.silog_loss.name] = l_si
+
+ if self.config.w_grad > 0:
+ l_grad = self.grad_loss(pred, depths_gt, mask=mask)
+ loss = loss + self.config.w_grad * l_grad
+ losses[self.grad_loss.name] = l_grad
+ else:
+ l_grad = torch.Tensor([0])
+
+ if hasattr(self.config, "w_sd") and self.config.w_sd > 0:
+ l_sd = (nn.functional.mse_loss(pred, depths_gt, reduction="none") * depth_mask).mean()
+ loss = loss + self.config.w_sd * l_sd
+ losses["SparseDepth"] = l_sd
+
+ self.scaler.scale(loss).backward()
+
+ if self.config.clip_grad > 0:
+ self.scaler.unscale_(self.optimizer)
+ nn.utils.clip_grad_norm_(
+ self.model.parameters(), self.config.clip_grad)
+
+ self.scaler.step(self.optimizer)
+
+ if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0:
+ # -99 is treated as invalid depth in the log_images function and is colored grey.
+ depths_gt[torch.logical_not(mask)] = -99
+
+ rand_batch_idx = torch.randint(0, b, (1,)).item()
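+            # Log a randomly chosen sample from the batch instead of always showing index 0.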
+
+ depth_log_items = {"GT": depths_gt[rand_batch_idx], "PredictedMono": pred[rand_batch_idx]}
+ if "masked_depth" in batch.keys():
+ depth_log_items["MaskedGT"] = masked_depth[rand_batch_idx]
+
+ self.log_images(rgb={"Input": images[rand_batch_idx, :3, ...]}, depth=depth_log_items, prefix="Train",
+ min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
+
+ if self.config.get("log_rel", False):
+ self.log_images(
+ scalar_field={"RelPred": output["relative_depth"][rand_batch_idx]}, prefix="TrainRel")
+
+ self.scaler.update()
+ self.optimizer.zero_grad()
+
+ return losses
+
+ @torch.no_grad()
+ def eval_infer(self, x):
+ with amp.autocast(enabled=self.config.use_amp):
+ m = self.model.module if self.config.multigpu else self.model
+ pred_depths = m(x)['metric_depth']
+ return pred_depths
+
+ @torch.no_grad()
+ def crop_aware_infer(self, x):
+ # if we are not avoiding the black border, we can just use the normal inference
+ if not self.config.get("avoid_boundary", False):
+ return self.eval_infer(x)
+
+ # otherwise, we need to crop the image to avoid the black border
+ # For now, this may be a bit slow due to converting to numpy and back
+ # We assume no normalization is done on the input image
+
+ # get the black border
+ assert x.shape[0] == 1, "Only batch size 1 is supported for now"
+ x_pil = transforms.ToPILImage()(x[0].cpu())
+ x_np = np.array(x_pil, dtype=np.uint8)
+ black_border_params = get_black_border(x_np)
+ top, bottom, left, right = black_border_params.top, black_border_params.bottom, black_border_params.left, black_border_params.right
+ x_np_cropped = x_np[top:bottom, left:right, :]
+ x_cropped = transforms.ToTensor()(Image.fromarray(x_np_cropped))
+
+ # run inference on the cropped image
+ pred_depths_cropped = self.eval_infer(x_cropped.unsqueeze(0).to(self.device))
+
+ # resize the prediction to x_np_cropped's size
+ pred_depths_cropped = nn.functional.interpolate(
+ pred_depths_cropped, size=(x_np_cropped.shape[0], x_np_cropped.shape[1]), mode="bilinear", align_corners=False)
+
+
+ # pad the prediction back to the original size
+ pred_depths = torch.zeros((1, 1, x_np.shape[0], x_np.shape[1]), device=pred_depths_cropped.device, dtype=pred_depths_cropped.dtype)
+ pred_depths[:, :, top:bottom, left:right] = pred_depths_cropped
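+        # Pixels outside the detected content region are left at zero depth.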
+
+ return pred_depths
+
+ def validate_on_batch(self, batch, val_step):
+ images = batch['image'].to(self.device)
+ depths_gt = batch['depth'].to(self.device)
+ dataset = batch['dataset'][0]
+ mask = batch["mask"].to(self.device)
+ if 'has_valid_depth' in batch:
+ if not batch['has_valid_depth']:
+ return None, None
+
+ depths_gt = depths_gt.squeeze().unsqueeze(0).unsqueeze(0)
+ mask = mask.squeeze().unsqueeze(0).unsqueeze(0)
+
+ if self.config["add_depth_channel"]:
+ images = torch.cat([images, torch.zeros_like(depths_gt), torch.zeros_like(depths_gt)], dim=1)
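+            # No depth prior is given at validation time; the prior and mask channels are zero-filled placeholders.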
+
+ if dataset == 'nyu':
+ pred_depths = self.crop_aware_infer(images)
+ else:
+ pred_depths = self.eval_infer(images)
+ pred_depths = pred_depths.squeeze().unsqueeze(0).unsqueeze(0)
+
+ with amp.autocast(enabled=self.config.use_amp):
+ l_depth = self.silog_loss(
+ pred_depths, depths_gt, mask=mask.to(torch.bool), interpolate=True)
+
+ metrics = compute_metrics(depths_gt, pred_depths, **self.config)
+ losses = {f"{self.silog_loss.name}": l_depth.item()}
+
+ if val_step == 1 and self.should_log:
+ depths_gt[torch.logical_not(mask)] = -99
+ self.log_images(rgb={"Input": images[0, :3, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred_depths[0]}, prefix="Test",
+ min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth'])
+
+ return metrics, losses
diff --git a/zoedepth/utils/__init__.py b/zoedepth/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5f2668792389157609abb2a0846fb620e7d67eb9
--- /dev/null
+++ b/zoedepth/utils/__init__.py
@@ -0,0 +1,24 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
diff --git a/zoedepth/utils/arg_utils.py b/zoedepth/utils/arg_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a3004ec3679c0a40fd8961253733fb4343ad545
--- /dev/null
+++ b/zoedepth/utils/arg_utils.py
@@ -0,0 +1,33 @@
+
+
+def infer_type(x): # hacky way to infer type from string args
+ if not isinstance(x, str):
+ return x
+
+ try:
+ x = int(x)
+ return x
+ except ValueError:
+ pass
+
+ try:
+ x = float(x)
+ return x
+ except ValueError:
+ pass
+
+ return x
+
+
+def parse_unknown(unknown_args):
+ clean = []
+ for a in unknown_args:
+ if "=" in a:
+            k, v = a.split("=", 1)  # split only on the first '=' so values may themselves contain '='
+ clean.extend([k, v])
+ else:
+ clean.append(a)
+
+ keys = clean[::2]
+ values = clean[1::2]
+ return {k.replace("--", ""): infer_type(v) for k, v in zip(keys, values)}
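+
+# Illustrative example: parse_unknown(["--lr=0.001", "--n_bins", "64"])
+# returns {"lr": 0.001, "n_bins": 64}; infer_type casts numeric strings to int/float.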
diff --git a/zoedepth/utils/config.py b/zoedepth/utils/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..4933427f164c08687a88b296595622153bd74c79
--- /dev/null
+++ b/zoedepth/utils/config.py
@@ -0,0 +1,504 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import json
+import os
+
+from zoedepth.utils.easydict import EasyDict as edict
+
+from zoedepth.utils.arg_utils import infer_type
+import pathlib
+import platform
+
+ROOT = pathlib.Path(__file__).parent.parent.resolve()
+
+HOME_DIR = os.path.expanduser("~")
+
+COMMON_CONFIG = {
+ "save_dir": os.path.expanduser("~/shortcuts/monodepth3_checkpoints/pytorch3d_masked_depth_nyu_masked_zoedepth"),
+ "project": "ZoeDepth",
+ "tags": '',
+ "notes": "",
+ "gpu": None,
+ "root": ".",
+ "uid": None,
+ "print_losses": False
+}
+
+DATASETS_CONFIG = {
+ "kitti": {
+ "dataset": "kitti",
+ "min_depth": 0.001,
+ "max_depth": 80,
+ "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
+ "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
+ "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
+ "input_height": 352,
+ "input_width": 1216, # 704
+ "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
+ "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
+ "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
+
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+
+ "do_random_rotate": True,
+ "degree": 1.0,
+ "do_kb_crop": True,
+ "garg_crop": True,
+ "eigen_crop": False,
+ "use_right": False
+ },
+ "kitti_test": {
+ "dataset": "kitti",
+ "min_depth": 0.001,
+ "max_depth": 80,
+ "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
+ "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
+ "filenames_file": "./train_test_inputs/kitti_eigen_train_files_with_gt.txt",
+ "input_height": 352,
+ "input_width": 1216,
+ "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/raw"),
+ "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/kitti/gts"),
+ "filenames_file_eval": "./train_test_inputs/kitti_eigen_test_files_with_gt.txt",
+
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+
+ "do_random_rotate": False,
+ "degree": 1.0,
+ "do_kb_crop": True,
+ "garg_crop": True,
+ "eigen_crop": False,
+ "use_right": False
+ },
+ "nyu": {
+ "dataset": "nyu",
+ "avoid_boundary": False,
+ "min_depth": 1e-3, # originally 0.1
+ "max_depth": 10,
+ "data_path": "/scratch/shared/beegfs/paule/datasets/nyu-depth-v2/sync/",
+ "gt_path": "/scratch/shared/beegfs/paule/datasets/nyu-depth-v2/sync/",
+ "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
+ "input_height": 480,
+ "input_width": 640,
+ "data_path_eval": "/scratch/shared/beegfs/paule/datasets/nyu-depth-v2/official_splits/test/",
+ "gt_path_eval": "/scratch/shared/beegfs/paule/datasets/nyu-depth-v2/official_splits/test/",
+ "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 10,
+ "min_depth_diff": -10,
+ "max_depth_diff": 10,
+
+ "do_random_rotate": True,
+ "degree": 1.0,
+ "do_kb_crop": False,
+ "garg_crop": False,
+ "eigen_crop": True
+ },
+ "marigold_nyu": {
+ "dataset": "marigold_nyu",
+ "avoid_boundary": True,
+ "min_depth": 1e-3, # originally 0.1
+ "max_depth": 10,
+
+ "nyu_dir_root": "/scratch/shared/nfs2/paule/sync/",
+ "marigold_depth_root": "/scratch/shared/beegfs/paule/datasets/nyu_depth_marigold/",
+
+ "data_path": "/scratch/shared/nfs2/paule/sync/",
+ "gt_path": "/scratch/shared/nfs2/paule/sync/",
+ "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
+ "input_height": 480,
+ "input_width": 640,
+ "data_path_eval": "/scratch/shared/nfs2/paule/official_splits/test/",
+ "gt_path_eval": "/scratch/shared/nfs2/paule/official_splits/test/",
+ "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 10,
+ "min_depth_diff": -10,
+ "max_depth_diff": 10,
+
+ "do_random_rotate": False,
+ "degree": 1.0,
+ "do_kb_crop": False,
+ "garg_crop": False,
+ "eigen_crop": True
+ },
+ "places365": {
+ "dataset": "places365",
+
+ # dummy to prevent errors
+ "data_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
+ "gt_path": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/sync/"),
+ "filenames_file": "./train_test_inputs/nyudepthv2_train_files_with_gt.txt",
+
+ "places365_root": "/scratch/shared/beegfs/shared-datasets/Places/Places/train",
+ "places365_depth_root": "/scratch/shared/beegfs/paule/datasets/places365_depth/",
+ "places365_depth_masks_root": "/scratch/shared/beegfs/paule/datasets/places365_depth_masks/",
+ "randomize_masks": True,
+
+ "avoid_boundary": False,
+ "input_height": 256,
+ "input_width": 256,
+ "min_depth": 1e-3, # originally 0.1
+ "max_depth": 10,
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 0,
+ "max_depth_eval": 8,
+ "do_random_rotate": False,
+ "data_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
+ "gt_path_eval": os.path.join(HOME_DIR, "shortcuts/datasets/nyu_depth_v2/official_splits/test/"),
+ "filenames_file_eval": "./train_test_inputs/nyudepthv2_test_files_with_gt.txt",
+ },
+ "ibims": {
+ "dataset": "ibims",
+ "ibims_root": os.path.join(HOME_DIR, "shortcuts/datasets/ibims/ibims1_core_raw/"),
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 0,
+ "max_depth_eval": 10,
+ "min_depth": 1e-3,
+ "max_depth": 10
+ },
+ "sunrgbd": {
+ "dataset": "sunrgbd",
+ "sunrgbd_root": os.path.join(HOME_DIR, "shortcuts/datasets/SUNRGBD/test/"),
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 0,
+ "max_depth_eval": 8,
+ "min_depth": 1e-3,
+ "max_depth": 10
+ },
+ "diml_indoor": {
+ "dataset": "diml_indoor",
+ "diml_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_indoor_test/"),
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 0,
+ "max_depth_eval": 10,
+ "min_depth": 1e-3,
+ "max_depth": 10
+ },
+ "diml_outdoor": {
+ "dataset": "diml_outdoor",
+ "diml_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diml_outdoor_test/"),
+ "eigen_crop": False,
+ "garg_crop": True,
+ "do_kb_crop": False,
+ "min_depth_eval": 2,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 80
+ },
+ "diode_indoor": {
+ "dataset": "diode_indoor",
+ "diode_indoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_indoor/"),
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 10,
+ "min_depth": 1e-3,
+ "max_depth": 10
+ },
+ "diode_outdoor": {
+ "dataset": "diode_outdoor",
+ "diode_outdoor_root": os.path.join(HOME_DIR, "shortcuts/datasets/diode_outdoor/"),
+ "eigen_crop": False,
+ "garg_crop": True,
+ "do_kb_crop": False,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 80
+ },
+ "hypersim_test": {
+ "dataset": "hypersim_test",
+ "hypersim_test_root": os.path.join(HOME_DIR, "shortcuts/datasets/hypersim_test/"),
+ "eigen_crop": True,
+ "garg_crop": False,
+ "do_kb_crop": False,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 10
+ },
+ "vkitti": {
+ "dataset": "vkitti",
+ "vkitti_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti_test/"),
+ "eigen_crop": False,
+ "garg_crop": True,
+ "do_kb_crop": True,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 80
+ },
+ "vkitti2": {
+ "dataset": "vkitti2",
+ "vkitti2_root": os.path.join(HOME_DIR, "shortcuts/datasets/vkitti2/"),
+ "eigen_crop": False,
+ "garg_crop": True,
+ "do_kb_crop": True,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 80,
+ },
+ "ddad": {
+ "dataset": "ddad",
+ "ddad_root": os.path.join(HOME_DIR, "shortcuts/datasets/ddad/ddad_val/"),
+ "eigen_crop": False,
+ "garg_crop": True,
+ "do_kb_crop": True,
+ "min_depth_eval": 1e-3,
+ "max_depth_eval": 80,
+ "min_depth": 1e-3,
+ "max_depth": 80,
+ },
+}
+
+ALL_INDOOR = ["nyu", "ibims", "sunrgbd", "diode_indoor", "hypersim_test"]
+ALL_OUTDOOR = ["kitti", "diml_outdoor", "diode_outdoor", "vkitti2", "ddad"]
+ALL_EVAL_DATASETS = ALL_INDOOR + ALL_OUTDOOR
+
+COMMON_TRAINING_CONFIG = {
+ "dataset": "nyu",
+ "distributed": True,
+ "workers": 16,
+ "clip_grad": 0.1,
+ "use_shared_dict": False,
+ "shared_dict": None,
+ "use_amp": False,
+
+ "aug": True,
+ "random_crop": False,
+ "random_translate": False,
+ "translate_prob": 0.2,
+ "max_translation": 100,
+
+ "validate_every": 0.25,
+ "log_images_every": 0.1,
+ "prefetch": False,
+ "add_depth_channel": True,
+ "depth_channel_mask_augment": True,
+ "depth_masking_min": 0.3,
+ "depth_masking_max": 0.8,
+
+ "inpaint_task_probability": 0.5,
+
+ "debug_mode": True,
+ "batch_size": 1,
+
+ "random_inpainting_scaling": True,
+}
+
+
+def flatten(config, except_keys=('bin_conf',)):
+ def recurse(inp):
+ if isinstance(inp, dict):
+ for key, value in inp.items():
+ if key in except_keys:
+ yield (key, value)
+ if isinstance(value, dict):
+ yield from recurse(value)
+ else:
+ yield (key, value)
+
+ return dict(list(recurse(config)))
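+
+# Illustrative behaviour: flatten({"a": 1, "train": {"lr": 1e-3}}) -> {"a": 1, "lr": 1e-3};
+# keys listed in except_keys (e.g. "bin_conf", typically a list of dicts) are kept as-is under their own key.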
+
+
+def split_combined_args(kwargs):
+ """Splits the arguments that are combined with '__' into multiple arguments.
+ Combined arguments should have equal number of keys and values.
+ Keys are separated by '__' and Values are separated with ';'.
+ For example, '__n_bins__lr=256;0.001'
+
+ Args:
+ kwargs (dict): key-value pairs of arguments where key-value is optionally combined according to the above format.
+
+ Returns:
+ dict: Parsed dict with the combined arguments split into individual key-value pairs.
+ """
+ new_kwargs = dict(kwargs)
+ for key, value in kwargs.items():
+ if key.startswith("__"):
+ keys = key.split("__")[1:]
+ values = value.split(";")
+            assert len(keys) == len(
+                values), f"Combined arguments should have an equal number of keys and values. Keys are separated by '__' and values are separated by ';'. For example, '__n_bins__lr=256;0.001'. Given (keys, values) is ({keys}, {values})"
+ for k, v in zip(keys, values):
+ new_kwargs[k] = v
+ return new_kwargs
+
+
+def parse_list(config, key, dtype=int):
+ """Parse a list of values for the key if the value is a string. The values are separated by a comma.
+ Modifies the config in place.
+ """
+ if key in config:
+ if isinstance(config[key], str):
+ config[key] = list(map(dtype, config[key].split(',')))
+ assert isinstance(config[key], list) and all([isinstance(e, dtype) for e in config[key]]
+ ), f"{key} should be a list of values dtype {dtype}. Given {config[key]} of type {type(config[key])} with values of type {[type(e) for e in config[key]]}."
+
+
+def get_model_config(model_name, model_version=None):
+ """Find and parse the .json config file for the model.
+
+ Args:
+ model_name (str): name of the model. The config file should be named config_{model_name}[_{model_version}].json under the models/{model_name} directory.
+ model_version (str, optional): Specific config version. If specified config_{model_name}_{model_version}.json is searched for and used. Otherwise config_{model_name}.json is used. Defaults to None.
+
+ Returns:
+ easydict: the config dictionary for the model.
+ """
+ config_fname = f"config_{model_name}_{model_version}.json" if model_version is not None else f"config_{model_name}.json"
+ config_file = os.path.join(ROOT, "models", model_name, config_fname)
+ if not os.path.exists(config_file):
+ return None
+
+ with open(config_file, "r") as f:
+ config = edict(json.load(f))
+
+ # handle dictionary inheritance
+ # only training config is supported for inheritance
+ if "inherit" in config.train and config.train.inherit is not None:
+ inherit_config = get_model_config(config.train["inherit"]).train
+ for key, value in inherit_config.items():
+ if key not in config.train:
+ config.train[key] = value
+ return edict(config)
+
+
+def update_model_config(config, mode, model_name, model_version=None, strict=False):
+ model_config = get_model_config(model_name, model_version)
+ if model_config is not None:
+ config = {**config, **
+ flatten({**model_config.model, **model_config[mode]})}
+ elif strict:
+ raise ValueError(f"Config file for model {model_name} not found.")
+ return config
+
+
+def check_choices(name, value, choices):
+ # return # No checks in dev branch
+ if value not in choices:
+ raise ValueError(f"{name} {value} not in supported choices {choices}")
+
+
+KEYS_TYPE_BOOL = ["use_amp", "distributed", "use_shared_dict", "same_lr", "aug", "three_phase",
+ "prefetch", "cycle_momentum"] # Casting is not necessary as their int casted values in config are 0 or 1
+
+
+def get_config(model_name, mode='train', dataset=None, **overwrite_kwargs):
+ """Main entry point to get the config for the model.
+
+ Args:
+ model_name (str): name of the desired model.
+ mode (str, optional): "train" or "infer". Defaults to 'train'.
+ dataset (str, optional): If specified, the corresponding dataset configuration is loaded as well. Defaults to None.
+
+ Keyword Args: key-value pairs of arguments to overwrite the default config.
+
+ The order of precedence for overwriting the config is (Higher precedence first):
+ # 1. overwrite_kwargs
+ # 2. "config_version": Config file version if specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{config_version}.json
+ # 3. "version_name": Default Model version specific config specified in overwrite_kwargs. The corresponding config loaded is config_{model_name}_{version_name}.json
+ # 4. common_config: Default config for all models specified in COMMON_CONFIG
+
+ Returns:
+ easydict: The config dictionary for the model.
+ """
+
+
+ check_choices("Model", model_name, ["zoedepth", "zoedepth_nk"])
+ check_choices("Mode", mode, ["train", "infer", "eval"])
+ if mode == "train":
+ check_choices("Dataset", dataset, ["nyu", "kitti", "mix", "places365", "marigold_nyu", None])
+
+ config = flatten({**COMMON_CONFIG, **COMMON_TRAINING_CONFIG})
+ config = update_model_config(config, mode, model_name)
+
+ # update with model version specific config
+ version_name = overwrite_kwargs.get("version_name", config["version_name"])
+ config = update_model_config(config, mode, model_name, version_name)
+
+ # update with config version if specified
+ config_version = overwrite_kwargs.get("config_version", None)
+ if config_version is not None:
+ print("Overwriting config with config_version", config_version)
+ config = update_model_config(config, mode, model_name, config_version)
+
+ # update with overwrite_kwargs
+ # Combined args are useful for hyperparameter search
+ overwrite_kwargs = split_combined_args(overwrite_kwargs)
+ config = {**config, **overwrite_kwargs}
+
+ # Casting to bool # TODO: Not necessary. Remove and test
+ for key in KEYS_TYPE_BOOL:
+ if key in config:
+ config[key] = bool(config[key])
+
+ # Model specific post processing of config
+ parse_list(config, "n_attractors")
+
+ # adjust n_bins for each bin configuration if bin_conf is given and n_bins is passed in overwrite_kwargs
+ if 'bin_conf' in config and 'n_bins' in overwrite_kwargs:
+ bin_conf = config['bin_conf'] # list of dicts
+ n_bins = overwrite_kwargs['n_bins']
+ new_bin_conf = []
+ for conf in bin_conf:
+ conf['n_bins'] = n_bins
+ new_bin_conf.append(conf)
+ config['bin_conf'] = new_bin_conf
+
+ if mode == "train":
+ orig_dataset = dataset
+ if dataset == "mix":
+ dataset = 'nyu' # Use nyu as default for mix. Dataset config is changed accordingly while loading the dataloader
+ if dataset is not None:
+ config['project'] = f"MonoDepth3-{orig_dataset}" # Set project for wandb
+
+ if dataset is not None:
+ config['dataset'] = dataset
+ config = {**DATASETS_CONFIG[dataset], **config}
+
+
+ config['model'] = model_name
+ typed_config = {k: infer_type(v) for k, v in config.items()}
+ # add hostname to config
+ config['hostname'] = platform.node()
+ return edict(typed_config)
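+
+# Typical usage (illustrative):
+#   config = get_config("zoedepth", mode="train", dataset="nyu", batch_size=4)
+# overwrite_kwargs such as batch_size take precedence over the defaults defined above.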
+
+
+def change_dataset(config, new_dataset):
+ config.update(DATASETS_CONFIG[new_dataset])
+ return config
diff --git a/zoedepth/utils/easydict/__init__.py b/zoedepth/utils/easydict/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..15928179b0182c6045d98bc0a7be1c6ca45f675e
--- /dev/null
+++ b/zoedepth/utils/easydict/__init__.py
@@ -0,0 +1,158 @@
+"""
+EasyDict
+Copy/pasted from https://github.com/makinacorpus/easydict
+Original author: Mathieu Leplatre
+"""
+
+class EasyDict(dict):
+ """
+ Get attributes
+
+ >>> d = EasyDict({'foo':3})
+ >>> d['foo']
+ 3
+ >>> d.foo
+ 3
+ >>> d.bar
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'EasyDict' object has no attribute 'bar'
+
+ Works recursively
+
+ >>> d = EasyDict({'foo':3, 'bar':{'x':1, 'y':2}})
+ >>> isinstance(d.bar, dict)
+ True
+ >>> d.bar.x
+ 1
+
+ Bullet-proof
+
+ >>> EasyDict({})
+ {}
+ >>> EasyDict(d={})
+ {}
+ >>> EasyDict(None)
+ {}
+ >>> d = {'a': 1}
+ >>> EasyDict(**d)
+ {'a': 1}
+ >>> EasyDict((('a', 1), ('b', 2)))
+ {'a': 1, 'b': 2}
+
+ Set attributes
+
+ >>> d = EasyDict()
+ >>> d.foo = 3
+ >>> d.foo
+ 3
+ >>> d.bar = {'prop': 'value'}
+ >>> d.bar.prop
+ 'value'
+ >>> d
+ {'foo': 3, 'bar': {'prop': 'value'}}
+ >>> d.bar.prop = 'newer'
+ >>> d.bar.prop
+ 'newer'
+
+
+ Values extraction
+
+ >>> d = EasyDict({'foo':0, 'bar':[{'x':1, 'y':2}, {'x':3, 'y':4}]})
+ >>> isinstance(d.bar, list)
+ True
+ >>> from operator import attrgetter
+ >>> list(map(attrgetter('x'), d.bar))
+ [1, 3]
+ >>> list(map(attrgetter('y'), d.bar))
+ [2, 4]
+ >>> d = EasyDict()
+ >>> list(d.keys())
+ []
+ >>> d = EasyDict(foo=3, bar=dict(x=1, y=2))
+ >>> d.foo
+ 3
+ >>> d.bar.x
+ 1
+
+ Still like a dict though
+
+ >>> o = EasyDict({'clean':True})
+ >>> list(o.items())
+ [('clean', True)]
+
+ And like a class
+
+ >>> class Flower(EasyDict):
+ ... power = 1
+ ...
+ >>> f = Flower()
+ >>> f.power
+ 1
+ >>> f = Flower({'height': 12})
+ >>> f.height
+ 12
+ >>> f['power']
+ 1
+ >>> sorted(f.keys())
+ ['height', 'power']
+
+ update and pop items
+ >>> d = EasyDict(a=1, b='2')
+ >>> e = EasyDict(c=3.0, a=9.0)
+ >>> d.update(e)
+ >>> d.c
+ 3.0
+ >>> d['c']
+ 3.0
+ >>> d.get('c')
+ 3.0
+ >>> d.update(a=4, b=4)
+ >>> d.b
+ 4
+ >>> d.pop('a')
+ 4
+ >>> d.a
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'EasyDict' object has no attribute 'a'
+ """
+ def __init__(self, d=None, **kwargs):
+ if d is None:
+ d = {}
+ else:
+ d = dict(d)
+ if kwargs:
+ d.update(**kwargs)
+ for k, v in d.items():
+ setattr(self, k, v)
+ # Class attributes
+ for k in self.__class__.__dict__.keys():
+ if not (k.startswith('__') and k.endswith('__')) and not k in ('update', 'pop'):
+ setattr(self, k, getattr(self, k))
+
+ def __setattr__(self, name, value):
+ if isinstance(value, (list, tuple)):
+ value = [self.__class__(x)
+ if isinstance(x, dict) else x for x in value]
+ elif isinstance(value, dict) and not isinstance(value, self.__class__):
+ value = self.__class__(value)
+ super(EasyDict, self).__setattr__(name, value)
+ super(EasyDict, self).__setitem__(name, value)
+
+ __setitem__ = __setattr__
+
+ def update(self, e=None, **f):
+ d = e or dict()
+ d.update(f)
+ for k in d:
+ setattr(self, k, d[k])
+
+ def pop(self, k, d=None):
+ delattr(self, k)
+ return super(EasyDict, self).pop(k, d)
+
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
\ No newline at end of file
diff --git a/zoedepth/utils/geometry.py b/zoedepth/utils/geometry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3da8c75b5a8e39b4b58a4dcd827b84d79b9115c
--- /dev/null
+++ b/zoedepth/utils/geometry.py
@@ -0,0 +1,98 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import numpy as np
+
+def get_intrinsics(H,W):
+ """
+ Intrinsics for a pinhole camera model.
+ Assume fov of 55 degrees and central principal point.
+ """
+ f = 0.5 * W / np.tan(0.5 * 55 * np.pi / 180.0)
+ cx = 0.5 * W
+ cy = 0.5 * H
+ return np.array([[f, 0, cx],
+ [0, f, cy],
+ [0, 0, 1]])
+
+def depth_to_points(depth, R=None, t=None):
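+    """Back-project a depth map of shape (B, H, W) into an (H, W, 3) point map for the
+    first batch element, using the pinhole intrinsics from get_intrinsics and an optional
+    rotation R and translation t."""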
+
+ K = get_intrinsics(depth.shape[1], depth.shape[2])
+ Kinv = np.linalg.inv(K)
+ if R is None:
+ R = np.eye(3)
+ if t is None:
+ t = np.zeros(3)
+
+    # M converts from this camera's coordinate convention to PyTorch3D's (it flips the x and y axes)
+ M = np.eye(3)
+ M[0, 0] = -1.0
+ M[1, 1] = -1.0
+
+ height, width = depth.shape[1:3]
+
+ x = np.arange(width)
+ y = np.arange(height)
+ coord = np.stack(np.meshgrid(x, y), -1)
+ coord = np.concatenate((coord, np.ones_like(coord)[:, :, [0]]), -1) # z=1
+ coord = coord.astype(np.float32)
+ # coord = torch.as_tensor(coord, dtype=torch.float32, device=device)
+ coord = coord[None] # bs, h, w, 3
+
+ D = depth[:, :, :, None, None]
+ # print(D.shape, Kinv[None, None, None, ...].shape, coord[:, :, :, :, None].shape )
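+    # Back-projection: X_cam = D * K^{-1} @ [u, v, 1]^T, broadcast over batch, height and width.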
+ pts3D_1 = D * Kinv[None, None, None, ...] @ coord[:, :, :, :, None]
+    # pts3D_1 lives in the camera coordinate convention; convert it to PyTorch3D's
+    pts3D_1 = M[None, None, None, ...] @ pts3D_1
+    # transform from the reference to the target viewpoint
+ pts3D_2 = R[None, None, None, ...] @ pts3D_1 + t[None, None, None, :, None]
+ # pts3D_2 = pts3D_1
+ # depth_2 = pts3D_2[:, :, :, 2, :] # b,1,h,w
+ return pts3D_2[:, :, :, :3, 0][0]
+
+
+def create_triangles(h, w, mask=None):
+ """
+ Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68
+ Creates mesh triangle indices from a given pixel grid size.
+ This function is not and need not be differentiable as triangle indices are
+ fixed.
+    Args:
+      h: (int) denoting the height of the image.
+      w: (int) denoting the width of the image.
+      mask: optional boolean array with h*w entries; only triangles whose three
+        vertices are all valid (True) are kept.
+    Returns:
+ triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
+ """
+ x, y = np.meshgrid(range(w - 1), range(h - 1))
+ tl = y * w + x
+ tr = y * w + x + 1
+ bl = (y + 1) * w + x
+ br = (y + 1) * w + x + 1
+ triangles = np.array([tl, bl, tr, br, tr, bl])
+ triangles = np.transpose(triangles, (1, 2, 0)).reshape(
+ ((w - 1) * (h - 1) * 2, 3))
+ if mask is not None:
+ mask = mask.reshape(-1)
+ triangles = triangles[mask[triangles].all(1)]
+ return triangles
diff --git a/zoedepth/utils/misc.py b/zoedepth/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d430aaae0957ca1cc347929916cc969077087b21
--- /dev/null
+++ b/zoedepth/utils/misc.py
@@ -0,0 +1,368 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+"""Miscellaneous utility functions."""
+
+from scipy import ndimage
+
+import base64
+import math
+import re
+from io import BytesIO
+
+import matplotlib
+import matplotlib.cm
+import numpy as np
+import requests
+import torch
+import torch.distributed as dist
+import torch.nn
+import torch.nn as nn
+import torch.utils.data.distributed
+from PIL import Image
+from torchvision.transforms import ToTensor
+
+
+class RunningAverage:
+ def __init__(self):
+ self.avg = 0
+ self.count = 0
+
+ def append(self, value):
+ self.avg = (value + self.count * self.avg) / (self.count + 1)
+ self.count += 1
+
+ def get_value(self):
+ return self.avg
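+
+# Illustrative: r = RunningAverage(); r.append(1.0); r.append(3.0); r.get_value() -> 2.0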
+
+
+def denormalize(x):
+ """Reverses the imagenet normalization applied to the input.
+
+ Args:
+ x (torch.Tensor - shape(N,3,H,W)): input tensor
+
+ Returns:
+ torch.Tensor - shape(N,3,H,W): Denormalized input
+ """
+ mean = torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1).to(x.device)
+ std = torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1).to(x.device)
+ return x * std + mean
+
+
+class RunningAverageDict:
+ """A dictionary of running averages."""
+ def __init__(self):
+ self._dict = None
+
+ def update(self, new_dict):
+ if new_dict is None:
+ return
+
+ if self._dict is None:
+ self._dict = dict()
+ for key, value in new_dict.items():
+ self._dict[key] = RunningAverage()
+
+ for key, value in new_dict.items():
+ self._dict[key].append(value)
+
+ def get_value(self):
+ if self._dict is None:
+ return None
+ return {key: value.get_value() for key, value in self._dict.items()}
+
+
+def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
+ """Converts a depth map to a color image.
+
+ Args:
+        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
+        vmin (float, optional): vmin-valued entries are mapped to the start color of cmap. If None, the 2nd percentile of the valid values is used. Defaults to None.
+        vmax (float, optional): vmax-valued entries are mapped to the end color of cmap. If None, the 85th percentile of the valid values is used. Defaults to None.
+        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
+        invalid_val (int, optional): Specifies the value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
+        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
+        background_color (tuple[int], optional): 4-tuple RGBA color given to invalid pixels. Defaults to (128, 128, 128, 255).
+ gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
+ value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
+
+ Returns:
+ numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
+ """
+ if isinstance(value, torch.Tensor):
+ value = value.detach().cpu().numpy()
+
+ value = value.squeeze()
+ if invalid_mask is None:
+ invalid_mask = value == invalid_val
+ mask = np.logical_not(invalid_mask)
+
+ # normalize
+ vmin = np.percentile(value[mask],2) if vmin is None else vmin
+ vmax = np.percentile(value[mask],85) if vmax is None else vmax
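+    # Percentile-based limits make the color mapping robust to a few extreme depth values.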
+ if vmin != vmax:
+ value = (value - vmin) / (vmax - vmin) # vmin..vmax
+ else:
+ # Avoid 0-division
+ value = value * 0.
+
+ # squeeze last dim if it exists
+ # grey out the invalid values
+
+ value[invalid_mask] = np.nan
+ cmapper = matplotlib.cm.get_cmap(cmap)
+ if value_transform:
+ value = value_transform(value)
+ # value = value / value.max()
+ value = cmapper(value, bytes=True) # (nxmx4)
+
+ # img = value[:, :, :]
+ img = value[...]
+ img[invalid_mask] = background_color
+
+ # return img.transpose((2, 0, 1))
+ if gamma_corrected:
+ # gamma correction
+ img = img / 255
+ img = np.power(img, 2.2)
+ img = img * 255
+ img = img.astype(np.uint8)
+ return img
+
+
+def count_parameters(model, include_all=False):
+ return sum(p.numel() for p in model.parameters() if p.requires_grad or include_all)
+
+
+def compute_errors(gt, pred):
+ """Compute metrics for 'pred' compared to 'gt'
+
+ Args:
+ gt (numpy.ndarray): Ground truth values
+ pred (numpy.ndarray): Predicted values
+
+ gt.shape should be equal to pred.shape
+
+ Returns:
+ dict: Dictionary containing the following metrics:
+ 'a1': Delta1 accuracy: Fraction of pixels that are within a scale factor of 1.25
+ 'a2': Delta2 accuracy: Fraction of pixels that are within a scale factor of 1.25^2
+ 'a3': Delta3 accuracy: Fraction of pixels that are within a scale factor of 1.25^3
+ 'abs_rel': Absolute relative error
+ 'rmse': Root mean squared error
+ 'log_10': Absolute log10 error
+ 'sq_rel': Squared relative error
+ 'rmse_log': Root mean squared error on the log scale
+ 'silog': Scale invariant log error
+ """
+ thresh = np.maximum((gt / pred), (pred / gt))
+ a1 = (thresh < 1.25).mean()
+ a2 = (thresh < 1.25 ** 2).mean()
+ a3 = (thresh < 1.25 ** 3).mean()
+
+ abs_rel = np.mean(np.abs(gt - pred) / gt)
+ sq_rel = np.mean(((gt - pred) ** 2) / gt)
+
+ rmse = (gt - pred) ** 2
+ rmse = np.sqrt(rmse.mean())
+
+ rmse_log = (np.log(gt) - np.log(pred)) ** 2
+ rmse_log = np.sqrt(rmse_log.mean())
+
+ err = np.log(pred) - np.log(gt)
+ silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
+
+ log_10 = (np.abs(np.log10(gt) - np.log10(pred))).mean()
+ return dict(a1=a1, a2=a2, a3=a3, abs_rel=abs_rel, rmse=rmse, log_10=log_10, rmse_log=rmse_log,
+ silog=silog, sq_rel=sq_rel)
+
+
+def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, **kwargs):
+ """Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.
+ """
+ if 'config' in kwargs:
+ config = kwargs['config']
+ garg_crop = config.garg_crop
+ eigen_crop = config.eigen_crop
+ min_depth_eval = config.min_depth_eval
+ max_depth_eval = config.max_depth_eval
+
+ if gt.shape[-2:] != pred.shape[-2:] and interpolate:
+ pred = nn.functional.interpolate(
+ pred, gt.shape[-2:], mode='bilinear', align_corners=True)
+
+ pred = pred.squeeze().cpu().numpy()
+ pred[pred < min_depth_eval] = min_depth_eval
+ pred[pred > max_depth_eval] = max_depth_eval
+ pred[np.isinf(pred)] = max_depth_eval
+ pred[np.isnan(pred)] = min_depth_eval
+
+ gt_depth = gt.squeeze().cpu().numpy()
+ valid_mask = np.logical_and(
+ gt_depth > min_depth_eval, gt_depth < max_depth_eval)
+
+ if garg_crop or eigen_crop:
+ gt_height, gt_width = gt_depth.shape
+ eval_mask = np.zeros(valid_mask.shape)
+
+ if garg_crop:
+ eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),
+ int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1
+
+ elif eigen_crop:
+ # print("-"*10, " EIGEN CROP ", "-"*10)
+ if dataset == 'kitti':
+ eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),
+ int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1
+ else:
+ # assert gt_depth.shape == (480, 640), "Error: Eigen crop is currently only valid for (480, 640) images"
+ eval_mask[45:471, 41:601] = 1
+ else:
+ eval_mask = np.ones(valid_mask.shape)
+ valid_mask = np.logical_and(valid_mask, eval_mask)
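+    # Metrics are computed only over pixels inside both the valid depth range and the evaluation crop.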
+ return compute_errors(gt_depth[valid_mask], pred[valid_mask])
+
+
+#################################### Model utils ################################################
+
+
+def parallelize(config, model, find_unused_parameters=True):
+
+ if config.gpu is not None:
+ torch.cuda.set_device(config.gpu)
+ model = model.cuda(config.gpu)
+
+ config.multigpu = False
+ if config.distributed and not config.debug_mode:
+ # Use DDP
+ config.multigpu = True
+ config.rank = config.rank * config.ngpus_per_node + config.gpu
+ dist.init_process_group(backend=config.dist_backend, init_method=config.dist_url,
+ world_size=config.world_size, rank=config.rank)
+ config.batch_size = int(config.batch_size / config.ngpus_per_node)
+ # config.batch_size = 8
+ config.workers = int(
+ (config.num_workers + config.ngpus_per_node - 1) / config.ngpus_per_node)
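+        # Each DDP process receives an equal share of the global batch size and of the data-loading workers.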
+ print("Device", config.gpu, "Rank", config.rank, "batch size",
+ config.batch_size, "Workers", config.workers)
+ torch.cuda.set_device(config.gpu)
+ model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
+ model = model.cuda(config.gpu)
+ model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[config.gpu], output_device=config.gpu,
+ find_unused_parameters=find_unused_parameters)
+
+ elif config.gpu is None:
+ # Use DP
+ config.multigpu = True
+ model = model.cuda()
+ model = torch.nn.DataParallel(model)
+
+ return model
+
+
+#################################################################################################
+
+
+#####################################################################################################
+
+
+class colors:
+ '''Colors class:
+ Reset all colors with colors.reset
+ Two subclasses fg for foreground and bg for background.
+ Use as colors.subclass.colorname.
+ i.e. colors.fg.red or colors.bg.green
+ Also, the generic bold, disable, underline, reverse, strikethrough,
+ and invisible work with the main class
+ i.e. colors.bold
+ '''
+ reset = '\033[0m'
+ bold = '\033[01m'
+ disable = '\033[02m'
+ underline = '\033[04m'
+ reverse = '\033[07m'
+ strikethrough = '\033[09m'
+ invisible = '\033[08m'
+
+ class fg:
+ black = '\033[30m'
+ red = '\033[31m'
+ green = '\033[32m'
+ orange = '\033[33m'
+ blue = '\033[34m'
+ purple = '\033[35m'
+ cyan = '\033[36m'
+ lightgrey = '\033[37m'
+ darkgrey = '\033[90m'
+ lightred = '\033[91m'
+ lightgreen = '\033[92m'
+ yellow = '\033[93m'
+ lightblue = '\033[94m'
+ pink = '\033[95m'
+ lightcyan = '\033[96m'
+
+ class bg:
+ black = '\033[40m'
+ red = '\033[41m'
+ green = '\033[42m'
+ orange = '\033[43m'
+ blue = '\033[44m'
+ purple = '\033[45m'
+ cyan = '\033[46m'
+ lightgrey = '\033[47m'
+
+
+def printc(text, color):
+ print(f"{color}{text}{colors.reset}")
+
+############################################
+
+def get_image_from_url(url):
+ response = requests.get(url)
+ img = Image.open(BytesIO(response.content)).convert("RGB")
+ return img
+
+def url_to_torch(url, size=(384, 384)):
+ img = get_image_from_url(url)
+    img = img.resize(size, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
+ img = torch.from_numpy(np.asarray(img)).float()
+ img = img.permute(2, 0, 1)
+ img.div_(255)
+ return img
+
+def pil_to_batched_tensor(img):
+ return ToTensor()(img).unsqueeze(0)
+
+def save_raw_16bit(depth, fpath="raw.png"):
+ if isinstance(depth, torch.Tensor):
+ depth = depth.squeeze().cpu().numpy()
+
+ assert isinstance(depth, np.ndarray), "Depth must be a torch tensor or numpy array"
+ assert depth.ndim == 2, "Depth must be 2D"
+ depth = depth * 256 # scale for 16-bit png
+ depth = depth.astype(np.uint16)
+ depth = Image.fromarray(depth)
+ depth.save(fpath)
+ print("Saved raw depth to", fpath)
\ No newline at end of file